Posted to common-commits@hadoop.apache.org by su...@apache.org on 2018/10/05 12:56:25 UTC

[30/51] [partial] hadoop git commit: HADOOP-15791. Remove Ozone related sources from the 3.2 branch. Contributed by Elek, Marton.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
deleted file mode 100644
index d120a5c..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.states.endpoint;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
deleted file mode 100644
index 7755345..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import static org.junit.Assert.*;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.util.Properties;
-import java.util.UUID;
-
-/**
- * Unit tests for {@link HddsVolume}.
- */
-public class TestHddsVolume {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  private static final String DATANODE_UUID = UUID.randomUUID().toString();
-  private static final String CLUSTER_ID = UUID.randomUUID().toString();
-  private static final Configuration CONF = new Configuration();
-  private static final String DU_CACHE_FILE = "scmUsed";
-
-  private File rootDir;
-  private HddsVolume volume;
-  private File versionFile;
-
-  @Before
-  public void setup() throws Exception {
-    rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR);
-    volume = new HddsVolume.Builder(folder.getRoot().getPath())
-        .datanodeUuid(DATANODE_UUID)
-        .conf(CONF)
-        .build();
-    versionFile = HddsVolumeUtil.getVersionFile(rootDir);
-  }
-
-  @Test
-  public void testHddsVolumeInitialization() throws Exception {
-
-    // The initial state of HddsVolume should be "NOT_FORMATTED" when
-    // clusterID is not specified and the version file should not be written
-    // to disk.
-    assertTrue(volume.getClusterID() == null);
-    assertEquals(volume.getStorageType(), StorageType.DEFAULT);
-    assertEquals(volume.getStorageState(),
-        HddsVolume.VolumeState.NOT_FORMATTED);
-    assertFalse("Version file should not be created when clusterID is not " +
-        "known.", versionFile.exists());
-
-
-    // Format the volume with clusterID.
-    volume.format(CLUSTER_ID);
-
-    // The state of HddsVolume after formatting with clusterID should be
-    // NORMAL and the version file should exist.
-    assertTrue("Volume format should create Version file",
-        versionFile.exists());
-    assertEquals(volume.getClusterID(), CLUSTER_ID);
-    assertEquals(volume.getStorageState(), HddsVolume.VolumeState.NORMAL);
-  }
-
-  @Test
-  public void testReadPropertiesFromVersionFile() throws Exception {
-    volume.format(CLUSTER_ID);
-
-    Properties properties = DatanodeVersionFile.readFrom(versionFile);
-
-    String storageID = HddsVolumeUtil.getStorageID(properties, versionFile);
-    String clusterID = HddsVolumeUtil.getClusterID(
-        properties, versionFile, CLUSTER_ID);
-    String datanodeUuid = HddsVolumeUtil.getDatanodeUUID(
-        properties, versionFile, DATANODE_UUID);
-    long cTime = HddsVolumeUtil.getCreationTime(
-        properties, versionFile);
-    int layoutVersion = HddsVolumeUtil.getLayOutVersion(
-        properties, versionFile);
-
-    assertEquals(volume.getStorageID(), storageID);
-    assertEquals(volume.getClusterID(), clusterID);
-    assertEquals(volume.getDatanodeUuid(), datanodeUuid);
-    assertEquals(volume.getCTime(), cTime);
-    assertEquals(volume.getLayoutVersion(), layoutVersion);
-  }
-
-  @Test
-  public void testShutdown() throws Exception{
-    // Return a dummy value > 0 for scmUsage so that the scmUsed cache file
-    // is written during shutdown.
-    GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
-    volume.setScmUsageForTesting(scmUsageMock);
-    Mockito.when(scmUsageMock.getUsed()).thenReturn(Long.valueOf(100));
-
-    assertTrue("Available volume should be positive",
-        volume.getAvailable() > 0);
-
-    // Shutdown the volume.
-    volume.shutdown();
-
-    // Volume state should be "NON_EXISTENT" when volume is shutdown.
-    assertEquals(volume.getStorageState(),
-        HddsVolume.VolumeState.NON_EXISTENT);
-
-    // Volume should save scmUsed cache file once volume is shutdown
-    File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE);
-    System.out.println("scmUsedFile: " + scmUsedFile);
-    assertTrue("scmUsed cache file should be saved on shutdown",
-        scmUsedFile.exists());
-
-    try {
-      // Volume.getAvailable() should fail with NullPointerException as the
-      // usage tracker has been shut down.
-      volume.getAvailable();
-      fail("HddsVolume#shutdown test failed");
-    } catch (Exception ex){
-      assertTrue(ex instanceof NullPointerException);
-    }
-  }
-}
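
For reference, the removed TestHddsVolume exercises the HddsVolume lifecycle: a
volume built without a clusterID stays NOT_FORMATTED with no version file on
disk, format(clusterID) writes the version file and moves the volume to NORMAL,
and shutdown() persists the scmUsed cache and releases the usage tracker. A
minimal sketch of that usage, relying only on the Builder, format(), and
accessor calls visible in the diff above (the data path and IDs here are
illustrative, not taken from the commit):

    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

    public class HddsVolumeLifecycleSketch {
      public static void main(String[] args) throws Exception {
        // Build a volume rooted at a data directory (illustrative path).
        // Without a clusterID it starts NOT_FORMATTED and writes no
        // version file.
        HddsVolume volume = new HddsVolume.Builder("/data/disk1")
            .datanodeUuid(UUID.randomUUID().toString())
            .conf(new Configuration())
            .build();

        // Formatting with a clusterID persists the version file and moves
        // the volume to NORMAL.
        volume.format(UUID.randomUUID().toString());
        assert volume.getStorageState() == HddsVolume.VolumeState.NORMAL;

        // Shutdown flushes the scmUsed cache file; getAvailable() is no
        // longer usable afterwards, as the test above asserts.
        volume.shutdown();
      }
    }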

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
deleted file mode 100644
index 80594d35..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * Tests {@link RoundRobinVolumeChoosingPolicy}.
- */
-public class TestRoundRobinVolumeChoosingPolicy {
-
-  private RoundRobinVolumeChoosingPolicy policy;
-  private List<HddsVolume> volumes;
-
-  private final String baseDir = MiniDFSCluster.getBaseDirectory();
-  private final String volume1 = baseDir + "disk1";
-  private final String volume2 = baseDir + "disk2";
-  private static final String DUMMY_IP_ADDR = "0.0.0.0";
-
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    String dataDirKey = volume1 + "," + volume2;
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
-    policy = ReflectionUtils.newInstance(
-        RoundRobinVolumeChoosingPolicy.class, null);
-    VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-    volumes = volumeSet.getVolumesList();
-  }
-
-  @Test
-  public void testRRVolumeChoosingPolicy() throws Exception {
-    HddsVolume hddsVolume1 = volumes.get(0);
-    HddsVolume hddsVolume2 = volumes.get(1);
-
-    // Set available space in volume1 to 100L
-    setAvailableSpace(hddsVolume1, 100L);
-
-    // Set available space in volume2 to 200L
-    setAvailableSpace(hddsVolume2, 200L);
-
-    Assert.assertEquals(100L, hddsVolume1.getAvailable());
-    Assert.assertEquals(200L, hddsVolume2.getAvailable());
-
-    // Test two rounds of round-robin choosing
-    Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0));
-
-    // The first volume has only 100L of space, so the policy should
-    // choose the second one when we ask for more than that.
-    Assert.assertEquals(hddsVolume2,
-        policy.chooseVolume(volumes, 150));
-
-    // Fail if no volume has enough space available
-    try {
-      policy.chooseVolume(volumes, Long.MAX_VALUE);
-      Assert.fail();
-    } catch (IOException e) {
-      // Passed.
-    }
-  }
-
-  @Test
-  public void testRRPolicyExceptionMessage() throws Exception {
-    HddsVolume hddsVolume1 = volumes.get(0);
-    HddsVolume hddsVolume2 = volumes.get(1);
-
-    // Set available space in volume1 to 100L
-    setAvailableSpace(hddsVolume1, 100L);
-
-    // Set available space in volume2 to 200L
-    setAvailableSpace(hddsVolume2, 200L);
-
-    int blockSize = 300;
-    try {
-      policy.chooseVolume(volumes, blockSize);
-      Assert.fail("expected to throw DiskOutOfSpaceException");
-    } catch(DiskOutOfSpaceException e) {
-      Assert.assertEquals("Not returnig the expected message",
-          "Out of space: The volume with the most available space (=" + 200
-              + " B) is less than the container size (=" + blockSize + " B).",
-          e.getMessage());
-    }
-  }
-
-  private void setAvailableSpace(HddsVolume hddsVolume, long availableSpace)
-      throws IOException {
-    GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
-    hddsVolume.setScmUsageForTesting(scmUsageMock);
-    // Set used space to capacity - availableSpace so that
-    // getAvailable() returns the specified availableSpace.
-    Mockito.when(scmUsageMock.getUsed()).thenReturn(
-        (hddsVolume.getCapacity() - availableSpace));
-  }
-}
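
The removed policy test above pins down the chooseVolume contract: volumes are
returned in round-robin order, a volume without enough free space is skipped,
and DiskOutOfSpaceException is thrown when even the most-free volume cannot
fit the request. A small sketch of a caller under those assumptions, using
only the chooseVolume call shown in the diff (the helper name is
hypothetical):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
    import org.apache.hadoop.ozone.container.common.volume
        .RoundRobinVolumeChoosingPolicy;

    public class ChooseVolumeSketch {
      // Hypothetical helper: pick a volume for 'size' bytes, or null when
      // every volume is out of space.
      static HddsVolume pickOrNull(RoundRobinVolumeChoosingPolicy policy,
          List<HddsVolume> volumes, long size) {
        try {
          // Successive calls rotate through the volumes that can hold
          // 'size' bytes, as the round-robin assertions above show.
          return policy.chooseVolume(volumes, size);
        } catch (IOException e) {
          // Covers DiskOutOfSpaceException: even the volume with the most
          // available space is smaller than the request.
          return null;
        }
      }
    }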

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
deleted file mode 100644
index fca68b1..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.volume;
-
-import java.io.IOException;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
-    .HDDS_VOLUME_DIR;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * Tests {@link VolumeSet} operations.
- */
-public class TestVolumeSet {
-
-  private OzoneConfiguration conf;
-  private VolumeSet volumeSet;
-  private final String baseDir = MiniDFSCluster.getBaseDirectory();
-  private final String volume1 = baseDir + "disk1";
-  private final String volume2 = baseDir + "disk2";
-  private final List<String> volumes = new ArrayList<>();
-
-  private static final String DUMMY_IP_ADDR = "0.0.0.0";
-
-  private void initializeVolumeSet() throws Exception {
-    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-  }
-
-  @Rule
-  public Timeout testTimeout = new Timeout(300_000);
-
-  @Before
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    String dataDirKey = volume1 + "," + volume2;
-    volumes.add(volume1);
-    volumes.add(volume2);
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
-    initializeVolumeSet();
-  }
-
-  @After
-  public void shutdown() throws IOException {
-    // Delete the hdds volume root dir
-    List<HddsVolume> hddsVolumes = new ArrayList<>();
-    hddsVolumes.addAll(volumeSet.getVolumesList());
-    hddsVolumes.addAll(volumeSet.getFailedVolumesList());
-
-    for (HddsVolume volume : hddsVolumes) {
-      FileUtils.deleteDirectory(volume.getHddsRootDir());
-    }
-    volumeSet.shutdown();
-
-    FileUtil.fullyDelete(new File(baseDir));
-  }
-
-  private boolean checkVolumeExistsInVolumeSet(String volume) {
-    for (HddsVolume hddsVolume : volumeSet.getVolumesList()) {
-      if (hddsVolume.getHddsRootDir().getPath().equals(
-          HddsVolumeUtil.getHddsRoot(volume))) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @Test
-  public void testVolumeSetInitialization() throws Exception {
-
-    List<HddsVolume> volumesList = volumeSet.getVolumesList();
-
-    // VolumeSet initialization should add volume1 and volume2 to VolumeSet
-    assertEquals("VolumeSet intialization is incorrect",
-        volumesList.size(), volumes.size());
-    assertTrue("VolumeSet not initailized correctly",
-        checkVolumeExistsInVolumeSet(volume1));
-    assertTrue("VolumeSet not initailized correctly",
-        checkVolumeExistsInVolumeSet(volume2));
-  }
-
-  @Test
-  public void testAddVolume() {
-
-    assertEquals(2, volumeSet.getVolumesList().size());
-
-    // Add a volume to VolumeSet
-    String volume3 = baseDir + "disk3";
-    boolean success = volumeSet.addVolume(volume3);
-
-    assertTrue(success);
-    assertEquals(3, volumeSet.getVolumesList().size());
-    assertTrue("AddVolume did not add requested volume to VolumeSet",
-        checkVolumeExistsInVolumeSet(volume3));
-  }
-
-  @Test
-  public void testFailVolume() throws Exception {
-
-    //Fail a volume
-    volumeSet.failVolume(volume1);
-
-    // Failed volume should not show up in the volumeList
-    assertEquals(1, volumeSet.getVolumesList().size());
-
-    // Failed volume should be added to FailedVolumeList
-    assertEquals("Failed volume not present in FailedVolumeMap",
-        1, volumeSet.getFailedVolumesList().size());
-    assertEquals("Failed Volume list did not match",
-        HddsVolumeUtil.getHddsRoot(volume1),
-        volumeSet.getFailedVolumesList().get(0).getHddsRootDir().getPath());
-    assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed());
-
-    // Failed volume should not exist in VolumeMap
-    Path volume1Path = new Path(volume1);
-    assertFalse(volumeSet.getVolumeMap().containsKey(volume1Path));
-  }
-
-  @Test
-  public void testRemoveVolume() throws Exception {
-
-    assertEquals(2, volumeSet.getVolumesList().size());
-
-    // Remove a volume from VolumeSet
-    volumeSet.removeVolume(volume1);
-    assertEquals(1, volumeSet.getVolumesList().size());
-
-    // Attempting to remove a volume which does not exist in VolumeSet should
-    // log a warning.
-    LogCapturer logs = LogCapturer.captureLogs(
-        LogFactory.getLog(VolumeSet.class));
-    volumeSet.removeVolume(volume1);
-    assertEquals(1, volumeSet.getVolumesList().size());
-    String expectedLogMessage = "Volume : " +
-        HddsVolumeUtil.getHddsRoot(volume1) + " does not exist in VolumeSet";
-    assertTrue("Log output does not contain expected log message: "
-        + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
-  }
-
-  @Test
-  public void testVolumeInInconsistentState() throws Exception {
-    assertEquals(2, volumeSet.getVolumesList().size());
-
-    // Add a volume to VolumeSet
-    String volume3 = baseDir + "disk3";
-
-    // Create the root volume dir and create a sub-directory within it.
-    File newVolume = new File(volume3, HDDS_VOLUME_DIR);
-    System.out.println("new volume root: " + newVolume);
-    newVolume.mkdirs();
-    assertTrue("Failed to create new volume root", newVolume.exists());
-    File dataDir = new File(newVolume, "chunks");
-    dataDir.mkdirs();
-    assertTrue(dataDir.exists());
-
-    // The new volume is in an inconsistent state as the root dir is
-    // non-empty but the version file does not exist. Add Volume should
-    // return false.
-    boolean success = volumeSet.addVolume(volume3);
-
-    assertFalse(success);
-    assertEquals(2, volumeSet.getVolumesList().size());
-    assertTrue("AddVolume should fail for an inconsistent volume",
-        !checkVolumeExistsInVolumeSet(volume3));
-
-    // Delete volume3
-    File volume = new File(volume3);
-    FileUtils.deleteDirectory(volume);
-  }
-
-  @Test
-  public void testShutdown() throws Exception {
-    List<HddsVolume> volumesList = volumeSet.getVolumesList();
-
-    volumeSet.shutdown();
-
-    // Verify that the volumes are shutdown and the volumeUsage is set to null.
-    for (HddsVolume volume : volumesList) {
-      Assert.assertNull(volume.getVolumeInfo().getUsageForTesting());
-      try {
-        // getAvailable() should throw null pointer exception as usage is null.
-        volume.getAvailable();
-        fail("Volume shutdown failed.");
-      } catch (NullPointerException ex) {
-        // Do Nothing. Exception is expected.
-      }
-    }
-  }
-
-  @Test
-  public void testFailVolumes() throws  Exception{
-    VolumeSet volSet = null;
-    File readOnlyVolumePath = new File(baseDir);
-    //Set to readonly, so that this volume will be failed
-    readOnlyVolumePath.setReadOnly();
-    File volumePath = GenericTestUtils.getRandomizedTestDir();
-    OzoneConfiguration ozoneConfig = new OzoneConfiguration();
-    ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath()
-        + "," + volumePath.getAbsolutePath());
-    volSet = new VolumeSet(UUID.randomUUID().toString(), ozoneConfig);
-    assertTrue(volSet.getFailedVolumesList().size() == 1);
-    assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0)
-        .getHddsRootDir());
-
-    //Set back to writable
-    try {
-      readOnlyVolumePath.setWritable(true);
-    } finally {
-      FileUtil.fullyDelete(volumePath);
-    }
-
-  }
-}
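
For context, the removed TestVolumeSet covers the VolumeSet surface: volumes
come from the configured data directories, addVolume() rejects a volume in an
inconsistent state (non-empty root without a version file), failVolume() moves
a volume to the failed list, and removeVolume() on an unknown volume only logs
a warning. A minimal sketch of that surface, with illustrative directory
paths:

    import java.util.UUID;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
    import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;

    public class VolumeSetSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Illustrative data directories; candidates that fail (e.g. a
        // read-only root) end up in getFailedVolumesList().
        conf.set(HDDS_DATANODE_DIR_KEY, "/data/disk1,/data/disk2");
        VolumeSet volumeSet =
            new VolumeSet(UUID.randomUUID().toString(), conf);

        // Returns false if /data/disk3 is in an inconsistent state.
        boolean added = volumeSet.addVolume("/data/disk3");

        // Moves the volume from getVolumesList() to getFailedVolumesList().
        volumeSet.failVolume("/data/disk1");

        // Releases the per-volume usage trackers.
        volumeSet.shutdown();
      }
    }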

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
deleted file mode 100644
index 6fe6d81..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.UUID;
-
-import static org.junit.Assert.*;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * This class is used to test key related operations on the container.
- */
-public class TestBlockManagerImpl {
-
-  private OzoneConfiguration config;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-  private BlockData blockData;
-  private BlockManagerImpl blockManager;
-  private BlockID blockID;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-
-  @Before
-  public void setUp() throws Exception {
-    config = new OzoneConfiguration();
-
-    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
-        .getAbsolutePath()).conf(config).datanodeUuid(UUID.randomUUID()
-        .toString()).build();
-
-    volumeSet = mock(VolumeSet.class);
-
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenReturn(hddsVolume);
-
-    keyValueContainerData = new KeyValueContainerData(1L,
-        (long) StorageUnit.GB.toBytes(5));
-
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, config);
-
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-    // Creating BlockData
-    blockID = new BlockID(1L, 1L);
-    blockData = new BlockData(blockID);
-    blockData.addMetadata("VOLUME", "ozone");
-    blockData.addMetadata("OWNER", "hdfs");
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-        .getLocalID(), 0), 0, 1024);
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-
-    // Create KeyValueContainerManager
-    blockManager = new BlockManagerImpl(config);
-
-  }
-
-  @Test
-  public void testPutAndGetBlock() throws Exception {
-    assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
-    //Put Block
-    blockManager.putBlock(keyValueContainer, blockData);
-
-    assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
-    //Get Block
-    BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer,
-        blockData.getBlockID());
-
-    assertEquals(blockData.getContainerID(), fromGetBlockData.getContainerID());
-    assertEquals(blockData.getLocalID(), fromGetBlockData.getLocalID());
-    assertEquals(blockData.getChunks().size(),
-        fromGetBlockData.getChunks().size());
-    assertEquals(blockData.getMetadata().size(), fromGetBlockData.getMetadata()
-        .size());
-
-  }
-
-
-  @Test
-  public void testDeleteBlock() throws Exception {
-    try {
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Put Block
-      blockManager.putBlock(keyValueContainer, blockData);
-      assertEquals(1,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Delete Block
-      blockManager.deleteBlock(keyValueContainer, blockID);
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      try {
-        blockManager.getBlock(keyValueContainer, blockID);
-        fail("testDeleteBlock");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains(
-            "Unable to find the block", ex);
-      }
-    } catch (IOException ex) {
-      fail("testDeleteBlock failed");
-    }
-  }
-
-  @Test
-  public void testListBlock() throws Exception {
-    try {
-      blockManager.putBlock(keyValueContainer, blockData);
-      List<BlockData> listBlockData = blockManager.listBlock(
-          keyValueContainer, 1, 10);
-      assertNotNull(listBlockData);
-      assertTrue(listBlockData.size() == 1);
-
-      for (long i = 2; i <= 10; i++) {
-        blockID = new BlockID(1L, i);
-        blockData = new BlockData(blockID);
-        blockData.addMetadata("VOLUME", "ozone");
-        blockData.addMetadata("OWNER", "hdfs");
-        List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-            .getLocalID(), 0), 0, 1024);
-        chunkList.add(info.getProtoBufMessage());
-        blockData.setChunks(chunkList);
-        blockManager.putBlock(keyValueContainer, blockData);
-      }
-
-      listBlockData = blockManager.listBlock(
-          keyValueContainer, 1, 10);
-      assertNotNull(listBlockData);
-      assertTrue(listBlockData.size() == 10);
-
-    } catch (IOException ex) {
-      fail("testListBlock failed");
-    }
-  }
-
-  @Test
-  public void testGetNoSuchBlock() throws Exception {
-    try {
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Put Block
-      blockManager.putBlock(keyValueContainer, blockData);
-      assertEquals(1,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Delete Block
-      blockManager.deleteBlock(keyValueContainer, blockID);
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      try {
-        //Since the block has been deleted, we should not be able to find it
-        blockManager.getBlock(keyValueContainer, blockID);
-        fail("testGetNoSuchBlock failed");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains(
-            "Unable to find the block", ex);
-        assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
-      }
-    } catch (IOException ex) {
-      fail("testGetNoSuchBlock failed");
-    }
-  }
-}
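
The removed block-manager test above is, in effect, a put/get/delete
walkthrough of BlockManagerImpl. A compact sketch of that flow, built only
from calls visible in the diff; container creation is elided and the method
name is hypothetical:

    import java.util.LinkedList;
    import java.util.List;
    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.common.helpers.BlockData;
    import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
    import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;

    public class BlockManagerSketch {
      static void roundTrip(KeyValueContainer container) throws Exception {
        BlockManagerImpl blockManager =
            new BlockManagerImpl(new OzoneConfiguration());

        // Build block metadata with one chunk entry, as the test setup does.
        BlockID blockID = new BlockID(1L, 1L);
        BlockData blockData = new BlockData(blockID);
        List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
        ChunkInfo info = new ChunkInfo(
            String.format("%d.data.%d", blockID.getLocalID(), 0), 0, 1024);
        chunkList.add(info.getProtoBufMessage());
        blockData.setChunks(chunkList);

        // Put, then read back by BlockID.
        blockManager.putBlock(container, blockData);
        BlockData fetched = blockManager.getBlock(container, blockID);

        // After deletion, getBlock throws StorageContainerException with
        // result NO_SUCH_BLOCK, as the test above asserts.
        blockManager.deleteBlock(container, blockID);
      }
    }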

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
deleted file mode 100644
index 3c0876b..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.util.Arrays;
-import java.util.UUID;
-
-import static org.junit.Assert.*;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * This class is used to test ChunkManager operations.
- */
-public class TestChunkManagerImpl {
-
-  private OzoneConfiguration config;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private HddsVolume hddsVolume;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-  private BlockID blockID;
-  private ChunkManagerImpl chunkManager;
-  private ChunkInfo chunkInfo;
-  private byte[] data;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  public void setUp() throws Exception {
-    config = new OzoneConfiguration();
-    hddsVolume = new HddsVolume.Builder(folder.getRoot()
-        .getAbsolutePath()).conf(config).datanodeUuid(UUID.randomUUID()
-        .toString()).build();
-
-    volumeSet = mock(VolumeSet.class);
-
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenReturn(hddsVolume);
-
-    keyValueContainerData = new KeyValueContainerData(1L,
-        (long) StorageUnit.GB.toBytes(5));
-
-    keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
-
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-    data = "testing write chunks".getBytes();
-    // Creating BlockData
-    blockID = new BlockID(1L, 1L);
-    chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-        .getLocalID(), 0), 0, data.length);
-
-    // Create a ChunkManager object.
-    chunkManager = new ChunkManagerImpl();
-
-  }
-
-  @Test
-  public void testWriteChunkStageWriteAndCommit() throws Exception {
-    // As the container is created in setup, these paths should exist.
-    assertTrue(keyValueContainerData.getChunksPath() != null);
-    File chunksPath = new File(keyValueContainerData.getChunksPath());
-    assertTrue(chunksPath.exists());
-    // Initially chunks folder should be empty.
-    assertTrue(chunksPath.listFiles().length == 0);
-
-    // As no chunks have been written to the volume, writeBytes should be 0.
-    checkWriteIOStats(0, 0);
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        ContainerProtos.Stage.WRITE_DATA);
-    // Now a chunk file is being written with Stage WRITE_DATA, so it should
-    // create a temporary chunk file.
-    assertTrue(chunksPath.listFiles().length == 1);
-
-    File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo);
-    File tempChunkFile = new File(chunkFile.getParent(),
-        chunkFile.getName() +
-            OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER +
-            OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX);
-
-    // As chunk write stage is WRITE_DATA, temp chunk file will be created.
-    assertTrue(tempChunkFile.exists());
-
-    checkWriteIOStats(data.length, 1);
-
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        ContainerProtos.Stage.COMMIT_DATA);
-
-    checkWriteIOStats(data.length, 1);
-
-    // Old temp file should have been renamed to chunk file.
-    assertTrue(chunksPath.listFiles().length == 1);
-
-    // As commit happened, chunk file should exist.
-    assertTrue(chunkFile.exists());
-    assertFalse(tempChunkFile.exists());
-
-  }
-
-  @Test
-  public void testWriteChunkIncorrectLength() throws Exception {
-    try {
-      long randomLength = 200L;
-      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), 0), 0, randomLength);
-      chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-          ContainerProtos.Stage.WRITE_DATA);
-      fail("testWriteChunkIncorrectLength failed");
-    } catch (StorageContainerException ex) {
-      // As we got an exception, writeBytes should be 0.
-      checkWriteIOStats(0, 0);
-      GenericTestUtils.assertExceptionContains("data array does not match " +
-          "the length ", ex);
-      assertEquals(ContainerProtos.Result.INVALID_WRITE_SIZE, ex.getResult());
-    }
-  }
-
-  @Test
-  public void testWriteChunkStageCombinedData() throws Exception {
-    // As the container is created in setup, these paths should exist.
-    assertTrue(keyValueContainerData.getChunksPath() != null);
-    File chunksPath = new File(keyValueContainerData.getChunksPath());
-    assertTrue(chunksPath.exists());
-    // Initially chunks folder should be empty.
-    assertTrue(chunksPath.listFiles().length == 0);
-    checkWriteIOStats(0, 0);
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        ContainerProtos.Stage.COMBINED);
-    // Now a chunk file is being written with Stage COMBINED, so it should
-    // create a chunk file.
-    assertTrue(chunksPath.listFiles().length == 1);
-    File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo);
-    assertTrue(chunkFile.exists());
-    checkWriteIOStats(data.length, 1);
-  }
-
-  @Test
-  public void testReadChunk() throws Exception {
-    checkWriteIOStats(0, 0);
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        ContainerProtos.Stage.COMBINED);
-    checkWriteIOStats(data.length, 1);
-    checkReadIOStats(0, 0);
-    byte[] expectedData = chunkManager.readChunk(keyValueContainer, blockID,
-        chunkInfo);
-    assertEquals(expectedData.length, data.length);
-    assertTrue(Arrays.equals(expectedData, data));
-    checkReadIOStats(data.length, 1);
-  }
-
-  @Test
-  public void testDeleteChunk() throws Exception {
-    File chunksPath = new File(keyValueContainerData.getChunksPath());
-    chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-        ContainerProtos.Stage.COMBINED);
-    assertTrue(chunksPath.listFiles().length == 1);
-    chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo);
-    assertTrue(chunksPath.listFiles().length == 0);
-  }
-
-  @Test
-  public void testDeleteChunkUnsupportedRequest() throws Exception {
-    try {
-      chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-          ContainerProtos.Stage.COMBINED);
-      long randomLength = 200L;
-      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), 0), 0, randomLength);
-      chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo);
-      fail("testDeleteChunkUnsupportedRequest");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Not Supported Operation.", ex);
-      assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex.getResult());
-    }
-  }
-
-  @Test
-  public void testWriteChunkChecksumMismatch() throws Exception {
-    try {
-      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), 0), 0, data.length);
-      //Setting checksum to some value.
-      chunkInfo.setChecksum("some garbage");
-      chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-          ContainerProtos.Stage.COMBINED);
-      fail("testWriteChunkChecksumMismatch failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Checksum mismatch.", ex);
-      assertEquals(ContainerProtos.Result.CHECKSUM_MISMATCH, ex.getResult());
-    }
-  }
-
-  @Test
-  public void testReadChunkFileNotExists() throws Exception {
-    try {
-      // Try to read a chunk whose chunk file does not exist.
-      byte[] expectedData = chunkManager.readChunk(keyValueContainer, blockID,
-          chunkInfo);
-      fail("testReadChunkFileNotExists failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Unable to find the chunk " +
-          "file.", ex);
-      assertEquals(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, ex.getResult());
-    }
-  }
-
-  @Test
-  public void testWriteAndReadChunkMultipleTimes() throws Exception {
-    for (int i=0; i<100; i++) {
-      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), i), 0, data.length);
-      chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
-          ContainerProtos.Stage.COMBINED);
-    }
-    checkWriteIOStats(data.length*100, 100);
-    assertTrue(hddsVolume.getVolumeIOStats().getWriteTime() > 0);
-
-    for (int i=0; i<100; i++) {
-      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), i), 0, data.length);
-      chunkManager.readChunk(keyValueContainer, blockID, chunkInfo);
-    }
-    checkReadIOStats(data.length*100, 100);
-    assertTrue(hddsVolume.getVolumeIOStats().getReadTime() > 0);
-  }
-
-
-  /**
-   * Check WriteIO stats.
-   * @param length expected number of bytes written
-   * @param opCount expected number of write operations
-   */
-  private void checkWriteIOStats(long length, long opCount) {
-    VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats();
-    assertEquals(length, volumeIOStats.getWriteBytes());
-    assertEquals(opCount, volumeIOStats.getWriteOpCount());
-  }
-
-  /**
-   * Check ReadIO stats.
-   * @param length expected number of bytes read
-   * @param opCount expected number of read operations
-   */
-  private void checkReadIOStats(long length, long opCount) {
-    VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats();
-    assertEquals(length, volumeIOStats.getReadBytes());
-    assertEquals(opCount, volumeIOStats.getReadOpCount());
-  }
-}
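
The removed chunk-manager test above documents a two-phase write protocol:
WRITE_DATA lands the bytes in a temporary chunk file, COMMIT_DATA renames it
to the final chunk file, and COMBINED performs both steps in one call. A
minimal sketch of the committed write path under those assumptions, with
calls taken from the diff:

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
    import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;

    public class ChunkWriteSketch {
      static void writeCommitted(KeyValueContainer container, byte[] data)
          throws Exception {
        ChunkManagerImpl chunkManager = new ChunkManagerImpl();
        BlockID blockID = new BlockID(1L, 1L);
        ChunkInfo chunkInfo = new ChunkInfo(
            String.format("%d.data.%d", blockID.getLocalID(), 0),
            0, data.length);

        // Stage 1: bytes go to a temporary chunk file (the final name plus
        // a temporary suffix, as the test above checks).
        chunkManager.writeChunk(container, blockID, chunkInfo, data,
            ContainerProtos.Stage.WRITE_DATA);
        // Stage 2: the temporary file is renamed to the final chunk file.
        chunkManager.writeChunk(container, blockID, chunkInfo, data,
            ContainerProtos.Stage.COMMIT_DATA);

        // Reading returns the committed bytes.
        byte[] readBack =
            chunkManager.readChunk(container, blockID, chunkInfo);
      }
    }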

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
deleted file mode 100644
index fbc5ad0..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataStore;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_ROCKSDB;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This class is used to test KeyValue container block iterator.
- */
-@RunWith(Parameterized.class)
-public class TestKeyValueBlockIterator {
-
-  private KeyValueContainer container;
-  private KeyValueContainerData containerData;
-  private VolumeSet volumeSet;
-  private Configuration conf;
-  private File testRoot;
-
-  private final String storeImpl;
-
-  public TestKeyValueBlockIterator(String metadataImpl) {
-    this.storeImpl = metadataImpl;
-  }
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        {OZONE_METADATA_STORE_IMPL_LEVELDB},
-        {OZONE_METADATA_STORE_IMPL_ROCKSDB}});
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    testRoot = GenericTestUtils.getRandomizedTestDir();
-    conf = new OzoneConfiguration();
-    conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
-    conf.set(OZONE_METADATA_STORE_IMPL, storeImpl);
-    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-  }
-
-
-  @After
-  public void tearDown() {
-    volumeSet.shutdown();
-    FileUtil.fullyDelete(testRoot);
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception {
-
-    long containerID = 100L;
-    int deletedBlocks = 5;
-    int normalBlocks = 5;
-    createContainerWithBlocks(containerID, normalBlocks, deletedBlocks);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath));
-
-    int counter = 0;
-    while(keyValueBlockIterator.hasNext()) {
-      BlockData blockData = keyValueBlockIterator.nextBlock();
-      assertEquals(blockData.getLocalID(), counter++);
-    }
-
-    assertFalse(keyValueBlockIterator.hasNext());
-
-    keyValueBlockIterator.seekToFirst();
-    counter = 0;
-    while(keyValueBlockIterator.hasNext()) {
-      BlockData blockData = keyValueBlockIterator.nextBlock();
-      assertEquals(blockData.getLocalID(), counter++);
-    }
-    assertFalse(keyValueBlockIterator.hasNext());
-
-    try {
-      keyValueBlockIterator.nextBlock();
-    } catch (NoSuchElementException ex) {
-      GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
-          "for ContainerID " + containerID, ex);
-    }
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithNextBlock() throws Exception {
-    long containerID = 101L;
-    createContainerWithBlocks(containerID, 2, 0);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath));
-    long blockID = 0L;
-    assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
-    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
-
-    try {
-      keyValueBlockIterator.nextBlock();
-    } catch (NoSuchElementException ex) {
-      GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
-          "for ContainerID " + containerID, ex);
-    }
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithHasNext() throws Exception {
-    long containerID = 102L;
-    createContainerWithBlocks(containerID, 2, 0);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerID, new File(containerPath));
-    long blockID = 0L;
-
-    // Calling hasNext() multiple times should not advance the iterator.
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
-
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
-
-    keyValueBlockIterator.seekToLast();
-    assertTrue(keyValueBlockIterator.hasNext());
-    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
-
-    keyValueBlockIterator.seekToFirst();
-    blockID = 0L;
-    assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
-    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
-
-    try {
-      keyValueBlockIterator.nextBlock();
-    } catch (NoSuchElementException ex) {
-      GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
-          "for ContainerID " + containerID, ex);
-    }
-
-
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithFilter() throws Exception {
-    long containerId = 103L;
-    int deletedBlocks = 5;
-    int normalBlocks = 5;
-    createContainerWithBlocks(containerId, normalBlocks, deletedBlocks);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerId, new File(containerPath), MetadataKeyFilters
-        .getDeletingKeyFilter());
-
-    int counter = 5;
-    while(keyValueBlockIterator.hasNext()) {
-      BlockData blockData = keyValueBlockIterator.nextBlock();
-      assertEquals(blockData.getLocalID(), counter++);
-    }
-  }
-
-  @Test
-  public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws
-      Exception {
-    long containerId = 104L;
-    createContainerWithBlocks(containerId, 0, 5);
-    String containerPath = new File(containerData.getMetadataPath())
-        .getParent();
-    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
-        containerId, new File(containerPath));
-    // As all blocks are marked deleted, none of them match the normal
-    // key filter.
-    assertFalse(keyValueBlockIterator.hasNext());
-  }
-
-  /**
-   * Creates a container with the specified number of normal and deleted
-   * blocks. Normal blocks are inserted first, followed by blocks marked
-   * with the deleting key prefix.
-   * @param containerId
-   * @param normalBlocks
-   * @param deletedBlocks
-   * @throws Exception
-   */
-  private void createContainerWithBlocks(long containerId, int
-      normalBlocks, int deletedBlocks) throws
-      Exception {
-    containerData = new KeyValueContainerData(containerId,
-        (long) StorageUnit.GB.toBytes(1));
-    container = new KeyValueContainer(containerData, conf);
-    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
-        .randomUUID().toString());
-    MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
-
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
-    chunkList.add(info.getProtoBufMessage());
-
-    for (int i=0; i<normalBlocks; i++) {
-      BlockID blockID = new BlockID(containerId, i);
-      BlockData blockData = new BlockData(blockID);
-      blockData.setChunks(chunkList);
-      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
-          .getProtoBufMessage().toByteArray());
-    }
-
-    for (int i = normalBlocks; i < normalBlocks + deletedBlocks; i++) {
-      BlockID blockID = new BlockID(containerId, i);
-      BlockData blockData = new BlockData(blockID);
-      blockData.setChunks(chunkList);
-      metadataStore.put(DFSUtil.string2Bytes(OzoneConsts
-          .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData
-          .getProtoBufMessage().toByteArray());
-    }
-  }
-
-}
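
The removed iterator test above fixes the KeyValueBlockIterator contract: the
default filter returns only normal blocks, repeated hasNext() calls do not
advance the cursor, seekToFirst()/seekToLast() reposition it, and nextBlock()
past the end raises NoSuchElementException. A short sketch of the iteration
loop, using the constructors from the diff (containerID and directory are
illustrative):

    import java.io.File;
    import org.apache.hadoop.ozone.container.common.helpers.BlockData;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
    import org.apache.hadoop.utils.MetadataKeyFilters;

    public class BlockIteratorSketch {
      static void walk(long containerID, File containerDir) throws Exception {
        // Default filter: only normal (non-deleting) blocks are returned.
        KeyValueBlockIterator it =
            new KeyValueBlockIterator(containerID, containerDir);
        while (it.hasNext()) {
          BlockData block = it.nextBlock();
          // ... process block ...
        }
        it.seekToFirst(); // rewind to iterate again

        // Alternate filter: visit only blocks marked for deletion.
        KeyValueBlockIterator deleting = new KeyValueBlockIterator(
            containerID, containerDir,
            MetadataKeyFilters.getDeletingKeyFilter());
      }
    }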

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
deleted file mode 100644
index bf6b8b0..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ /dev/null
@@ -1,374 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerLifeCycleState;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.utils.MetadataStore;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import org.mockito.Mockito;
-
-import java.io.File;
-
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.List;
-import java.util.LinkedList;
-import java.util.UUID;
-
-import static org.apache.ratis.util.Preconditions.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * Class to test KeyValue Container operations.
- */
-public class TestKeyValueContainer {
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-
-  private OzoneConfiguration conf;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
-        .getAbsolutePath()).conf(conf).datanodeUuid(UUID.randomUUID()
-        .toString()).build();
-
-    volumeSet = mock(VolumeSet.class);
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenReturn(hddsVolume);
-
-    keyValueContainerData = new KeyValueContainerData(1L,
-        (long) StorageUnit.GB.toBytes(5));
-
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, conf);
-
-  }
-
-  @Test
-  public void testBlockIterator() throws Exception {
-    keyValueContainerData = new KeyValueContainerData(100L,
-        (long) StorageUnit.GB.toBytes(1));
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, conf);
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    KeyValueBlockIterator blockIterator = keyValueContainer.blockIterator();
-    // As no blocks have been created, hasNext should return false.
-    assertFalse(blockIterator.hasNext());
-    int blockCount = 10;
-    addBlocks(blockCount);
-    blockIterator = keyValueContainer.blockIterator();
-    assertTrue(blockIterator.hasNext());
-    BlockData blockData;
-    int blockCounter = 0;
-    while(blockIterator.hasNext()) {
-      blockData = blockIterator.nextBlock();
-      assertEquals(blockCounter++, blockData.getBlockID().getLocalID());
-    }
-    assertEquals(blockCount, blockCounter);
-  }
-
-  private void addBlocks(int count) throws Exception {
-    long containerId = keyValueContainerData.getContainerID();
-
-    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainer
-        .getContainerData(), conf);
-    for (int i=0; i < count; i++) {
-      // Creating BlockData
-      BlockID blockID = new BlockID(containerId, i);
-      BlockData blockData = new BlockData(blockID);
-      blockData.addMetadata("VOLUME", "ozone");
-      blockData.addMetadata("OWNER", "hdfs");
-      List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-      ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-          .getLocalID(), 0), 0, 1024);
-      chunkList.add(info.getProtoBufMessage());
-      blockData.setChunks(chunkList);
-      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
-          .getProtoBufMessage().toByteArray());
-    }
-
-  }
-
-  @SuppressWarnings("RedundantCast")
-  @Test
-  public void testCreateContainer() throws Exception {
-
-    // Create Container.
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-    keyValueContainerData = keyValueContainer
-        .getContainerData();
-
-    String containerMetaDataPath = keyValueContainerData
-        .getMetadataPath();
-    String chunksPath = keyValueContainerData.getChunksPath();
-
-    // Check whether containerMetaDataPath and chunksPath exist.
-    assertTrue(containerMetaDataPath != null);
-    assertTrue(chunksPath != null);
-    File containerMetaDataLoc = new File(containerMetaDataPath);
-
-    //Check whether container file and container db file exists or not.
-    assertTrue(keyValueContainer.getContainerFile().exists(),
-        "Container file does not exist");
-    assertTrue(keyValueContainer.getContainerDBFile().exists(), "Container " +
-        "DB does not exist");
-  }
-
-  @Test
-  public void testContainerImportExport() throws Exception {
-
-    long containerId = keyValueContainer.getContainerData().getContainerID();
-    // Create Container.
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-
-    keyValueContainerData = keyValueContainer
-        .getContainerData();
-
-    keyValueContainerData.setState(ContainerLifeCycleState.CLOSED);
-
-    int numberOfKeysToWrite = 12;
-    // write a few keys to check the key count after import
-    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf);
-    for (int i = 0; i < numberOfKeysToWrite; i++) {
-      metadataStore.put(("test" + i).getBytes(), "test".getBytes());
-    }
-    metadataStore.close();
-
-    Map<String, String> metadata = new HashMap<>();
-    metadata.put("key1", "value1");
-    keyValueContainer.update(metadata, true);
-
-    //destination path
-    File folderToExport = folder.newFile("exported.tar.gz");
-
-    TarContainerPacker packer = new TarContainerPacker();
-
-    //export the container
-    try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
-      keyValueContainer
-          .exportContainerData(fos, packer);
-    }
-
-    //delete the original one
-    keyValueContainer.delete(true);
-
-    //create a new one
-    KeyValueContainerData containerData =
-        new KeyValueContainerData(containerId, 1,
-            keyValueContainerData.getMaxSize());
-    KeyValueContainer container = new KeyValueContainer(containerData, conf);
-
-    HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
-        .getVolumesList(), 1);
-    String hddsVolumeDir = containerVolume.getHddsRootDir().toString();
-
-    container.populatePathFields(scmId, containerVolume, hddsVolumeDir);
-    try (FileInputStream fis = new FileInputStream(folderToExport)) {
-      container.importContainerData(fis, packer);
-    }
-
-    Assert.assertEquals("value1", containerData.getMetadata().get("key1"));
-    Assert.assertEquals(keyValueContainerData.getContainerDBType(),
-        containerData.getContainerDBType());
-    Assert.assertEquals(keyValueContainerData.getState(),
-        containerData.getState());
-    Assert.assertEquals(numberOfKeysToWrite,
-        containerData.getKeyCount());
-    Assert.assertEquals(keyValueContainerData.getLayOutVersion(),
-        containerData.getLayOutVersion());
-    Assert.assertEquals(keyValueContainerData.getMaxSize(),
-        containerData.getMaxSize());
-    Assert.assertEquals(keyValueContainerData.getBytesUsed(),
-        containerData.getBytesUsed());
-
-    //Can't overwrite existing container
-    try {
-      try (FileInputStream fis = new FileInputStream(folderToExport)) {
-        container.importContainerData(fis, packer);
-      }
-      fail("Container is imported twice. Previous files are overwritten");
-    } catch (Exception ex) {
-      // expected: the import must not overwrite an existing container
-    }
-
-  }
-
-  @Test
-  public void testDuplicateContainer() throws Exception {
-    try {
-      // Create Container.
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      fail("testDuplicateContainer failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("ContainerFile already " +
-          "exists", ex);
-      assertEquals(ContainerProtos.Result.CONTAINER_ALREADY_EXISTS, ex
-          .getResult());
-    }
-  }
-
-  @Test
-  public void testDiskFullExceptionCreateContainer() throws Exception {
-
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenThrow(DiskChecker.DiskOutOfSpaceException.class);
-    try {
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      fail("testDiskFullExceptionCreateContainer failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("disk out of space",
-          ex);
-      assertEquals(ContainerProtos.Result.DISK_OUT_OF_SPACE, ex.getResult());
-    }
-  }
-
-  @Test
-  public void testDeleteContainer() throws Exception {
-    keyValueContainerData.setState(ContainerProtos.ContainerLifeCycleState
-        .CLOSED);
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, conf);
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    keyValueContainer.delete(true);
-
-    String containerMetaDataPath = keyValueContainerData
-        .getMetadataPath();
-    File containerMetaDataLoc = new File(containerMetaDataPath);
-
-    assertFalse("Container directory still exists", containerMetaDataLoc
-        .getParentFile().exists());
-
-    assertFalse("Container File still exists",
-        keyValueContainer.getContainerFile().exists());
-    assertFalse("Container DB file still exists",
-        keyValueContainer.getContainerDBFile().exists());
-  }
-
-
-  @Test
-  public void testCloseContainer() throws Exception {
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    keyValueContainer.close();
-
-    keyValueContainerData = keyValueContainer
-        .getContainerData();
-
-    assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
-        keyValueContainerData.getState());
-
-    //Check state in the .container file
-    String containerMetaDataPath = keyValueContainerData
-        .getMetadataPath();
-    File containerFile = keyValueContainer.getContainerFile();
-
-    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
-        keyValueContainerData.getState());
-  }
-
-  @Test
-  public void testUpdateContainer() throws IOException {
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-    Map<String, String> metadata = new HashMap<>();
-    metadata.put("VOLUME", "ozone");
-    metadata.put("OWNER", "hdfs");
-    keyValueContainer.update(metadata, true);
-
-    keyValueContainerData = keyValueContainer
-        .getContainerData();
-
-    assertEquals(2, keyValueContainerData.getMetadata().size());
-
-    //Check metadata in the .container file
-    File containerFile = keyValueContainer.getContainerFile();
-
-    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertEquals(2, keyValueContainerData.getMetadata().size());
-
-  }
-
-  @Test
-  public void testUpdateContainerUnsupportedRequest() throws Exception {
-    try {
-      keyValueContainerData.setState(ContainerProtos.ContainerLifeCycleState
-          .CLOSED);
-      keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      Map<String, String> metadata = new HashMap<>();
-      metadata.put("VOLUME", "ozone");
-      keyValueContainer.update(metadata, false);
-      fail("testUpdateContainerUnsupportedRequest failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Updating a closed container " +
-          "without force option is not allowed", ex);
-      assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex
-          .getResult());
-    }
-  }
-
-
-}
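
The import/export test above follows a plain stream round trip: pack the
container to an OutputStream, delete it, then unpack from an InputStream and
compare metadata. A compressed round trip in miniature (java.util.zip stands
in here for the tar.gz handling of TarContainerPacker, whose internals are
not shown in this diff):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.GZIPInputStream;
    import java.util.zip.GZIPOutputStream;

    public class ExportImportRoundTrip {
      public static void main(String[] args) throws IOException {
        byte[] payload =
            "container-db-and-chunks".getBytes(StandardCharsets.UTF_8);

        // "Export": stream the container payload into a compressed archive.
        ByteArrayOutputStream archive = new ByteArrayOutputStream();
        try (OutputStream out = new GZIPOutputStream(archive)) {
          out.write(payload);
        }

        // "Import": unpack the archive as a new copy of the container.
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        try (InputStream in = new GZIPInputStream(
            new ByteArrayInputStream(archive.toByteArray()))) {
          byte[] buf = new byte[4096];
          int n;
          while ((n = in.read(buf)) != -1) {
            restored.write(buf, 0, n);
          }
        }

        // The test makes the same kind of before/after equality checks.
        System.out.println(
            new String(restored.toByteArray(), StandardCharsets.UTF_8));
      }
    }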

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
deleted file mode 100644
index e1904c1..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-
-import org.mockito.Mockito;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.doCallRealMethod;
-import static org.mockito.Mockito.times;
-
-
-import java.io.File;
-import java.util.UUID;
-
-/**
- * Unit tests for {@link KeyValueHandler}.
- */
-public class TestKeyValueHandler {
-
-  @Rule
-  public TestRule timeout = new Timeout(300000);
-
-  private static HddsDispatcher dispatcher;
-  private static KeyValueHandler handler;
-
-  private final static String DATANODE_UUID = UUID.randomUUID().toString();
-
-  private final String baseDir = MiniDFSCluster.getBaseDirectory();
-  private final String volume = baseDir + "disk1";
-
-  private static final long DUMMY_CONTAINER_ID = 9999;
-
-  @BeforeClass
-  public static void setup() throws StorageContainerException {
-    // Create mock HddsDispatcher and KeyValueHandler.
-    handler = Mockito.mock(KeyValueHandler.class);
-    dispatcher = Mockito.mock(HddsDispatcher.class);
-    Mockito.when(dispatcher.getHandler(any())).thenReturn(handler);
-    Mockito.when(dispatcher.dispatch(any())).thenCallRealMethod();
-    Mockito.when(dispatcher.getContainer(anyLong())).thenReturn(
-        Mockito.mock(KeyValueContainer.class));
-    Mockito.when(handler.handle(any(), any())).thenCallRealMethod();
-    doCallRealMethod().when(dispatcher).setMetricsForTesting(any());
-    dispatcher.setMetricsForTesting(Mockito.mock(ContainerMetrics.class));
-  }
-
-  /**
-   * Test that Handler handles different command types correctly.
-   */
-  @Test
-  public void testHandlerCommandHandling() throws Exception {
-
-    // Test Create Container Request handling
-    ContainerCommandRequestProto createContainerRequest =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder()
-            .setCmdType(ContainerProtos.Type.CreateContainer)
-            .setContainerID(DUMMY_CONTAINER_ID)
-            .setDatanodeUuid(DATANODE_UUID)
-            .setCreateContainer(ContainerProtos.CreateContainerRequestProto
-                .getDefaultInstance())
-            .build();
-    dispatcher.dispatch(createContainerRequest);
-    Mockito.verify(handler, times(1)).handleCreateContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Read Container Request handling
-    ContainerCommandRequestProto readContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer);
-    dispatcher.dispatch(readContainerRequest);
-    Mockito.verify(handler, times(1)).handleReadContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Update Container Request handling
-    ContainerCommandRequestProto updateContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.UpdateContainer);
-    dispatcher.dispatch(updateContainerRequest);
-    Mockito.verify(handler, times(1)).handleUpdateContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Delete Container Request handling
-    ContainerCommandRequestProto deleteContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteContainer);
-    dispatcher.dispatch(deleteContainerRequest);
-    Mockito.verify(handler, times(1)).handleDeleteContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test List Container Request handling
-    ContainerCommandRequestProto listContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ListContainer);
-    dispatcher.dispatch(listContainerRequest);
-    Mockito.verify(handler, times(1)).handleUnsupportedOp(
-        any(ContainerCommandRequestProto.class));
-
-    // Test Close Container Request handling
-    ContainerCommandRequestProto closeContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.CloseContainer);
-    dispatcher.dispatch(closeContainerRequest);
-    Mockito.verify(handler, times(1)).handleCloseContainer(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Put Block Request handling
-    ContainerCommandRequestProto putBlockRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.PutBlock);
-    dispatcher.dispatch(putBlockRequest);
-    Mockito.verify(handler, times(1)).handlePutBlock(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Get Block Request handling
-    ContainerCommandRequestProto getBlockRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.GetBlock);
-    dispatcher.dispatch(getBlockRequest);
-    Mockito.verify(handler, times(1)).handleGetBlock(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Delete Block Request handling
-    ContainerCommandRequestProto deleteBlockRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteBlock);
-    dispatcher.dispatch(deleteBlockRequest);
-    Mockito.verify(handler, times(1)).handleDeleteBlock(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test List Block Request handling
-    ContainerCommandRequestProto listBlockRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ListBlock);
-    dispatcher.dispatch(listBlockRequest);
-    Mockito.verify(handler, times(2)).handleUnsupportedOp(
-        any(ContainerCommandRequestProto.class));
-
-    // Test Read Chunk Request handling
-    ContainerCommandRequestProto readChunkRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ReadChunk);
-    dispatcher.dispatch(readChunkRequest);
-    Mockito.verify(handler, times(1)).handleReadChunk(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Delete Chunk Request handling
-    ContainerCommandRequestProto deleteChunkRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteChunk);
-    dispatcher.dispatch(deleteChunkRequest);
-    Mockito.verify(handler, times(1)).handleDeleteChunk(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Write Chunk Request handling
-    ContainerCommandRequestProto writeChunkRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.WriteChunk);
-    dispatcher.dispatch(writeChunkRequest);
-    Mockito.verify(handler, times(1)).handleWriteChunk(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test List Chunk Request handling
-    ContainerCommandRequestProto listChunkRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ListChunk);
-    dispatcher.dispatch(listChunkRequest);
-    Mockito.verify(handler, times(3)).handleUnsupportedOp(
-        any(ContainerCommandRequestProto.class));
-
-    // Test Put Small File Request handling
-    ContainerCommandRequestProto putSmallFileRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.PutSmallFile);
-    dispatcher.dispatch(putSmallFileRequest);
-    Mockito.verify(handler, times(1)).handlePutSmallFile(
-        any(ContainerCommandRequestProto.class), any());
-
-    // Test Get Small File Request handling
-    ContainerCommandRequestProto getSmallFileRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.GetSmallFile);
-    dispatcher.dispatch(getSmallFileRequest);
-    Mockito.verify(handler, times(1)).handleGetSmallFile(
-        any(ContainerCommandRequestProto.class), any());
-  }
-
-  @Test
-  public void testVolumeSetInKeyValueHandler() throws Exception {
-    File path = GenericTestUtils.getRandomizedTestDir();
-    try {
-      Configuration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
-      ContainerSet cset = new ContainerSet();
-      // Single metrics sampling interval, in seconds.
-      int[] interval = new int[] {2};
-      ContainerMetrics metrics = new ContainerMetrics(interval);
-      VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-      KeyValueHandler keyValueHandler = new KeyValueHandler(conf, cset,
-          volumeSet, metrics);
-      // Expected value first, so a failure message reads correctly.
-      assertEquals("org.apache.hadoop.ozone.container.common"
-          + ".volume.RoundRobinVolumeChoosingPolicy",
-          keyValueHandler.getVolumeChoosingPolicyForTesting()
-              .getClass().getName());
-
-      // Set a class that is not a subclass of VolumeChoosingPolicy.
-      conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
-          "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher");
-      try {
-        new KeyValueHandler(conf, cset, volumeSet, metrics);
-        Assert.fail("Constructor should reject a class that is not a "
-            + "VolumeChoosingPolicy");
-      } catch (RuntimeException ex) {
-        GenericTestUtils.assertExceptionContains("class org.apache.hadoop" +
-            ".ozone.container.common.impl.HddsDispatcher not org.apache" +
-            ".hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy",
-            ex);
-      }
-    } finally {
-      FileUtil.fullyDelete(path);
-    }
-  }
-
-  private ContainerCommandRequestProto getDummyCommandRequestProto(
-      ContainerProtos.Type cmdType) {
-    ContainerCommandRequestProto request =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder()
-            .setCmdType(cmdType)
-            .setContainerID(DUMMY_CONTAINER_ID)
-            .setDatanodeUuid(DATANODE_UUID)
-            .build();
-
-    return request;
-  }
-
-
-  @Test
-  public void testCloseInvalidContainer() {
-    long containerID = 1234L;
-    Configuration conf = new Configuration();
-    KeyValueContainerData kvData = new KeyValueContainerData(containerID,
-        (long) StorageUnit.GB.toBytes(1));
-    KeyValueContainer container = new KeyValueContainer(kvData, conf);
-    kvData.setState(ContainerProtos.ContainerLifeCycleState.INVALID);
-
-    // Create Close container request
-    ContainerCommandRequestProto closeContainerRequest =
-        ContainerProtos.ContainerCommandRequestProto.newBuilder()
-            .setCmdType(ContainerProtos.Type.CloseContainer)
-            .setContainerID(DUMMY_CONTAINER_ID)
-            .setDatanodeUuid(DATANODE_UUID)
-            .setCloseContainer(ContainerProtos.CloseContainerRequestProto
-                .getDefaultInstance())
-            .build();
-    dispatcher.dispatch(closeContainerRequest);
-
-    Mockito.when(handler.handleCloseContainer(any(), any()))
-        .thenCallRealMethod();
-    // Closing invalid container should return error response.
-    ContainerProtos.ContainerCommandResponseProto response =
-        handler.handleCloseContainer(closeContainerRequest, container);
-
-    Assert.assertEquals(
-        "Close container should return Invalid container error",
-        ContainerProtos.Result.INVALID_CONTAINER_STATE, response.getResult());
-  }
-}
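
The verify counts in testHandlerCommandHandling encode one design point: each
container/block/chunk command has a dedicated handler method, while the list
commands all fall through to a single handleUnsupportedOp, so its invocation
count climbs to 2 and then 3 across the test. A toy dispatcher showing that
shape (types and names are illustrative, not the HddsDispatcher API):

    import java.util.EnumMap;
    import java.util.Map;

    public class DispatchSketch {
      // Illustrative subset of the command types exercised by the test.
      enum Type { ReadContainer, ListContainer, ListBlock, ListChunk }

      private final Map<Type, Integer> handled = new EnumMap<>(Type.class);
      private int unsupportedOps = 0;

      void dispatch(Type cmdType) {
        switch (cmdType) {
          case ReadContainer:
            handled.merge(cmdType, 1, Integer::sum); // dedicated handler
            break;
          default:
            unsupportedOps++;                        // shared fallback
            break;
        }
      }

      public static void main(String[] args) {
        DispatchSketch d = new DispatchSketch();
        d.dispatch(Type.ReadContainer);
        d.dispatch(Type.ListContainer);
        d.dispatch(Type.ListBlock);
        d.dispatch(Type.ListChunk);
        System.out.println(d.unsupportedOps); // 3, matching the verify count
      }
    }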

