Posted to common-commits@hadoop.apache.org by bh...@apache.org on 2018/07/09 20:33:21 UTC

[12/37] hadoop git commit: HDDS-156. Implement HDDSVolume to manage volume state

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a5552bf/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
new file mode 100644
index 0000000..5889222
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
+import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+import java.util.UUID;
+
+import static org.junit.Assert.*;
+
+/**
+ * This class tests {@link DatanodeVersionFile}.
+ */
+public class TestDatanodeVersionFile {
+
+  private File versionFile;
+  private DatanodeVersionFile dnVersionFile;
+  private Properties properties;
+
+  private String storageID;
+  private String clusterID;
+  private String datanodeUUID;
+  private long cTime;
+  private int lv;
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  @Before
+  public void setup() throws IOException {
+    versionFile = folder.newFile("Version");
+    storageID = UUID.randomUUID().toString();
+    clusterID = UUID.randomUUID().toString();
+    datanodeUUID = UUID.randomUUID().toString();
+    cTime = Time.now();
+    lv = DataNodeLayoutVersion.getLatestVersion().getVersion();
+
+    dnVersionFile = new DatanodeVersionFile(
+        storageID, clusterID, datanodeUUID, cTime, lv);
+
+    dnVersionFile.createVersionFile(versionFile);
+
+    properties = dnVersionFile.readFrom(versionFile);
+  }
+
+  @Test
+  public void testCreateAndReadVersionFile() throws IOException {
+
+    // Check that the version file exists.
+    assertTrue(versionFile.exists());
+
+    assertEquals(storageID, HddsVolumeUtil.getStorageID(
+        properties, versionFile));
+    assertEquals(clusterID, HddsVolumeUtil.getClusterID(
+        properties, versionFile, clusterID));
+    assertEquals(datanodeUUID, HddsVolumeUtil.getDatanodeUUID(
+        properties, versionFile, datanodeUUID));
+    assertEquals(cTime, HddsVolumeUtil.getCreationTime(
+        properties, versionFile));
+    assertEquals(lv, HddsVolumeUtil.getLayOutVersion(
+        properties, versionFile));
+  }
+
+  @Test
+  public void testIncorrectClusterId() throws IOException {
+    try {
+      String randomClusterID = UUID.randomUUID().toString();
+      HddsVolumeUtil.getClusterID(properties, versionFile,
+          randomClusterID);
+      fail("Expected exception for mismatched ClusterIDs");
+    } catch (InconsistentStorageStateException ex) {
+      GenericTestUtils.assertExceptionContains("Mismatched ClusterIDs", ex);
+    }
+  }
+
+  @Test
+  public void testVerifyCTime() throws IOException {
+    long invalidCTime = -10;
+    dnVersionFile = new DatanodeVersionFile(
+        storageID, clusterID, datanodeUUID, invalidCTime, lv);
+    dnVersionFile.createVersionFile(versionFile);
+    properties = dnVersionFile.readFrom(versionFile);
+
+    try {
+      HddsVolumeUtil.getCreationTime(properties, versionFile);
+      fail("Test failure in testVerifyCTime");
+    } catch (InconsistentStorageStateException ex) {
+      GenericTestUtils.assertExceptionContains("Invalid Creation time in " +
+          "Version File : " + versionFile, ex);
+    }
+  }
+
+  @Test
+  public void testVerifyLayOut() throws IOException {
+    int invalidLayOutVersion = 100;
+    dnVersionFile = new DatanodeVersionFile(
+        storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion);
+    dnVersionFile.createVersionFile(versionFile);
+    Properties props = dnVersionFile.readFrom(versionFile);
+
+    try {
+      HddsVolumeUtil.getLayOutVersion(props, versionFile);
+      fail("Test failure in testVerifyLayOut");
+    } catch (InconsistentStorageStateException ex) {
+      GenericTestUtils.assertExceptionContains("Invalid layOutVersion.", ex);
+    }
+  }
+}
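
Note for reviewers: DatanodeVersionFile itself is not part of this hunk, but
the test above implies a plain java.util.Properties round trip (the
constructor captures the fields, createVersionFile() stores them, readFrom()
loads them back). A minimal sketch of that pattern, assuming illustrative
property keys rather than the ones the real class uses, could look like:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Properties;

public class VersionFileSketch {

  // Writes the five fields the test round-trips. Key names here are
  // placeholders, not the ones DatanodeVersionFile actually defines.
  public static void write(File file, String storageID, String clusterID,
      String datanodeUUID, long cTime, int layoutVersion) throws IOException {
    Properties props = new Properties();
    props.setProperty("storageID", storageID);
    props.setProperty("clusterID", clusterID);
    props.setProperty("datanodeUuid", datanodeUUID);
    props.setProperty("cTime", String.valueOf(cTime));
    props.setProperty("layOutVersion", String.valueOf(layoutVersion));
    try (FileOutputStream out = new FileOutputStream(file)) {
      props.store(out, "Sketch of a datanode Version file");
    }
  }

  // Loads the properties back, as readFrom(versionFile) does in the test.
  public static Properties read(File file) throws IOException {
    Properties props = new Properties();
    try (FileInputStream in = new FileInputStream(file)) {
      props.load(in);
    }
    return props;
  }
}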

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a5552bf/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestRoundRobinVolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestRoundRobinVolumeChoosingPolicy.java
deleted file mode 100644
index 409db57..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestRoundRobinVolumeChoosingPolicy.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Tests {@link RoundRobinVolumeChoosingPolicy}.
- */
-public class TestRoundRobinVolumeChoosingPolicy {
-
-  private RoundRobinVolumeChoosingPolicy policy;
-
-  @Before
-  public void setup() {
-   policy = ReflectionUtils.newInstance(
-       RoundRobinVolumeChoosingPolicy.class, null);
-  }
-
-  @Test
-  public void testRRVolumeChoosingPolicy() throws Exception {
-    final List<VolumeInfo> volumes = new ArrayList<>();
-
-    // First volume, with 100 bytes of space.
-    volumes.add(Mockito.mock(VolumeInfo.class));
-    Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
-
-    // Second volume, with 200 bytes of space.
-    volumes.add(Mockito.mock(VolumeInfo.class));
-    Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
-
-    // Test two rounds of round-robin choosing
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
-
-    // The first volume has only 100L space, so the policy should
-    // choose the second one in case we ask for more.
-    Assert.assertEquals(volumes.get(1),
-        policy.chooseVolume(volumes, 150));
-
-    // Fail if no volume has enough space available
-    try {
-      policy.chooseVolume(volumes, Long.MAX_VALUE);
-      Assert.fail();
-    } catch (IOException e) {
-      // Passed.
-    }
-  }
-
-  @Test
-  public void testRRPolicyExceptionMessage() throws Exception {
-    final List<VolumeInfo> volumes = new ArrayList<>();
-
-    // First volume, with 100 bytes of space.
-    volumes.add(Mockito.mock(VolumeInfo.class));
-    Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
-
-    // Second volume, with 200 bytes of space.
-    volumes.add(Mockito.mock(VolumeInfo.class));
-    Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
-
-    int blockSize = 300;
-    try {
-      policy.chooseVolume(volumes, blockSize);
-      Assert.fail("expected to throw DiskOutOfSpaceException");
-    } catch(DiskOutOfSpaceException e) {
-      Assert.assertEquals("Not returnig the expected message",
-          "Out of space: The volume with the most available space (=" + 200
-              + " B) is less than the container size (=" + blockSize + " B).",
-          e.getMessage());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a5552bf/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestVolumeSet.java
deleted file mode 100644
index ceeacff..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestVolumeSet.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.ozone.container.common.impl.VolumeInfo;
-import org.apache.hadoop.ozone.container.common.impl.VolumeSet;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Tests {@link VolumeSet} operations.
- */
-public class TestVolumeSet {
-
-  private OzoneConfiguration conf;
-  protected VolumeSet volumeSet;
-  protected final String baseDir = MiniDFSCluster.getBaseDirectory();
-  protected final String volume1 = baseDir + "disk1";
-  protected final String volume2 = baseDir + "disk2";
-  private final List<String> volumes = new ArrayList<>();
-
-  private void initializeVolumeSet() throws Exception {
-    volumeSet = new VolumeSet(conf);
-  }
-
-  @Rule
-  public Timeout testTimeout = new Timeout(300_000);
-
-  @Before
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    String dataDirKey = volume1 + "," + volume2;
-    volumes.add(volume1);
-    volumes.add(volume2);
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
-    initializeVolumeSet();
-  }
-
-  @Test
-  public void testVolumeSetInitialization() throws Exception {
-
-    List<VolumeInfo> volumesList = volumeSet.getVolumesList();
-
-    // VolumeSet initialization should add volume1 and volume2 to VolumeSet
-    assertEquals("VolumeSet intialization is incorrect",
-        volumesList.size(), volumes.size());
-    assertTrue("VolumeSet not initailized correctly",
-        checkVolumeExistsInVolumeSet(volume1));
-    assertTrue("VolumeSet not initailized correctly",
-        checkVolumeExistsInVolumeSet(volume2));
-  }
-
-  @Test
-  public void testAddVolume() throws Exception {
-
-    List<VolumeInfo> volumesList = volumeSet.getVolumesList();
-    assertEquals(2, volumeSet.getVolumesList().size());
-
-    // Add a volume to VolumeSet
-    String volume3 = baseDir + "disk3";
-    volumeSet.addVolume(volume3);
-
-    assertEquals(3, volumeSet.getVolumesList().size());
-    assertTrue("AddVolume did not add requested volume to VolumeSet",
-        checkVolumeExistsInVolumeSet(volume3));
-  }
-
-  @Test
-  public void testFailVolume() throws Exception {
-
-    //Fail a volume
-    volumeSet.failVolume(volume1);
-
-    // Failed volume should not show up in the volumeList
-    assertEquals(1, volumeSet.getVolumesList().size());
-
-    // Failed volume should be added to FailedVolumeList
-    assertEquals("Failed volume not present in FailedVolumeMap",
-        1, volumeSet.getFailedVolumesList().size());
-    assertEquals("Failed Volume list did not match", volume1,
-        volumeSet.getFailedVolumesList().get(0).getRootDir().toString());
-    assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed());
-
-    // Failed volume should not exist in VolumeMap
-    Path volume1Path = new Path(volume1);
-    assertFalse(volumeSet.getVolumeMap().containsKey(volume1Path));
-  }
-
-  @Test
-  public void testRemoveVolume() throws Exception {
-
-    List<VolumeInfo> volumesList = volumeSet.getVolumesList();
-    assertEquals(2, volumeSet.getVolumesList().size());
-
-    // Remove a volume from VolumeSet
-    volumeSet.removeVolume(volume1);
-    assertEquals(1, volumeSet.getVolumesList().size());
-
-    // Attempting to remove a volume which does not exist in VolumeSet should
-    // log a warning.
-    LogCapturer logs = LogCapturer.captureLogs(
-        LogFactory.getLog(VolumeSet.class));
-    volumeSet.removeVolume(volume1);
-    assertEquals(1, volumeSet.getVolumesList().size());
-    String expectedLogMessage = "Volume : " + volume1 + " does not exist in "
-        + "VolumeSet";
-    assertTrue("Log output does not contain expected log message: "
-        + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
-  }
-
-  private boolean checkVolumeExistsInVolumeSet(String volume) {
-    for (VolumeInfo volumeInfo : volumeSet.getVolumesList()) {
-      if (volumeInfo.getRootDir().toString().equals(volume)) {
-        return true;
-      }
-    }
-    return false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a5552bf/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
new file mode 100644
index 0000000..7755345
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.volume;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.GetSpaceUsed;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import static org.junit.Assert.*;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.File;
+import java.util.Properties;
+import java.util.UUID;
+
+/**
+ * Unit tests for {@link HddsVolume}.
+ */
+public class TestHddsVolume {
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private static final String DATANODE_UUID = UUID.randomUUID().toString();
+  private static final String CLUSTER_ID = UUID.randomUUID().toString();
+  private static final Configuration CONF = new Configuration();
+  private static final String DU_CACHE_FILE = "scmUsed";
+
+  private File rootDir;
+  private HddsVolume volume;
+  private File versionFile;
+
+  @Before
+  public void setup() throws Exception {
+    rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR);
+    volume = new HddsVolume.Builder(folder.getRoot().getPath())
+        .datanodeUuid(DATANODE_UUID)
+        .conf(CONF)
+        .build();
+    versionFile = HddsVolumeUtil.getVersionFile(rootDir);
+  }
+
+  @Test
+  public void testHddsVolumeInitialization() throws Exception {
+
+    // The initial state of HddsVolume should be "NOT_FORMATTED" when
+    // clusterID is not specified and the version file should not be written
+    // to disk.
+    assertNull(volume.getClusterID());
+    assertEquals(volume.getStorageType(), StorageType.DEFAULT);
+    assertEquals(volume.getStorageState(),
+        HddsVolume.VolumeState.NOT_FORMATTED);
+    assertFalse("Version file should not be created when clusterID is not " +
+        "known.", versionFile.exists());
+
+
+    // Format the volume with clusterID.
+    volume.format(CLUSTER_ID);
+
+    // The state of HddsVolume after formatting with clusterID should be
+    // NORMAL and the version file should exist.
+    assertTrue("Volume format should create Version file",
+        versionFile.exists());
+    assertEquals(volume.getClusterID(), CLUSTER_ID);
+    assertEquals(volume.getStorageState(), HddsVolume.VolumeState.NORMAL);
+  }
+
+  @Test
+  public void testReadPropertiesFromVersionFile() throws Exception {
+    volume.format(CLUSTER_ID);
+
+    Properties properties = DatanodeVersionFile.readFrom(versionFile);
+
+    String storageID = HddsVolumeUtil.getStorageID(properties, versionFile);
+    String clusterID = HddsVolumeUtil.getClusterID(
+        properties, versionFile, CLUSTER_ID);
+    String datanodeUuid = HddsVolumeUtil.getDatanodeUUID(
+        properties, versionFile, DATANODE_UUID);
+    long cTime = HddsVolumeUtil.getCreationTime(
+        properties, versionFile);
+    int layoutVersion = HddsVolumeUtil.getLayOutVersion(
+        properties, versionFile);
+
+    assertEquals(volume.getStorageID(), storageID);
+    assertEquals(volume.getClusterID(), clusterID);
+    assertEquals(volume.getDatanodeUuid(), datanodeUuid);
+    assertEquals(volume.getCTime(), cTime);
+    assertEquals(volume.getLayoutVersion(), layoutVersion);
+  }
+
+  @Test
+  public void testShutdown() throws Exception {
+    // Return dummy value > 0 for scmUsage so that scm cache file is written
+    // during shutdown.
+    GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
+    volume.setScmUsageForTesting(scmUsageMock);
+    Mockito.when(scmUsageMock.getUsed()).thenReturn(Long.valueOf(100));
+
+    assertTrue("Available volume should be positive",
+        volume.getAvailable() > 0);
+
+    // Shutdown the volume.
+    volume.shutdown();
+
+    // Volume state should be "NON_EXISTENT" once the volume is shut down.
+    assertEquals(volume.getStorageState(),
+        HddsVolume.VolumeState.NON_EXISTENT);
+
+    // The volume should persist the scmUsed cache file on shutdown.
+    File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE);
+    assertTrue("scmUsed cache file should be saved on shutdown",
+        scmUsedFile.exists());
+
+    try {
+      // getAvailable() should fail with a NullPointerException because the
+      // usage tracker has been shut down.
+      volume.getAvailable();
+      fail("Expected getAvailable() to fail after volume shutdown");
+    } catch (Exception ex) {
+      assertTrue(ex instanceof NullPointerException);
+    }
+  }
+}
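
Note for reviewers: the three getStorageState() assertions above trace a
small state machine. A minimal sketch of the lifecycle the test implies,
covering only the three states it observes (the real HddsVolume.VolumeState
may define more):

public class VolumeLifecycleSketch {

  // Only the states exercised by TestHddsVolume.
  enum VolumeState { NOT_FORMATTED, NORMAL, NON_EXISTENT }

  private VolumeState state = VolumeState.NOT_FORMATTED;
  private String clusterID;

  // format(clusterID) is the NOT_FORMATTED to NORMAL transition; per the
  // test, the version file is only written once the clusterID is known.
  void format(String newClusterID) {
    this.clusterID = newClusterID;
    this.state = VolumeState.NORMAL;
  }

  // shutdown() moves the volume to NON_EXISTENT; per the test, usage
  // tracking stops, so a later getAvailable() fails.
  void shutdown() {
    this.state = VolumeState.NON_EXISTENT;
  }

  VolumeState getStorageState() {
    return state;
  }

  String getClusterID() {
    return clusterID;
  }
}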

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a5552bf/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
new file mode 100644
index 0000000..41610af
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.volume;
+
+import org.apache.hadoop.fs.GetSpaceUsed;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Tests {@link RoundRobinVolumeChoosingPolicy}.
+ */
+public class TestRoundRobinVolumeChoosingPolicy {
+
+  private RoundRobinVolumeChoosingPolicy policy;
+  private List<HddsVolume> volumes;
+
+  private final String baseDir = MiniDFSCluster.getBaseDirectory();
+  private final String volume1 = baseDir + "disk1";
+  private final String volume2 = baseDir + "disk2";
+  private static final String DUMMY_IP_ADDR = "0.0.0.0";
+
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    String dataDirKey = volume1 + "," + volume2;
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
+    policy = ReflectionUtils.newInstance(
+        RoundRobinVolumeChoosingPolicy.class, null);
+    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
+        .setUuid(UUID.randomUUID().toString())
+        .setIpAddress(DUMMY_IP_ADDR)
+        .build();
+    VolumeSet volumeSet = new VolumeSet(datanodeDetails, conf);
+    volumes = volumeSet.getVolumesList();
+  }
+
+  @Test
+  public void testRRVolumeChoosingPolicy() throws Exception {
+    HddsVolume hddsVolume1 = volumes.get(0);
+    HddsVolume hddsVolume2 = volumes.get(1);
+
+    // Set available space in volume1 to 100L
+    setAvailableSpace(hddsVolume1, 100L);
+
+    // Set available space in volume2 to 200L
+    setAvailableSpace(hddsVolume2, 200L);
+
+    Assert.assertEquals(100L, hddsVolume1.getAvailable());
+    Assert.assertEquals(200L, hddsVolume2.getAvailable());
+
+    // Test two rounds of round-robin choosing
+    Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0));
+    Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0));
+    Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0));
+    Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0));
+
+    // The first volume has only 100L space, so the policy should
+    // choose the second one in case we ask for more.
+    Assert.assertEquals(hddsVolume2,
+        policy.chooseVolume(volumes, 150));
+
+    // Fail if no volume has enough space available
+    try {
+      policy.chooseVolume(volumes, Long.MAX_VALUE);
+      Assert.fail();
+    } catch (IOException e) {
+      // Passed.
+    }
+  }
+
+  @Test
+  public void testRRPolicyExceptionMessage() throws Exception {
+    HddsVolume hddsVolume1 = volumes.get(0);
+    HddsVolume hddsVolume2 = volumes.get(1);
+
+    // Set available space in volume1 to 100L
+    setAvailableSpace(hddsVolume1, 100L);
+
+    // Set available space in volume2 to 200L
+    setAvailableSpace(hddsVolume2, 200L);
+
+    int blockSize = 300;
+    try {
+      policy.chooseVolume(volumes, blockSize);
+      Assert.fail("expected to throw DiskOutOfSpaceException");
+    } catch(DiskOutOfSpaceException e) {
+      Assert.assertEquals("Not returnig the expected message",
+          "Out of space: The volume with the most available space (=" + 200
+              + " B) is less than the container size (=" + blockSize + " B).",
+          e.getMessage());
+    }
+  }
+
+  private void setAvailableSpace(HddsVolume hddsVolume, long availableSpace)
+      throws IOException {
+    GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
+    hddsVolume.setScmUsageForTesting(scmUsageMock);
+    // Set used space to (capacity - availableSpace) so that
+    // getAvailable() returns the specified available space.
+    Mockito.when(scmUsageMock.getUsed()).thenReturn(
+        (hddsVolume.getCapacity() - availableSpace));
+  }
+}
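
Note for reviewers: the assertions above pin down the choosing behavior
fairly tightly. Assuming only HddsVolume#getAvailable() from this patch, a
round-robin policy consistent with the test (modeled on the analogous HDFS
policy) would keep a cursor across calls, scan the list at most once per
call, and report the largest available space when no volume fits. A sketch,
not the actual RoundRobinVolumeChoosingPolicy implementation:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

public class RoundRobinSketch {

  // Cursor into the volume list, preserved across calls so that choices
  // alternate, as the first four assertions in testRRVolumeChoosingPolicy
  // expect.
  private int nextIndex = 0;

  public HddsVolume chooseVolume(List<HddsVolume> volumes, long containerSize)
      throws IOException {
    int startIndex = nextIndex;
    long maxAvailable = 0;
    while (true) {
      HddsVolume volume = volumes.get(nextIndex);
      nextIndex = (nextIndex + 1) % volumes.size();
      long available = volume.getAvailable();
      if (available >= containerSize) {
        return volume;
      }
      maxAvailable = Math.max(maxAvailable, available);
      // Scanned every volume once without success: fail with the message
      // format asserted in testRRPolicyExceptionMessage.
      if (nextIndex == startIndex) {
        throw new DiskOutOfSpaceException("Out of space: The volume with the"
            + " most available space (=" + maxAvailable + " B) is less than"
            + " the container size (=" + containerSize + " B).");
      }
    }
  }
}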

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a5552bf/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
new file mode 100644
index 0000000..61383de
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.volume;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Tests {@link VolumeSet} operations.
+ */
+public class TestVolumeSet {
+
+  private OzoneConfiguration conf;
+  private VolumeSet volumeSet;
+  private final String baseDir = MiniDFSCluster.getBaseDirectory();
+  private final String volume1 = baseDir + "disk1";
+  private final String volume2 = baseDir + "disk2";
+  private final List<String> volumes = new ArrayList<>();
+
+  private static final String DUMMY_IP_ADDR = "0.0.0.0";
+
+  private void initializeVolumeSet() throws Exception {
+    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
+        .setUuid(UUID.randomUUID().toString())
+        .setIpAddress(DUMMY_IP_ADDR)
+        .build();
+    volumeSet = new VolumeSet(datanodeDetails, conf);
+  }
+
+  @Rule
+  public Timeout testTimeout = new Timeout(300_000);
+
+  @Before
+  public void setup() throws Exception {
+    conf = new OzoneConfiguration();
+    String dataDirKey = volume1 + "," + volume2;
+    volumes.add(volume1);
+    volumes.add(volume2);
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
+    initializeVolumeSet();
+  }
+
+  @Test
+  public void testVolumeSetInitialization() throws Exception {
+
+    List<HddsVolume> volumesList = volumeSet.getVolumesList();
+
+    // VolumeSet initialization should add volume1 and volume2 to VolumeSet
+    assertEquals("VolumeSet intialization is incorrect",
+        volumesList.size(), volumes.size());
+    assertTrue("VolumeSet not initailized correctly",
+        checkVolumeExistsInVolumeSet(volume1));
+    assertTrue("VolumeSet not initailized correctly",
+        checkVolumeExistsInVolumeSet(volume2));
+  }
+
+  @Test
+  public void testAddVolume() throws Exception {
+
+    assertEquals(2, volumeSet.getVolumesList().size());
+
+    // Add a volume to VolumeSet
+    String volume3 = baseDir + "disk3";
+    volumeSet.addVolume(volume3);
+
+    assertEquals(3, volumeSet.getVolumesList().size());
+    assertTrue("AddVolume did not add requested volume to VolumeSet",
+        checkVolumeExistsInVolumeSet(volume3));
+  }
+
+  @Test
+  public void testFailVolume() throws Exception {
+
+    //Fail a volume
+    volumeSet.failVolume(volume1);
+
+    // Failed volume should not show up in the volumeList
+    assertEquals(1, volumeSet.getVolumesList().size());
+
+    // Failed volume should be added to FailedVolumeList
+    assertEquals("Failed volume not present in FailedVolumeMap",
+        1, volumeSet.getFailedVolumesList().size());
+    assertEquals("Failed Volume list did not match",
+        HddsVolumeUtil.getHddsRoot(volume1),
+        volumeSet.getFailedVolumesList().get(0).getHddsRootDir().getPath());
+    assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed());
+
+    // Failed volume should not exist in VolumeMap
+    Path volume1Path = new Path(volume1);
+    assertFalse(volumeSet.getVolumeMap().containsKey(volume1Path));
+  }
+
+  @Test
+  public void testRemoveVolume() throws Exception {
+
+    List<HddsVolume> volumesList = volumeSet.getVolumesList();
+    assertEquals(2, volumeSet.getVolumesList().size());
+
+    // Remove a volume from VolumeSet
+    volumeSet.removeVolume(volume1);
+    assertEquals(1, volumeSet.getVolumesList().size());
+
+    // Attempting to remove a volume which does not exist in VolumeSet should
+    // log a warning.
+    LogCapturer logs = LogCapturer.captureLogs(
+        LogFactory.getLog(VolumeSet.class));
+    volumeSet.removeVolume(volume1);
+    assertEquals(1, volumeSet.getVolumesList().size());
+    String expectedLogMessage = "Volume : " +
+        HddsVolumeUtil.getHddsRoot(volume1) + " does not exist in VolumeSet";
+    assertTrue("Log output does not contain expected log message: "
+        + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
+  }
+
+  private boolean checkVolumeExistsInVolumeSet(String volume) {
+    for (HddsVolume hddsVolume : volumeSet.getVolumesList()) {
+      if (hddsVolume.getHddsRootDir().getPath().equals(
+          HddsVolumeUtil.getHddsRoot(volume))) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
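
Note for reviewers: the assertions above compare against
HddsVolumeUtil.getHddsRoot(volume1). Going by the TestHddsVolume setup
earlier in this patch (rootDir = new File(folder.getRoot(),
HddsVolume.HDDS_VOLUME_DIR)), that helper presumably appends the HDDS
subdirectory name to the volume root. A minimal sketch under that
assumption:

import java.io.File;

public final class HddsRootSketch {

  // Hypothetical stand-in for HddsVolume.HDDS_VOLUME_DIR.
  private static final String HDDS_VOLUME_DIR = "hdds";

  private HddsRootSketch() {
  }

  // e.g. /data/disk1 becomes /data/disk1/hdds
  public static String getHddsRoot(String volumeRoot) {
    return new File(volumeRoot, HDDS_VOLUME_DIR).getPath();
  }
}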

