Posted to common-commits@hadoop.apache.org by in...@apache.org on 2018/07/25 01:32:21 UTC

[38/50] hadoop git commit: HDDS-262. Send SCM healthy and failed volumes in the heartbeat. Contributed by Bharat Viswanadham.

HDDS-262. Send SCM healthy and failed volumes in the heartbeat. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16f9aee5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16f9aee5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16f9aee5

Branch: refs/heads/HADOOP-15461
Commit: 16f9aee5f55bc37c1bb243708ee9b3f97e5a5b83
Parents: 2ced3ef
Author: Nanda kumar <na...@apache.org>
Authored: Tue Jul 24 12:09:15 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Tue Jul 24 12:09:15 2018 +0530

----------------------------------------------------------------------
 .../container/common/volume/HddsVolume.java     | 81 ++++++++++++++------
 .../container/common/volume/VolumeSet.java      | 28 +++++--
 .../container/common/volume/TestVolumeSet.java  | 35 ++++++++-
 3 files changed, 111 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16f9aee5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 0cbfd9f..6b90146 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -37,6 +37,7 @@ import org.slf4j.LoggerFactory;
 import java.io.File;
 import java.io.IOException;
 import java.util.Properties;
+import java.util.UUID;
 
 /**
 * HddsVolume represents a volume in a datanode. {@link VolumeSet} maintains a
@@ -84,6 +85,7 @@ public final class HddsVolume {
 
     private String datanodeUuid;
     private String clusterID;
+    private boolean failedVolume = false;
 
     public Builder(String rootDirStr) {
       this.volumeRootStr = rootDirStr;
@@ -114,29 +116,47 @@ public final class HddsVolume {
       return this;
     }
 
+    // This builder flag exists so that a failed HddsVolume placeholder can
+    // be created when an exception occurs while building a regular
+    // HddsVolume object.
+    public Builder failedVolume(boolean failed) {
+      this.failedVolume = failed;
+      return this;
+    }
+
     public HddsVolume build() throws IOException {
       return new HddsVolume(this);
     }
   }
 
   private HddsVolume(Builder b) throws IOException {
-    StorageLocation location = StorageLocation.parse(b.volumeRootStr);
-    hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR);
-    this.state = VolumeState.NOT_INITIALIZED;
-    this.clusterID = b.clusterID;
-    this.datanodeUuid = b.datanodeUuid;
-    this.volumeIOStats = new VolumeIOStats();
-
-    VolumeInfo.Builder volumeBuilder =
-        new VolumeInfo.Builder(b.volumeRootStr, b.conf)
-        .storageType(b.storageType)
-        .configuredCapacity(b.configuredCapacity);
-    this.volumeInfo = volumeBuilder.build();
-
-    LOG.info("Creating Volume: " + this.hddsRootDir + " of  storage type : " +
-        b.storageType + " and capacity : " + volumeInfo.getCapacity());
-
-    initialize();
+    if (!b.failedVolume) {
+      StorageLocation location = StorageLocation.parse(b.volumeRootStr);
+      hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR);
+      this.state = VolumeState.NOT_INITIALIZED;
+      this.clusterID = b.clusterID;
+      this.datanodeUuid = b.datanodeUuid;
+      this.volumeIOStats = new VolumeIOStats();
+
+      VolumeInfo.Builder volumeBuilder =
+          new VolumeInfo.Builder(b.volumeRootStr, b.conf)
+              .storageType(b.storageType)
+              .configuredCapacity(b.configuredCapacity);
+      this.volumeInfo = volumeBuilder.build();
+
+      LOG.info("Creating Volume: " + this.hddsRootDir + " of storage type: " +
+          b.storageType + " and capacity: " + volumeInfo.getCapacity());
+
+      initialize();
+    } else {
+      // The builder was called with failedVolume set, so create a
+      // placeholder HddsVolume in the FAILED state.
+      hddsRootDir = new File(b.volumeRootStr);
+      volumeIOStats = null;
+      volumeInfo = null;
+      storageID = UUID.randomUUID().toString();
+      state = VolumeState.FAILED;
+    }
   }
 
   public VolumeInfo getVolumeInfo() {
@@ -285,7 +305,10 @@ public final class HddsVolume {
   }
 
   public StorageType getStorageType() {
-    return volumeInfo.getStorageType();
+    if (volumeInfo != null) {
+      return volumeInfo.getStorageType();
+    }
+    return StorageType.DEFAULT;
   }
 
   public String getStorageID() {
@@ -313,11 +336,17 @@ public final class HddsVolume {
   }
 
   public long getCapacity() throws IOException {
-    return volumeInfo.getCapacity();
+    if (volumeInfo != null) {
+      return volumeInfo.getCapacity();
+    }
+    return 0;
   }
 
   public long getAvailable() throws IOException {
-    return volumeInfo.getAvailable();
+    if (volumeInfo != null) {
+      return volumeInfo.getAvailable();
+    }
+    return 0;
   }
 
   public void setState(VolumeState state) {
@@ -334,12 +363,16 @@ public final class HddsVolume {
 
   public void failVolume() {
     setState(VolumeState.FAILED);
-    volumeInfo.shutdownUsageThread();
+    if (volumeInfo != null) {
+      volumeInfo.shutdownUsageThread();
+    }
   }
 
   public void shutdown() {
     this.state = VolumeState.NON_EXISTENT;
-    volumeInfo.shutdownUsageThread();
+    if (volumeInfo != null) {
+      volumeInfo.shutdownUsageThread();
+    }
   }
 
   /**
@@ -368,6 +401,8 @@ public final class HddsVolume {
    */
   @VisibleForTesting
   public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    volumeInfo.setScmUsageForTesting(scmUsageForTest);
+    if (volumeInfo != null) {
+      volumeInfo.setScmUsageForTesting(scmUsageForTest);
+    }
   }
 }
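
For illustration, a minimal sketch of what the new failedVolume builder flag
produces (the volume path is hypothetical, and the snippet assumes an
enclosing method that declares throws IOException; the getter behavior
follows the null guards in the patch above):

    // Build a placeholder HddsVolume for a location that failed to
    // initialize. No VolumeInfo is created; state goes straight to FAILED.
    HddsVolume failed = new HddsVolume.Builder("/data/disk3")
        .failedVolume(true)
        .build();

    // The null-guarded getters fall back to safe defaults instead of
    // dereferencing the missing VolumeInfo.
    assert failed.getVolumeInfo() == null;
    assert failed.getCapacity() == 0;
    assert failed.getAvailable() == 0;
    assert failed.getStorageType() == StorageType.DEFAULT;
    assert failed.getStorageID() != null;   // random UUID assigned in build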

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16f9aee5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 4dfde37..4a1487b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -76,6 +76,7 @@ public class VolumeSet {
    * mutually exclusive.
    */
   private Map<String, HddsVolume> failedVolumeMap;
+
   /**
    * {@link VolumeSet#volumeStateMap} maintains a list of active volumes per
    * StorageType.
@@ -95,12 +96,12 @@ public class VolumeSet {
   private Runnable shutdownHook;
 
   public VolumeSet(String dnUuid, Configuration conf)
-      throws DiskOutOfSpaceException {
+      throws IOException {
     this(dnUuid, null, conf);
   }
 
   public VolumeSet(String dnUuid, String clusterID, Configuration conf)
-      throws DiskOutOfSpaceException {
+      throws IOException {
     this.datanodeUuid = dnUuid;
     this.clusterID = clusterID;
     this.conf = conf;
@@ -120,7 +121,7 @@ public class VolumeSet {
   }
 
   // Add DN volumes configured through ConfigKeys to volumeMap.
-  private void initializeVolumeSet() throws DiskOutOfSpaceException {
+  private void initializeVolumeSet() throws IOException {
     volumeMap = new ConcurrentHashMap<>();
     failedVolumeMap = new ConcurrentHashMap<>();
     volumeStateMap = new EnumMap<>(StorageType.class);
@@ -153,6 +154,9 @@ public class VolumeSet {
         LOG.info("Added Volume : {} to VolumeSet",
             hddsVolume.getHddsRootDir().getPath());
       } catch (IOException e) {
+        HddsVolume volume = new HddsVolume.Builder(locationString)
+            .failedVolume(true).build();
+        failedVolumeMap.put(locationString, volume);
         LOG.error("Failed to parse the storage location: " + locationString, e);
       }
     }
@@ -337,11 +341,12 @@ public class VolumeSet {
   public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
       throws IOException {
     boolean failed;
-    StorageLocationReport[] reports =
-        new StorageLocationReport[volumeMap.size()];
+    StorageLocationReport[] reports = new StorageLocationReport[volumeMap
+        .size() + failedVolumeMap.size()];
     int counter = 0;
+    HddsVolume hddsVolume;
     for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
-      HddsVolume hddsVolume = entry.getValue();
+      hddsVolume = entry.getValue();
       VolumeInfo volumeInfo = hddsVolume.getVolumeInfo();
       long scmUsed = 0;
       long remaining = 0;
@@ -370,6 +375,17 @@ public class VolumeSet {
       StorageLocationReport r = builder.build();
       reports[counter++] = r;
     }
+    for (Map.Entry<String, HddsVolume> entry : failedVolumeMap.entrySet()) {
+      hddsVolume = entry.getValue();
+      StorageLocationReport.Builder builder = StorageLocationReport
+          .newBuilder();
+      builder.setStorageLocation(hddsVolume.getHddsRootDir()
+          .getAbsolutePath()).setId(hddsVolume.getStorageID()).setFailed(true)
+          .setCapacity(0).setRemaining(0).setScmUsed(0).setStorageType(
+              hddsVolume.getStorageType());
+      StorageLocationReport r = builder.build();
+      reports[counter++] = r;
+    }
     NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
     for (int i = 0; i < reports.length; i++) {
       nrb.addStorageReport(reports[i].getProtoBufMessage());
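
As a hedged sketch, the failed-volume loop above contributes an entry
equivalent to the following for each failed volume (the hddsVolume variable
stands for an entry from failedVolumeMap; the builder calls mirror the patch):

    // Failed volumes report failed=true with zeroed usage figures; only the
    // absolute path, the storage ID, and the (DEFAULT) storage type carry
    // meaningful data.
    StorageLocationReport failedReport = StorageLocationReport.newBuilder()
        .setStorageLocation(hddsVolume.getHddsRootDir().getAbsolutePath())
        .setId(hddsVolume.getStorageID())
        .setFailed(true)
        .setCapacity(0)
        .setRemaining(0)
        .setScmUsed(0)
        .setStorageType(hddsVolume.getStorageType())
        .build();

With this change the node report in the heartbeat always carries
volumeMap.size() + failedVolumeMap.size() storage reports, so SCM sees failed
disks instead of having them silently drop out of the report.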

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16f9aee5/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 3ee9343..fca68b1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -27,8 +27,10 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
     .HDDS_VOLUME_DIR;
 import static org.junit.Assert.assertEquals;
@@ -82,14 +84,16 @@ public class TestVolumeSet {
   @After
   public void shutdown() throws IOException {
     // Delete the hdds volume root dir
-    List<HddsVolume> volumes = new ArrayList<>();
-    volumes.addAll(volumeSet.getVolumesList());
-    volumes.addAll(volumeSet.getFailedVolumesList());
+    List<HddsVolume> hddsVolumes = new ArrayList<>();
+    hddsVolumes.addAll(volumeSet.getVolumesList());
+    hddsVolumes.addAll(volumeSet.getFailedVolumesList());
 
-    for (HddsVolume volume : volumes) {
+    for (HddsVolume volume : hddsVolumes) {
       FileUtils.deleteDirectory(volume.getHddsRootDir());
     }
     volumeSet.shutdown();
+
+    FileUtil.fullyDelete(new File(baseDir));
   }
 
   private boolean checkVolumeExistsInVolumeSet(String volume) {
@@ -222,6 +226,29 @@ public class TestVolumeSet {
         // Do Nothing. Exception is expected.
       }
     }
+  }
+
+  @Test
+  public void testFailVolumes() throws Exception {
+    VolumeSet volSet = null;
+    File readOnlyVolumePath = new File(baseDir);
+    // Set the directory read-only so that this volume fails to initialize.
+    readOnlyVolumePath.setReadOnly();
+    File volumePath = GenericTestUtils.getRandomizedTestDir();
+    OzoneConfiguration ozoneConfig = new OzoneConfiguration();
+    ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath()
+        + "," + volumePath.getAbsolutePath());
+    volSet = new VolumeSet(UUID.randomUUID().toString(), ozoneConfig);
+    assertEquals(1, volSet.getFailedVolumesList().size());
+    assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0)
+        .getHddsRootDir());
+
+    // Set the read-only directory back to writable for cleanup.
+    try {
+      readOnlyVolumePath.setWritable(true);
+    } finally {
+      FileUtil.fullyDelete(volumePath);
+    }
 
   }
 }
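
For context, a minimal sketch of the failure path this test relies on (paths
are hypothetical; it assumes a POSIX filesystem where a read-only parent
directory blocks child creation):

    // Making the volume root read-only prevents HddsVolume.initialize() from
    // creating the <root>/hdds subdirectory, so the builder throws
    // IOException.
    File volumeRoot = new File("/tmp/badvol");
    volumeRoot.mkdirs();
    volumeRoot.setReadOnly();

    // VolumeSet.initializeVolumeSet() catches that IOException, rebuilds the
    // volume with failedVolume(true), and records it in failedVolumeMap; the
    // next getNodeReport() then surfaces it to SCM as a failed storage
    // report.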

