You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2017/06/23 21:38:50 UTC

hadoop git commit: HDFS-12016. Ozone: SCM: Container metadata are not loaded properly after datanode restart. Contributed by Xiaoyu Yao.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 c395bc8fa -> 29c942b22


HDFS-12016. Ozone: SCM: Container metadata are not loaded properly after datanode restart. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29c942b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29c942b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29c942b2

Branch: refs/heads/HDFS-7240
Commit: 29c942b225a33063f2ad815a91c406a63d38b397
Parents: c395bc8
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jun 23 14:32:05 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jun 23 14:32:05 2017 -0700

----------------------------------------------------------------------
 .../impl/ContainerLocationManagerImpl.java      |  3 +-
 .../common/impl/ContainerManagerImpl.java       |  2 +
 .../container/ozoneimpl/OzoneContainer.java     |  5 ++-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  2 +
 .../common/impl/TestContainerPersistence.java   | 24 +++++++++++-
 .../hadoop/ozone/web/client/TestKeys.java       | 39 ++++++++++++++++++++
 6 files changed, 71 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29c942b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
index 33a3814..9a6ef2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
@@ -81,8 +81,7 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager {
       throws IOException {
     Preconditions.checkState(metadataLocations.size() > 0);
     int index = currentIndex % metadataLocations.size();
-    Path path = Paths.get(metadataLocations.get(index).getNormalizedUri());
-    return path.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
+    return Paths.get(metadataLocations.get(index).getNormalizedUri());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29c942b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 80279c6..48d218a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -128,6 +128,7 @@ public class ContainerManagerImpl implements ContainerManager {
     readLock();
     try {
       for (StorageLocation path : containerDirs) {
+        LOG.info("Loading containers under {}", path);
         File directory = Paths.get(path.getNormalizedUri()).toFile();
         // TODO: This will fail if any directory is invalid.
         // We should fix this to handle invalid directories and continue.
@@ -141,6 +142,7 @@ public class ContainerManagerImpl implements ContainerManager {
         File[] files = directory.listFiles(new ContainerFilter());
         if (files != null) {
           for (File containerFile : files) {
+            LOG.debug("Loading container {}", containerFile);
             String containerPath =
                 ContainerUtils.getContainerNameFromFile(containerFile);
             Preconditions.checkNotNull(containerPath, "Container path cannot" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29c942b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index ec9c27f..f7caf5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -39,10 +39,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.nio.file.Paths;
 import java.util.LinkedList;
 import java.util.List;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
 
 /**
  * Ozone main class sets up the network server and initializes the container
@@ -73,7 +75,8 @@ public class OzoneContainer {
         OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
     if (paths != null && paths.length > 0) {
       for (String p : paths) {
-        locations.add(StorageLocation.parse(p));
+        locations.add(StorageLocation.parse(
+            Paths.get(p).resolve(CONTAINER_ROOT_PREFIX).toString()));
       }
     } else {
       getDataDir(locations);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29c942b2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 38692e3..c7dbc49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -413,6 +413,8 @@ public final class MiniOzoneCluster extends MiniDFSCluster
       // and create SCM under that directory.
       Path scmPath = Paths.get(path, runID.toString(), "cont-meta");
       Files.createDirectories(scmPath);
+      Path containerPath = scmPath.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
+      Files.createDirectories(containerPath);
       conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, scmPath
           .toString());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29c942b2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 3cec5d4..35e358a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -43,6 +43,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -60,6 +62,8 @@ import java.util.Map;
 import java.util.ArrayList;
 import java.util.UUID;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper
     .createSingleNodePipeline;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
@@ -81,6 +85,8 @@ public class TestContainerPersistence {
   @Rule
   public Timeout testTimeout = new Timeout(300000);
 
+  private static Logger LOG =
+      LoggerFactory.getLogger(TestContainerPersistence.class);
   private static String path;
   private static ContainerManagerImpl containerManager;
   private static ChunkManagerImpl chunkManager;
@@ -121,15 +127,31 @@ public class TestContainerPersistence {
     if (!new File(path).exists() && !new File(path).mkdirs()) {
       throw new IOException("Unable to create paths. " + path);
     }
+    StorageLocation loc = StorageLocation.parse(
+        Paths.get(path).resolve(CONTAINER_ROOT_PREFIX).toString());
+
     pathLists.clear();
     containerManager.getContainerMap().clear();
-    pathLists.add(StorageLocation.parse(path.toString()));
+
+    if (!new File(loc.getNormalizedUri()).mkdirs()) {
+      throw new IOException("unable to create paths. " +
+          loc.getNormalizedUri());
+    }
+    pathLists.add(loc);
     containerManager.init(conf, pathLists);
   }
 
   @After
   public void cleanupDir() throws IOException {
+    // Clean up SCM metadata
+    LOG.info("Deleting {}", path);
     FileUtils.deleteDirectory(new File(path));
+
+    // Clean up SCM datanode container metadata/data
+    for (String dir : conf.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
+      StorageLocation location = StorageLocation.parse(dir);
+      FileUtils.deleteDirectory(new File(location.getNormalizedUri()));
+    }
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29c942b2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 958e59a..53d6de7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -41,6 +41,7 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.List;
@@ -171,6 +172,44 @@ public class TestKeys {
     assertNotNull(helper.getFile());
   }
 
+  private void restartDatanode(int datanodeIdx)
+      throws IOException, OzoneException, URISyntaxException {
+    cluster.restartDataNode(datanodeIdx);
+    // refresh the datanode endpoint uri after datanode restart
+    DataNode dataNode = cluster.getDataNodes().get(datanodeIdx);
+    final int port = dataNode.getInfoPort();
+    client.setEndPoint(String.format("http://localhost:%d", port));
+  }
+
+  @Test
+  public void testPutAndGetKeyWithDnRestart()
+      throws OzoneException, IOException, URISyntaxException {
+
+    PutHelper helper  = new PutHelper();
+    String keyName = helper.putKey();
+    assertNotNull(helper.getBucket());
+    assertNotNull(helper.getFile());
+
+    // restart the datanode
+    restartDatanode(0);
+
+    // verify getKey after the datanode restart
+    String newFileName =  path + "/" +OzoneUtils.getRequestID().toLowerCase();
+    Path newPath = Paths.get(newFileName);
+
+    helper.getBucket().getKey(keyName, newPath);
+
+    FileInputStream original = new FileInputStream(helper.getFile());
+    FileInputStream downloaded = new FileInputStream(newPath.toFile());
+
+
+    String originalHash = DigestUtils.sha256Hex(original);
+    String downloadedHash = DigestUtils.sha256Hex(downloaded);
+
+    assertEquals(
+        "Sha256 does not match between original file and downloaded file.",
+        originalHash, downloadedHash);
+  }
 
   @Test
   public void testPutAndGetKey() throws OzoneException, IOException {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org