Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/05/15 23:58:09 UTC

[02/50] [abbrv] hadoop git commit: HDDS-34. Remove .meta file during creation of container. Contributed by Bharat Viswanadham.

HDDS-34. Remove .meta file during creation of container
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30293f60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30293f60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30293f60

Branch: refs/heads/HDDS-4
Commit: 30293f6065c9e5b41c07cd670c7a6a1768d1434b
Parents: db1ab0f
Author: Anu Engineer <ae...@apache.org>
Authored: Thu May 10 17:08:26 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu May 10 17:08:26 2018 -0700

----------------------------------------------------------------------
 .../main/proto/DatanodeContainerProtocol.proto  |  5 ---
 .../container/common/helpers/ContainerData.java | 27 -------------
 .../common/helpers/ContainerUtils.java          | 35 ++--------------
 .../common/impl/ContainerManagerImpl.java       | 42 ++------------------
 .../scm/cli/container/InfoContainerHandler.java |  3 --
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |  2 +-
 6 files changed, 9 insertions(+), 105 deletions(-)
----------------------------------------------------------------------
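
Net effect: a container is represented on disk by the .container file alone; no companion .meta file is created, hashed, or verified. As orientation, here is a minimal Java sketch of the create path after this commit, reconstructed from the ContainerManagerImpl and ContainerUtils hunks below. The method name writeContainerSketch is invented for illustration, stream cleanup and error handling are elided, and the Ozone types are the ones touched by the diff.

// A minimal sketch, not the literal ContainerManagerImpl code.
// (needs java.io.File/FileOutputStream/IOException, java.nio.file.Path,
//  java.security.DigestOutputStream/MessageDigest/NoSuchAlgorithmException)
private void writeContainerSketch(ContainerData containerData, Path location,
    boolean overwrite) throws IOException, NoSuchAlgorithmException {
  File containerFile = ContainerUtils.getContainerFile(containerData, location);
  if (!overwrite) {
    // New single-argument form; there is no metadata file left to check.
    ContainerUtils.verifyIsNewContainer(containerFile);
  }
  try (FileOutputStream containerStream = new FileOutputStream(containerFile);
       DigestOutputStream dos = new DigestOutputStream(containerStream,
           MessageDigest.getInstance(OzoneConsts.FILE_HASH))) {
    // Only the ContainerData protobuf is written. The companion ContainerMeta
    // message (file name + SHA-256 hash) no longer exists, so nothing else
    // goes to disk.
    containerData.getProtoBufMessage().writeDelimitedTo(dos);
  }
}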


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 172b660..80bc22d 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -221,17 +221,12 @@ message ContainerData {
   repeated KeyValue metadata = 2;
   optional string dbPath = 3;
   optional string containerPath = 4;
-  optional string hash = 5;
   optional int64 bytesUsed = 6;
   optional int64 size = 7;
   optional int64 keyCount = 8;
   optional ContainerLifeCycleState state = 9 [default = OPEN];
 }
 
-message ContainerMeta {
-  required string fileName = 1;
-  required string hash = 2;
-}
 
 // Container Messages.
 message  CreateContainerRequestProto {
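
For Java callers of the generated classes, dropping field 5 and the ContainerMeta message removes the corresponding protobuf accessors. A small hedged illustration; only setters for fields visible in this hunk are shown, and the path strings are placeholder values:

// After regenerating from the updated .proto:
ContainerProtos.ContainerData.Builder builder =
    ContainerProtos.ContainerData.newBuilder();
builder.setDbPath("/path/to/db");               // still present (field 3)
builder.setContainerPath("/path/to/container"); // still present (field 4)
// builder.setHash(...) and the hasHash()/getHash() accessors no longer
// exist, and neither does ContainerProtos.ContainerMeta; tag 5 is simply
// left unused.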

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 799cca3..947dc7d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -18,14 +18,12 @@
 
 package org.apache.hadoop.ozone.container.common.helpers;
 
-import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
     .ContainerLifeCycleState;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.util.Time;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -45,7 +43,6 @@ public class ContainerData {
   private String dbPath;  // Path to Level DB Store.
   // Path to Physical file system where container and checksum are stored.
   private String containerFilePath;
-  private String hash;
   private AtomicLong bytesUsed;
   private long maxSize;
   private long containerID;
@@ -95,10 +92,6 @@ public class ContainerData {
       data.setState(protoData.getState());
     }
 
-    if(protoData.hasHash()) {
-      data.setHash(protoData.getHash());
-    }
-
     if (protoData.hasBytesUsed()) {
       data.setBytesUsed(protoData.getBytesUsed());
     }
@@ -123,10 +116,6 @@ public class ContainerData {
       builder.setDbPath(this.getDBPath());
     }
 
-    if (this.getHash() != null) {
-      builder.setHash(this.getHash());
-    }
-
     if (this.getContainerPath() != null) {
       builder.setContainerPath(this.getContainerPath());
     }
@@ -274,22 +263,6 @@ public class ContainerData {
     // TODO: closed or closing here
     setState(ContainerLifeCycleState.CLOSED);
 
-    // Some thing brain dead for now. name + Time stamp of when we get the close
-    // container message.
-    setHash(DigestUtils.sha256Hex(this.getContainerID() +
-        Long.toString(Time.monotonicNow())));
-  }
-
-  /**
-   * Final hash for this container.
-   * @return - Hash
-   */
-  public String getHash() {
-    return hash;
-  }
-
-  public void setHash(String hash) {
-    this.hash = hash;
   }
 
   public void setMaxSize(long maxSize) {
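
With the hash and its accessors gone, closing a container in ContainerData reduces to a state flip. A sketch of what remains; the enclosing method's name and modifiers are outside the hunk, so they are assumed here:

// Presumed shape of the close path after this commit; the SHA-256 "final
// hash" (containerID + monotonic timestamp) and getHash()/setHash() are gone.
public void closeContainer() {  // signature assumed, not shown in the hunk
  // TODO: closed or closing here
  setState(ContainerLifeCycleState.CLOSED);
}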

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index e244354..959d88c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -47,7 +47,7 @@ import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
 import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
     .UNABLE_TO_FIND_DATA_DIR;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
+
 
 /**
  * A set of helper functions to create proper responses.
@@ -194,10 +194,9 @@ public final class ContainerUtils {
   * Verifies that this is indeed a new container.
    *
    * @param containerFile - Container File to verify
-   * @param metadataFile - metadata File to verify
    * @throws IOException
    */
-  public static void verifyIsNewContainer(File containerFile, File metadataFile)
+  public static void verifyIsNewContainer(File containerFile)
       throws IOException {
     Logger log = LoggerFactory.getLogger(ContainerManagerImpl.class);
     if (containerFile.exists()) {
@@ -207,13 +206,6 @@ public final class ContainerUtils {
           "disk.");
     }
 
-    if (metadataFile.exists()) {
-      log.error("metadata found on disk, but missing container. Refusing to" +
-          " write this container. File: {} ", metadataFile.toPath());
-      throw new FileAlreadyExistsException(("metadata found on disk, but " +
-          "missing container. Refusing to write this container."));
-    }
-
     File parentPath = new File(containerFile.getParent());
 
     if (!parentPath.exists() && !parentPath.mkdirs()) {
@@ -228,11 +220,6 @@ public final class ContainerUtils {
       throw new IOException("creation of a new container file failed.");
     }
 
-    if (!metadataFile.createNewFile()) {
-      log.error("creation of the metadata file failed. File: {}",
-          metadataFile.toPath());
-      throw new IOException("creation of a new container file failed.");
-    }
   }
 
   public static String getContainerDbFileName(String containerName) {
@@ -287,20 +274,6 @@ public final class ContainerUtils {
   }
 
   /**
-   * Returns Metadata location.
-   *
-   * @param containerData - Data
-   * @param location - Path
-   * @return Path
-   */
-  public static File getMetadataFile(ContainerData containerData,
-      Path location) {
-    return location.resolve(Long.toString(containerData
-        .getContainerID()).concat(CONTAINER_META))
-        .toFile();
-  }
-
-  /**
    * Returns container file location.
    *
    * @param containerData - Data
@@ -395,10 +368,10 @@ public final class ContainerUtils {
     String rootPath = getContainerNameFromFile(new File(containerData
         .getContainerPath()));
     Path containerPath = Paths.get(rootPath.concat(CONTAINER_EXTENSION));
-    Path metaPath = Paths.get(rootPath.concat(CONTAINER_META));
+
 
     FileUtils.forceDelete(containerPath.toFile());
-    FileUtils.forceDelete(metaPath.toFile());
+
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 1893b3b..cb60334 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.common.impl;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -101,7 +100,6 @@ import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
 import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
     .UNSUPPORTED_REQUEST;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
 
 /**
  * A Generic ContainerManagerImpl that will be called from Ozone
@@ -233,18 +231,11 @@ public class ContainerManagerImpl implements ContainerManager {
     long containerID = Long.parseLong(keyName);
     try {
       String containerFileName = containerName.concat(CONTAINER_EXTENSION);
-      String metaFileName = containerName.concat(CONTAINER_META);
 
       containerStream = new FileInputStream(containerFileName);
 
-      metaStream = new FileInputStream(metaFileName);
-
-      MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-
-      dis = new DigestInputStream(containerStream, sha);
-
       ContainerProtos.ContainerData containerDataProto =
-          ContainerProtos.ContainerData.parseDelimitedFrom(dis);
+          ContainerProtos.ContainerData.parseDelimitedFrom(containerStream);
       ContainerData containerData;
       if (containerDataProto == null) {
         // Sometimes container metadata might have been created but empty,
@@ -255,19 +246,6 @@ public class ContainerManagerImpl implements ContainerManager {
         return;
       }
       containerData = ContainerData.getFromProtBuf(containerDataProto, conf);
-      ContainerProtos.ContainerMeta meta =
-          ContainerProtos.ContainerMeta.parseDelimitedFrom(metaStream);
-      if (meta != null && !DigestUtils.sha256Hex(sha.digest())
-          .equals(meta.getHash())) {
-        // This means we were not able read data from the disk when booted the
-        // datanode. We are going to rely on SCM understanding that we don't
-        // have valid data for this container when we send container reports.
-        // Hopefully SCM will ask us to delete this container and rebuild it.
-        LOG.error("Invalid SHA found for container data. Name :{}"
-            + "cowardly refusing to read invalid data", containerName);
-        containerMap.put(containerID, new ContainerStatus(null));
-        return;
-      }
 
       ContainerStatus containerStatus = new ContainerStatus(containerData);
       // Initialize pending deletion blocks count in in-memory
@@ -298,7 +276,7 @@ public class ContainerManagerImpl implements ContainerManager {
       containerStatus.setBytesUsed(bytesUsed);
 
       containerMap.put(containerID, containerStatus);
-    } catch (IOException | NoSuchAlgorithmException ex) {
+    } catch (IOException ex) {
       LOG.error("read failed for file: {} ex: {}", containerName,
           ex.getMessage());
 
@@ -398,12 +376,10 @@ public class ContainerManagerImpl implements ContainerManager {
 
       File containerFile = ContainerUtils.getContainerFile(containerData,
           location);
-      File metadataFile = ContainerUtils.getMetadataFile(containerData,
-          location);
       String containerName = Long.toString(containerData.getContainerID());
 
       if(!overwrite) {
-        ContainerUtils.verifyIsNewContainer(containerFile, metadataFile);
+        ContainerUtils.verifyIsNewContainer(containerFile);
         metadataPath = this.locationManager.getDataPath(containerName);
         metadataPath = ContainerUtils.createMetadata(metadataPath,
             containerName, conf);
@@ -412,7 +388,7 @@ public class ContainerManagerImpl implements ContainerManager {
       }
 
       containerStream = new FileOutputStream(containerFile);
-      metaStream = new FileOutputStream(metadataFile);
+
       MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
 
       dos = new DigestOutputStream(containerStream, sha);
@@ -425,13 +401,6 @@ public class ContainerManagerImpl implements ContainerManager {
           .getProtoBufMessage();
       protoData.writeDelimitedTo(dos);
 
-      ContainerProtos.ContainerMeta protoMeta = ContainerProtos
-          .ContainerMeta.newBuilder()
-          .setFileName(containerFile.toString())
-          .setHash(DigestUtils.sha256Hex(sha.digest()))
-          .build();
-      protoMeta.writeDelimitedTo(metaStream);
-
     } catch (IOException ex) {
       // TODO : we need to clean up partially constructed files
       // The proper way to do would be for a thread
@@ -913,9 +882,6 @@ public class ContainerManagerImpl implements ContainerManager {
           .setWriteBytes(container.getWriteBytes())
           .setContainerID(container.getContainer().getContainerID());
 
-      if (container.getContainer().getHash() != null) {
-        ciBuilder.setFinalhash(container.getContainer().getHash());
-      }
       crBuilder.addReports(ciBuilder.build());
     }
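
On the read side, the datanode now takes the .container file at face value instead of cross-checking it against the hash stored in the old .meta file. A rough sketch of the load path after this commit; names follow the hunks above, while the surrounding bookkeeping (pending-delete counts, bytes used) is unchanged and omitted:

// Sketch only: loading a container without the .meta verification step.
String containerFileName = containerName.concat(CONTAINER_EXTENSION);
try (FileInputStream containerStream = new FileInputStream(containerFileName)) {
  // The protobuf is parsed straight from the .container file; there is no
  // DigestInputStream and no stored ContainerMeta hash to compare against.
  ContainerProtos.ContainerData containerDataProto =
      ContainerProtos.ContainerData.parseDelimitedFrom(containerStream);
  if (containerDataProto == null) {
    // Empty container file: handled as in the original code (elided above).
    return;
  }
  ContainerData containerData =
      ContainerData.getFromProtBuf(containerDataProto, conf);
  containerMap.put(containerID, new ContainerStatus(containerData));
} catch (IOException ex) {
  // NoSuchAlgorithmException can no longer be thrown here, so only
  // IOException is caught.
  LOG.error("read failed for file: {} ex: {}", containerName, ex.getMessage());
}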
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
index 843d9db..cefa28c 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -81,9 +81,6 @@ public class InfoContainerHandler extends OzoneCommandHandler {
         containerData.getState() == ContainerLifeCycleState.OPEN ? "OPEN" :
             "CLOSED";
     logOut("Container State: %s", openStatus);
-    if (!containerData.getHash().isEmpty()) {
-      logOut("Container Hash: %s", containerData.getHash());
-    }
     logOut("Container DB Path: %s", containerData.getDbPath());
     logOut("Container Path: %s", containerData.getContainerPath());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index 2d8577c..19bc423 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -332,7 +332,7 @@ public class TestSCMCli {
     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
     expected = String
         .format(formatStrWithHash, container.getContainerID(), openStatus,
-            data.getHash(), data.getDBPath(), data.getContainerPath(), "",
+            data.getDBPath(), data.getContainerPath(), "",
             datanodeDetails.getHostName(), datanodeDetails.getHostName());
     assertEquals(expected, out.toString());
   }

