You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by bo...@apache.org on 2018/07/14 00:44:56 UTC
[02/50] [abbrv] hadoop git commit: HDDS-213. Single lock to
synchronize KeyValueContainer#update.
HDDS-213. Single lock to synchronize KeyValueContainer#update.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44e19fc7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44e19fc7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44e19fc7
Branch: refs/heads/YARN-7402
Commit: 44e19fc7f70b5c19f2b626fe247aea5d51ada51c
Parents: cb9574a
Author: Hanisha Koneru <ha...@apache.org>
Authored: Mon Jul 9 09:33:09 2018 -0700
Committer: Hanisha Koneru <ha...@apache.org>
Committed: Mon Jul 9 09:33:09 2018 -0700
----------------------------------------------------------------------
.../container/common/impl/ContainerData.java | 28 +++--
.../common/impl/ContainerDataYaml.java | 10 +-
.../container/keyvalue/KeyValueContainer.java | 124 +++++++------------
.../container/ozoneimpl/ContainerReader.java | 37 +++---
4 files changed, 87 insertions(+), 112 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/44e19fc7/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 0d217e4..54b186b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -182,12 +182,14 @@ public class ContainerData {
}
/**
- * Adds metadata.
+ * Add/Update metadata.
+ * We should hold the container lock before updating the metadata as this
+ * will be persisted on disk. Unless we are reconstructing ContainerData
+ * from protobuf or from the on-disk .container file, in which case the
+ * lock is not required.
*/
- public void addMetadata(String key, String value) throws IOException {
- synchronized (this.metadata) {
- metadata.put(key, value);
- }
+ public void addMetadata(String key, String value) {
+ metadata.put(key, value);
}
/**
@@ -195,9 +197,19 @@ public class ContainerData {
* @return metadata
*/
public Map<String, String> getMetadata() {
- synchronized (this.metadata) {
- return Collections.unmodifiableMap(this.metadata);
- }
+ return Collections.unmodifiableMap(this.metadata);
+ }
+
+ /**
+ * Set metadata.
+ * We should hold the container lock before updating the metadata as this
+ * will be persisted on disk. Unless we are reconstructing ContainerData
+ * from protobuf or from the on-disk .container file, in which case the
+ * lock is not required.
+ */
+ public void setMetadata(Map<String, String> metadataMap) {
+ metadata.clear();
+ metadata.putAll(metadataMap);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/44e19fc7/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index 70d1615..90af24f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -200,15 +200,7 @@ public final class ContainerDataYaml {
OzoneConsts.METADATA_PATH));
kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH));
Map<String, String> meta = (Map) nodes.get(OzoneConsts.METADATA);
- meta.forEach((key, val) -> {
- try {
- kvData.addMetadata(key, val);
- } catch (IOException e) {
- throw new IllegalStateException("Unexpected " +
- "Key Value Pair " + "(" + key + "," + val +")in the metadata " +
- "for containerId " + (long) nodes.get("containerId"));
- }
- });
+ kvData.setMetadata(meta);
String state = (String) nodes.get(OzoneConsts.STATE);
switch (state) {
case "OPEN":
http://git-wip-us.apache.org/repos/asf/hadoop/blob/44e19fc7/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index b07b053..155a988 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -19,6 +19,8 @@
package org.apache.hadoop.ozone.container.keyvalue;
import com.google.common.base.Preconditions;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileUtil;
@@ -32,7 +34,6 @@ import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
@@ -59,8 +60,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Result.CONTAINER_ALREADY_EXISTS;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.CONTAINER_METADATA_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Result.CONTAINER_INTERNAL_ERROR;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Result.CONTAINER_FILES_CREATE_ERROR;
@@ -146,7 +145,7 @@ public class KeyValueContainer implements Container {
containerData.setVolume(containerVolume);
// Create .container file and .chksm file
- createContainerFile(containerFile, containerCheckSumFile);
+ writeToContainerFile(containerFile, containerCheckSumFile, true);
} catch (StorageContainerException ex) {
@@ -177,36 +176,50 @@ public class KeyValueContainer implements Container {
* Creates .container file and checksum file.
*
* @param containerFile
- * @param containerCheckSumFile
+ * @param checksumFile
+ * @param isCreate true if we are creating a new container file and false if
+ * we are updating an existing container file.
* @throws StorageContainerException
*/
- private void createContainerFile(File containerFile, File
- containerCheckSumFile) throws StorageContainerException {
+ private void writeToContainerFile(File containerFile, File
+ checksumFile, boolean isCreate)
+ throws StorageContainerException {
File tempContainerFile = null;
- File tempCheckSumFile = null;
+ File tempChecksumFile = null;
FileOutputStream containerCheckSumStream = null;
Writer writer = null;
long containerId = containerData.getContainerID();
try {
tempContainerFile = createTempFile(containerFile);
- tempCheckSumFile = createTempFile(containerCheckSumFile);
+ tempChecksumFile = createTempFile(checksumFile);
ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
.KeyValueContainer, tempContainerFile, containerData);
//Compute Checksum for container file
String checksum = KeyValueContainerUtil.computeCheckSum(containerId,
tempContainerFile);
- containerCheckSumStream = new FileOutputStream(tempCheckSumFile);
+ containerCheckSumStream = new FileOutputStream(tempChecksumFile);
writer = new OutputStreamWriter(containerCheckSumStream, "UTF-8");
writer.write(checksum);
writer.flush();
- NativeIO.renameTo(tempContainerFile, containerFile);
- NativeIO.renameTo(tempCheckSumFile, containerCheckSumFile);
+ if (isCreate) {
+ // When creating a new container, .container file should not exist
+ // already.
+ NativeIO.renameTo(tempContainerFile, containerFile);
+ NativeIO.renameTo(tempChecksumFile, checksumFile);
+ } else {
+ // When updating a container, the .container file should exist. If
+ // not, the container is in an inconsistent state.
+ Files.move(tempContainerFile.toPath(), containerFile.toPath(),
+ StandardCopyOption.REPLACE_EXISTING);
+ Files.move(tempChecksumFile.toPath(), checksumFile.toPath(),
+ StandardCopyOption.REPLACE_EXISTING);
+ }
} catch (IOException ex) {
throw new StorageContainerException("Error during creation of " +
- "required files(.container, .chksm) for container. Container Name: "
+ "required files(.container, .chksm) for container. ContainerID: "
+ containerId, ex, CONTAINER_FILES_CREATE_ERROR);
} finally {
IOUtils.closeStream(containerCheckSumStream);
@@ -216,8 +229,8 @@ public class KeyValueContainer implements Container {
tempContainerFile.getAbsolutePath());
}
}
- if (tempCheckSumFile != null && tempCheckSumFile.exists()) {
- if (!tempCheckSumFile.delete()) {
+ if (tempChecksumFile != null && tempChecksumFile.exists()) {
+ if (!tempChecksumFile.delete()) {
LOG.warn("Unable to delete container temporary checksum file: {}.",
tempContainerFile.getAbsolutePath());
}
@@ -236,68 +249,24 @@ public class KeyValueContainer implements Container {
private void updateContainerFile(File containerFile, File
- containerCheckSumFile) throws StorageContainerException {
+ checksumFile) throws StorageContainerException {
- File containerBkpFile = null;
- File checkSumBkpFile = null;
long containerId = containerData.getContainerID();
- try {
- if (containerFile.exists() && containerCheckSumFile.exists()) {
- //Take backup of original files (.container and .chksm files)
- containerBkpFile = new File(containerFile + ".bkp");
- checkSumBkpFile = new File(containerCheckSumFile + ".bkp");
- NativeIO.renameTo(containerFile, containerBkpFile);
- NativeIO.renameTo(containerCheckSumFile, checkSumBkpFile);
- createContainerFile(containerFile, containerCheckSumFile);
- } else {
- containerData.setState(ContainerProtos.ContainerLifeCycleState.INVALID);
- throw new StorageContainerException("Container is an Inconsistent " +
- "state, missing required files(.container, .chksm). ContainerID: " +
- containerId, INVALID_CONTAINER_STATE);
- }
- } catch (StorageContainerException ex) {
- throw ex;
- } catch (IOException ex) {
- // Restore from back up files.
+ if (containerFile.exists() && checksumFile.exists()) {
try {
- if (containerBkpFile != null && containerBkpFile
- .exists() && containerFile.delete()) {
- LOG.info("update failed for container Name: {}, restoring container" +
- " file", containerId);
- NativeIO.renameTo(containerBkpFile, containerFile);
- }
- if (checkSumBkpFile != null && checkSumBkpFile.exists() &&
- containerCheckSumFile.delete()) {
- LOG.info("update failed for container Name: {}, restoring checksum" +
- " file", containerId);
- NativeIO.renameTo(checkSumBkpFile, containerCheckSumFile);
- }
- throw new StorageContainerException("Error during updating of " +
- "required files(.container, .chksm) for container. Container Name: "
- + containerId, ex, CONTAINER_FILES_CREATE_ERROR);
+ writeToContainerFile(containerFile, checksumFile, false);
} catch (IOException e) {
- containerData.setState(ContainerProtos.ContainerLifeCycleState.INVALID);
- LOG.error("During restore failed for container Name: " +
- containerId);
- throw new StorageContainerException(
- "Failed to restore container data from the backup. ID: "
- + containerId, CONTAINER_FILES_CREATE_ERROR);
- }
- } finally {
- if (containerBkpFile != null && containerBkpFile
- .exists()) {
- if(!containerBkpFile.delete()) {
- LOG.warn("Unable to delete container backup file: {}",
- containerBkpFile);
- }
- }
- if (checkSumBkpFile != null && checkSumBkpFile.exists()) {
- if(!checkSumBkpFile.delete()) {
- LOG.warn("Unable to delete container checksum backup file: {}",
- checkSumBkpFile);
- }
+ // TODO: Container update failure is not handled currently. It might
+ // lead to loss of the .container file. When the update-container
+ // feature is added, this failure should also be handled.
+ throw new StorageContainerException("Container update failed. " +
+ "ContainerID: " + containerId, CONTAINER_FILES_CREATE_ERROR);
}
+ } else {
+ throw new StorageContainerException("Container is an Inconsistent " +
+ "state, missing required files(.container, .chksm). ContainerID: " +
+ containerId, INVALID_CONTAINER_STATE);
}
}
@@ -393,22 +362,21 @@ public class KeyValueContainer implements Container {
"Updating a closed container without force option is not allowed. " +
"ContainerID: " + containerId, UNSUPPORTED_REQUEST);
}
+
+ Map<String, String> oldMetadata = containerData.getMetadata();
try {
+ writeLock();
for (Map.Entry<String, String> entry : metadata.entrySet()) {
containerData.addMetadata(entry.getKey(), entry.getValue());
}
- } catch (IOException ex) {
- throw new StorageContainerException("Container Metadata update error" +
- ". Container Name:" + containerId, ex, CONTAINER_METADATA_ERROR);
- }
- try {
- writeLock();
- String containerName = String.valueOf(containerId);
File containerFile = getContainerFile();
File containerCheckSumFile = getContainerCheckSumFile();
// update the new container data to .container File
updateContainerFile(containerFile, containerCheckSumFile);
} catch (StorageContainerException ex) {
+ // TODO:
+ // On error, reset the metadata.
+ containerData.setMetadata(oldMetadata);
throw ex;
} finally {
writeUnlock();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/44e19fc7/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index b90efdc..06e49f0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -109,25 +109,28 @@ public class ContainerReader implements Runnable {
for (File containerTopDir : containerTopDirs) {
if (containerTopDir.isDirectory()) {
File[] containerDirs = containerTopDir.listFiles();
- for (File containerDir : containerDirs) {
- File metadataPath = new File(containerDir + File.separator +
- OzoneConsts.CONTAINER_META_PATH);
- String containerName = containerDir.getName();
- if (metadataPath.exists()) {
- File containerFile = KeyValueContainerLocationUtil
- .getContainerFile(metadataPath, containerName);
- File checksumFile = KeyValueContainerLocationUtil
- .getContainerCheckSumFile(metadataPath, containerName);
- if (containerFile.exists() && checksumFile.exists()) {
- verifyContainerFile(containerName, containerFile,
- checksumFile);
+ if (containerDirs != null) {
+ for (File containerDir : containerDirs) {
+ File metadataPath = new File(containerDir + File.separator +
+ OzoneConsts.CONTAINER_META_PATH);
+ String containerName = containerDir.getName();
+ if (metadataPath.exists()) {
+ File containerFile = KeyValueContainerLocationUtil
+ .getContainerFile(metadataPath, containerName);
+ File checksumFile = KeyValueContainerLocationUtil
+ .getContainerCheckSumFile(metadataPath, containerName);
+ if (containerFile.exists() && checksumFile.exists()) {
+ verifyContainerFile(containerName, containerFile,
+ checksumFile);
+ } else {
+ LOG.error(
+ "Missing container metadata files for Container: " +
+ "{}", containerName);
+ }
} else {
- LOG.error("Missing container metadata files for Container: " +
- "{}", containerName);
+ LOG.error("Missing container metadata directory for " +
+ "Container: {}", containerName);
}
- } else {
- LOG.error("Missing container metadata directory for " +
- "Container: {}", containerName);
}
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org