Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2018/10/15 20:43:20 UTC
[1/3] hadoop git commit: HDDS-603. Add BlockCommitSequenceId field
per Container and expose it in Container Reports. Contributed by Shashikant
Banerjee.
Repository: hadoop
Updated Branches:
refs/heads/ozone-0.3 abfff4ccc -> b51bc6b93
HDDS-603. Add BlockCommitSequenceId field per Container and expose it in Container Reports. Contributed by Shashikant Banerjee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00083579
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00083579
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00083579
Branch: refs/heads/ozone-0.3
Commit: 0008357940d4bdb4c9162e15a0967bd287999a97
Parents: abfff4c
Author: Jitendra Pandey <ji...@apache.org>
Authored: Sat Oct 13 12:15:42 2018 -0700
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Mon Oct 15 13:30:39 2018 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/ozone/OzoneConsts.java | 1 +
.../apache/hadoop/utils/MetadataKeyFilters.java | 3 +-
.../common/helpers/ContainerReport.java | 205 -------------------
.../common/helpers/KeyValueContainerReport.java | 117 -----------
.../container/common/interfaces/Container.java | 5 +
.../container/keyvalue/KeyValueContainer.java | 9 +-
.../keyvalue/KeyValueContainerData.java | 16 ++
.../keyvalue/impl/BlockManagerImpl.java | 21 +-
.../container/ozoneimpl/ContainerReader.java | 7 +
.../StorageContainerDatanodeProtocol.proto | 1 +
.../ozone/om/helpers/OmKeyLocationInfo.java | 4 +
.../hadoop/ozone/client/rpc/TestBCSID.java | 147 +++++++++++++
12 files changed, 208 insertions(+), 328 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 923271c..8ccc648 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -113,6 +113,7 @@ public final class OzoneConsts {
public static final String DELETING_KEY_PREFIX = "#deleting#";
public static final String DELETED_KEY_PREFIX = "#deleted#";
public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
+ public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
/**
* OM LevelDB prefixes.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
index a3430f8..04c87ae 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
@@ -42,7 +42,8 @@ public final class MetadataKeyFilters {
new MetadataKeyFilters.KeyPrefixFilter()
.addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
.addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
- .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
+ .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
+ .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
private MetadataKeyFilters() {
}
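Editor's note: the filter change matters because the #BCSID entry lives in the same container metadata DB as the per-block records; without excluding its prefix, normal block iteration (e.g. listBlock) would try to decode the sequence-id bytes as block data. Below is a minimal, self-contained sketch of that prefix-exclusion idea; it is an illustration only, not the actual Ozone KeyPrefixFilter API.

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

/** Illustrative stand-in for a key-prefix exclusion filter over raw DB keys. */
final class PrefixExclusionFilter {
  private final List<String> excludedPrefixes = new ArrayList<>();

  PrefixExclusionFilter exclude(String prefix) {
    excludedPrefixes.add(prefix);
    return this;
  }

  /** Returns true if the key should be visible to normal block iteration. */
  boolean accept(byte[] key) {
    String k = new String(key, StandardCharsets.UTF_8);
    for (String prefix : excludedPrefixes) {
      if (k.startsWith(prefix)) {
        return false; // internal metadata entry, hide it from block listings
      }
    }
    return true;
  }

  public static void main(String[] args) {
    PrefixExclusionFilter filter = new PrefixExclusionFilter()
        .exclude("#deleting#")
        .exclude("#deleted#")
        .exclude("#delTX#")
        .exclude("#BCSID");
    // The BCSID marker is skipped, a plain block key is kept.
    System.out.println(filter.accept("#BCSID".getBytes(StandardCharsets.UTF_8)));  // false
    System.out.println(filter.accept("1234567".getBytes(StandardCharsets.UTF_8))); // true
  }
}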
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
deleted file mode 100644
index a4c1f2f..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;
-
-
-/**
- * Container Report iterates the closed containers and sends a container report
- * to SCM.
- */
-public class ContainerReport {
- private static final int UNKNOWN = -1;
- private final String finalhash;
- private long size;
- private long keyCount;
- private long bytesUsed;
- private long readCount;
- private long writeCount;
- private long readBytes;
- private long writeBytes;
- private long containerID;
-
- public long getContainerID() {
- return containerID;
- }
-
- public void setContainerID(long containerID) {
- this.containerID = containerID;
- }
-
- /**
- * Constructs the ContainerReport.
- *
- * @param containerID - Container ID.
- * @param finalhash - Final Hash.
- */
- public ContainerReport(long containerID, String finalhash) {
- this.containerID = containerID;
- this.finalhash = finalhash;
- this.size = UNKNOWN;
- this.keyCount = UNKNOWN;
- this.bytesUsed = 0L;
- this.readCount = 0L;
- this.readBytes = 0L;
- this.writeCount = 0L;
- this.writeBytes = 0L;
- }
-
- /**
- * Gets a containerReport from protobuf class.
- *
- * @param info - ContainerInfo.
- * @return - ContainerReport.
- */
- public static ContainerReport getFromProtoBuf(ContainerInfo info) {
- Preconditions.checkNotNull(info);
- ContainerReport report = new ContainerReport(info.getContainerID(),
- info.getFinalhash());
- if (info.hasSize()) {
- report.setSize(info.getSize());
- }
- if (info.hasKeyCount()) {
- report.setKeyCount(info.getKeyCount());
- }
- if (info.hasUsed()) {
- report.setBytesUsed(info.getUsed());
- }
- if (info.hasReadCount()) {
- report.setReadCount(info.getReadCount());
- }
- if (info.hasReadBytes()) {
- report.setReadBytes(info.getReadBytes());
- }
- if (info.hasWriteCount()) {
- report.setWriteCount(info.getWriteCount());
- }
- if (info.hasWriteBytes()) {
- report.setWriteBytes(info.getWriteBytes());
- }
-
- report.setContainerID(info.getContainerID());
- return report;
- }
-
- /**
- * Returns the final signature for this container.
- *
- * @return - hash
- */
- public String getFinalhash() {
- return finalhash;
- }
-
- /**
- * Returns a positive number it is a valid number, -1 if not known.
- *
- * @return size or -1
- */
- public long getSize() {
- return size;
- }
-
- /**
- * Sets the size of the container on disk.
- *
- * @param size - int
- */
- public void setSize(long size) {
- this.size = size;
- }
-
- /**
- * Gets number of keys in the container if known.
- *
- * @return - Number of keys or -1 for not known.
- */
- public long getKeyCount() {
- return keyCount;
- }
-
- /**
- * Sets the key count.
- *
- * @param keyCount - Key Count
- */
- public void setKeyCount(long keyCount) {
- this.keyCount = keyCount;
- }
-
- public long getReadCount() {
- return readCount;
- }
-
- public void setReadCount(long readCount) {
- this.readCount = readCount;
- }
-
- public long getWriteCount() {
- return writeCount;
- }
-
- public void setWriteCount(long writeCount) {
- this.writeCount = writeCount;
- }
-
- public long getReadBytes() {
- return readBytes;
- }
-
- public void setReadBytes(long readBytes) {
- this.readBytes = readBytes;
- }
-
- public long getWriteBytes() {
- return writeBytes;
- }
-
- public void setWriteBytes(long writeBytes) {
- this.writeBytes = writeBytes;
- }
-
- public long getBytesUsed() {
- return bytesUsed;
- }
-
- public void setBytesUsed(long bytesUsed) {
- this.bytesUsed = bytesUsed;
- }
-
- /**
- * Gets a containerInfo protobuf message from ContainerReports.
- *
- * @return ContainerInfo
- */
- public ContainerInfo getProtoBufMessage() {
- return ContainerInfo.newBuilder()
- .setKeyCount(this.getKeyCount())
- .setSize(this.getSize())
- .setUsed(this.getBytesUsed())
- .setReadCount(this.getReadCount())
- .setReadBytes(this.getReadBytes())
- .setWriteCount(this.getWriteCount())
- .setWriteBytes(this.getWriteBytes())
- .setFinalhash(this.getFinalhash())
- .setContainerID(this.getContainerID())
- .build();
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyValueContainerReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyValueContainerReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyValueContainerReport.java
deleted file mode 100644
index b03487b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyValueContainerReport.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;
-
-import static java.lang.Math.max;
-
-/**
- * KeyValueContainer Report iterates the closed containers and sends a
- * container report to SCM.
- */
-public class KeyValueContainerReport extends ContainerReport{
- private long deleteTransactionId;
-
- /**
- * Constructs the KeyValueContainerReport.
- *
- * @param containerID - Container ID.
- * @param finalhash - Final Hash.
- */
- public KeyValueContainerReport(long containerID, String finalhash) {
- super(containerID, finalhash);
- this.deleteTransactionId = 0;
- }
-
- /**
- * Sets the deleteTransactionId if it is greater than existing.
- * @param transactionId - deleteTransactionId
- */
- public void updateDeleteTransactionId(long transactionId) {
- this.deleteTransactionId = max(transactionId, deleteTransactionId);
- }
-
- /**
- * Gets the deleteTransactionId.
- * @return - deleteTransactionId.
- */
- public long getDeleteTransactionId() {
- return this.deleteTransactionId;
- }
-
- /**
- * Gets a containerReport from protobuf class.
- *
- * @param info - ContainerInfo.
- * @return - ContainerReport.
- */
- public static KeyValueContainerReport getFromProtoBuf(ContainerInfo info) {
- Preconditions.checkNotNull(info);
- KeyValueContainerReport report = new KeyValueContainerReport(
- info.getContainerID(), info.getFinalhash());
- if (info.hasSize()) {
- report.setSize(info.getSize());
- }
- if (info.hasKeyCount()) {
- report.setKeyCount(info.getKeyCount());
- }
- if (info.hasUsed()) {
- report.setBytesUsed(info.getUsed());
- }
- if (info.hasReadCount()) {
- report.setReadCount(info.getReadCount());
- }
- if (info.hasReadBytes()) {
- report.setReadBytes(info.getReadBytes());
- }
- if (info.hasWriteCount()) {
- report.setWriteCount(info.getWriteCount());
- }
- if (info.hasWriteBytes()) {
- report.setWriteBytes(info.getWriteBytes());
- }
- if (info.hasDeleteTransactionId()) {
- report.updateDeleteTransactionId(info.getDeleteTransactionId());
- }
- report.setContainerID(info.getContainerID());
- return report;
- }
-
- /**
- * Gets a containerInfo protobuf message from ContainerReports.
- *
- * @return ContainerInfo
- */
- @Override
- public ContainerInfo getProtoBufMessage() {
- return ContainerInfo.newBuilder()
- .setKeyCount(this.getKeyCount())
- .setSize(this.getSize())
- .setUsed(this.getBytesUsed())
- .setReadCount(this.getReadCount())
- .setReadBytes(this.getReadBytes())
- .setWriteCount(this.getWriteCount())
- .setWriteBytes(this.getWriteBytes())
- .setFinalhash(this.getFinalhash())
- .setContainerID(this.getContainerID())
- .setDeleteTransactionId(this.getDeleteTransactionId())
- .build();
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index 9380f0c..dbef74c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -132,4 +132,9 @@ public interface Container<CONTAINERDATA extends ContainerData> extends RwLock {
*/
StorageContainerDatanodeProtocolProtos.ContainerInfo getContainerReport()
throws StorageContainerException;
+
+ /**
+ * updates the blockCommitSequenceId.
+ */
+ void updateBlockCommitSequenceId(long blockCommitSequenceId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index e5b344d..9a5c94c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -508,6 +508,12 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
.getContainerID() + OzoneConsts.CONTAINER_EXTENSION);
}
+ @Override
+ public void updateBlockCommitSequenceId(long blockCommitSequenceId) {
+ containerData.updateBlockCommitSequenceId(blockCommitSequenceId);
+ }
+
+
/**
* Returns KeyValueContainerReport for the KeyValueContainer.
*/
@@ -524,7 +530,8 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
.setKeyCount(containerData.getKeyCount())
.setUsed(containerData.getBytesUsed())
.setState(getHddsState())
- .setDeleteTransactionId(containerData.getDeleteTransactionId());
+ .setDeleteTransactionId(containerData.getDeleteTransactionId())
+ .setBlockCommitSequenceId(containerData.getBlockCommitSequenceId());
return ciBuilder.build();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 7ffdbf5..9ea84c2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -72,6 +72,8 @@ public class KeyValueContainerData extends ContainerData {
private long deleteTransactionId;
+ private long blockCommitSequenceId;
+
static {
// Initialize YAML fields
KV_YAML_FIELDS = Lists.newArrayList();
@@ -152,6 +154,20 @@ public class KeyValueContainerData extends ContainerData {
}
/**
+ * Returns the blockCommitSequenceId.
+ */
+ public long getBlockCommitSequenceId() {
+ return blockCommitSequenceId;
+ }
+
+ /**
+ * updates the blockCommitSequenceId.
+ */
+ public void updateBlockCommitSequenceId(long id) {
+ this.blockCommitSequenceId = id;
+ }
+
+ /**
* Get chunks path.
* @return - Path where chunks are stored
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 0dd8739..bd19441 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -25,12 +25,16 @@ import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
+import org.apache.hadoop.utils.BatchOperation;
+import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -83,8 +87,16 @@ public class BlockManagerImpl implements BlockManager {
// This is a post condition that acts as a hint to the user.
// Should never fail.
Preconditions.checkNotNull(db, "DB cannot be null here");
- db.put(Longs.toByteArray(data.getLocalID()), data.getProtoBufMessage()
- .toByteArray());
+
+ long blockCommitSequenceId = data.getBlockCommitSequenceId();
+ // update the blockData as well as BlockCommitSequenceId here
+ BatchOperation batch = new BatchOperation();
+ batch.put(Longs.toByteArray(data.getLocalID()),
+ data.getProtoBufMessage().toByteArray());
+ batch.put(DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX),
+ Longs.toByteArray(blockCommitSequenceId));
+ db.writeBatch(batch);
+ container.updateBlockCommitSequenceId(blockCommitSequenceId);
// Increment keycount here
container.getContainerData().incrKeyCount();
return data.getSize();
@@ -208,8 +220,9 @@ public class BlockManagerImpl implements BlockManager {
MetadataStore db = BlockUtils.getDB(cData, config);
result = new ArrayList<>();
byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
- List<Map.Entry<byte[], byte[]>> range = db.getSequentialRangeKVs(
- startKeyInBytes, count, null);
+ List<Map.Entry<byte[], byte[]>> range =
+ db.getSequentialRangeKVs(startKeyInBytes, count,
+ MetadataKeyFilters.getNormalKeyFilter());
for (Map.Entry<byte[], byte[]> entry : range) {
BlockData value = BlockUtils.getBlockData(entry.getValue());
BlockData data = new BlockData(value.getBlockID());
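Editor's note: the essential property of the putBlock change is that the block record and the container-wide #BCSID marker are written in one batch, so a crash cannot leave the DB holding a block newer than the recorded sequence id; the in-memory value is then refreshed so the next container report carries it. Below is a minimal sketch of that pattern against a hypothetical key-value store interface (the real code uses MetadataStore and BatchOperation).

import com.google.common.primitives.Longs;
import java.nio.charset.StandardCharsets;

/** Hypothetical store abstraction; stands in for MetadataStore + BatchOperation. */
interface AtomicKeyValueStore {
  // Applies all key/value pairs atomically, mirroring db.writeBatch(batch).
  void putAll(byte[][] keys, byte[][] values);
}

final class BcsIdWriteSketch {
  private static final byte[] BCSID_KEY =
      "#BCSID".getBytes(StandardCharsets.UTF_8);

  private long inMemoryBcsId; // what the container report would expose

  /** Persist one block record together with the latest commit sequence id. */
  void putBlock(AtomicKeyValueStore db, long localBlockId,
      byte[] blockProtoBytes, long blockCommitSequenceId) {
    byte[][] keys = {Longs.toByteArray(localBlockId), BCSID_KEY};
    byte[][] values = {blockProtoBytes, Longs.toByteArray(blockCommitSequenceId)};
    db.putAll(keys, values);               // single atomic batch
    inMemoryBcsId = blockCommitSequenceId; // visible in the next report
  }
}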
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index c3a4126..aee5775 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -191,6 +191,13 @@ public class ContainerReader implements Runnable {
kvContainerData
.updateDeleteTransactionId(Longs.fromByteArray(delTxnId));
}
+ // sets the BlockCommitSequenceId.
+ byte[] bcsId = containerDB.get(
+ DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
+ if (bcsId != null) {
+ kvContainerData
+ .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
+ }
containerSet.addContainer(kvContainer);
} else {
throw new StorageContainerException("Container File is corrupted. " +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 982029c..f8fb32d 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -215,6 +215,7 @@ message ContainerInfo {
optional string finalhash = 9;
optional hadoop.hdds.LifeCycleState state = 10;
optional int64 deleteTransactionId = 11;
+ optional uint64 blockCommitSequenceId = 12;
}
/*
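Editor's note: since blockCommitSequenceId is declared optional, a consumer of ContainerInfo (for example SCM-side report processing) can distinguish "not reported" from a real value. The sketch below assumes only the accessor names that protobuf-java generates for an optional uint64 field of this name.

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;

final class BcsIdFromReport {
  /**
   * Returns the reported block commit sequence id, or 0 when the report
   * was produced by a datanode that does not yet send the field.
   */
  static long blockCommitSequenceIdOf(ContainerInfo info) {
    return info.hasBlockCommitSequenceId() ? info.getBlockCommitSequenceId() : 0L;
  }
}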
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
index ada3567..9d54cea 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
@@ -78,6 +78,10 @@ public final class OmKeyLocationInfo {
return offset;
}
+ public long getBlockCommitSequenceId() {
+ return blockCommitSequenceId;
+ }
+
/**
* Builder of OmKeyLocationInfo.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00083579/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
new file mode 100644
index 0000000..ed4629c
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.
+ HDDS_COMMAND_STATUS_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.
+ HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
+ HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
+ OZONE_SCM_STALENODE_INTERVAL;
+
+/**
+ * Tests the validity of the BCSID of a container.
+ */
+public class TestBCSID {
+
+ private static MiniOzoneCluster cluster;
+ private static OzoneConfiguration conf;
+ private static OzoneClient client;
+ private static ObjectStore objectStore;
+ private static String volumeName;
+ private static String bucketName;
+
+ /**
+ * Create a MiniOzoneCluster for testing.
+ *
+ * @throws IOException
+ */
+ @BeforeClass
+ public static void init() throws Exception {
+ conf = new OzoneConfiguration();
+ String path = GenericTestUtils
+ .getTempPath(TestBCSID.class.getSimpleName());
+ File baseDir = new File(path);
+ baseDir.mkdirs();
+
+ conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
+ TimeUnit.MILLISECONDS);
+ conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
+ TimeUnit.MILLISECONDS);
+ conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
+ conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
+ conf.setQuietMode(false);
+ cluster =
+ MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200)
+ .build();
+ cluster.waitForClusterToBeReady();
+ // the easiest way to create an open container is to create a key
+ client = OzoneClientFactory.getClient(conf);
+ objectStore = client.getObjectStore();
+ volumeName = "bcsid";
+ bucketName = volumeName;
+ objectStore.createVolume(volumeName);
+ objectStore.getVolume(volumeName).createBucket(bucketName);
+ }
+
+ /**
+ * Shutdown the MiniOzoneCluster.
+ */
+ @AfterClass
+ public static void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test
+ public void testBCSID() throws Exception {
+ OzoneOutputStream key =
+ objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis", 1024, ReplicationType.RATIS,
+ ReplicationFactor.ONE);
+ key.write("ratis".getBytes());
+ key.close();
+
+ // get the name of a valid container.
+ OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).
+ setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
+ .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName("ratis")
+ .build();
+ OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
+ List<OmKeyLocationInfo> keyLocationInfos =
+ keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
+ Assert.assertEquals(1, keyLocationInfos.size());
+ OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(0);
+
+ long blockCommitSequenceId =
+ cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID())
+ .getContainerReport().getBlockCommitSequenceId();
+ Assert.assertTrue(blockCommitSequenceId > 0);
+
+ // make sure the persisted block Id in OM is the same as that seen in the
+ // container report to be reported to SCM.
+ Assert.assertEquals(blockCommitSequenceId,
+ omKeyLocationInfo.getBlockCommitSequenceId());
+
+ // verify that on restarting the datanode, it reloads the BCSID correctly.
+ cluster.restartHddsDatanode(0);
+ Assert.assertEquals(blockCommitSequenceId,
+ cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID())
+ .getContainerReport().getBlockCommitSequenceId());
+ }
+}
\ No newline at end of file
[2/3] hadoop git commit: HDDS-579. ContainerStateMachine should fail
subsequent transactions per container in case one fails. Contributed by
Shashikant Banerjee.
Posted by ji...@apache.org.
HDDS-579. ContainerStateMachine should fail subsequent transactions per container in case one fails. Contributed by Shashikant Banerjee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a619d120
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a619d120
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a619d120
Branch: refs/heads/ozone-0.3
Commit: a619d120a6c44bde2a846d61505a94f896e58e46
Parents: 0008357
Author: Jitendra Pandey <ji...@apache.org>
Authored: Sat Oct 13 19:15:01 2018 -0700
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Mon Oct 15 13:30:53 2018 -0700
----------------------------------------------------------------------
.../main/proto/DatanodeContainerProtocol.proto | 4 +-
.../container/common/impl/HddsDispatcher.java | 63 +++++--
.../container/keyvalue/KeyValueHandler.java | 20 +-
.../StorageContainerDatanodeProtocol.proto | 1 +
.../rpc/TestContainerStateMachineFailures.java | 185 +++++++++++++++++++
5 files changed, 242 insertions(+), 31 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a619d120/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 662df8f..da55db3 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -139,6 +139,7 @@ enum Result {
CONTAINER_CHECKSUM_ERROR = 33;
UNKNOWN_CONTAINER_TYPE = 34;
BLOCK_NOT_COMMITTED = 35;
+ CONTAINER_UNHEALTHY = 36;
}
/**
@@ -161,7 +162,8 @@ enum ContainerLifeCycleState {
OPEN = 1;
CLOSING = 2;
CLOSED = 3;
- INVALID = 4;
+ UNHEALTHY = 4;
+ INVALID = 5;
}
message ContainerCommandRequestProto {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a619d120/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index bb5002a..1849841 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -142,6 +142,26 @@ public class HddsDispatcher implements ContainerDispatcher {
responseProto = handler.handle(msg, container);
if (responseProto != null) {
metrics.incContainerOpsLatencies(cmdType, System.nanoTime() - startTime);
+
+ // If the request is of Write Type and the container operation
+ // is unsuccessful, it implies the applyTransaction on the container
+ // failed. All subsequent transactions on the container should fail and
+ // hence the replica will be marked unhealthy here. In this case, a close
+ // container action will be sent to SCM to close the container.
+ if (!HddsUtils.isReadOnly(msg)
+ && responseProto.getResult() != ContainerProtos.Result.SUCCESS) {
+ // If the container is open and the container operation has failed,
+ // it should first be marked unhealthy and then the close container
+ // action should be initiated. This also implies this is the first
+ // transaction which has failed, so the container is marked unhealthy
+ // right here. Once the container is marked unhealthy, all subsequent
+ // write transactions will fail with a CONTAINER_UNHEALTHY exception.
+ if (container.getContainerState() == ContainerLifeCycleState.OPEN) {
+ container.getContainerData()
+ .setState(ContainerLifeCycleState.UNHEALTHY);
+ sendCloseContainerActionIfNeeded(container);
+ }
+ }
return responseProto;
} else {
return ContainerUtils.unsupportedRequest(msg);
@@ -149,31 +169,46 @@ public class HddsDispatcher implements ContainerDispatcher {
}
/**
- * If the container usage reaches the close threshold we send Close
- * ContainerAction to SCM.
- *
+ * If the container usage reaches the close threshold or the container is
+ * marked unhealthy, we send a Close ContainerAction to SCM.
* @param container current state of container
*/
private void sendCloseContainerActionIfNeeded(Container container) {
// We have to find a more efficient way to close a container.
- Boolean isOpen = Optional.ofNullable(container)
+ boolean isSpaceFull = isContainerFull(container);
+ boolean shouldClose = isSpaceFull || isContainerUnhealthy(container);
+ if (shouldClose) {
+ ContainerData containerData = container.getContainerData();
+ ContainerAction.Reason reason =
+ isSpaceFull ? ContainerAction.Reason.CONTAINER_FULL :
+ ContainerAction.Reason.CONTAINER_UNHEALTHY;
+ ContainerAction action = ContainerAction.newBuilder()
+ .setContainerID(containerData.getContainerID())
+ .setAction(ContainerAction.Action.CLOSE).setReason(reason).build();
+ context.addContainerActionIfAbsent(action);
+ }
+ }
+
+ private boolean isContainerFull(Container container) {
+ boolean isOpen = Optional.ofNullable(container)
.map(cont -> cont.getContainerState() == ContainerLifeCycleState.OPEN)
.orElse(Boolean.FALSE);
if (isOpen) {
ContainerData containerData = container.getContainerData();
- double containerUsedPercentage = 1.0f * containerData.getBytesUsed() /
- containerData.getMaxSize();
- if (containerUsedPercentage >= containerCloseThreshold) {
- ContainerAction action = ContainerAction.newBuilder()
- .setContainerID(containerData.getContainerID())
- .setAction(ContainerAction.Action.CLOSE)
- .setReason(ContainerAction.Reason.CONTAINER_FULL)
- .build();
- context.addContainerActionIfAbsent(action);
- }
+ double containerUsedPercentage =
+ 1.0f * containerData.getBytesUsed() / containerData.getMaxSize();
+ return containerUsedPercentage >= containerCloseThreshold;
+ } else {
+ return false;
}
}
+ private boolean isContainerUnhealthy(Container container) {
+ return Optional.ofNullable(container).map(
+ cont -> (cont.getContainerState() == ContainerLifeCycleState.UNHEALTHY))
+ .orElse(Boolean.FALSE);
+ }
+
@Override
public Handler getHandler(ContainerProtos.ContainerType containerType) {
return handlers.get(containerType);
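Editor's note: condensed, the dispatcher change is one decision per write request: if the handler returned a failure while the replica was still OPEN, flip it to UNHEALTHY and queue a CLOSE action toward SCM. A self-contained sketch of that decision follows; the types and names are hypothetical stand-ins, not the real HddsDispatcher/StateContext API.

/** Condensed model of the failure handling added to the dispatcher. */
final class WriteFailureSketch {
  enum State { OPEN, CLOSING, CLOSED, UNHEALTHY }
  enum Result { SUCCESS, IO_EXCEPTION, CONTAINER_UNHEALTHY }

  static final class Replica {
    State state = State.OPEN;
    boolean closeActionQueued = false;
  }

  /** Invoked after a handler finishes a write-type request. */
  static void onWriteResult(Replica replica, Result result) {
    if (result == Result.SUCCESS) {
      return;
    }
    // First failed apply on an open replica: mark it unhealthy so every later
    // write fails fast with CONTAINER_UNHEALTHY, and ask SCM (once) to close.
    if (replica.state == State.OPEN) {
      replica.state = State.UNHEALTHY;
      if (!replica.closeActionQueued) { // addContainerActionIfAbsent analogue
        replica.closeActionQueued = true;
      }
    }
  }
}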
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a619d120/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 922db2a..4c87b19 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -79,22 +79,7 @@ import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.BLOCK_NOT_COMMITTED;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.CLOSED_CONTAINER_IO;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.DELETE_ON_OPEN_CONTAINER;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.GET_SMALL_FILE_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.INVALID_CONTAINER_STATE;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.IO_EXCEPTION;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .Result.PUT_SMALL_FILE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Stage;
import static org.apache.hadoop.ozone.OzoneConfigKeys
@@ -819,6 +804,9 @@ public class KeyValueHandler extends Handler {
case CLOSED:
result = CLOSED_CONTAINER_IO;
break;
+ case UNHEALTHY:
+ result = CONTAINER_UNHEALTHY;
+ break;
case INVALID:
result = INVALID_CONTAINER_STATE;
break;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a619d120/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index f8fb32d..72d48a6 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -159,6 +159,7 @@ message ContainerAction {
enum Reason {
CONTAINER_FULL = 1;
+ CONTAINER_UNHEALTHY = 2;
}
required int64 containerID = 1;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a619d120/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
new file mode 100644
index 0000000..0e593fb
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.
+ StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.
+ StorageContainerDatanodeProtocolProtos.ContainerAction.Action;
+import org.apache.hadoop.hdds.protocol.proto.
+ StorageContainerDatanodeProtocolProtos.ContainerAction.Reason;
+import org.apache.hadoop.hdds.scm.container.
+ common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.
+ HDDS_COMMAND_STATUS_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.
+ HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
+ HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
+ OZONE_SCM_STALENODE_INTERVAL;
+
+/**
+ * Tests the containerStateMachine failure handling.
+ */
+
+public class TestContainerStateMachineFailures {
+
+ private static MiniOzoneCluster cluster;
+ private static OzoneConfiguration conf;
+ private static OzoneClient client;
+ private static ObjectStore objectStore;
+ private static String volumeName;
+ private static String bucketName;
+ private static String path;
+ private static int chunkSize;
+
+ /**
+ * Create a MiniOzoneCluster for testing.
+ *
+ * @throws IOException
+ */
+ @BeforeClass
+ public static void init() throws Exception {
+ conf = new OzoneConfiguration();
+ path = GenericTestUtils
+ .getTempPath(TestContainerStateMachineFailures.class.getSimpleName());
+ File baseDir = new File(path);
+ baseDir.mkdirs();
+
+ chunkSize = (int) OzoneConsts.MB;
+
+ conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
+ TimeUnit.MILLISECONDS);
+ conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
+ TimeUnit.MILLISECONDS);
+ conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
+ conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
+ conf.setQuietMode(false);
+ cluster =
+ MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200)
+ .build();
+ cluster.waitForClusterToBeReady();
+ // the easiest way to create an open container is to create a key
+ client = OzoneClientFactory.getClient(conf);
+ objectStore = client.getObjectStore();
+ volumeName = "testcontainerstatemachinefailures";
+ bucketName = volumeName;
+ objectStore.createVolume(volumeName);
+ objectStore.getVolume(volumeName).createBucket(bucketName);
+ }
+
+ /**
+ * Shutdown the MiniOzoneCluster.
+ */
+ @AfterClass
+ public static void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test
+ public void testContainerStateMachineFailures() throws Exception {
+ OzoneOutputStream key =
+ objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis", 1024, ReplicationType.RATIS,
+ ReplicationFactor.ONE);
+ key.write("ratis".getBytes());
+
+ //get the name of a valid container
+ OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).
+ setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
+ .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName("ratis")
+ .build();
+ ChunkGroupOutputStream groupOutputStream =
+ (ChunkGroupOutputStream) key.getOutputStream();
+ List<OmKeyLocationInfo> locationInfoList =
+ groupOutputStream.getLocationInfoList();
+ Assert.assertEquals(1, locationInfoList.size());
+ OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
+
+ long containerID = omKeyLocationInfo.getContainerID();
+ // delete the container dir
+ FileUtil.fullyDelete(new File(
+ cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID()).getContainerData()
+ .getContainerPath()));
+ try {
+ // flush will throw an exception
+ key.flush();
+ Assert.fail("Expected exception not thrown");
+ } catch (IOException ioe) {
+ Assert.assertTrue(ioe.getCause() instanceof StorageContainerException);
+ }
+
+ // Make sure the container is marked unhealthy
+ Assert.assertTrue(
+ cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID())
+ .getContainerState()
+ == ContainerProtos.ContainerLifeCycleState.UNHEALTHY);
+ try {
+ // subsequent requests will fail with unhealthy container exception
+ key.close();
+ Assert.fail("Expected exception not thrown");
+ } catch (IOException ioe) {
+ Assert.assertTrue(ioe.getCause() instanceof StorageContainerException);
+ Assert.assertTrue(((StorageContainerException) ioe.getCause()).getResult()
+ == ContainerProtos.Result.CONTAINER_UNHEALTHY);
+ }
+ StorageContainerDatanodeProtocolProtos.ContainerAction action =
+ StorageContainerDatanodeProtocolProtos.ContainerAction.newBuilder()
+ .setContainerID(containerID).setAction(Action.CLOSE)
+ .setReason(Reason.CONTAINER_UNHEALTHY)
+ .build();
+
+ // Make sure the container close action is initiated to SCM.
+ Assert.assertTrue(
+ cluster.getHddsDatanodes().get(0).getDatanodeStateMachine().getContext()
+ .getAllPendingContainerActions().contains(action));
+ }
+}
[3/3] hadoop git commit: HDDS-629. Make ApplyTransaction calls in
ContainerStateMachine idempotent. Contributed by Shashikant Banerjee.
Posted by ji...@apache.org.
HDDS-629. Make ApplyTransaction calls in ContainerStateMachine idempotent. Contributed by Shashikant Banerjee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b51bc6b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b51bc6b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b51bc6b9
Branch: refs/heads/ozone-0.3
Commit: b51bc6b93ffb41de860a59b3875ce49274287b82
Parents: a619d12
Author: Jitendra Pandey <ji...@apache.org>
Authored: Mon Oct 15 11:52:38 2018 -0700
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Mon Oct 15 13:31:08 2018 -0700
----------------------------------------------------------------------
.../container/keyvalue/KeyValueHandler.java | 10 +-
.../container/keyvalue/helpers/ChunkUtils.java | 11 +-
.../keyvalue/impl/BlockManagerImpl.java | 24 +++-
.../keyvalue/impl/ChunkManagerImpl.java | 43 +++++++
.../TestContainerStateMachineIdempotency.java | 121 +++++++++++++++++++
.../common/impl/TestContainerPersistence.java | 17 +--
6 files changed, 203 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51bc6b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 4c87b19..da77f1c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -241,9 +241,12 @@ public class KeyValueHandler extends Handler {
newContainer.create(volumeSet, volumeChoosingPolicy, scmID);
containerSet.addContainer(newContainer);
} else {
- throw new StorageContainerException("Container already exists with " +
- "container Id " + containerID, ContainerProtos.Result
- .CONTAINER_EXISTS);
+
+ // The create container request for an already existing container can
+ // arrive in case the ContainerStateMachine reapplies the transaction
+ // on datanode restart. Just log a warning msg here.
+ LOG.warn("Container already exists." +
+ "container Id " + containerID);
}
} catch (StorageContainerException ex) {
return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -370,6 +373,7 @@ public class KeyValueHandler extends Handler {
/**
* Handles Close Container Request. An open container is closed.
+ * Close Container call is idempotent.
*/
ContainerCommandResponseProto handleCloseContainer(
ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
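Editor's note: the create path now tolerates a replayed CreateContainer: the second attempt finds the container in the container set and only logs a warning instead of failing with CONTAINER_EXISTS. The same idempotent-create pattern, reduced to a tiny self-contained sketch (hypothetical container set, not the real ContainerSet API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Idempotent create: a replayed create of an existing container is a no-op. */
final class IdempotentCreateSketch {
  private final Map<Long, Object> containers = new ConcurrentHashMap<>();

  void createContainer(long containerId) {
    // putIfAbsent makes the "already exists" case explicit and race-free.
    if (containers.putIfAbsent(containerId, new Object()) != null) {
      System.out.println("Container " + containerId
          + " already exists; treating the replayed create as a no-op");
    }
  }
}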
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51bc6b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 8bdae0f..20598d9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -231,21 +231,18 @@ public final class ChunkUtils {
*
* @param chunkFile - chunkFile to write data into.
* @param info - chunk info.
- * @return boolean isOverwrite
- * @throws StorageContainerException
+ * @return true if the chunkFile exists and chunkOffset < chunkFile length,
+ * false otherwise.
*/
public static boolean validateChunkForOverwrite(File chunkFile,
- ChunkInfo info) throws StorageContainerException {
+ ChunkInfo info) {
Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
if (isOverWriteRequested(chunkFile, info)) {
if (!isOverWritePermitted(info)) {
- log.error("Rejecting write chunk request. Chunk overwrite " +
+ log.warn("Duplicate write chunk request. Chunk overwrite " +
"without explicit request. {}", info.toString());
- throw new StorageContainerException("Rejecting write chunk request. " +
- "OverWrite flag required." + info.toString(),
- OVERWRITE_FLAG_REQUIRED);
}
return true;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51bc6b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index bd19441..7fa0cfb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -89,11 +89,33 @@ public class BlockManagerImpl implements BlockManager {
Preconditions.checkNotNull(db, "DB cannot be null here");
long blockCommitSequenceId = data.getBlockCommitSequenceId();
+ byte[] blockCommitSequenceIdKey =
+ DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
+ byte[] blockCommitSequenceIdValue = db.get(blockCommitSequenceIdKey);
+
+ // default blockCommitSequenceId for any block is 0. If the putBlock
+ // request is not coming via Ratis (for test scenarios), it will be 0.
+ // In such cases, we should overwrite the block as well
+ if (blockCommitSequenceIdValue != null && blockCommitSequenceId != 0) {
+ if (blockCommitSequenceId <= Longs
+ .fromByteArray(blockCommitSequenceIdValue)) {
+ // Since the blockCommitSequenceId stored in the db is greater than
+ // or equal to the blockCommitSequenceId to be updated, it means the
+ // putBlock transaction is reapplied in the ContainerStateMachine on
+ // restart. It also implies that the given block must already exist
+ // in the db. Just log and return.
+ LOG.warn("blockCommitSequenceId " + Longs
+ .fromByteArray(blockCommitSequenceIdValue)
+ + " in the Container Db is greater than or equal to the supplied"
+ + " value " + blockCommitSequenceId + ". Ignoring it");
+ return data.getSize();
+ }
+ }
// update the blockData as well as BlockCommitSequenceId here
BatchOperation batch = new BatchOperation();
batch.put(Longs.toByteArray(data.getLocalID()),
data.getProtoBufMessage().toByteArray());
- batch.put(DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX),
+ batch.put(blockCommitSequenceIdKey,
Longs.toByteArray(blockCommitSequenceId));
db.writeBatch(batch);
container.updateBlockCommitSequenceId(blockCommitSequenceId);
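Editor's note: this guard is what makes putBlock replay-safe: when Ratis reapplies a log entry after a restart, the sequence id carried by the replayed request is no newer than the one already persisted, so the write is skipped. A compact sketch of that comparison, with an in-memory map standing in for the container DB:

import java.util.HashMap;
import java.util.Map;

/** Replay-safe putBlock sketch: skip the write if the stored BCSID already covers it. */
final class IdempotentPutBlockSketch {
  private final Map<Long, byte[]> blocks = new HashMap<>();
  private long storedBcsId = 0L;

  /** Returns true if the block was written, false if the request was a replay. */
  boolean putBlock(long localId, byte[] blockBytes, long incomingBcsId) {
    // An incoming id of 0 means the request did not come through Ratis
    // (test-only paths); always apply it in that case.
    if (incomingBcsId != 0 && incomingBcsId <= storedBcsId) {
      return false; // replayed transaction; the block is already in the DB
    }
    blocks.put(localId, blockBytes); // in the real code: one atomic batch
    storedBcsId = incomingBcsId;     // together with the #BCSID entry
    return true;
  }
}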
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51bc6b9/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
index ce317bd..6fd8d5f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java
@@ -87,6 +87,32 @@ public class ChunkManagerImpl implements ChunkManager {
switch (stage) {
case WRITE_DATA:
+ if (isOverwrite) {
+ // if the actual chunk file already exists here while writing the temp
+ // chunk file, then it means the same ozone client request has
+ // generated two raft log entries. This can happen either because the
+ // retryCache expired in Ratis, or because of a log index
+ // mismatch/corruption in Ratis. There are two ways to handle this:
+ // 1. Read the complete data in the actual chunk file, verify the
+ // data integrity, and rewrite only if it mismatches; or
+ // 2. Delete the chunk file and write the chunk again. For now,
+ // let's rewrite the chunk file.
+ // TODO: once the checksum support for write chunks gets plugged in,
+ // the checksum needs to be verified for the actual chunk file and
+ // the data to be written here which should be efficient and
+ // it matches we can safely return without rewriting.
+ LOG.warn("ChunkFile already exists" + chunkFile + ".Deleting it.");
+ FileUtil.fullyDelete(chunkFile);
+ }
+ if (tmpChunkFile.exists()) {
+ // If the tmp chunk file already exists, it means the raft log got
+ // appended, but later the log entry got truncated in Ratis, leaving
+ // behind garbage.
+ // TODO: once checksum support for data chunks gets plugged in,
+ // compare the checksums here instead of rewriting the chunk.
+ LOG.warn(
+ "tmpChunkFile already exists: " + tmpChunkFile + ". Overwriting it.");
+ }
// Initially writes to temporary chunk file.
ChunkUtils.writeData(tmpChunkFile, info, data, volumeIOStats);
// No need to increment container stats here, as still data is not
@@ -95,6 +121,15 @@ public class ChunkManagerImpl implements ChunkManager {
case COMMIT_DATA:
// commit the data, means move chunk data from temporary chunk file
// to actual chunk file.
+ if (isOverwrite) {
+ // If the actual chunk file already exists, it implies the write
+ // chunk transaction in the ContainerStateMachine is being
+ // reapplied. This can happen when a node restarts.
+ // TODO: verify the checksums for the existing chunkFile and the
+ // chunkInfo to be committed here.
+ LOG.warn("ChunkFile already exists: " + chunkFile);
+ return;
+ }
commitChunk(tmpChunkFile, chunkFile);
// Increment container stats here, as we commit the data.
containerData.incrBytesUsed(info.getLen());
@@ -200,6 +235,14 @@ public class ChunkManagerImpl implements ChunkManager {
if (containerData.getLayOutVersion() == ChunkLayOutVersion
.getLatestVersion().getVersion()) {
File chunkFile = ChunkUtils.getChunkFile(containerData, info);
+
+ // If the chunk file does not exist, it might have already been deleted.
+ // The call might be due to a reapply of transactions on datanode
+ // restart.
+ if (!chunkFile.exists()) {
+ LOG.warn("Chunk file does not exist. Chunk info: " + info.toString());
+ return;
+ }
if ((info.getOffset() == 0) && (info.getLen() == chunkFile.length())) {
FileUtil.fullyDelete(chunkFile);
containerData.decrBytesUsed(chunkFile.length());
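
The ChunkManagerImpl changes above make WRITE_DATA, COMMIT_DATA, and deleteChunk tolerant of reapplied raft log entries. A rough sketch of that control flow, assuming plain java.io files (the class, enum, and method names here are illustrative, not the HDDS types, and the checksum verification the TODOs call for is omitted):

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.StandardCopyOption;

    /** Illustrative sketch of idempotent chunk handling; not the HDDS code. */
    public class ChunkWriteSketch {

      enum Stage { WRITE_DATA, COMMIT_DATA }

      static void writeChunk(File chunkFile, File tmpChunkFile, byte[] data,
          Stage stage) throws IOException {
        switch (stage) {
        case WRITE_DATA:
          if (chunkFile.exists()) {
            // The same client request produced two raft log entries
            // (retry-cache expiry or log corruption); rewrite for now.
            chunkFile.delete();
          }
          // A leftover tmp file from a truncated log entry is simply
          // overwritten by the new write.
          try (FileOutputStream out = new FileOutputStream(tmpChunkFile)) {
            out.write(data);
          }
          break;
        case COMMIT_DATA:
          if (chunkFile.exists()) {
            // The commit is being reapplied after a restart; nothing to do.
            return;
          }
          Files.move(tmpChunkFile.toPath(), chunkFile.toPath(),
              StandardCopyOption.ATOMIC_MOVE);
          break;
        default:
          throw new IllegalArgumentException("Unhandled stage: " + stage);
        }
      }

      static void deleteChunk(File chunkFile) {
        if (!chunkFile.exists()) {
          // Already gone, most likely a reapplied delete transaction.
          return;
        }
        chunkFile.delete();
      }
    }
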
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51bc6b9/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
new file mode 100644
index 0000000..c69a94c
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.
+ ContainerPlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.
+ SCMContainerPlacementCapacity;
+import org.apache.hadoop.hdds.scm.protocolPB.
+ StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.UUID;
+
+/**
+ * Tests the idempotent operations in ContainerStateMachine.
+ */
+public class TestContainerStateMachineIdempotency {
+ private static MiniOzoneCluster cluster;
+ private static OzoneConfiguration ozoneConfig;
+ private static StorageContainerLocationProtocolClientSideTranslatorPB
+ storageContainerLocationClient;
+ private static XceiverClientManager xceiverClientManager;
+ private static String containerOwner = "OZONE";
+
+ @BeforeClass
+ public static void init() throws Exception {
+ ozoneConfig = new OzoneConfiguration();
+ ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
+ SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
+ cluster =
+ MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1).build();
+ cluster.waitForClusterToBeReady();
+ storageContainerLocationClient =
+ cluster.getStorageContainerLocationClient();
+ xceiverClientManager = new XceiverClientManager(ozoneConfig);
+ }
+
+ @AfterClass
+ public static void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
+ }
+
+ @Test
+ public void testContainerStateMachineIdempotency() throws Exception {
+ String traceID = UUID.randomUUID().toString();
+ ContainerWithPipeline container = storageContainerLocationClient
+ .allocateContainer(HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE, containerOwner);
+ long containerID = container.getContainerInfo().getContainerID();
+ Pipeline pipeline = container.getPipeline();
+ XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
+ try {
+ //create the container
+ ContainerProtocolCalls.createContainer(client, containerID, traceID);
+ // call create Container again
+ BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
+ byte[] data =
+ RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes();
+ ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
+ ContainerTestHelper
+ .getWriteChunkRequest(container.getPipeline(), blockID,
+ data.length);
+ client.sendCommand(writeChunkRequest);
+
+ // Make the write chunk request again without requesting overwrite.
+ client.sendCommand(writeChunkRequest);
+ // Now, explicitly make a putKey request for the block.
+ ContainerProtos.ContainerCommandRequestProto putKeyRequest =
+ ContainerTestHelper
+ .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
+ client.sendCommand(putKeyRequest).getPutBlock();
+ // send the putBlock again
+ client.sendCommand(putKeyRequest);
+
+ // close container call
+ ContainerProtocolCalls.closeContainer(client, containerID, traceID);
+ ContainerProtocolCalls.closeContainer(client, containerID, traceID);
+ } catch (IOException ioe) {
+ Assert.fail("Container operation failed " + ioe);
+ }
+ xceiverClientManager.releaseClient(client);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51bc6b9/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index d9e0d29..bea00fe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -462,15 +462,7 @@ public class TestContainerPersistence {
byte[] data = getData(datalen);
setDataChecksum(info, data);
chunkManager.writeChunk(container, blockID, info, data, COMBINED);
- try {
- chunkManager.writeChunk(container, blockID, info, data, COMBINED);
- } catch (StorageContainerException ex) {
- Assert.assertTrue(ex.getMessage().contains(
- "Rejecting write chunk request. OverWrite flag required"));
- Assert.assertEquals(ex.getResult(),
- ContainerProtos.Result.OVERWRITE_FLAG_REQUIRED);
- }
-
+ chunkManager.writeChunk(container, blockID, info, data, COMBINED);
// With the overwrite flag it should work now.
info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
chunkManager.writeChunk(container, blockID, info, data, COMBINED);
@@ -478,7 +470,7 @@ public class TestContainerPersistence {
Assert.assertEquals(datalen, bytesUsed);
long bytesWrite = container.getContainerData().getWriteBytes();
- Assert.assertEquals(datalen * 2, bytesWrite);
+ Assert.assertEquals(datalen * 3, bytesWrite);
}
/**
@@ -748,10 +740,11 @@ public class TestContainerPersistence {
}
- private BlockData writeBlockHelper(BlockID blockID)
+ private BlockData writeBlockHelper(BlockID blockID, int i)
throws IOException, NoSuchAlgorithmException {
ChunkInfo info = writeChunkHelper(blockID);
BlockData blockData = new BlockData(blockID);
+ blockData.setBlockCommitSequenceId((long) i);
List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
chunkList.add(info.getProtoBufMessage());
blockData.setChunks(chunkList);
@@ -766,7 +759,7 @@ public class TestContainerPersistence {
for (int i = 0; i < 10; i++) {
BlockID blockID = new BlockID(testContainerID, i);
expectedBlocks.add(blockID);
- BlockData kd = writeBlockHelper(blockID);
+ BlockData kd = writeBlockHelper(blockID, i);
blockManager.putBlock(container, kd);
}
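
The writeBlockHelper change above passes a distinct, increasing BCSID per block because, with the new guard in BlockManagerImpl, a putBlock whose BCSID is not greater than the stored value is treated as a replay and skipped. A quick illustration against the hypothetical SimpleBlockStore sketch shown earlier (not the test code):

    public class BcsIdGuardDemo {
      public static void main(String[] args) {
        SimpleBlockStore store = new SimpleBlockStore();
        // Monotonically increasing BCSIDs: every block is actually written.
        for (long i = 1; i <= 10; i++) {
          System.out.println("block " + i + " written: "
              + store.putBlock(i, new byte[]{1}, i));   // true
        }
        // Reusing an older BCSID looks like a replayed transaction; skipped.
        System.out.println("replay written: "
            + store.putBlock(11, new byte[]{1}, 5));    // false
      }
    }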