You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by av...@apache.org on 2021/01/11 05:04:55 UTC
[ozone] branch HDDS-3698-upgrade updated: HDDS-4175. Implement
Datanode Finalization. (#1720)
This is an automated email from the ASF dual-hosted git repository.
avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-3698-upgrade by this push:
new d36b87d HDDS-4175. Implement Datanode Finalization. (#1720)
d36b87d is described below
commit d36b87d4ee0b2f01fc683cf7301f0dc7d16a2f51
Author: prashantpogde <pr...@gmail.com>
AuthorDate: Sun Jan 10 21:04:37 2021 -0800
HDDS-4175. Implement Datanode Finalization. (#1720)
---
hadoop-hdds/common/pom.xml | 5 +
.../hdds/upgrade/HDDSLayoutFeatureCatalog.java | 32 +++++--
.../hadoop/hdds/upgrade/HDDSUpgradeAction.java | 6 +-
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 1 +
.../org/apache/hadoop/ozone/common/Storage.java | 2 +-
.../ozone/upgrade/BasicUpgradeFinalizer.java | 11 ++-
.../container/common/DataNodeStorageConfig.java | 77 ++++++++++++++++
.../common/statemachine/DatanodeStateMachine.java | 72 ++++++++++++++-
.../FinalizeNewLayoutVersionCommandHandler.java | 19 +++-
.../upgrade/DataNodeLayoutActionCatalog.java | 58 ++++++++++++
.../upgrade/DataNodeLayoutVersionManager.java | 49 ++--------
.../container/upgrade/DataNodeUpgradeAction.java} | 12 +--
.../DataNodeUpgradeActionFirstUpgradeVersion.java} | 19 ++--
.../upgrade/DataNodeUpgradeFinalizer.java} | 55 +++++------
.../upgrade/TestDataNodeStartupSlvLessThanMlv.java | 101 +++++++++++++++++++++
.../hdds/scm/server/SCMDatanodeProtocolServer.java | 8 ++
.../hdds/scm/server/StorageContainerManager.java | 11 +++
.../scm/server/upgrade/SCMLayoutActionCatalog.java | 55 +++++++++++
.../hdds/scm/server/upgrade/SCMUpgradeAction.java | 4 +-
...va => SCMUpgradeActionFirstUpgradeVersion.java} | 13 ++-
.../scm/server/upgrade/SCMUpgradeFinalizer.java | 7 +-
21 files changed, 504 insertions(+), 113 deletions(-)
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 4a17336..580c389 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -200,6 +200,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<scope>test</scope>
<!-- Needed for mocking RaftServerImpl -->
</dependency>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs-annotations</artifactId>
+ <scope>compile</scope>
+ </dependency>
</dependencies>
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
index 5ef86af..2c6760e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdds.upgrade;
import java.util.Optional;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.ozone.upgrade.LayoutFeature;
@@ -27,6 +28,7 @@ import org.apache.hadoop.ozone.upgrade.LayoutFeature;
*/
public class HDDSLayoutFeatureCatalog {
+
/**
* List of HDDS Features.
*/
@@ -39,7 +41,11 @@ public class HDDSLayoutFeatureCatalog {
private int layoutVersion;
private String description;
- private Optional< ? extends HDDSUpgradeAction> hddsUpgradeAction =
+
+ private Optional<? extends HDDSUpgradeAction> scmUpgradeAction =
+ Optional.empty();
+
+ private Optional<? extends HDDSUpgradeAction> datanodeUpgradeAction =
Optional.empty();
HDDSLayoutFeature(final int layoutVersion, String description) {
@@ -47,11 +53,16 @@ public class HDDSLayoutFeatureCatalog {
this.description = description;
}
- HDDSLayoutFeature(final int layoutVersion, String description,
- HDDSUpgradeAction upgradeAction) {
- this.layoutVersion = layoutVersion;
- this.description = description;
- hddsUpgradeAction = Optional.of(upgradeAction);
+ @SuppressFBWarnings("ME_ENUM_FIELD_SETTER")
+ public void setSCMUpgradeAction(Optional<? extends HDDSUpgradeAction>
+ scmAction) {
+ this.scmUpgradeAction = scmAction;
+ }
+
+ @SuppressFBWarnings("ME_ENUM_FIELD_SETTER")
+ public void setDataNodeUpgradeAction(Optional<? extends HDDSUpgradeAction>
+ datanodeAction) {
+ this.datanodeUpgradeAction = datanodeAction;
}
@Override
@@ -64,9 +75,12 @@ public class HDDSLayoutFeatureCatalog {
return description;
}
- @Override
- public Optional<? extends HDDSUpgradeAction> onFinalizeAction() {
- return hddsUpgradeAction;
+ public Optional<? extends HDDSUpgradeAction> onFinalizeSCMAction() {
+ return scmUpgradeAction;
+ }
+
+ public Optional<? extends HDDSUpgradeAction> onFinalizeDataNodeAction() {
+ return datanodeUpgradeAction;
}
}
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java
index 68ab666..0808b0f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java
@@ -23,9 +23,5 @@ import org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeAction;
/**
* Upgrade Action for SCM and DataNodes.
*/
-public class HDDSUpgradeAction<T> implements UpgradeAction<T> {
- @Override
- public void executeAction(T arg) throws Exception {
-
- }
+public interface HDDSUpgradeAction<T> extends UpgradeAction<T> {
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index e1d1b0d..81470b2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -42,6 +42,7 @@ public final class OzoneConsts {
public static final String STORAGE_ID = "storageID";
public static final String DATANODE_UUID = "datanodeUuid";
+ public static final String DATANODE_STORAGE_DIR = "datanodeStorageConfig";
public static final String CLUSTER_ID = "clusterID";
public static final String LAYOUTVERSION = "layOutVersion";
public static final String CTIME = "ctime";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index 9029c63..7cd8f78 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -145,7 +145,7 @@ public abstract class Storage {
abstract protected Properties getNodeProperties();
/**
- * Sets the Node properties specific to OM/SCM.
+ * Sets the Node properties specific to OM/SCM/DataNode.
*/
private void setNodeProperties() {
Properties nodeProperties = getNodeProperties();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
index 552da54..65cbfab 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
@@ -23,8 +23,6 @@ import static org.apache.hadoop.ozone.upgrade.UpgradeException.ResultCodes.LAYOU
import static org.apache.hadoop.ozone.upgrade.UpgradeException.ResultCodes.PERSIST_UPGRADE_TO_LAYOUT_VERSION_FAILED;
import static org.apache.hadoop.ozone.upgrade.UpgradeException.ResultCodes.REMOVE_UPGRADE_TO_LAYOUT_VERSION_FAILED;
import static org.apache.hadoop.ozone.upgrade.UpgradeException.ResultCodes.UPDATE_LAYOUT_VERSION_FAILED;
-import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE;
-import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_IN_PROGRESS;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_REQUIRED;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.STARTING_FINALIZATION;
@@ -71,6 +69,11 @@ public class BasicUpgradeFinalizer<T, V extends AbstractLayoutVersionManager>
return FINALIZATION_IN_PROGRESS_MSG;
case FINALIZATION_DONE:
case ALREADY_FINALIZED:
+ if (versionManager.needsFinalization()) {
+ throw new UpgradeException("Upgrade found in inconsistent state. " +
+ "Upgrade state is FINALIZATION Complete while MLV has not been " +
+ "upgraded to SLV.", INVALID_REQUEST);
+ }
return FINALIZED_MSG;
default:
if (!versionManager.needsFinalization()) {
@@ -129,9 +132,9 @@ public class BasicUpgradeFinalizer<T, V extends AbstractLayoutVersionManager>
}
}
- protected void finalizeFeature(LayoutFeature feature, Storage config)
+ protected void finalizeFeature(LayoutFeature feature, Storage config,
+ Optional<? extends UpgradeAction> action)
throws UpgradeException {
- Optional<? extends UpgradeAction> action = feature.onFinalizeAction();
if (!action.isPresent()) {
emitNOOPMsg(feature.name());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeStorageConfig.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeStorageConfig.java
new file mode 100644
index 0000000..03c5e70
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeStorageConfig.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common;
+
+import static org.apache.hadoop.ozone.OzoneConsts.DATANODE_STORAGE_DIR;
+import static org.apache.hadoop.ozone.OzoneConsts.DATANODE_UUID;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.hadoop.ozone.common.Storage;
+
+/**
+ * DataNodeStorageConfig is responsible for management of the
+ * StorageDirectories used by the DataNode.
+ */
+public class DataNodeStorageConfig extends Storage {
+
+ /**
+ * Construct DataNodeStorageConfig.
+ * @throws IOException if any directories are inaccessible.
+ */
+ public DataNodeStorageConfig(OzoneConfiguration conf, String dataNodeId)
+ throws IOException {
+ super(NodeType.DATANODE, ServerUtils.getOzoneMetaDirPath(conf),
+ DATANODE_STORAGE_DIR);
+ setDataNodeId(dataNodeId);
+ }
+
+ public DataNodeStorageConfig(NodeType type, File root, String sdName)
+ throws IOException {
+ super(type, root, sdName);
+ }
+
+ public void setDataNodeId(String dataNodeId) throws IOException {
+ getStorageInfo().setProperty(DATANODE_UUID, dataNodeId);
+ }
+
+ /**
+ * Retrieves the DataNode ID from the version file.
+ * @return DataNodeId
+ */
+ public String getDataNodeId() {
+ return getStorageInfo().getProperty(DATANODE_UUID);
+ }
+
+ @Override
+ protected Properties getNodeProperties() {
+ String dataNodeId = getDataNodeId();
+ if (dataNodeId == null) {
+ dataNodeId = UUID.randomUUID().toString();
+ }
+ Properties datanodeProperties = new Properties();
+ datanodeProperties.setProperty(DATANODE_UUID, dataNodeId);
+ return datanodeProperties;
+ }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 2b898c7..28e34c7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -16,8 +16,11 @@
*/
package org.apache.hadoop.ozone.container.common.statemachine;
+import static org.apache.hadoop.ozone.common.Storage.StorageState.INITIALIZED;
+
import java.io.Closeable;
import java.io.IOException;
+import java.util.Iterator;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
@@ -36,6 +39,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
import org.apache.hadoop.ozone.HddsDatanodeStopService;
+import org.apache.hadoop.ozone.container.common.DataNodeStorageConfig;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.report.ReportManager;
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler;
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ClosePipelineCommandHandler;
@@ -51,8 +56,11 @@ import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator;
import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;
import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
+import org.apache.hadoop.ozone.container.upgrade.DataNodeLayoutActionCatalog.DataNodeLayoutAction;
import org.apache.hadoop.ozone.container.upgrade.DataNodeLayoutVersionManager;
+import org.apache.hadoop.ozone.container.upgrade.DataNodeUpgradeFinalizer;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
import org.apache.hadoop.util.JvmPauseMonitor;
import org.apache.hadoop.util.Time;
@@ -88,6 +96,8 @@ public class DatanodeStateMachine implements Closeable {
private final HddsDatanodeStopService hddsDatanodeStopService;
private DataNodeLayoutVersionManager dataNodeVersionManager;
+ private DataNodeStorageConfig dataNodeStorageConfig;
+ private DataNodeUpgradeFinalizer upgradeFinalizer;
/**
* Used to synchronize to the OzoneContainer object created in the
@@ -113,7 +123,17 @@ public class DatanodeStateMachine implements Closeable {
this.hddsDatanodeStopService = hddsDatanodeStopService;
this.conf = conf;
this.datanodeDetails = datanodeDetails;
- dataNodeVersionManager = DataNodeLayoutVersionManager.initialize(conf);
+
+ loadDataNodeUpgradeActions();
+ dataNodeStorageConfig = new DataNodeStorageConfig(conf,
+ datanodeDetails.getUuidString());
+ if (dataNodeStorageConfig.getState() != INITIALIZED) {
+ dataNodeStorageConfig.initialize();
+ }
+ dataNodeVersionManager = DataNodeLayoutVersionManager
+ .initialize(dataNodeStorageConfig);
+ upgradeFinalizer = new DataNodeUpgradeFinalizer(dataNodeVersionManager);
+
executorService = Executors.newFixedThreadPool(
getEndPointTaskThreadPoolSize(),
new ThreadFactoryBuilder()
@@ -565,4 +585,54 @@ public class DatanodeStateMachine implements Closeable {
public DataNodeLayoutVersionManager getDataNodeVersionManager() {
return dataNodeVersionManager;
}
+
+ @VisibleForTesting
+ public DataNodeStorageConfig getDataNodeStorageConfig() {
+ return dataNodeStorageConfig;
+ }
+
+ @VisibleForTesting
+ public boolean canFinalizeDataNode() {
+ // Let's be sure that we do not have any open containers before we return
+ // from here. This function should be called in its own finalizer thread
+ // context.
+ Iterator<Container<?>> containerIt =
+ getContainer().getController().getContainers();
+ while (containerIt.hasNext()) {
+ Container ctr = containerIt.next();
+ switch (ctr.getContainerState()) {
+ case OPEN:
+ case CLOSING:
+ case UNHEALTHY:
+ return false;
+ default:
+ continue;
+ }
+ }
+ return true;
+ }
+
+ @VisibleForTesting
+ public boolean preFinalizeUpgrade() {
+ return canFinalizeDataNode();
+ }
+
+
+ @VisibleForTesting
+ public void postFinalizeUpgrade() {
+ }
+
+ public StatusAndMessages finalizeUpgrade()
+ throws IOException{
+ return upgradeFinalizer.finalize(datanodeDetails.getUuidString(), this);
+ }
+
+ private void loadDataNodeUpgradeActions() {
+ // we just need to iterate through the enum list to load
+ // the actions.
+ for (DataNodeLayoutAction action : DataNodeLayoutAction.values()) {
+ LOG.info("Loading datanode action for {}",
+ action.getHddsFeature().description());
+ }
+ }
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java
index 34f7dc5..d74f867 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java
@@ -16,12 +16,16 @@
*/
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_REQUIRED;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.FinalizeNewLayoutVersionCommandProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
import org.apache.hadoop.ozone.container.common.statemachine
.SCMConnectionManager;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.apache.hadoop.ozone.protocol.commands.FinalizeNewLayoutVersionCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
@@ -60,10 +64,19 @@ public class FinalizeNewLayoutVersionCommandHandler implements CommandHandler {
LOG.info("Processing FinalizeNewLayoutVersionCommandHandler command.");
invocationCount.incrementAndGet();
final long startTime = Time.monotonicNow();
+ DatanodeStateMachine dsm = context.getParent();
+ final FinalizeNewLayoutVersionCommandProto finalizeCommand =
+ ((FinalizeNewLayoutVersionCommand)command).getProto();
try {
- // TODO : finalization logic
- if (LOG.isDebugEnabled()) {
- LOG.debug("Finalize Upgrade called!");
+ if (finalizeCommand.getFinalizeNewLayoutVersion()) {
+ // SCM is asking datanode to finalize
+ if (dsm.getDataNodeVersionManager().getUpgradeState() ==
+ FINALIZATION_REQUIRED) {
+ // SCM will keep sending Finalize command until datanode mlv == slv
+ // we need to avoid multiple invocations of finalizeUpgrade.
+ LOG.info("Finalize Upgrade called!");
+ dsm.finalizeUpgrade();
+ }
}
} catch (Exception e) {
LOG.debug("Unexpected Error: {} ", e);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutActionCatalog.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutActionCatalog.java
new file mode 100644
index 0000000..48bfec2
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutActionCatalog.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature.FIRST_UPGRADE_VERSION;
+
+import java.util.Optional;
+
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature;
+
+/**
+ * Catalog of HDDS features and their corresponding DataNode action.
+ * It is OK to skip HDDS features from the catalog that do not have
+ * any specific DataNodeActions.
+ */
+public class DataNodeLayoutActionCatalog {
+
+ /**
+ * List of HDDS Features and corresponding DataNode actions.
+ */
+ public enum DataNodeLayoutAction {
+ DataNodeAction1(FIRST_UPGRADE_VERSION,
+ new DataNodeUpgradeActionFirstUpgradeVersion());
+
+ ////////////////////////////// //////////////////////////////
+
+ private HDDSLayoutFeature hddsFeature;
+ private DataNodeUpgradeAction dataNodeAction;
+
+ DataNodeLayoutAction(HDDSLayoutFeature feature,
+ DataNodeUpgradeAction action) {
+ this.hddsFeature = feature;
+ this.dataNodeAction = action;
+ this.hddsFeature.setDataNodeUpgradeAction(Optional.of(dataNodeAction));
+ }
+
+ public HDDSLayoutFeature getHddsFeature() {
+ return hddsFeature;
+ }
+ }
+}
+
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutVersionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutVersionManager.java
index c075938..1f33a81 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutVersionManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutVersionManager.java
@@ -18,21 +18,10 @@
package org.apache.hadoop.ozone.container.upgrade;
-
-import static org.apache.hadoop.ozone.container.common.volume.HddsVolume.HDDS_VOLUME_DIR;
-import static org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet.getDatanodeStorageDirs;
-
-import java.io.File;
import java.io.IOException;
-import java.util.Collection;
-import java.util.Properties;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.common.Storage;
import org.apache.hadoop.ozone.upgrade.AbstractLayoutVersionManager;
import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
import org.slf4j.Logger;
@@ -46,7 +35,7 @@ import com.google.common.annotations.VisibleForTesting;
*/
@SuppressWarnings("FinalClass")
public class DataNodeLayoutVersionManager extends
- AbstractLayoutVersionManager {
+ AbstractLayoutVersionManager<HDDSLayoutFeature> {
private static final Logger LOG = LoggerFactory.getLogger(
DataNodeLayoutVersionManager.class);
private static DataNodeLayoutVersionManager dataNodeLayoutVersionManager;
@@ -69,41 +58,15 @@ public class DataNodeLayoutVersionManager extends
/**
* Initialize DataNode version manager from version file stored on the
* DataNode.
- * @param conf - Ozone Configuration
+ * @param dataNodeStorage - DataNode storage config
* @return version manager instance.
*/
+
public static synchronized DataNodeLayoutVersionManager initialize(
- ConfigurationSource conf)
- throws IOException {
+ Storage dataNodeStorage) throws IOException {
if (dataNodeLayoutVersionManager == null) {
dataNodeLayoutVersionManager = new DataNodeLayoutVersionManager();
- int layoutVersion = 0;
- Collection<String> rawLocations = getDatanodeStorageDirs(conf);
- for (String locationString : rawLocations) {
- StorageLocation location = StorageLocation.parse(locationString);
- File hddsRootDir = new File(location.getUri().getPath(),
- HDDS_VOLUME_DIR);
- // Read the version from VersionFile Stored on the data node.
- File versionFile = HddsVolumeUtil.getVersionFile(hddsRootDir);
- if (!versionFile.exists()) {
- // Volume Root is non empty but VERSION file does not exist.
- LOG.warn("VERSION file does not exist in volume {},"
- + " current volume state: {}.",
- hddsRootDir.getPath(), HddsVolume.VolumeState.INCONSISTENT);
- continue;
- } else {
- LOG.debug("Reading version file {} from disk.", versionFile);
- }
- Properties props = DatanodeVersionFile.readFrom(versionFile);
- if (props.isEmpty()) {
- continue;
- }
- int storedVersion = HddsVolumeUtil.getLayOutVersion(props, versionFile);
- if (storedVersion > layoutVersion) {
- layoutVersion = storedVersion;
- }
- }
- dataNodeLayoutVersionManager.init(layoutVersion,
+ dataNodeLayoutVersionManager.init(dataNodeStorage.getLayoutVersion(),
HDDSLayoutFeature.values());
}
return dataNodeLayoutVersionManager;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeAction.java
similarity index 70%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java
copy to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeAction.java
index 8137756..9097043 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeAction.java
@@ -16,16 +16,14 @@
* limitations under the License.
*/
-package org.apache.hadoop.hdds.scm.server.upgrade;
+package org.apache.hadoop.ozone.container.upgrade;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
/**
- * Upgrade Action for StorageContainerManager which takes in an 'SCM' instance.
+ * Upgrade Action for DataNode which takes in a 'DataNodeStateMachine' instance.
*/
-public class SCMUpgradeAction extends
- HDDSUpgradeAction<StorageContainerManager> {
- public void executeAction(StorageContainerManager arg) throws Exception {
- }
+public interface DataNodeUpgradeAction extends
+ HDDSUpgradeAction<DatanodeStateMachine> {
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeActionFirstUpgradeVersion.java
similarity index 56%
copy from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java
copy to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeActionFirstUpgradeVersion.java
index 68ab666..267af05 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeActionFirstUpgradeVersion.java
@@ -16,16 +16,23 @@
* limitations under the License.
*/
-package org.apache.hadoop.hdds.upgrade;
+package org.apache.hadoop.ozone.container.upgrade;
-import org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeAction;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- * Upgrade Action for SCM and DataNodes.
+ * Upgrade Action for DataNode for the very first Upgrade Version.
*/
-public class HDDSUpgradeAction<T> implements UpgradeAction<T> {
- @Override
- public void executeAction(T arg) throws Exception {
+public class DataNodeUpgradeActionFirstUpgradeVersion
+ implements DataNodeUpgradeAction {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(DataNodeUpgradeActionFirstUpgradeVersion.class);
+ @Override
+ public void executeAction(DatanodeStateMachine arg) throws Exception {
+ LOG.info("Executing datanode upgrade action for the very First Upgrade " +
+ "Version.");
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
similarity index 60%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
copy to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
index ccc6030..6081f00 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
@@ -16,57 +16,63 @@
* limitations under the License.
*/
-package org.apache.hadoop.hdds.scm.server.upgrade;
+package org.apache.hadoop.ozone.container.upgrade;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_IN_PROGRESS;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_REQUIRED;
import java.io.IOException;
+import java.util.Optional;
import java.util.concurrent.Callable;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds
- .upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
import org.apache.hadoop.ozone.upgrade.BasicUpgradeFinalizer;
+import org.apache.hadoop.ozone.upgrade.LayoutFeature;
/**
- * UpgradeFinalizer for the Storage Container Manager service.
+ * UpgradeFinalizer for the DataNode.
*/
-public class SCMUpgradeFinalizer extends
- BasicUpgradeFinalizer<StorageContainerManager, HDDSLayoutVersionManager> {
+public class DataNodeUpgradeFinalizer extends
+ BasicUpgradeFinalizer<DatanodeStateMachine, DataNodeLayoutVersionManager> {
- public SCMUpgradeFinalizer(HDDSLayoutVersionManager versionManager) {
+ public DataNodeUpgradeFinalizer(DataNodeLayoutVersionManager versionManager) {
super(versionManager);
}
@Override
public StatusAndMessages finalize(String upgradeClientID,
- StorageContainerManager scm)
+ DatanodeStateMachine dsm)
throws IOException {
- StatusAndMessages response = preFinalize(upgradeClientID, scm);
+ StatusAndMessages response = preFinalize(upgradeClientID, dsm);
if (response.status() != FINALIZATION_REQUIRED) {
return response;
}
- new Worker(scm).call();
+ new Worker(dsm).call();
return STARTING_MSG;
}
private class Worker implements Callable<Void> {
- private StorageContainerManager storageContainerManager;
+ private DatanodeStateMachine datanodeStateMachine;
/**
- * Initiates the Worker, for the specified SCM instance.
- * @param scm the StorageContainerManager instance on which to finalize the
+ * Initiates the Worker for the specified DataNode instance.
+ * @param dsm the DatanodeStateMachine instance on which to finalize the
* new LayoutFeatures.
*/
- Worker(StorageContainerManager scm) {
- storageContainerManager = scm;
+ Worker(DatanodeStateMachine dsm) {
+ datanodeStateMachine = dsm;
}
@Override
public Void call() throws IOException {
+ if(!datanodeStateMachine.preFinalizeUpgrade()) {
+ // datanode is not yet ready to finalize.
+ // Reset the Finalization state.
+ versionManager.setUpgradeState(FINALIZATION_REQUIRED);
+ return null;
+ }
try {
emitStartingMsg();
versionManager.setUpgradeState(FINALIZATION_IN_PROGRESS);
@@ -75,21 +81,18 @@ public class SCMUpgradeFinalizer extends
* all existing pipelines are closed and pipeline Manger would freeze
* all new pipeline creation.
*/
- String msg = " Existing pipelines and containers will be closed " +
- "during Upgrade.";
- msg += "\n New pipelines creation will remain frozen until Upgrade " +
- "is finalized.";
- storageContainerManager.preFinalizeUpgrade();
- logAndEmit(msg);
for (HDDSLayoutFeature f : versionManager.unfinalizedFeatures()) {
- finalizeFeature(f, storageContainerManager.getScmStorageConfig());
+ Optional<? extends LayoutFeature.UpgradeAction> action =
+ f.onFinalizeDataNodeAction();
+ finalizeFeature(f, datanodeStateMachine.getDataNodeStorageConfig(),
+ action);
updateLayoutVersionInVersionFile(f,
- storageContainerManager.getScmStorageConfig());
+ datanodeStateMachine.getDataNodeStorageConfig());
versionManager.finalized(f);
}
versionManager.completeFinalization();
- storageContainerManager.postFinalizeUpgrade();
+ datanodeStateMachine.postFinalizeUpgrade();
emitFinishedMsg();
return null;
} finally {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
new file mode 100644
index 0000000..35e7985
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.upgrade;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeatureCatalog;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.upgrade.LayoutFeature;
+import org.apache.hadoop.ozone.upgrade.TestUpgradeUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+/**
+ * Tests that DataNode will throw an exception on creation when it reads in a
+ * VERSION file indicating a metadata layout version larger than its
+ * software layout version.
+ */
+public class TestDataNodeStartupSlvLessThanMlv {
+ @Rule
+ public TemporaryFolder tempFolder = new TemporaryFolder();
+
+ @Test
+ public void testStartupSlvLessThanMlv() throws Exception {
+ // Add subdirectories under the temporary folder where the version file
+ // will be placed.
+ File datanodeSubdir = tempFolder.newFolder("datanodeStorageConfig",
+ "current");
+
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
+ tempFolder.getRoot().getAbsolutePath());
+
+ // Set metadata layout version larger then software layout version.
+ int largestSlv = 0;
+ for (LayoutFeature f :
+ HDDSLayoutFeatureCatalog.HDDSLayoutFeature.values()) {
+ largestSlv = Math.max(largestSlv, f.layoutVersion());
+ }
+ int mlv = largestSlv + 1;
+
+ // Create version file with MLV > SLV, which should fail the
+ // DataNodeStateMachine construction.
+ TestUpgradeUtils.createVersionFile(datanodeSubdir,
+ HddsProtos.NodeType.DATANODE, mlv);
+
+ try {
+ DatanodeStateMachine stateMachine =
+ new DatanodeStateMachine(getNewDatanodeDetails(), conf, null,
+ null);
+ Assert.fail("Expected IOException due to incorrect MLV on DataNode " +
+ "creation.");
+ } catch (IOException e) {
+ String expectedMessage = String.format("Metadata layout version (%s) > " +
+ "software layout version (%s)", mlv, largestSlv);
+ GenericTestUtils.assertExceptionContains(expectedMessage, e);
+ }
+ }
+
+
+ private DatanodeDetails getNewDatanodeDetails() {
+ DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+ DatanodeDetails.Port.Name.STANDALONE, 0);
+ DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+ DatanodeDetails.Port.Name.RATIS, 0);
+ DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+ DatanodeDetails.Port.Name.REST, 0);
+ return DatanodeDetails.newBuilder()
+ .setUuid(UUID.randomUUID())
+ .setHostName("localhost")
+ .setIpAddress("127.0.0.1")
+ .addPort(containerPort)
+ .addPort(ratisPort)
+ .addPort(restPort)
+ .build();
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index eeee1fb..87a3462 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand;
import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand;
import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.FinalizeNewLayoutVersionCommand;
import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -85,6 +86,7 @@ import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProt
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.createPipelineCommand;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteBlocksCommand;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteContainerCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.finalizeNewLayoutVersionCommand;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.replicateContainerCommand;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
@@ -348,6 +350,12 @@ public class SCMDatanodeProtocolServer implements
.setClosePipelineCommandProto(
((ClosePipelineCommand)cmd).getProto())
.build();
+ case finalizeNewLayoutVersionCommand:
+ return builder
+ .setCommandType(finalizeNewLayoutVersionCommand)
+ .setFinalizeNewLayoutVersionCommandProto(
+ ((FinalizeNewLayoutVersionCommand)cmd).getProto())
+ .build();
default:
throw new IllegalArgumentException("Scm command " +
cmd.getType().toString() + " is not implemented");
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 48d5cdc..770b4db 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -90,6 +90,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler;
import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.PipelineChoosePolicyFactory;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
+import org.apache.hadoop.hdds.scm.server.upgrade.SCMLayoutActionCatalog.SCMLayoutAction;
import org.apache.hadoop.hdds.scm.server.upgrade.SCMUpgradeFinalizer;
import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
@@ -262,6 +263,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
"failure.", ResultCodes.SCM_NOT_INITIALIZED);
}
+ loadSCMUpgradeActions();
scmLayoutVersionManager =
HDDSLayoutVersionManager.initialize(scmStorageConfig);
@@ -1281,4 +1283,13 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
) throws IOException {
return upgradeFinalizer.reportStatus(upgradeClientID, takeover);
}
+
+ private void loadSCMUpgradeActions() {
+ // we just need to iterate through the enum list to load
+ // the actions.
+ for (SCMLayoutAction action : SCMLayoutAction.values()) {
+ LOG.info("Loading SCM upgrade action for {}",
+ action.getHddsFeature().description());
+ }
+ }
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMLayoutActionCatalog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMLayoutActionCatalog.java
new file mode 100644
index 0000000..d8c9765
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMLayoutActionCatalog.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server.upgrade;
+
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature.FIRST_UPGRADE_VERSION;
+import java.util.Optional;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature;
+
+/**
+ * Catalog of HDDS features and their corresponding SCM action.
+ * It is OK to skip HDDS features from the catalog that do not have
+ * any specific SCMActions.
+ */
+public class SCMLayoutActionCatalog {
+
+ /**
+ * List of HDDS Features and corresponding SCM actions.
+ */
+ public enum SCMLayoutAction {
+ SCMAction1(FIRST_UPGRADE_VERSION,
+ new SCMUpgradeActionFirstUpgradeVersion());
+
+ ////////////////////////////// //////////////////////////////
+
+ private HDDSLayoutFeature hddsFeature;
+ private SCMUpgradeAction scmAction;
+
+ SCMLayoutAction(HDDSLayoutFeature feature, SCMUpgradeAction action) {
+ this.hddsFeature = feature;
+ this.scmAction = action;
+ hddsFeature.setSCMUpgradeAction(Optional.of(scmAction));
+ }
+
+ public HDDSLayoutFeature getHddsFeature() {
+ return hddsFeature;
+ }
+ }
+}
+
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java
index 8137756..b3f31a2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
/**
* Upgrade Action for StorageContainerManager which takes in an 'SCM' instance.
*/
-public class SCMUpgradeAction extends
+public interface SCMUpgradeAction extends
HDDSUpgradeAction<StorageContainerManager> {
- public void executeAction(StorageContainerManager arg) throws Exception {
- }
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeActionFirstUpgradeVersion.java
similarity index 71%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java
copy to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeActionFirstUpgradeVersion.java
index 8137756..8e3f54c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeActionFirstUpgradeVersion.java
@@ -19,13 +19,18 @@
package org.apache.hadoop.hdds.scm.server.upgrade;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- * Upgrade Action for StorageContainerManager which takes in an 'SCM' instance.
+ * SCM Upgrade Action for the very first Upgrade Version.
*/
-public class SCMUpgradeAction extends
- HDDSUpgradeAction<StorageContainerManager> {
+public class SCMUpgradeActionFirstUpgradeVersion implements
+ SCMUpgradeAction {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SCMUpgradeActionFirstUpgradeVersion.class);
+ @Override
public void executeAction(StorageContainerManager arg) throws Exception {
+ LOG.info("Executing SCM Upgrade action for Very first Upgrade Version");
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
index ccc6030..f8c3f41 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATI
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_REQUIRED;
import java.io.IOException;
+import java.util.Optional;
import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -30,6 +31,7 @@ import org.apache.hadoop.hdds
.upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.ozone.upgrade.BasicUpgradeFinalizer;
+import org.apache.hadoop.ozone.upgrade.LayoutFeature;
/**
* UpgradeFinalizer for the Storage Container Manager service.
@@ -83,7 +85,10 @@ public class SCMUpgradeFinalizer extends
logAndEmit(msg);
for (HDDSLayoutFeature f : versionManager.unfinalizedFeatures()) {
- finalizeFeature(f, storageContainerManager.getScmStorageConfig());
+ Optional<? extends LayoutFeature.UpgradeAction> action =
+ f.onFinalizeSCMAction();
+ finalizeFeature(f, storageContainerManager.getScmStorageConfig(),
+ action);
updateLayoutVersionInVersionFile(f,
storageContainerManager.getScmStorageConfig());
versionManager.finalized(f);
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org