You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2015/03/31 00:26:17 UTC
[2/2] hadoop git commit: HDFS-7645. Rolling upgrade is restoring
blocks from trash multiple times (Contributed by Vinayakumar B and Keisuke
Ogiwara)
HDFS-7645. Rolling upgrade is restoring blocks from trash multiple times (Contributed by Vinayakumar B and Keisuke Ogiwara)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abf3ad98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abf3ad98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abf3ad98
Branch: refs/heads/branch-2
Commit: abf3ad988ddf07450fccb7ca8f4bb4dd688c118a
Parents: defae1d
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Mar 30 15:25:16 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Mar 30 15:25:26 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hdfs/protocol/RollingUpgradeInfo.java | 18 +++++-
.../hdfs/protocol/RollingUpgradeStatus.java | 13 +++-
.../apache/hadoop/hdfs/protocolPB/PBHelper.java | 4 +-
.../hdfs/server/datanode/BPOfferService.java | 13 ++--
.../hdfs/server/datanode/BPServiceActor.java | 2 +-
.../server/datanode/BlockPoolSliceStorage.java | 21 +++---
.../hdfs/server/datanode/DataStorage.java | 6 +-
.../server/datanode/fsdataset/FsDatasetSpi.java | 4 +-
.../datanode/fsdataset/impl/FsDatasetImpl.java | 4 +-
.../hdfs/server/namenode/FSNamesystem.java | 17 ++---
.../hadoop-hdfs/src/main/proto/hdfs.proto | 1 +
.../src/main/webapps/hdfs/dfshealth.html | 4 ++
.../server/datanode/SimulatedFSDataset.java | 2 +-
.../datanode/TestDataNodeRollingUpgrade.java | 67 +++++++++++++-------
.../extdataset/ExternalDatasetImpl.java | 2 +-
16 files changed, 118 insertions(+), 63 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 667aa05..cb4ac29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -44,6 +44,9 @@ Release 2.8.0 - UNRELEASED
HDFS-7890. Improve information on Top users for metrics in
RollingWindowsManager and lower log level (J.Andreina via vinayakumarb)
+ HDFS-7645. Rolling upgrade is restoring blocks from trash multiple times.
+ (Vinayakumar B and Keisuke Ogiwara via Arpit Agarwal)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
index 98089bc..80e3e34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
@@ -29,12 +29,12 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Evolving
public class RollingUpgradeInfo extends RollingUpgradeStatus {
private final long startTime;
- private final long finalizeTime;
+ private long finalizeTime;
private boolean createdRollbackImages;
public RollingUpgradeInfo(String blockPoolId, boolean createdRollbackImages,
long startTime, long finalizeTime) {
- super(blockPoolId);
+ super(blockPoolId, finalizeTime != 0);
this.createdRollbackImages = createdRollbackImages;
this.startTime = startTime;
this.finalizeTime = finalizeTime;
@@ -56,11 +56,23 @@ public class RollingUpgradeInfo extends RollingUpgradeStatus {
public long getStartTime() {
return startTime;
}
-
+
+ @Override
public boolean isFinalized() {
return finalizeTime != 0;
}
+ /**
+ * Finalize the upgrade if not already finalized
+ * @param finalizeTime
+ */
+ public void finalize(long finalizeTime) {
+ if (finalizeTime != 0) {
+ this.finalizeTime = finalizeTime;
+ createdRollbackImages = false;
+ }
+ }
+
public long getFinalizeTime() {
return finalizeTime;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
index 9925920..1f969fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
@@ -27,15 +27,21 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Evolving
public class RollingUpgradeStatus {
private final String blockPoolId;
+ private final boolean finalized;
- public RollingUpgradeStatus(String blockPoolId) {
+ public RollingUpgradeStatus(String blockPoolId, boolean finalized) {
this.blockPoolId = blockPoolId;
+ this.finalized = finalized;
}
public String getBlockPoolId() {
return blockPoolId;
}
+ public boolean isFinalized() {
+ return finalized;
+ }
+
@Override
public int hashCode() {
return blockPoolId.hashCode();
@@ -48,8 +54,9 @@ public class RollingUpgradeStatus {
} else if (obj == null || !(obj instanceof RollingUpgradeStatus)) {
return false;
}
- final RollingUpgradeStatus that = (RollingUpgradeStatus)obj;
- return this.blockPoolId.equals(that.blockPoolId);
+ final RollingUpgradeStatus that = (RollingUpgradeStatus) obj;
+ return this.blockPoolId.equals(that.blockPoolId)
+ && this.isFinalized() == that.isFinalized();
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 73c6705..4b9eadf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -1687,11 +1687,13 @@ public class PBHelper {
RollingUpgradeStatus status) {
return RollingUpgradeStatusProto.newBuilder()
.setBlockPoolId(status.getBlockPoolId())
+ .setFinalized(status.isFinalized())
.build();
}
public static RollingUpgradeStatus convert(RollingUpgradeStatusProto proto) {
- return new RollingUpgradeStatus(proto.getBlockPoolId());
+ return new RollingUpgradeStatus(proto.getBlockPoolId(),
+ proto.getFinalized());
}
public static RollingUpgradeInfoProto convert(RollingUpgradeInfo info) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 3e7c897..8efad83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.*;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
@@ -471,15 +472,19 @@ class BPOfferService {
/**
* Signal the current rolling upgrade status as indicated by the NN.
- * @param inProgress true if a rolling upgrade is in progress
+ * @param rollingUpgradeStatus rolling upgrade status
*/
- void signalRollingUpgrade(boolean inProgress) throws IOException {
+ void signalRollingUpgrade(RollingUpgradeStatus rollingUpgradeStatus)
+ throws IOException {
+ if (rollingUpgradeStatus == null) {
+ return;
+ }
String bpid = getBlockPoolId();
- if (inProgress) {
+ if (!rollingUpgradeStatus.isFinalized()) {
dn.getFSDataset().enableTrash(bpid);
dn.getFSDataset().setRollingUpgradeMarker(bpid);
} else {
- dn.getFSDataset().restoreTrash(bpid);
+ dn.getFSDataset().clearTrash(bpid);
dn.getFSDataset().clearRollingUpgradeMarker(bpid);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 3b4756c..df582f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -662,7 +662,7 @@ class BPServiceActor implements Runnable {
" in HeartbeatResponse. Expected " +
bpos.getBlockPoolId());
} else {
- bpos.signalRollingUpgrade(rollingUpgradeStatus != null);
+ bpos.signalRollingUpgrade(rollingUpgradeStatus);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 4076a8b..d26a9a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -351,7 +351,8 @@ public class BlockPoolSliceStorage extends Storage {
sd.getPreviousDir() + " and " + getTrashRootDir(sd) + " should not " +
" both be present.");
doRollback(sd, nsInfo); // rollback if applicable
- } else {
+ } else if (startOpt == StartupOption.ROLLBACK &&
+ !sd.getPreviousDir().exists()) {
// Restore all the files in the trash. The restored files are retained
// during rolling upgrade rollback. They are deleted during rolling
// upgrade downgrade.
@@ -378,6 +379,12 @@ public class BlockPoolSliceStorage extends Storage {
&& this.cTime == nsInfo.getCTime()) {
return; // regular startup
}
+ if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) {
+ int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
+ LOG.info("Restored " + restored + " block files from trash " +
+ "before the layout upgrade. These blocks will be moved to " +
+ "the previous directory during the upgrade");
+ }
if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION
|| this.cTime < nsInfo.getCTime()) {
doUpgrade(datanode, sd, nsInfo); // upgrade
@@ -730,16 +737,12 @@ public class BlockPoolSliceStorage extends Storage {
/**
* Delete all files and directories in the trash directories.
*/
- public void restoreTrash() {
+ public void clearTrash() {
for (StorageDirectory sd : storageDirs) {
File trashRoot = getTrashRootDir(sd);
- try {
- Preconditions.checkState(!(trashRoot.exists() && sd.getPreviousDir().exists()));
- restoreBlockFilesFromTrash(trashRoot);
- FileUtil.fullyDelete(getTrashRootDir(sd));
- } catch (IOException ioe) {
- LOG.warn("Restoring trash failed for storage directory " + sd);
- }
+ Preconditions.checkState(!(trashRoot.exists() && sd.getPreviousDir().exists()));
+ FileUtil.fullyDelete(trashRoot);
+ LOG.info("Cleared trash for storage directory " + sd);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index f979d3c..77fcfed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -168,11 +168,11 @@ public class DataStorage extends Storage {
}
}
- public void restoreTrash(String bpid) {
+ public void clearTrash(String bpid) {
if (trashEnabledBpids.contains(bpid)) {
- getBPStorage(bpid).restoreTrash();
+ getBPStorage(bpid).clearTrash();
trashEnabledBpids.remove(bpid);
- LOG.info("Restored trash for bpid " + bpid);
+ LOG.info("Cleared trash for bpid " + bpid);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index 4ecfa99..5a99c9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -490,9 +490,9 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
public void enableTrash(String bpid);
/**
- * Restore trash
+ * Clear trash
*/
- public void restoreTrash(String bpid);
+ public void clearTrash(String bpid);
/**
* @return true when trash is enabled
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 9dc428f..2cc6779 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2619,8 +2619,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
}
@Override
- public void restoreTrash(String bpid) {
- dataStorage.restoreTrash(bpid);
+ public void clearTrash(String bpid) {
+ dataStorage.clearTrash(bpid);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 32dcd5a..17308d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7566,7 +7566,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/** Is rolling upgrade in progress? */
public boolean isRollingUpgrade() {
- return rollingUpgradeInfo != null;
+ return rollingUpgradeInfo != null && !rollingUpgradeInfo.isFinalized();
}
void checkRollingUpgrade(String action) throws RollingUpgradeException {
@@ -7581,7 +7581,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
checkSuperuserPrivilege();
checkOperation(OperationCategory.WRITE);
writeLock();
- final RollingUpgradeInfo returnInfo;
try {
checkOperation(OperationCategory.WRITE);
if (!isRollingUpgrade()) {
@@ -7589,8 +7588,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
checkNameNodeSafeMode("Failed to finalize rolling upgrade");
- returnInfo = finalizeRollingUpgradeInternal(now());
- getEditLog().logFinalizeRollingUpgrade(returnInfo.getFinalizeTime());
+ finalizeRollingUpgradeInternal(now());
+ getEditLog().logFinalizeRollingUpgrade(rollingUpgradeInfo.getFinalizeTime());
if (haEnabled) {
// roll the edit log to make sure the standby NameNode can tail
getFSImage().rollEditLog();
@@ -7610,14 +7609,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
}
- return returnInfo;
+ return rollingUpgradeInfo;
}
- RollingUpgradeInfo finalizeRollingUpgradeInternal(long finalizeTime)
- throws RollingUpgradeException {
- final long startTime = rollingUpgradeInfo.getStartTime();
- rollingUpgradeInfo = null;
- return new RollingUpgradeInfo(blockPoolId, false, startTime, finalizeTime);
+ void finalizeRollingUpgradeInternal(long finalizeTime) {
+ // Set the finalize time
+ rollingUpgradeInfo.finalize(finalizeTime);
}
long addCacheDirective(CacheDirectiveInfo directive,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 7d94f04..86fb462 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -607,4 +607,5 @@ message SnapshotInfoProto {
*/
message RollingUpgradeStatusProto {
required string blockPoolId = 1;
+ optional bool finalized = 2 [default = false];
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 391ca79..928431c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -79,6 +79,9 @@
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">×</button>
{#RollingUpgradeStatus}
+ {@if cond="{finalizeTime} > 0"}
+ <p>Rolling upgrade finalized at {#helper_date_tostring value="{finalizeTime}"/}. </p>
+ {:else}
<p>Rolling upgrade started at {#helper_date_tostring value="{startTime}"/}. </br>
{#createdRollbackImages}
Rollback image has been created. Proceed to upgrade daemons.
@@ -86,6 +89,7 @@
Rollback image has not been created.
{/createdRollbackImages}
</p>
+ {/if}
{/RollingUpgradeStatus}
{@if cond="{DistinctVersionCount} > 1"}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 23fc95b..160a86c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -1226,7 +1226,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
}
@Override
- public void restoreTrash(String bpid) {
+ public void clearTrash(String bpid) {
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
index 7fd8398..57fee06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
@@ -19,12 +19,7 @@
package org.apache.hadoop.hdfs.server.datanode;
import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
@@ -43,7 +38,9 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.Builder;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.TestRollingUpgrade;
+import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -208,29 +205,53 @@ public class TestDataNodeRollingUpgrade {
public void testDatanodeRollingUpgradeWithFinalize() throws Exception {
try {
startCluster();
+ rollingUpgradeAndFinalize();
+ // Do it again
+ rollingUpgradeAndFinalize();
+ } finally {
+ shutdownCluster();
+ }
+ }
- // Create files in DFS.
- Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
- Path testFile2 = new Path("/" + GenericTestUtils.getMethodName() + ".02.dat");
- DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
- DFSTestUtil.createFile(fs, testFile2, FILE_SIZE, REPL_FACTOR, SEED);
-
- startRollingUpgrade();
- File blockFile = getBlockForFile(testFile2, true);
- File trashFile = getTrashFileForBlock(blockFile, false);
- deleteAndEnsureInTrash(testFile2, blockFile, trashFile);
- finalizeRollingUpgrade();
-
- // Ensure that delete file testFile2 stays deleted after finalize
- assertFalse(isTrashRootPresent());
- assert(!fs.exists(testFile2));
- assert(fs.exists(testFile1));
-
+ @Test(timeout = 600000)
+ public void testDatanodeRUwithRegularUpgrade() throws Exception {
+ try {
+ startCluster();
+ rollingUpgradeAndFinalize();
+ DataNodeProperties dn = cluster.stopDataNode(0);
+ cluster.restartNameNode(0, true, "-upgrade");
+ cluster.restartDataNode(dn, true);
+ cluster.waitActive();
+ fs = cluster.getFileSystem(0);
+ Path testFile3 = new Path("/" + GenericTestUtils.getMethodName()
+ + ".03.dat");
+ DFSTestUtil.createFile(fs, testFile3, FILE_SIZE, REPL_FACTOR, SEED);
+ cluster.getFileSystem().finalizeUpgrade();
} finally {
shutdownCluster();
}
}
+ private void rollingUpgradeAndFinalize() throws IOException, Exception {
+ // Create files in DFS.
+ Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
+ Path testFile2 = new Path("/" + GenericTestUtils.getMethodName() + ".02.dat");
+ DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
+ DFSTestUtil.createFile(fs, testFile2, FILE_SIZE, REPL_FACTOR, SEED);
+
+ startRollingUpgrade();
+ File blockFile = getBlockForFile(testFile2, true);
+ File trashFile = getTrashFileForBlock(blockFile, false);
+ cluster.triggerBlockReports();
+ deleteAndEnsureInTrash(testFile2, blockFile, trashFile);
+ finalizeRollingUpgrade();
+
+ // Ensure that delete file testFile2 stays deleted after finalize
+ assertFalse(isTrashRootPresent());
+ assert(!fs.exists(testFile2));
+ assert(fs.exists(testFile1));
+ }
+
@Test (timeout=600000)
public void testDatanodeRollingUpgradeWithRollback() throws Exception {
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf3ad98/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index d28c467..c49ef6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -306,7 +306,7 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
}
@Override
- public void restoreTrash(String bpid) {
+ public void clearTrash(String bpid) {
}