You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by so...@apache.org on 2021/04/20 15:42:09 UTC
[hadoop] branch branch-3.3 updated: HDFS-15569. Speed up the
Storage#doRecover during datanode rolling upgrade. Contributed by Hemanth
Boyina.
This is an automated email from the ASF dual-hosted git repository.
sodonnell pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.3 by this push:
new 90f63cb HDFS-15569. Speed up the Storage#doRecover during datanode rolling upgrade. Contributed by Hemanth Boyina.
90f63cb is described below
commit 90f63cb12ccdeba6e423874aa7162dcf76200315
Author: hemanthboyina <he...@apache.org>
AuthorDate: Tue Dec 22 17:27:31 2020 +0530
HDFS-15569. Speed up the Storage#doRecover during datanode rolling upgrade. Contributed by Hemanth Boyina.
(cherry picked from commit 16a20503cacf12c3d8e27ba90820384f58bed06f)
---
.../apache/hadoop/hdfs/server/common/Storage.java | 36 +++++++++++++++++-----
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index ea10f01..83a8256 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -801,8 +801,7 @@ public abstract class Storage extends StorageInfo {
case RECOVER_UPGRADE: // mv previous.tmp -> current
LOG.info("Recovering storage directory {} from previous upgrade",
rootPath);
- if (curDir.exists())
- deleteDir(curDir);
+ deleteAsync(curDir);
rename(getPreviousTmp(), curDir);
return;
case COMPLETE_ROLLBACK: // rm removed.tmp
@@ -818,21 +817,19 @@ public abstract class Storage extends StorageInfo {
case COMPLETE_FINALIZE: // rm finalized.tmp
LOG.info("Completing previous finalize for storage directory {}",
rootPath);
- deleteDir(getFinalizedTmp());
+ deleteAsync(getFinalizedTmp());
return;
case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint
LOG.info("Completing previous checkpoint for storage directory {}",
rootPath);
File prevCkptDir = getPreviousCheckpoint();
- if (prevCkptDir.exists())
- deleteDir(prevCkptDir);
+ deleteAsync(prevCkptDir);
rename(getLastCheckpointTmp(), prevCkptDir);
return;
case RECOVER_CHECKPOINT: // mv lastcheckpoint.tmp -> current
LOG.info("Recovering storage directory {} from failed checkpoint",
rootPath);
- if (curDir.exists())
- deleteDir(curDir);
+ deleteAsync(curDir);
rename(getLastCheckpointTmp(), curDir);
return;
default:
@@ -840,7 +837,30 @@ public abstract class Storage extends StorageInfo {
+ " for storage directory: " + rootPath);
}
}
-
+
+ /**
+ * Rename the curDir to curDir.tmp and delete the curDir.tmp in parallel.
+ * @throws IOException
+ */
+ private void deleteAsync(File curDir) throws IOException {
+ if (curDir.exists()) {
+ File curTmp = new File(curDir.getParent(), curDir.getName() + ".tmp");
+ if (curTmp.exists()) {
+ deleteDir(curTmp);
+ }
+ rename(curDir, curTmp);
+ new Thread("Async Delete Current.tmp") {
+ public void run() {
+ try {
+ deleteDir(curTmp);
+ } catch (IOException e) {
+ LOG.warn("Deleting storage directory {} failed", curTmp);
+ }
+ }
+ }.start();
+ }
+ }
+
/**
* @return true if the storage directory should prompt the user prior
* to formatting (i.e if the directory appears to contain some data)
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org