Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/05/12 22:49:18 UTC
[01/36] hadoop git commit: YARN-3602. TestResourceLocalizationService.testPublicResourceInitializesLocalDir fails Intermittently due to IOException from cleanup. Contributed by zhihai xu
Repository: hadoop
Updated Branches:
refs/heads/YARN-2928 b3b791be4 -> b2f589c7f
YARN-3602. TestResourceLocalizationService.testPublicResourceInitializesLocalDir fails Intermittently due to IOException from cleanup. Contributed by zhihai xu
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/951d7fc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/951d7fc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/951d7fc9
Branch: refs/heads/YARN-2928
Commit: 951d7fc957a8d8effb4b9a0a22606ccfeb2c65f1
Parents: b3b791b
Author: Xuan <xg...@apache.org>
Authored: Fri May 8 17:52:54 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:09 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../localizer/TestResourceLocalizationService.java | 6 +++++-
2 files changed, 8 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/951d7fc9/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8c6e37c..1bff9c7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -455,6 +455,9 @@ Release 2.8.0 - UNRELEASED
YARN-2206. Updated document for applications REST API response examples. (Kenji
Kikushima and Brahma Reddy Battula via zjshen)
+ YARN-3602. TestResourceLocalizationService.testPublicResourceInitializesLocalDir
+ fails Intermittently due to IOException from cleanup. (zhihai xu via xgong)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/951d7fc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 128b3de..ffa5939 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -184,7 +184,11 @@ public class TestResourceLocalizationService {
@After
public void cleanup() throws IOException {
conf = null;
- FileUtils.deleteDirectory(new File(basedir.toString()));
+ try {
+ FileUtils.deleteDirectory(new File(basedir.toString()));
+ } catch (IOException e) {
+ // ignore
+ }
}
@Test
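
An equivalent cleanup can lean on Commons IO directly: FileUtils.deleteQuietly swallows IOExceptions internally and merely returns false on failure, which matches the best-effort intent of the patch. A minimal sketch, assuming the same basedir field as the test above:

    import java.io.File;
    import org.apache.commons.io.FileUtils;

    @After
    public void cleanup() {
      conf = null;
      // deleteQuietly never throws; a false return just means the directory
      // could not be fully removed, acceptable for best-effort test teardown.
      FileUtils.deleteQuietly(new File(basedir.toString()));
    }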
[20/36] hadoop git commit: HADOOP-11950. Add cli option to test-patch to set the project-under-test (Sean Busbey via aw)
Posted by zj...@apache.org.
HADOOP-11950. Add cli option to test-patch to set the project-under-test (Sean Busbey via aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74e8340a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74e8340a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74e8340a
Branch: refs/heads/YARN-2928
Commit: 74e8340afa6eb69176a5726ff7d116d6b98a621a
Parents: ab597d0
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon May 11 11:50:01 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:13 2015 -0700
----------------------------------------------------------------------
dev-support/test-patch.sh | 13 +++++++++++--
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
2 files changed, 14 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e8340a/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 11fd9a9..ad3d22c 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -37,7 +37,6 @@ function setup_defaults
PROJECT_NAME=hadoop
HOW_TO_CONTRIBUTE="https://wiki.apache.org/hadoop/HowToContribute"
JENKINS=false
- PATCH_DIR=/tmp/${PROJECT_NAME}-test-patch/$$
BASEDIR=$(pwd)
FINDBUGS_HOME=${FINDBUGS_HOME:-}
@@ -585,6 +584,7 @@ function hadoop_usage
echo "--modulelist=<list> Specify additional modules to test (comma delimited)"
echo "--offline Avoid connecting to the Internet"
echo "--patch-dir=<dir> The directory for working and output files (default '/tmp/${PROJECT_NAME}-test-patch/pid')"
+ echo "--project=<name> The short name for project currently using test-patch (default 'hadoop')"
echo "--resetrepo Forcibly clean the repo"
echo "--run-tests Run all relevant tests below the base directory"
echo "--testlist=<list> Specify which subsystem tests to use (comma delimited)"
@@ -695,7 +695,10 @@ function parse_args
PATCH=${i#*=}
;;
--patch-dir=*)
- PATCH_DIR=${i#*=}
+ USER_PATCH_DIR=${i#*=}
+ ;;
+ --project=*)
+ PROJECT_NAME=${i#*=}
;;
--ps-cmd=*)
PS=${i#*=}
@@ -755,6 +758,12 @@ function parse_args
JENKINS=false
fi
+ if [[ -n ${USER_PATCH_DIR} ]]; then
+ PATCH_DIR="${USER_PATCH_DIR}"
+ else
+ PATCH_DIR=/tmp/${PROJECT_NAME}-test-patch/$$
+ fi
+
cd "${CWD}"
if [[ ! -d ${PATCH_DIR} ]]; then
mkdir -p "${PATCH_DIR}"
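
With the new option, a downstream project can run the same script against its own tree; a hypothetical invocation (the project name and patch file below are illustrative):

    dev-support/test-patch.sh --project=hbase --patch-dir=/tmp/hbase-test-patch HBASE-XXXX.patch

Note the ordering this requires: the PATCH_DIR default is now computed after parse_args, so a --project value can feed the default path while an explicit --patch-dir (captured as USER_PATCH_DIR) still takes precedence.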
http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e8340a/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f237b85..ee7f09b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -554,6 +554,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-11906. test-patch.sh should use 'file' command for patch
determinism (Sean Busbey via aw)
+ HADOOP-11950. Add cli option to test-patch to set the project-under-test
+ (Sean Busbey via aw)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
[07/36] hadoop git commit: YARN-3395. FairScheduler: Trim whitespaces when using username for queuename. (Zhihai Xu via kasha)
Posted by zj...@apache.org.
YARN-3395. FairScheduler: Trim whitespaces when using username for queuename. (Zhihai Xu via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86ff0736
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86ff0736
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86ff0736
Branch: refs/heads/YARN-2928
Commit: 86ff0736be016219d26ef50ebb8a491d93991526
Parents: a9a43fa
Author: Karthik Kambatla <ka...@apache.org>
Authored: Sat May 9 15:41:20 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:11 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 ++
.../scheduler/fair/QueuePlacementRule.java | 4 ++-
.../scheduler/fair/TestFairScheduler.java | 35 ++++++++++++++++++++
3 files changed, 41 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ff0736/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f7c56f1..0a6d2cc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -294,6 +294,9 @@ Release 2.8.0 - UNRELEASED
YARN-1287. Consolidate MockClocks.
(Sebastian Wong and Anubhav Dhoot via kasha)
+ YARN-3395. FairScheduler: Trim whitespaces when using username for
+ queuename. (Zhihai Xu via kasha)
+
OPTIMIZATIONS
YARN-3339. TestDockerContainerExecutor should pull a single image and not
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ff0736/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java
index 80de315..f2e32e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java
@@ -345,9 +345,11 @@ public abstract class QueuePlacementRule {
}
/**
- * Replace the periods in the username or groupname with "_dot_".
+ * Replace the periods in the username or groupname with "_dot_" and
+ * remove trailing and leading whitespace.
*/
protected String cleanName(String name) {
+ name = name.trim();
if (name.contains(".")) {
String converted = name.replaceAll("\\.", "_dot_");
LOG.warn("Name " + name + " is converted to " + converted
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ff0736/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index a26209b..69e0a8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -4335,4 +4335,39 @@ public class TestFairScheduler extends FairSchedulerTestBase {
"Failed to initialize FairScheduler"));
}
}
+
+ @Test
+ public void testUserAsDefaultQueueWithLeadingTrailingSpaceUserName()
+ throws Exception {
+ conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "true");
+ scheduler.init(conf);
+ scheduler.start();
+ scheduler.reinitialize(conf, resourceManager.getRMContext());
+ ApplicationAttemptId appAttemptId = createAppAttemptId(1, 1);
+ createApplicationWithAMResource(appAttemptId, "default", " user1", null);
+ assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1", true)
+ .getNumRunnableApps());
+ assertEquals(0, scheduler.getQueueManager().getLeafQueue("default", true)
+ .getNumRunnableApps());
+ assertEquals("root.user1", resourceManager.getRMContext().getRMApps()
+ .get(appAttemptId.getApplicationId()).getQueue());
+
+ ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
+ createApplicationWithAMResource(attId2, "default", "user1 ", null);
+ assertEquals(2, scheduler.getQueueManager().getLeafQueue("user1", true)
+ .getNumRunnableApps());
+ assertEquals(0, scheduler.getQueueManager().getLeafQueue("default", true)
+ .getNumRunnableApps());
+ assertEquals("root.user1", resourceManager.getRMContext().getRMApps()
+ .get(attId2.getApplicationId()).getQueue());
+
+ ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
+ createApplicationWithAMResource(attId3, "default", "user1", null);
+ assertEquals(3, scheduler.getQueueManager().getLeafQueue("user1", true)
+ .getNumRunnableApps());
+ assertEquals(0, scheduler.getQueueManager().getLeafQueue("default", true)
+ .getNumRunnableApps());
+ assertEquals("root.user1", resourceManager.getRMContext().getRMApps()
+ .get(attId3.getApplicationId()).getQueue());
+ }
}
[28/36] hadoop git commit: Move YARN-3493 in CHANGES.txt from 2.8 to 2.7.1
Posted by zj...@apache.org.
Move YARN-3493 in CHANGES.txt from 2.8 to 2.7.1
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39b40d9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39b40d9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39b40d9e
Branch: refs/heads/YARN-2928
Commit: 39b40d9ecf3b68f25b9bcdc1214fcecf8d0ffbea
Parents: ca82dc4
Author: Wangda Tan <wa...@apache.org>
Authored: Mon May 11 18:06:54 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:26 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39b40d9e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e02a564..18a61e7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -371,9 +371,6 @@ Release 2.8.0 - UNRELEASED
YARN-3021. YARN's delegation-token handling disallows certain trust setups
to operate properly over DistCp. (Yongjun Zhang via jianhe)
- YARN-3493. RM fails to come up with error "Failed to load/recover state"
- when mem settings are changed. (Jian He via wangda)
-
YARN-3136. Fixed a synchronization problem of
AbstractYarnScheduler#getTransferredContainers. (Sunil G via jianhe)
@@ -538,6 +535,9 @@ Release 2.7.1 - UNRELEASED
YARN-3434. Interaction between reservations and userlimit can result in
significant ULF violation (tgraves)
+ YARN-3493. RM fails to come up with error "Failed to load/recover state"
+ when mem settings are changed. (Jian He via wangda)
+
Release 2.7.0 - 2015-04-20
INCOMPATIBLE CHANGES
[15/36] hadoop git commit: HDFS-8241. Remove unused NameNode startup option -finalize. Contributed by Brahma Reddy Battula.
Posted by zj...@apache.org.
HDFS-8241. Remove unused NameNode startup option -finalize. Contributed by Brahma Reddy Battula.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1d0160c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1d0160c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1d0160c
Branch: refs/heads/YARN-2928
Commit: c1d0160c9a474b2bf2adab1881a95011e1443e9f
Parents: af51262
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 12 00:18:18 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:12 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hadoop/hdfs/server/common/HdfsServerConstants.java | 1 -
.../apache/hadoop/hdfs/server/namenode/NameNode.java | 12 ------------
.../hadoop-hdfs/src/site/markdown/HDFSCommands.md | 4 +---
.../hdfs/server/datanode/TestHdfsServerConstants.java | 1 -
5 files changed, 4 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d0160c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6b53e88..8060644 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -25,6 +25,9 @@ Trunk (Unreleased)
HDFS-8349. Remove .xml and documentation references to dfs.webhdfs.enabled.
(Ray Chiang via aajisaka)
+
+ HDFS-8241. Remove unused NameNode startup option -finalize.
+ (Brahma Reddy Battula via aajisaka)
NEW FEATURES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d0160c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 31af6c8..c664b01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -155,7 +155,6 @@ public interface HdfsServerConstants {
CHECKPOINT("-checkpoint"),
UPGRADE ("-upgrade"),
ROLLBACK("-rollback"),
- FINALIZE("-finalize"),
ROLLINGUPGRADE("-rollingUpgrade"),
IMPORT ("-importCheckpoint"),
BOOTSTRAPSTANDBY("-bootstrapStandby"),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d0160c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 979378a..1c1032b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -262,7 +262,6 @@ public class NameNode implements NameNodeStatusMXBean {
+ StartupOption.ROLLBACK.getName() + "] | \n\t["
+ StartupOption.ROLLINGUPGRADE.getName() + " "
+ RollingUpgradeStartupOption.getAllOptionString() + " ] | \n\t["
- + StartupOption.FINALIZE.getName() + "] | \n\t["
+ StartupOption.IMPORT.getName() + "] | \n\t["
+ StartupOption.INITIALIZESHAREDEDITS.getName() + "] | \n\t["
+ StartupOption.BOOTSTRAPSTANDBY.getName() + "] | \n\t["
@@ -778,8 +777,6 @@ public class NameNode implements NameNodeStatusMXBean {
* metadata</li>
* <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
* cluster back to the previous state</li>
- * <li>{@link StartupOption#FINALIZE FINALIZE} - finalize
- * previous upgrade</li>
* <li>{@link StartupOption#IMPORT IMPORT} - import checkpoint</li>
* </ul>
* The option is passed via configuration field:
@@ -1306,8 +1303,6 @@ public class NameNode implements NameNodeStatusMXBean {
startOpt.setRollingUpgradeStartupOption(args[i]);
} else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLBACK;
- } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
- startOpt = StartupOption.FINALIZE;
} else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.IMPORT;
} else if (StartupOption.BOOTSTRAPSTANDBY.getName().equalsIgnoreCase(cmd)) {
@@ -1442,13 +1437,6 @@ public class NameNode implements NameNodeStatusMXBean {
terminate(0);
return null;
}
- case FINALIZE: {
- System.err.println("Use of the argument '" + StartupOption.FINALIZE +
- "' is no longer supported. To finalize an upgrade, start the NN " +
- " and then run `hdfs dfsadmin -finalizeUpgrade'");
- terminate(1);
- return null; // avoid javac warning
- }
case ROLLBACK: {
boolean aborted = doRollback(conf, true);
terminate(aborted ? 1 : 0);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d0160c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 534d63a..2ceff89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -416,7 +416,6 @@ Usage:
[-upgradeOnly [-clusterid cid] [-renameReserved<k-v pairs>] ] |
[-rollback] |
[-rollingUpgrade <rollback |started> ] |
- [-finalize] |
[-importCheckpoint] |
[-initializeSharedEdits] |
[-bootstrapStandby] |
@@ -432,14 +431,13 @@ Usage:
| `-upgradeOnly` `[-clusterid cid]` [`-renameReserved` \<k-v pairs\>] | Upgrade the specified NameNode and then shutdown it. |
| `-rollback` | Rollback the NameNode to the previous version. This should be used after stopping the cluster and distributing the old Hadoop version. |
| `-rollingUpgrade` \<rollback\|started\> | See [Rolling Upgrade document](./HdfsRollingUpgrade.html#NameNode_Startup_Options) for the detail. |
-| `-finalize` | No longer supported. Use `dfsadmin -finalizeUpgrade` instead. |
| `-importCheckpoint` | Loads image from a checkpoint directory and save it into the current one. Checkpoint dir is read from property fs.checkpoint.dir |
| `-initializeSharedEdits` | Format a new shared edits dir and copy in enough edit log segments so that the standby NameNode can start up. |
| `-bootstrapStandby` | Allows the standby NameNode's storage directories to be bootstrapped by copying the latest namespace snapshot from the active NameNode. This is used when first configuring an HA cluster. |
| `-recover` `[-force]` | Recover lost metadata on a corrupt filesystem. See [HDFS User Guide](./HdfsUserGuide.html#Recovery_Mode) for the detail. |
| `-metadataVersion` | Verify that configured directories exist, then print the metadata versions of the software and the image. |
-Runs the namenode. More info about the upgrade, rollback and finalize is at [Upgrade Rollback](./HdfsUserGuide.html#Upgrade_and_Rollback).
+Runs the namenode. More info about the upgrade and rollback is at [Upgrade Rollback](./HdfsUserGuide.html#Upgrade_and_Rollback).
### `nfs3`
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d0160c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
index 0f24c05..0d359d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
@@ -61,7 +61,6 @@ public class TestHdfsServerConstants {
verifyStartupOptionResult("CHECKPOINT", StartupOption.CHECKPOINT, null);
verifyStartupOptionResult("UPGRADE", StartupOption.UPGRADE, null);
verifyStartupOptionResult("ROLLBACK", StartupOption.ROLLBACK, null);
- verifyStartupOptionResult("FINALIZE", StartupOption.FINALIZE, null);
verifyStartupOptionResult("ROLLINGUPGRADE", StartupOption.ROLLINGUPGRADE, null);
verifyStartupOptionResult("IMPORT", StartupOption.IMPORT, null);
verifyStartupOptionResult("INITIALIZESHAREDEDITS", StartupOption.INITIALIZESHAREDEDITS, null);
[26/36] hadoop git commit: YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug log for overall resource usage by all containers. Contributed by Naganarasimha G R.
Posted by zj...@apache.org.
YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug log for overall resource usage by all containers. Contributed by Naganarasimha G R.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65935b00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65935b00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65935b00
Branch: refs/heads/YARN-2928
Commit: 65935b00437c12a80d0f0bb1c7c32bc6f7123588
Parents: abf9e44
Author: Devaraj K <de...@apache.org>
Authored: Tue May 12 16:54:38 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:26 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../monitor/ContainersMonitorImpl.java | 27 ++++++++++++++------
2 files changed, 22 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65935b00/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 18a61e7..78bda68 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -300,6 +300,9 @@ Release 2.8.0 - UNRELEASED
YARN-3587. Fix the javadoc of DelegationTokenSecretManager in yarn, etc.
projects. (Gabor Liptak via junping_du)
+ YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug
+ log for overall resource usage by all containers. (Naganarasimha G R via devaraj)
+
OPTIMIZATIONS
YARN-3339. TestDockerContainerExecutor should pull a single image and not
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65935b00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index ebd43d6..3db999b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -445,8 +445,10 @@ public class ContainersMonitorImpl extends AbstractService implements
// Now do the monitoring for the trackingContainers
// Check memory usage and kill any overflowing containers
- long vmemStillInUsage = 0;
- long pmemStillInUsage = 0;
+ long vmemUsageByAllContainers = 0;
+ long pmemByAllContainers = 0;
+ long cpuUsagePercentPerCoreByAllContainers = 0;
+ long cpuUsageTotalCoresByAllContainers = 0;
for (Iterator<Map.Entry<ContainerId, ProcessTreeInfo>> it =
trackingContainers.entrySet().iterator(); it.hasNext();) {
@@ -587,6 +589,13 @@ public class ContainersMonitorImpl extends AbstractService implements
containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
}
+ // Accounting the total memory in usage for all containers
+ vmemUsageByAllContainers += currentVmemUsage;
+ pmemByAllContainers += currentPmemUsage;
+ // Accounting the total cpu usage for all containers
+ cpuUsagePercentPerCoreByAllContainers += cpuUsagePercentPerCore;
+ cpuUsageTotalCoresByAllContainers += cpuUsagePercentPerCore;
+
if (isMemoryOverLimit) {
// Virtual or physical memory over limit. Fail the container and
// remove
@@ -603,12 +612,6 @@ public class ContainersMonitorImpl extends AbstractService implements
containerExitStatus, msg));
it.remove();
LOG.info("Removed ProcessTree with root " + pId);
- } else {
- // Accounting the total memory in usage for all containers that
- // are still
- // alive and within limits.
- vmemStillInUsage += currentVmemUsage;
- pmemStillInUsage += currentPmemUsage;
}
} catch (Exception e) {
@@ -629,6 +632,14 @@ public class ContainersMonitorImpl extends AbstractService implements
}
}
}
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Total Resource Usage stats in NM by all containers : "
+ + "Virtual Memory= " + vmemUsageByAllContainers
+ + ", Physical Memory= " + pmemByAllContainers
+ + ", Total CPU usage= " + cpuUsageTotalCoresByAllContainers
+ + ", Total CPU(% per core) usage"
+ + cpuUsagePercentPerCoreByAllContainers);
+ }
try {
Thread.sleep(monitoringInterval);
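
The accounting moved out of the else branch, so the new totals include containers that are about to be killed for exceeding their limits, not just those still alive within limits. Schematically (a simplified sketch with hypothetical helpers, not the patch itself):

    long vmemUsageByAllContainers = 0;
    long pmemByAllContainers = 0;
    for (ProcessTreeInfo ptInfo : trackingContainers.values()) {
      // accumulate before the over-limit check, so killed containers count
      vmemUsageByAllContainers += currentVmemUsage(ptInfo);  // hypothetical helper
      pmemByAllContainers += currentPmemUsage(ptInfo);       // hypothetical helper
      if (isMemoryOverLimit(ptInfo)) {                       // hypothetical helper
        // fail the container and stop tracking it
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Total Resource Usage stats in NM by all containers : "
          + "Virtual Memory= " + vmemUsageByAllContainers
          + ", Physical Memory= " + pmemByAllContainers);
    }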
[25/36] hadoop git commit: MAPREDUCE-5465. Tasks are often killed before they exit on their own. Contributed by Ming Ma
Posted by zj...@apache.org.
MAPREDUCE-5465. Tasks are often killed before they exit on their own. Contributed by Ming Ma
Conflicts:
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5625ac46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5625ac46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5625ac46
Branch: refs/heads/YARN-2928
Commit: 5625ac469a4d7ba4d936a0002c0fe61828c8eedb
Parents: aa3e32d
Author: Jason Lowe <jl...@apache.org>
Authored: Mon May 11 22:37:35 2015 +0000
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:13 2015 -0700
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../hadoop/mapred/LocalContainerLauncher.java | 12 +-
.../hadoop/mapreduce/v2/app/AppContext.java | 2 +
.../hadoop/mapreduce/v2/app/MRAppMaster.java | 39 +-
.../v2/app/TaskAttemptFinishingMonitor.java | 63 +++
.../v2/app/client/MRClientService.java | 2 +-
.../v2/app/job/TaskAttemptStateInternal.java | 39 +-
.../v2/app/job/event/TaskAttemptEventType.java | 3 +
.../v2/app/job/impl/TaskAttemptImpl.java | 445 +++++++++++++++----
.../v2/app/launcher/ContainerLauncher.java | 8 +-
.../v2/app/launcher/ContainerLauncherImpl.java | 11 +-
.../mapred/TestTaskAttemptFinishingMonitor.java | 108 +++++
.../apache/hadoop/mapreduce/v2/app/MRApp.java | 16 +
.../hadoop/mapreduce/v2/app/MockAppContext.java | 6 +
.../hadoop/mapreduce/v2/app/TestFail.java | 2 +
.../hadoop/mapreduce/v2/app/TestKill.java | 142 ++++--
.../mapreduce/v2/app/TestRuntimeEstimators.java | 5 +
.../v2/app/job/impl/TestTaskAttempt.java | 263 ++++++++++-
.../apache/hadoop/mapreduce/MRJobConfig.java | 10 +-
.../src/main/resources/mapred-default.xml | 20 +
.../hadoop/mapreduce/v2/hs/JobHistory.java | 6 +
.../v2/TestSpeculativeExecutionWithMRApp.java | 6 +-
22 files changed, 1060 insertions(+), 151 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 2152be0..e28d575 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -426,6 +426,9 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6353. Divide by zero error in MR AM when calculating available
containers. (Anubhav Dhoot via kasha)
+ MAPREDUCE-5465. Tasks are often killed before they exit on their own
+ (Ming Ma via jlowe)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index ffc5326..52b3497 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -264,7 +264,8 @@ public class LocalContainerLauncher extends AbstractService implements
context.getEventHandler().handle(
new TaskAttemptEvent(taId,
TaskAttemptEventType.TA_CONTAINER_CLEANED));
-
+ } else if (event.getType() == EventType.CONTAINER_COMPLETED) {
+ LOG.debug("Container completed " + event.toString());
} else {
LOG.warn("Ignoring unexpected event " + event.toString());
}
@@ -314,7 +315,14 @@ public class LocalContainerLauncher extends AbstractService implements
}
runSubtask(remoteTask, ytask.getType(), attemptID, numMapTasks,
(numReduceTasks > 0), localMapFiles);
-
+
+ // In non-uber mode, TA gets TA_CONTAINER_COMPLETED from MRAppMaster
+ // as part of NM -> RM -> AM notification route.
+ // In uber mode, given the task run inside the MRAppMaster container,
+ // we have to simulate the notification.
+ context.getEventHandler().handle(new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
+
} catch (RuntimeException re) {
JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
jce.addCounterUpdate(JobCounter.NUM_FAILED_UBERTASKS, 1);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java
index 31e282a..4af11c3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java
@@ -67,4 +67,6 @@ public interface AppContext {
boolean hasSuccessfullyUnregistered();
String getNMHostname();
+
+ TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 9272c7a..fa0a432 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -208,6 +208,14 @@ public class MRAppMaster extends CompositeService {
private SpeculatorEventDispatcher speculatorEventDispatcher;
private AMPreemptionPolicy preemptionPolicy;
+ // After a task attempt completes from TaskUmbilicalProtocol's point of view,
+ // it will be transitioned to finishing state.
+ // taskAttemptFinishingMonitor is just a timer for attempts in finishing
+ // state. If the attempt stays in finishing state for too long,
+ // taskAttemptFinishingMonitor will notify the attempt via TA_TIMED_OUT
+ // event.
+ private TaskAttemptFinishingMonitor taskAttemptFinishingMonitor;
+
private Job job;
private Credentials jobCredentials = new Credentials(); // Filled during init
protected UserGroupInformation currentUser; // Will be setup during init
@@ -250,6 +258,12 @@ public class MRAppMaster extends CompositeService {
logSyncer = TaskLog.createLogSyncer();
LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
+ protected TaskAttemptFinishingMonitor createTaskAttemptFinishingMonitor(
+ EventHandler eventHandler) {
+ TaskAttemptFinishingMonitor monitor =
+ new TaskAttemptFinishingMonitor(eventHandler);
+ return monitor;
+ }
@Override
protected void serviceInit(final Configuration conf) throws Exception {
@@ -260,7 +274,11 @@ public class MRAppMaster extends CompositeService {
initJobCredentialsAndUGI(conf);
- context = new RunningAppContext(conf);
+ dispatcher = createDispatcher();
+ addIfService(dispatcher);
+ taskAttemptFinishingMonitor = createTaskAttemptFinishingMonitor(dispatcher.getEventHandler());
+ addIfService(taskAttemptFinishingMonitor);
+ context = new RunningAppContext(conf, taskAttemptFinishingMonitor);
// Job name is the same as the app name util we support DAG of jobs
// for an app later
@@ -327,9 +345,6 @@ public class MRAppMaster extends CompositeService {
}
if (errorHappenedShutDown) {
- dispatcher = createDispatcher();
- addIfService(dispatcher);
-
NoopEventHandler eater = new NoopEventHandler();
//We do not have a JobEventDispatcher in this path
dispatcher.register(JobEventType.class, eater);
@@ -376,9 +391,6 @@ public class MRAppMaster extends CompositeService {
} else {
committer = createOutputCommitter(conf);
- dispatcher = createDispatcher();
- addIfService(dispatcher);
-
//service to handle requests from JobClient
clientService = createClientService(context);
// Init ClientService separately so that we stop it separately, since this
@@ -967,10 +979,14 @@ public class MRAppMaster extends CompositeService {
private final ClientToAMTokenSecretManager clientToAMTokenSecretManager;
private TimelineClient timelineClient = null;
- public RunningAppContext(Configuration config) {
+ private final TaskAttemptFinishingMonitor taskAttemptFinishingMonitor;
+
+ public RunningAppContext(Configuration config,
+ TaskAttemptFinishingMonitor taskAttemptFinishingMonitor) {
this.conf = config;
this.clientToAMTokenSecretManager =
new ClientToAMTokenSecretManager(appAttemptID, null);
+ this.taskAttemptFinishingMonitor = taskAttemptFinishingMonitor;
if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA,
MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)
&& conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
@@ -1072,7 +1088,12 @@ public class MRAppMaster extends CompositeService {
public String getNMHostname() {
return nmHost;
}
-
+
+ @Override
+ public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() {
+ return taskAttemptFinishingMonitor;
+ }
+
// Get Timeline Collector's address (get sync from RM)
public TimelineClient getTimelineClient() {
return timelineClient;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptFinishingMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptFinishingMonitor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptFinishingMonitor.java
new file mode 100644
index 0000000..f603398
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptFinishingMonitor.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor;
+import org.apache.hadoop.yarn.util.SystemClock;
+
+/**
+ * This class generates TA_TIMED_OUT if the task attempt stays in FINISHING
+ * state for too long.
+ */
+@SuppressWarnings({"unchecked", "rawtypes"})
+public class TaskAttemptFinishingMonitor extends
+ AbstractLivelinessMonitor<TaskAttemptId> {
+
+ private EventHandler eventHandler;
+
+ public TaskAttemptFinishingMonitor(EventHandler eventHandler) {
+ super("TaskAttemptFinishingMonitor", new SystemClock());
+ this.eventHandler = eventHandler;
+ }
+
+ public void init(Configuration conf) {
+ super.init(conf);
+ int expireIntvl = conf.getInt(MRJobConfig.TASK_EXIT_TIMEOUT,
+ MRJobConfig.TASK_EXIT_TIMEOUT_DEFAULT);
+ int checkIntvl = conf.getInt(
+ MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS,
+ MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS_DEFAULT);
+
+ setExpireInterval(expireIntvl);
+ setMonitorInterval(checkIntvl);
+ }
+
+ @Override
+ protected void expire(TaskAttemptId id) {
+ eventHandler.handle(
+ new TaskAttemptEvent(id,
+ TaskAttemptEventType.TA_TIMED_OUT));
+ }
+}
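
A usage sketch for the monitor: register and unregister are inherited from AbstractLivelinessMonitor, and the call sites below are illustrative rather than quoted from the patch:

    // on entering SUCCESS_FINISHING_CONTAINER or FAIL_FINISHING_CONTAINER:
    context.getTaskAttemptFinishingMonitor().register(attemptId);

    // on TA_CONTAINER_COMPLETED or TA_CONTAINER_CLEANED, cancel the timer:
    context.getTaskAttemptFinishingMonitor().unregister(attemptId);

    // if neither event arrives within TASK_EXIT_TIMEOUT, expire(attemptId)
    // fires TA_TIMED_OUT, pushing the attempt on to *_CONTAINER_CLEANUP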
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
index ceb1dbf..d378b0a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
@@ -370,7 +370,7 @@ public class MRClientService extends AbstractService implements ClientService {
new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
appContext.getEventHandler().handle(
new TaskAttemptEvent(taskAttemptId,
- TaskAttemptEventType.TA_FAILMSG));
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT));
FailTaskAttemptResponse response = recordFactory.
newRecordInstance(FailTaskAttemptResponse.class);
return response;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttemptStateInternal.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttemptStateInternal.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttemptStateInternal.java
index f6c3e57..5f17651 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttemptStateInternal.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttemptStateInternal.java
@@ -30,9 +30,42 @@ public enum TaskAttemptStateInternal {
UNASSIGNED,
ASSIGNED,
RUNNING,
- COMMIT_PENDING,
- SUCCESS_CONTAINER_CLEANUP,
- SUCCEEDED,
+ COMMIT_PENDING,
+
+ // Transition into SUCCESS_FINISHING_CONTAINER
+ // After the attempt finishes successfully from
+ // TaskUmbilicalProtocol's point of view, it will transition to
+ // SUCCESS_FINISHING_CONTAINER state. That will give a chance for the
+ // container to exit by itself. In the transition,
+ // the attempt will notify the task via T_ATTEMPT_SUCCEEDED so that
+ // from job point of view, the task is considered succeeded.
+
+ // Transition out of SUCCESS_FINISHING_CONTAINER
+ // The attempt will transition from SUCCESS_FINISHING_CONTAINER to
+ // SUCCESS_CONTAINER_CLEANUP if it doesn't receive container exit
+ // notification within TASK_EXIT_TIMEOUT;
+ // Or it will transition to SUCCEEDED if it receives container exit
+ // notification from YARN.
+ SUCCESS_FINISHING_CONTAINER,
+
+ // Transition into FAIL_FINISHING_CONTAINER
+ // After the attempt fails from
+ // TaskUmbilicalProtocol's point of view, it will transition to
+ // FAIL_FINISHING_CONTAINER state. That will give a chance for the container
+ // to exit by itself. In the transition,
+ // the attempt will notify the task via T_ATTEMPT_FAILED so that
+ // from job point of view, the task is considered failed.
+
+ // Transition out of FAIL_FINISHING_CONTAINER
+ // The attempt will transition from FAIL_FINISHING_CONTAINER to
+ // FAIL_CONTAINER_CLEANUP if it doesn't receive container exit
+ // notification within TASK_EXIT_TIMEOUT;
+ // Or it will transition to FAILED if it receives container exit
+ // notification from YARN.
+ FAIL_FINISHING_CONTAINER,
+
+ SUCCESS_CONTAINER_CLEANUP,
+ SUCCEEDED,
FAIL_CONTAINER_CLEANUP,
FAIL_TASK_CLEANUP,
FAILED,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java
index 1f05ac3..61de032 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java
@@ -49,6 +49,9 @@ public enum TaskAttemptEventType {
TA_TIMED_OUT,
TA_PREEMPTED,
+ //Producer:Client
+ TA_FAILMSG_BY_CLIENT,
+
//Producer:TaskCleaner
TA_CLEANUP_DONE,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index f4b434b..7e82df2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -184,8 +184,20 @@ public abstract class TaskAttemptImpl implements
private Locality locality;
private Avataar avataar;
- private static final CleanupContainerTransition CLEANUP_CONTAINER_TRANSITION =
- new CleanupContainerTransition();
+ private static final CleanupContainerTransition
+ CLEANUP_CONTAINER_TRANSITION = new CleanupContainerTransition();
+ private static final MoveContainerToSucceededFinishingTransition
+ SUCCEEDED_FINISHING_TRANSITION =
+ new MoveContainerToSucceededFinishingTransition();
+ private static final MoveContainerToFailedFinishingTransition
+ FAILED_FINISHING_TRANSITION =
+ new MoveContainerToFailedFinishingTransition();
+ private static final ExitFinishingOnTimeoutTransition
+ FINISHING_ON_TIMEOUT_TRANSITION =
+ new ExitFinishingOnTimeoutTransition();
+
+ private static final FinalizeFailedTransition FINALIZE_FAILED_TRANSITION =
+ new FinalizeFailedTransition();
private static final DiagnosticInformationUpdater
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION
@@ -204,6 +216,8 @@ public abstract class TaskAttemptImpl implements
TaskAttemptEventType.TA_COMMIT_PENDING,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
+ TaskAttemptEventType.TA_TIMED_OUT,
TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE);
private static final StateMachineFactory
@@ -221,16 +235,16 @@ public abstract class TaskAttemptImpl implements
.addTransition(TaskAttemptStateInternal.NEW, TaskAttemptStateInternal.KILLED,
TaskAttemptEventType.TA_KILL, new KilledTransition())
.addTransition(TaskAttemptStateInternal.NEW, TaskAttemptStateInternal.FAILED,
- TaskAttemptEventType.TA_FAILMSG, new FailedTransition())
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT, new FailedTransition())
.addTransition(TaskAttemptStateInternal.NEW,
EnumSet.of(TaskAttemptStateInternal.FAILED,
TaskAttemptStateInternal.KILLED,
TaskAttemptStateInternal.SUCCEEDED),
TaskAttemptEventType.TA_RECOVER, new RecoverTransition())
.addTransition(TaskAttemptStateInternal.NEW,
- TaskAttemptStateInternal.NEW,
- TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
- DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ TaskAttemptStateInternal.NEW,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Transitions from the UNASSIGNED state.
.addTransition(TaskAttemptStateInternal.UNASSIGNED,
@@ -238,14 +252,14 @@ public abstract class TaskAttemptImpl implements
new ContainerAssignedTransition())
.addTransition(TaskAttemptStateInternal.UNASSIGNED, TaskAttemptStateInternal.KILLED,
TaskAttemptEventType.TA_KILL, new DeallocateContainerTransition(
- TaskAttemptStateInternal.KILLED, true))
+ TaskAttemptStateInternal.KILLED, true))
.addTransition(TaskAttemptStateInternal.UNASSIGNED, TaskAttemptStateInternal.FAILED,
- TaskAttemptEventType.TA_FAILMSG, new DeallocateContainerTransition(
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT, new DeallocateContainerTransition(
TaskAttemptStateInternal.FAILED, true))
.addTransition(TaskAttemptStateInternal.UNASSIGNED,
- TaskAttemptStateInternal.UNASSIGNED,
- TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
- DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ TaskAttemptStateInternal.UNASSIGNED,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Transitions from the ASSIGNED state.
.addTransition(TaskAttemptStateInternal.ASSIGNED, TaskAttemptStateInternal.RUNNING,
@@ -258,15 +272,19 @@ public abstract class TaskAttemptImpl implements
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
new DeallocateContainerTransition(TaskAttemptStateInternal.FAILED, false))
.addTransition(TaskAttemptStateInternal.ASSIGNED,
- TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
- CLEANUP_CONTAINER_TRANSITION)
+ FINALIZE_FAILED_TRANSITION)
.addTransition(TaskAttemptStateInternal.ASSIGNED,
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_KILL, CLEANUP_CONTAINER_TRANSITION)
- .addTransition(TaskAttemptStateInternal.ASSIGNED,
+ .addTransition(TaskAttemptStateInternal.ASSIGNED,
+ TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ TaskAttemptEventType.TA_FAILMSG, FAILED_FINISHING_TRANSITION)
+ .addTransition(TaskAttemptStateInternal.ASSIGNED,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
- TaskAttemptEventType.TA_FAILMSG, CLEANUP_CONTAINER_TRANSITION)
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
+ CLEANUP_CONTAINER_TRANSITION)
// Transitions from RUNNING state.
.addTransition(TaskAttemptStateInternal.RUNNING, TaskAttemptStateInternal.RUNNING,
@@ -274,23 +292,27 @@ public abstract class TaskAttemptImpl implements
.addTransition(TaskAttemptStateInternal.RUNNING, TaskAttemptStateInternal.RUNNING,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
- // If no commit is required, task directly goes to success
+ // If no commit is required, task goes to finishing state
+ // This will give a chance for the container to exit by itself
.addTransition(TaskAttemptStateInternal.RUNNING,
- TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
- TaskAttemptEventType.TA_DONE, CLEANUP_CONTAINER_TRANSITION)
+ TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ TaskAttemptEventType.TA_DONE, SUCCEEDED_FINISHING_TRANSITION)
// If commit is required, task goes through commit pending state.
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptEventType.TA_COMMIT_PENDING, new CommitPendingTransition())
// Failure handling while RUNNING
.addTransition(TaskAttemptStateInternal.RUNNING,
+ TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ TaskAttemptEventType.TA_FAILMSG, FAILED_FINISHING_TRANSITION)
+ .addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
- TaskAttemptEventType.TA_FAILMSG, CLEANUP_CONTAINER_TRANSITION)
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT, CLEANUP_CONTAINER_TRANSITION)
//for handling container exit without sending the done or fail msg
.addTransition(TaskAttemptStateInternal.RUNNING,
- TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
- CLEANUP_CONTAINER_TRANSITION)
+ FINALIZE_FAILED_TRANSITION)
// Timeout handling while RUNNING
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
@@ -301,12 +323,97 @@ public abstract class TaskAttemptImpl implements
TaskAttemptEventType.TA_CONTAINER_CLEANED, new KilledTransition())
// Kill handling
.addTransition(TaskAttemptStateInternal.RUNNING,
- TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, TaskAttemptEventType.TA_KILL,
+ TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_KILL,
CLEANUP_CONTAINER_TRANSITION)
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.KILLED,
TaskAttemptEventType.TA_PREEMPTED, new PreemptedTransition())
+ // Transitions from SUCCESS_FINISHING_CONTAINER state
+ // When the container exits by itself, the notification of container
+ // completed event will be routed via NM -> RM -> AM.
+ // After MRAppMaster gets notification from RM, it will generate
+ // TA_CONTAINER_COMPLETED event.
+ .addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.SUCCEEDED,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ new ExitFinishingOnContainerCompletedTransition())
+ // Since the TA notifies the task of T_ATTEMPT_SUCCEEDED when it
+ // transitions to SUCCESS_FINISHING_CONTAINER, it is possible to receive
+ // the event TA_CONTAINER_CLEANED in the following scenario.
+ // 1. It is the last task for the job.
+ // 2. After the task receives T_ATTEMPT_SUCCEEDED, it notifies the job.
+ // 3. The job is marked completed.
+ // 4. As part of MRAppMaster's shutdown, all containers are killed.
+ .addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.SUCCEEDED,
+ TaskAttemptEventType.TA_CONTAINER_CLEANED,
+ new ExitFinishingOnContainerCleanedupTransition())
+ // The client wants to kill the task attempt. Since the attempt is in
+ // a finishing state, it can end up either succeeded or killed: a
+ // reducer goes to the succeeded state;
+ // any other attempt goes to the killed state.
+ .addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ EnumSet.of(TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
+ TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP),
+ TaskAttemptEventType.TA_KILL,
+ new KilledAfterSucceededFinishingTransition())
+ // The attempt has stayed in a finishing state for too long;
+ // clean up the container.
+ .addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_TIMED_OUT, FINISHING_ON_TIMEOUT_TRANSITION)
+ .addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // ignore-able events
+ .addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ EnumSet.of(TaskAttemptEventType.TA_UPDATE,
+ TaskAttemptEventType.TA_DONE,
+ TaskAttemptEventType.TA_COMMIT_PENDING,
+ TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT))
+
+ // Transitions from FAIL_FINISHING_CONTAINER state
+ // When the container exits by itself, the container-completed
+ // notification is routed via NM -> RM -> AM.
+ // After MRAppMaster gets the notification from the RM, it generates
+ // a TA_CONTAINER_COMPLETED event.
+ .addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.FAILED,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED,
+ new ExitFinishingOnContainerCompletedTransition())
+ // Since the TA notifies the task of T_ATTEMPT_FAILED when it
+ // transitions to FAIL_FINISHING_CONTAINER, it is possible to receive
+ // the event TA_CONTAINER_CLEANED in the following scenario.
+ // 1. It is the last task attempt for the task.
+ // 2. After the task receives T_ATTEMPT_FAILED, it notifies the job.
+ // 3. The job is marked failed.
+ // 4. As part of MRAppMaster's shutdown, all containers are killed.
+ .addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.FAILED,
+ TaskAttemptEventType.TA_CONTAINER_CLEANED,
+ new ExitFinishingOnContainerCleanedupTransition())
+ .addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_TIMED_OUT, FINISHING_ON_TIMEOUT_TRANSITION)
+ .addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
+ DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
+ // ignore-able events
+ .addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ EnumSet.of(TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_UPDATE,
+ TaskAttemptEventType.TA_DONE,
+ TaskAttemptEventType.TA_COMMIT_PENDING,
+ TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT))
+
// Transitions from COMMIT_PENDING state
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.COMMIT_PENDING, TaskAttemptEventType.TA_UPDATE,
@@ -316,22 +423,27 @@ public abstract class TaskAttemptImpl implements
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
- TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
- TaskAttemptEventType.TA_DONE, CLEANUP_CONTAINER_TRANSITION)
+ TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
+ TaskAttemptEventType.TA_DONE, SUCCEEDED_FINISHING_TRANSITION)
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
- TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, TaskAttemptEventType.TA_KILL,
+ TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_KILL,
CLEANUP_CONTAINER_TRANSITION)
// if container killed by AM shutting down
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.KILLED,
TaskAttemptEventType.TA_CONTAINER_CLEANED, new KilledTransition())
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
- TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
- TaskAttemptEventType.TA_FAILMSG, CLEANUP_CONTAINER_TRANSITION)
+ TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
+ TaskAttemptEventType.TA_FAILMSG, FAILED_FINISHING_TRANSITION)
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
+ CLEANUP_CONTAINER_TRANSITION)
+ .addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
+ TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
- CLEANUP_CONTAINER_TRANSITION)
+ FINALIZE_FAILED_TRANSITION)
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_TIMED_OUT, CLEANUP_CONTAINER_TRANSITION)
@@ -348,8 +460,8 @@ public abstract class TaskAttemptImpl implements
// Transitions from SUCCESS_CONTAINER_CLEANUP state
// kill and cleanup the container
.addTransition(TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
- TaskAttemptStateInternal.SUCCEEDED, TaskAttemptEventType.TA_CONTAINER_CLEANED,
- new SucceededTransition())
+ TaskAttemptStateInternal.SUCCEEDED,
+ TaskAttemptEventType.TA_CONTAINER_CLEANED)
.addTransition(
TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
@@ -360,6 +472,7 @@ public abstract class TaskAttemptImpl implements
TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
EnumSet.of(TaskAttemptEventType.TA_KILL,
TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_TIMED_OUT,
TaskAttemptEventType.TA_CONTAINER_COMPLETED))
@@ -383,6 +496,7 @@ public abstract class TaskAttemptImpl implements
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_TIMED_OUT))
// Transitions from KILL_CONTAINER_CLEANUP
@@ -405,6 +519,7 @@ public abstract class TaskAttemptImpl implements
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_TIMED_OUT))
// Transitions from FAIL_TASK_CLEANUP
@@ -425,6 +540,7 @@ public abstract class TaskAttemptImpl implements
TaskAttemptEventType.TA_COMMIT_PENDING,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_CONTAINER_CLEANED,
// Container launch events can arrive late
TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
@@ -447,6 +563,7 @@ public abstract class TaskAttemptImpl implements
TaskAttemptEventType.TA_COMMIT_PENDING,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_CONTAINER_CLEANED,
TaskAttemptEventType.TA_PREEMPTED,
// Container launch events can arrive late
@@ -460,7 +577,7 @@ public abstract class TaskAttemptImpl implements
new TooManyFetchFailureTransition())
.addTransition(TaskAttemptStateInternal.SUCCEEDED,
EnumSet.of(TaskAttemptStateInternal.SUCCEEDED, TaskAttemptStateInternal.KILLED),
- TaskAttemptEventType.TA_KILL,
+ TaskAttemptEventType.TA_KILL,
new KilledAfterSuccessTransition())
.addTransition(
TaskAttemptStateInternal.SUCCEEDED, TaskAttemptStateInternal.SUCCEEDED,
@@ -470,6 +587,10 @@ public abstract class TaskAttemptImpl implements
.addTransition(TaskAttemptStateInternal.SUCCEEDED,
TaskAttemptStateInternal.SUCCEEDED,
EnumSet.of(TaskAttemptEventType.TA_FAILMSG,
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
+ // TaskAttemptFinishingMonitor might time out the attempt right
+ // after the attempt receives TA_CONTAINER_COMPLETED.
+ TaskAttemptEventType.TA_TIMED_OUT,
TaskAttemptEventType.TA_CONTAINER_CLEANED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED))
@@ -1213,21 +1334,21 @@ public abstract class TaskAttemptImpl implements
return TaskAttemptState.STARTING;
case COMMIT_PENDING:
return TaskAttemptState.COMMIT_PENDING;
- case FAILED:
- return TaskAttemptState.FAILED;
- case KILLED:
- return TaskAttemptState.KILLED;
- // All CLEANUP states considered as RUNNING since events have not gone out
- // to the Task yet. May be possible to consider them as a Finished state.
case FAIL_CONTAINER_CLEANUP:
case FAIL_TASK_CLEANUP:
+ case FAIL_FINISHING_CONTAINER:
+ case FAILED:
+ return TaskAttemptState.FAILED;
case KILL_CONTAINER_CLEANUP:
case KILL_TASK_CLEANUP:
- case SUCCESS_CONTAINER_CLEANUP:
+ case KILLED:
+ return TaskAttemptState.KILLED;
case RUNNING:
return TaskAttemptState.RUNNING;
case NEW:
return TaskAttemptState.NEW;
+ case SUCCESS_CONTAINER_CLEANUP:
+ case SUCCESS_FINISHING_CONTAINER:
case SUCCEEDED:
return TaskAttemptState.SUCCEEDED;
default:
@@ -1429,6 +1550,15 @@ public abstract class TaskAttemptImpl implements
}
}
+ private static void finalizeProgress(TaskAttemptImpl taskAttempt) {
+ // unregister it from TaskAttemptListener so that it stops listening
+ taskAttempt.taskAttemptListener.unregister(
+ taskAttempt.attemptId, taskAttempt.jvmID);
+ taskAttempt.reportedStatus.progress = 1.0f;
+ taskAttempt.updateProgressSplits();
+ }
+
static class RequestContainerTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
private final boolean rescheduled;
@@ -1661,53 +1791,66 @@ public abstract class TaskAttemptImpl implements
}
}
- private static class SucceededTransition implements
+ /**
+ * Transition from SUCCESS_FINISHING_CONTAINER or FAIL_FINISHING_CONTAINER
+ * state upon receiving TA_CONTAINER_COMPLETED event
+ */
+ private static class ExitFinishingOnContainerCompletedTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
- public void transition(TaskAttemptImpl taskAttempt,
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ taskAttempt.appContext.getTaskAttemptFinishingMonitor().unregister(
+ taskAttempt.attemptId);
+ sendContainerCompleted(taskAttempt);
+ }
+ }
+
+ private static class ExitFinishingOnContainerCleanedupTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @SuppressWarnings("unchecked")
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
- //set the finish time
- taskAttempt.setFinishTime();
- taskAttempt.eventHandler.handle(
- createJobCounterUpdateEventTASucceeded(taskAttempt));
- taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.SUCCEEDED);
- taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
- taskAttempt.attemptId,
- TaskEventType.T_ATTEMPT_SUCCEEDED));
- taskAttempt.eventHandler.handle
- (new SpeculatorEvent
- (taskAttempt.reportedStatus, taskAttempt.clock.getTime()));
- }
+ taskAttempt.appContext.getTaskAttemptFinishingMonitor().unregister(
+ taskAttempt.attemptId);
+ }
}
private static class FailedTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
- public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
// set the finish time
taskAttempt.setFinishTime();
-
- if (taskAttempt.getLaunchTime() != 0) {
- taskAttempt.eventHandler
- .handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
- TaskAttemptUnsuccessfulCompletionEvent tauce =
- createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
- TaskAttemptStateInternal.FAILED);
- taskAttempt.eventHandler.handle(new JobHistoryEvent(
- taskAttempt.attemptId.getTaskId().getJobId(), tauce));
- // taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.FAILED); Not
- // handling failed map/reduce events.
- }else {
- LOG.debug("Not generating HistoryFinish event since start event not " +
- "generated for taskAttempt: " + taskAttempt.getID());
- }
- taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
- taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
+ notifyTaskAttemptFailed(taskAttempt);
}
}
+ private static class FinalizeFailedTransition extends FailedTransition {
+ @SuppressWarnings("unchecked")
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ finalizeProgress(taskAttempt);
+ sendContainerCompleted(taskAttempt);
+ super.transition(taskAttempt, event);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private static void sendContainerCompleted(TaskAttemptImpl taskAttempt) {
+ taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
+ taskAttempt.attemptId,
+ taskAttempt.container.getId(), StringInterner
+ .weakIntern(taskAttempt.container.getNodeId().toString()),
+ taskAttempt.container.getContainerToken(),
+ ContainerLauncher.EventType.CONTAINER_COMPLETED));
+ }
+
private static class RecoverTransition implements
MultipleArcTransition<TaskAttemptImpl, TaskAttemptEvent, TaskAttemptStateInternal> {
@@ -1832,6 +1975,35 @@ public abstract class TaskAttemptImpl implements
}
}
+ private static class KilledAfterSucceededFinishingTransition
+ implements MultipleArcTransition<TaskAttemptImpl, TaskAttemptEvent,
+ TaskAttemptStateInternal> {
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public TaskAttemptStateInternal transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ taskAttempt.appContext.getTaskAttemptFinishingMonitor().unregister(
+ taskAttempt.attemptId);
+ sendContainerCleanup(taskAttempt, event);
+ if (taskAttempt.getID().getTaskId().getTaskType() == TaskType.REDUCE) {
+ // After a reduce task has succeeded, its outputs are safe in HDFS, so
+ // logically such a task should not be killed. We only come here when
+ // there is a race condition in the event queue, e.g. some logic sends
+ // a kill request to this attempt when the successful completion event
+ // for this task is already in the event queue, so the kill event gets
+ // executed immediately after the attempt is marked successful and
+ // results in this transition being exercised.
+ // Ignore the kill for reduce tasks.
+ LOG.info("Ignoring killed event for successful reduce task attempt " +
+ taskAttempt.getID().toString());
+ return TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP;
+ } else {
+ return TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP;
+ }
+ }
+ }
+
private static class KilledTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@@ -1887,6 +2059,31 @@ public abstract class TaskAttemptImpl implements
}
}
+ /**
+ * Transition from SUCCESS_FINISHING_CONTAINER or FAIL_FINISHING_CONTAINER
+ * state upon receiving TA_TIMED_OUT event
+ */
+ private static class ExitFinishingOnTimeoutTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @SuppressWarnings("unchecked")
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ taskAttempt.appContext.getTaskAttemptFinishingMonitor().unregister(
+ taskAttempt.attemptId);
+ // The attempt has stayed in a finishing state for too long.
+ String msg = "Task attempt " + taskAttempt.getID() + " is done from " +
+ "TaskUmbilicalProtocol's point of view. However, it stays in " +
+ "finishing state for too long";
+ LOG.warn(msg);
+ taskAttempt.addDiagnosticInfo(msg);
+ sendContainerCleanup(taskAttempt, event);
+ }
+ }
+
+ /**
+ * Finish and clean up the container
+ */
private static class CleanupContainerTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@@ -1894,27 +2091,103 @@ public abstract class TaskAttemptImpl implements
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
// unregister it to TaskAttemptListener so that it stops listening
- // for it
- taskAttempt.taskAttemptListener.unregister(
- taskAttempt.attemptId, taskAttempt.jvmID);
+ // for it.
+ finalizeProgress(taskAttempt);
+ sendContainerCleanup(taskAttempt, event);
+ }
+ }
- if (event instanceof TaskAttemptKillEvent) {
- taskAttempt.addDiagnosticInfo(
- ((TaskAttemptKillEvent) event).getMessage());
- }
+ @SuppressWarnings("unchecked")
+ private static void sendContainerCleanup(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ if (event instanceof TaskAttemptKillEvent) {
+ taskAttempt.addDiagnosticInfo(
+ ((TaskAttemptKillEvent) event).getMessage());
+ }
+ //send the cleanup event to containerLauncher
+ taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
+ taskAttempt.attemptId,
+ taskAttempt.container.getId(), StringInterner
+ .weakIntern(taskAttempt.container.getNodeId().toString()),
+ taskAttempt.container.getContainerToken(),
+ ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP));
+ }
+
+ /**
+ * Transition to SUCCESS_FINISHING_CONTAINER upon receiving TA_DONE event
+ */
+ private static class MoveContainerToSucceededFinishingTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @SuppressWarnings("unchecked")
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ finalizeProgress(taskAttempt);
+
+ // register the attempt with the finishing monitor
+ taskAttempt.appContext.getTaskAttemptFinishingMonitor().register(
+ taskAttempt.attemptId);
+
+ // set the finish time
+ taskAttempt.setFinishTime();
+
+ // notify job history
+ taskAttempt.eventHandler.handle(
+ createJobCounterUpdateEventTASucceeded(taskAttempt));
+ taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.SUCCEEDED);
+
+ //notify the task even though the container might not have exited yet.
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId,
+ TaskEventType.T_ATTEMPT_SUCCEEDED));
+ taskAttempt.eventHandler.handle
+ (new SpeculatorEvent
+ (taskAttempt.reportedStatus, taskAttempt.clock.getTime()));
- taskAttempt.reportedStatus.progress = 1.0f;
- taskAttempt.updateProgressSplits();
- //send the cleanup event to containerLauncher
- taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
- taskAttempt.attemptId,
- taskAttempt.container.getId(), StringInterner
- .weakIntern(taskAttempt.container.getNodeId().toString()),
- taskAttempt.container.getContainerToken(),
- ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP));
}
}
+ /**
+ * Transition to FAIL_FINISHING_CONTAINER upon receiving TA_FAILMSG event
+ */
+ private static class MoveContainerToFailedFinishingTransition implements
+ SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+ @SuppressWarnings("unchecked")
+ @Override
+ public void transition(TaskAttemptImpl taskAttempt,
+ TaskAttemptEvent event) {
+ finalizeProgress(taskAttempt);
+ // register the attempt with the finishing monitor
+ taskAttempt.appContext.getTaskAttemptFinishingMonitor().register(
+ taskAttempt.attemptId);
+ notifyTaskAttemptFailed(taskAttempt);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private static void notifyTaskAttemptFailed(TaskAttemptImpl taskAttempt) {
+ // set the finish time
+ taskAttempt.setFinishTime();
+
+ if (taskAttempt.getLaunchTime() != 0) {
+ taskAttempt.eventHandler
+ .handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
+ TaskAttemptUnsuccessfulCompletionEvent tauce =
+ createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
+ TaskAttemptStateInternal.FAILED);
+ taskAttempt.eventHandler.handle(new JobHistoryEvent(
+ taskAttempt.attemptId.getTaskId().getJobId(), tauce));
+ // taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.FAILED); Not
+ // handling failed map/reduce events.
+ } else {
+ LOG.debug("Not generating HistoryFinish event since start event not " +
+ "generated for taskAttempt: " + taskAttempt.getID());
+ }
+ taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+ taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
+ }
+
private void addDiagnosticInfo(String diag) {
if (diag != null && !diag.equals("")) {
diagnostics.add(diag);
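For intuition, here is a minimal, self-contained sketch of the success path this patch introduces, written with plain Java enums rather than Hadoop's StateMachineFactory (the class and method names below are invented for illustration): RUNNING moves to SUCCESS_FINISHING_CONTAINER on TA_DONE, then either to SUCCEEDED when TA_CONTAINER_COMPLETED arrives, or to SUCCESS_CONTAINER_CLEANUP if the finishing monitor fires TA_TIMED_OUT.

    /**
     * Illustrative sketch only; not Hadoop's StateMachineFactory.
     * State and event names mirror the ones added by this patch.
     */
    public class FinishingFlowSketch {
      enum State { RUNNING, SUCCESS_FINISHING_CONTAINER,
                   SUCCEEDED, SUCCESS_CONTAINER_CLEANUP }
      enum Event { TA_DONE, TA_CONTAINER_COMPLETED, TA_TIMED_OUT }

      static State next(State s, Event e) {
        if (s == State.RUNNING && e == Event.TA_DONE) {
          // task reported done; give the container a chance to exit by itself
          return State.SUCCESS_FINISHING_CONTAINER;
        }
        if (s == State.SUCCESS_FINISHING_CONTAINER) {
          if (e == Event.TA_CONTAINER_COMPLETED) {
            return State.SUCCEEDED;              // container exited on its own
          }
          if (e == Event.TA_TIMED_OUT) {
            return State.SUCCESS_CONTAINER_CLEANUP; // lingered too long; kill it
          }
        }
        return s; // everything else is treated as an ignore-able event
      }

      public static void main(String[] args) {
        State s = next(State.RUNNING, Event.TA_DONE);
        System.out.println(next(s, Event.TA_CONTAINER_COMPLETED)); // SUCCEEDED
      }
    }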
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
index 40ecdb2..82360f0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
@@ -27,7 +27,13 @@ public interface ContainerLauncher
enum EventType {
CONTAINER_REMOTE_LAUNCH,
- CONTAINER_REMOTE_CLEANUP
+ CONTAINER_REMOTE_CLEANUP,
+ // When TaskAttempt receives TA_CONTAINER_COMPLETED,
+ // it will notify ContainerLauncher so that the container can be removed
+ // from ContainerLauncher's launched containers list.
+ // Otherwise, ContainerLauncher will try to stop the containers as part of
+ // serviceStop.
+ CONTAINER_COMPLETED
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
index 9c1125d..a7e966c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
@@ -121,7 +121,11 @@ public class ContainerLauncherImpl extends AbstractService implements
public synchronized boolean isCompletelyDone() {
return state == ContainerState.DONE || state == ContainerState.FAILED;
}
-
+
+ public synchronized void done() {
+ state = ContainerState.DONE;
+ }
+
@SuppressWarnings("unchecked")
public synchronized void launch(ContainerRemoteLaunchEvent event) {
LOG.info("Launching " + taskAttemptID);
@@ -378,6 +382,11 @@ public class ContainerLauncherImpl extends AbstractService implements
case CONTAINER_REMOTE_CLEANUP:
c.kill();
break;
+
+ case CONTAINER_COMPLETED:
+ c.done();
+ break;
+
}
removeContainerIfDone(containerID);
}
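A rough illustration of why the new done() setter matters, under the assumption (stated in the enum comment above) that serviceStop tries to stop every container still on the launched list; the Container class here is a simplified stand-in, not ContainerLauncherImpl's inner class:

    /** Simplified stand-in for the launcher's per-container state guard. */
    public class ContainerGuardSketch {
      enum ContainerState { RUNNING, DONE, FAILED }

      static class Container {
        private ContainerState state = ContainerState.RUNNING;

        synchronized void done() { state = ContainerState.DONE; }

        synchronized boolean isCompletelyDone() {
          return state == ContainerState.DONE || state == ContainerState.FAILED;
        }

        synchronized void kill() {
          // only containers still believed to be alive get a remote stop
          if (!isCompletelyDone()) {
            System.out.println("issuing remote stop");
            state = ContainerState.DONE;
          }
        }
      }

      public static void main(String[] args) {
        Container c = new Container();
        c.done();  // CONTAINER_COMPLETED was processed first
        c.kill();  // a later serviceStop-style kill is now a no-op
      }
    }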
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java
new file mode 100644
index 0000000..800f0e2
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java
@@ -0,0 +1,108 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.mapred;
+
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.util.SystemClock;
+
+import org.junit.Test;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestTaskAttemptFinishingMonitor {
+
+ @Test
+ public void testFinishingAttemptTimeout()
+ throws IOException, InterruptedException {
+ SystemClock clock = new SystemClock();
+ Configuration conf = new Configuration();
+ conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT, 100);
+ conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, 10);
+
+ AppContext appCtx = mock(AppContext.class);
+ JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
+ RMHeartbeatHandler rmHeartbeatHandler =
+ mock(RMHeartbeatHandler.class);
+ MockEventHandler eventHandler = new MockEventHandler();
+ TaskAttemptFinishingMonitor taskAttemptFinishingMonitor =
+ new TaskAttemptFinishingMonitor(eventHandler);
+ taskAttemptFinishingMonitor.init(conf);
+ taskAttemptFinishingMonitor.start();
+
+ when(appCtx.getEventHandler()).thenReturn(eventHandler);
+ when(appCtx.getNMHostname()).thenReturn("0.0.0.0");
+ when(appCtx.getTaskAttemptFinishingMonitor()).thenReturn(
+ taskAttemptFinishingMonitor);
+ when(appCtx.getClock()).thenReturn(clock);
+
+ CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
+ policy.init(appCtx);
+ TaskAttemptListenerImpl listener =
+ new TaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy);
+
+ listener.init(conf);
+ listener.start();
+
+ JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
+ TaskId tid = MRBuilderUtils.newTaskId(jid, 0,
+ org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP);
+ TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
+ appCtx.getTaskAttemptFinishingMonitor().register(attemptId);
+ int check = 0;
+ while (!eventHandler.timedOut && check++ < 10) {
+ Thread.sleep(100);
+ }
+ taskAttemptFinishingMonitor.stop();
+
+ assertTrue("Finishing attempt didn't time out.", eventHandler.timedOut);
+
+ }
+
+ public static class MockEventHandler implements EventHandler {
+ public boolean timedOut = false;
+
+ @Override
+ public void handle(Event event) {
+ if (event instanceof TaskAttemptEvent) {
+ TaskAttemptEvent attemptEvent = ((TaskAttemptEvent) event);
+ if (TaskAttemptEventType.TA_TIMED_OUT == attemptEvent.getType()) {
+ timedOut = true;
+ }
+ }
+ }
+ }
+
+}
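The test above shrinks the monitor's timeout and check interval to keep the run fast. A job that wants to tune how long an attempt may sit in a finishing state could set the same keys; the values below are illustrative, not defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class FinishingMonitorConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Illustrative values in milliseconds; the actual defaults live in
        // mapred-default.xml / MRJobConfig.
        conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT, 60000);
        conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, 20000);
      }
    }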
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
index 58db925..4fe4c44 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
@@ -482,6 +482,20 @@ public class MRApp extends MRAppMaster {
}
@Override
+ protected TaskAttemptFinishingMonitor
+ createTaskAttemptFinishingMonitor(
+ EventHandler eventHandler) {
+ return new TaskAttemptFinishingMonitor(eventHandler) {
+ @Override
+ public synchronized void register(TaskAttemptId attemptID) {
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
+ }
+ };
+ }
+
+ @Override
protected TaskAttemptListener createTaskAttemptListener(
AppContext context, AMPreemptionPolicy policy) {
return new TaskAttemptListener(){
@@ -541,6 +555,8 @@ public class MRApp extends MRAppMaster {
new TaskAttemptEvent(event.getTaskAttemptID(),
TaskAttemptEventType.TA_CONTAINER_CLEANED));
break;
+ case CONTAINER_COMPLETED:
+ break;
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java
index a900241..e690f3f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java
@@ -148,4 +148,10 @@ public class MockAppContext implements AppContext {
// bogus - Not Required
return null;
}
+
+ @Override
+ public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() {
+ return null;
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
index 4a36938..4d3f6f4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
@@ -223,6 +223,8 @@ public class TestFail {
new TaskAttemptEvent(event.getTaskAttemptID(),
TaskAttemptEventType.TA_CONTAINER_CLEANED));
break;
+ case CONTAINER_COMPLETED:
+ super.handle(event);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
index c33bd4d..aae591e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
@@ -159,7 +159,7 @@ public class TestKill {
super.dispatch(new TaskAttemptEvent(taID,
TaskAttemptEventType.TA_DONE));
super.dispatch(new TaskAttemptEvent(taID,
- TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
super.dispatch(new TaskTAttemptEvent(taID,
TaskEventType.T_ATTEMPT_SUCCEEDED));
this.cachedKillEvent = killEvent;
@@ -211,40 +211,9 @@ public class TestKill {
app.getContext().getEventHandler()
.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
- app.waitForInternalState((JobImpl)job, JobStateInternal.KILLED);
- }
-
- static class MyAsyncDispatch extends AsyncDispatcher {
- private CountDownLatch latch;
- private TaskAttemptEventType attemptEventTypeToWait;
- MyAsyncDispatch(CountDownLatch latch, TaskAttemptEventType attemptEventTypeToWait) {
- super();
- this.latch = latch;
- this.attemptEventTypeToWait = attemptEventTypeToWait;
- }
-
- @Override
- protected void dispatch(Event event) {
- if (event instanceof TaskAttemptEvent) {
- TaskAttemptEvent attemptEvent = (TaskAttemptEvent) event;
- TaskAttemptId attemptID = ((TaskAttemptEvent) event).getTaskAttemptID();
- if (attemptEvent.getType() == this.attemptEventTypeToWait
- && attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0 ) {
- try {
- latch.await();
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
- }
- super.dispatch(event);
- }
+ app.waitForInternalState((JobImpl) job, JobStateInternal.KILLED);
}
- // This is to test a race condition where JobEventType.JOB_KILL is generated
- // right after TaskAttemptEventType.TA_DONE is generated.
- // TaskImpl's state machine might receive both T_ATTEMPT_SUCCEEDED
- // and T_ATTEMPT_KILLED from the same attempt.
@Test
public void testKillTaskWaitKillJobAfterTA_DONE() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
@@ -269,15 +238,12 @@ public class TestKill {
TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
- // The order in the dispatch event queue, from the oldest to the newest
+ // The order in the dispatch event queue, from first to last
// TA_DONE
- // JOB_KILL
- // CONTAINER_REMOTE_CLEANUP ( from TA_DONE's handling )
- // T_KILL ( from JOB_KILL's handling )
- // TA_CONTAINER_CLEANED ( from CONTAINER_REMOTE_CLEANUP's handling )
- // TA_KILL ( from T_KILL's handling )
- // T_ATTEMPT_SUCCEEDED ( from TA_CONTAINER_CLEANED's handling )
- // T_ATTEMPT_KILLED ( from TA_KILL's handling )
+ // JobEventType.JOB_KILL
+ // TaskAttemptEventType.TA_CONTAINER_COMPLETED ( from TA_DONE handling )
+ // TaskEventType.T_KILL ( from JobEventType.JOB_KILL handling )
+ // TaskEventType.T_ATTEMPT_SUCCEEDED ( from TA_CONTAINER_COMPLETED handling )
// Finish map
app.getContext().getEventHandler().handle(
@@ -295,6 +261,100 @@ public class TestKill {
app.waitForInternalState((JobImpl)job, JobStateInternal.KILLED);
}
+
+ @Test
+ public void testKillTaskWaitKillJobBeforeTA_DONE() throws Exception {
+ CountDownLatch latch = new CountDownLatch(1);
+ final Dispatcher dispatcher = new MyAsyncDispatch(latch, JobEventType.JOB_KILL);
+ MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {
+ @Override
+ public Dispatcher createDispatcher() {
+ return dispatcher;
+ }
+ };
+ Job job = app.submit(new Configuration());
+ JobId jobId = app.getJobId();
+ app.waitForState(job, JobState.RUNNING);
+ Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
+ Iterator<Task> it = job.getTasks().values().iterator();
+ Task mapTask = it.next();
+ Task reduceTask = it.next();
+ app.waitForState(mapTask, TaskState.RUNNING);
+ app.waitForState(reduceTask, TaskState.RUNNING);
+ TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
+ app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
+ TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
+ app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
+
+ // The order in the dispatch event queue, from first to last
+ // JobEventType.JOB_KILL
+ // TA_DONE
+ // TaskEventType.T_KILL ( from JobEventType.JOB_KILL handling )
+ // TaskAttemptEventType.TA_CONTAINER_COMPLETED ( from TA_DONE handling )
+ // TaskAttemptEventType.TA_KILL ( from TaskEventType.T_KILL handling )
+ // TaskEventType.T_ATTEMPT_SUCCEEDED ( from TA_CONTAINER_COMPLETED handling )
+ // TaskEventType.T_ATTEMPT_KILLED ( from TA_KILL handling )
+
+ // Now kill the job
+ app.getContext().getEventHandler()
+ .handle(new JobEvent(jobId, JobEventType.JOB_KILL));
+
+ // Finish map
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ mapAttempt.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ //unblock
+ latch.countDown();
+
+ app.waitForInternalState((JobImpl)job, JobStateInternal.KILLED);
+ }
+
+ static class MyAsyncDispatch extends AsyncDispatcher {
+ private CountDownLatch latch;
+ private TaskAttemptEventType attemptEventTypeToWait;
+ private JobEventType jobEventTypeToWait;
+ MyAsyncDispatch(CountDownLatch latch, TaskAttemptEventType attemptEventTypeToWait) {
+ super();
+ this.latch = latch;
+ this.attemptEventTypeToWait = attemptEventTypeToWait;
+ }
+
+ MyAsyncDispatch(CountDownLatch latch, JobEventType jobEventTypeToWait) {
+ super();
+ this.latch = latch;
+ this.jobEventTypeToWait = jobEventTypeToWait;
+ }
+
+ @Override
+ protected void dispatch(Event event) {
+ if (event instanceof TaskAttemptEvent) {
+ TaskAttemptEvent attemptEvent = (TaskAttemptEvent) event;
+ TaskAttemptId attemptID = ((TaskAttemptEvent) event).getTaskAttemptID();
+ if (attemptEvent.getType() == this.attemptEventTypeToWait
+ && attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0 ) {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ } else if (event instanceof JobEvent) {
+ JobEvent jobEvent = (JobEvent) event;
+ if (jobEvent.getType() == this.jobEventTypeToWait) {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ super.dispatch(event);
+ }
+ }
+
@Test
public void testKillTaskAttempt() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
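MyAsyncDispatch above forces a specific event interleaving by parking one event type on a CountDownLatch until the test is ready. The same trick in isolation, with hypothetical names (this is not a Hadoop class):

    import java.util.concurrent.CountDownLatch;

    /** Holds back one event type until the latch is released. */
    public class PausingDispatcher<E extends Enum<E>> {
      private final CountDownLatch latch;
      private final E typeToHold;

      public PausingDispatcher(CountDownLatch latch, E typeToHold) {
        this.latch = latch;
        this.typeToHold = typeToHold;
      }

      public void dispatch(E type, Runnable handler) {
        if (type == typeToHold) {
          try {
            latch.await(); // park this event until countDown() is called
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
          }
        }
        handler.run();
      }
    }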
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
index 69f2709..475cd1f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
@@ -884,5 +884,10 @@ public class TestRuntimeEstimators {
// bogus - Not Required
return null;
}
+
+ @Override
+ public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() {
+ return null;
+ }
}
}
[29/36] hadoop git commit: MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir, introducing compile error (Contributed by Arshad Mohammad)
Posted by zj...@apache.org.
MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir, introducing compile error (Contributed by Arshad Mohammad)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abf9e443
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abf9e443
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abf9e443
Branch: refs/heads/YARN-2928
Commit: abf9e4438972dcf91b38087e6574d7367f2723d3
Parents: d433b1b
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue May 12 12:49:16 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:26 2015 -0700
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../mapred/TestMapreduceConfigFields.java | 76 --------------------
.../mapreduce/TestMapreduceConfigFields.java | 76 ++++++++++++++++++++
3 files changed, 79 insertions(+), 76 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf9e443/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index e28d575..d53974d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -429,6 +429,9 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-5465. Tasks are often killed before they exit on their own
(Ming Ma via jlowe)
+ MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir,
+ introducing compile error (Arshad Mohammad via vinayakumarb)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf9e443/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java
deleted file mode 100644
index 7f18714..0000000
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce;
-
-import java.util.HashSet;
-
-import org.apache.hadoop.conf.TestConfigurationFieldsBase;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.ShuffleHandler;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
-
-/**
- * Unit test class to compare the following MR Configuration classes:
- * <p></p>
- * {@link org.apache.hadoop.mapreduce.MRJobConfig}
- * {@link org.apache.hadoop.mapreduce.MRConfig}
- * {@link org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig}
- * {@link org.apache.hadoop.mapred.ShuffleHandler}
- * {@link org.apache.hadoop.mapreduce.lib.output.FileOutputFormat}
- * {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
- * {@link org.apache.hadoop.mapreduce.Job}
- * {@link org.apache.hadoop.mapreduce.lib.input.NLineInputFormat}
- * {@link org.apache.hadoop.mapred.JobConf}
- * <p></p>
- * against mapred-default.xml for missing properties. Currently only
- * throws an error if the class is missing a property.
- * <p></p>
- * Refer to {@link org.apache.hadoop.conf.TestConfigurationFieldsBase}
- * for how this class works.
- */
-public class TestMapreduceConfigFields extends TestConfigurationFieldsBase {
-
- @SuppressWarnings("deprecation")
- @Override
- public void initializeMemberVariables() {
- xmlFilename = new String("mapred-default.xml");
- configurationClasses = new Class[] { MRJobConfig.class, MRConfig.class,
- JHAdminConfig.class, ShuffleHandler.class, FileOutputFormat.class,
- FileInputFormat.class, Job.class, NLineInputFormat.class,
- JobConf.class, FileOutputCommitter.class };
-
- // Initialize used variables
- configurationPropsToSkipCompare = new HashSet<String>();
-
- // Set error modes
- errorIfMissingConfigProps = true;
- errorIfMissingXmlProps = false;
-
- // Ignore deprecated MR1 properties in JobConf
- configurationPropsToSkipCompare
- .add(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY);
- configurationPropsToSkipCompare
- .add(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abf9e443/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/TestMapreduceConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/TestMapreduceConfigFields.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/TestMapreduceConfigFields.java
new file mode 100644
index 0000000..7f18714
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/TestMapreduceConfigFields.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce;
+
+import java.util.HashSet;
+
+import org.apache.hadoop.conf.TestConfigurationFieldsBase;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.ShuffleHandler;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
+
+/**
+ * Unit test class to compare the following MR Configuration classes:
+ * <p></p>
+ * {@link org.apache.hadoop.mapreduce.MRJobConfig}
+ * {@link org.apache.hadoop.mapreduce.MRConfig}
+ * {@link org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig}
+ * {@link org.apache.hadoop.mapred.ShuffleHandler}
+ * {@link org.apache.hadoop.mapreduce.lib.output.FileOutputFormat}
+ * {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
+ * {@link org.apache.hadoop.mapreduce.Job}
+ * {@link org.apache.hadoop.mapreduce.lib.input.NLineInputFormat}
+ * {@link org.apache.hadoop.mapred.JobConf}
+ * <p></p>
+ * against mapred-default.xml for missing properties. Currently only
+ * throws an error if the class is missing a property.
+ * <p></p>
+ * Refer to {@link org.apache.hadoop.conf.TestConfigurationFieldsBase}
+ * for how this class works.
+ */
+public class TestMapreduceConfigFields extends TestConfigurationFieldsBase {
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public void initializeMemberVariables() {
+ xmlFilename = new String("mapred-default.xml");
+ configurationClasses = new Class[] { MRJobConfig.class, MRConfig.class,
+ JHAdminConfig.class, ShuffleHandler.class, FileOutputFormat.class,
+ FileInputFormat.class, Job.class, NLineInputFormat.class,
+ JobConf.class, FileOutputCommitter.class };
+
+ // Initialize used variables
+ configurationPropsToSkipCompare = new HashSet<String>();
+
+ // Set error modes
+ errorIfMissingConfigProps = true;
+ errorIfMissingXmlProps = false;
+
+ // Ignore deprecated MR1 properties in JobConf
+ configurationPropsToSkipCompare
+ .add(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY);
+ configurationPropsToSkipCompare
+ .add(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY);
+ }
+
+}
[03/36] hadoop git commit: HDFS-8327. Compute storage type quotas in INodeFile.computeQuotaDeltaForTruncate(). Contributed by Haohui Mai.
Posted by zj...@apache.org.
HDFS-8327. Compute storage type quotas in INodeFile.computeQuotaDeltaForTruncate(). Contributed by Haohui Mai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c44b3070
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c44b3070
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c44b3070
Branch: refs/heads/YARN-2928
Commit: c44b3070842a91019a1754dd705bfb0f75eba6c6
Parents: c350985
Author: Haohui Mai <wh...@apache.org>
Authored: Tue May 5 16:38:14 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:10 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../hdfs/server/namenode/FSDirAttrOp.java | 4 +-
.../hdfs/server/namenode/FSDirectory.java | 26 +-
.../hdfs/server/namenode/FSNamesystem.java | 2 +-
.../hadoop/hdfs/server/namenode/INodeFile.java | 210 ++++++-------
.../snapshot/FileWithSnapshotFeature.java | 45 +--
.../TestCommitBlockSynchronization.java | 4 +-
.../namenode/TestTruncateQuotaUpdate.java | 311 +++++++------------
.../snapshot/TestFileWithSnapshotFeature.java | 89 ++++++
9 files changed, 338 insertions(+), 355 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c44b3070/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1dbf9f9..88503fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -535,6 +535,8 @@ Release 2.8.0 - UNRELEASED
HDFS-6757. Simplify lease manager with INodeID. (wheat9)
+ HDFS-8327. Simplify quota calculations for snapshots and truncate. (wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c44b3070/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index a3881b8..d01e2c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -393,7 +393,7 @@ public class FSDirAttrOp {
// if replication > oldBR, then newBR == replication.
// if replication < oldBR, we don't know newBR yet.
if (replication > oldBR) {
- long dsDelta = file.storagespaceConsumed()/oldBR;
+ long dsDelta = file.storagespaceConsumed(null).getStorageSpace() / oldBR;
fsd.updateCount(iip, 0L, dsDelta, oldBR, replication, true);
}
@@ -402,7 +402,7 @@ public class FSDirAttrOp {
final short newBR = file.getBlockReplication();
// check newBR < oldBR case.
if (newBR < oldBR) {
- long dsDelta = file.storagespaceConsumed()/newBR;
+ long dsDelta = file.storagespaceConsumed(null).getStorageSpace() / newBR;
fsd.updateCount(iip, 0L, dsDelta, oldBR, newBR, true);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c44b3070/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index bf538ed..b289c39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -532,8 +532,8 @@ public class FSDirectory implements Closeable {
INodeFile fileNode, Block block) throws IOException {
// modify file-> block and blocksMap
// fileNode should be under construction
- boolean removed = fileNode.removeLastBlock(block);
- if (!removed) {
+ BlockInfoContiguousUnderConstruction uc = fileNode.removeLastBlock(block);
+ if (uc == null) {
return false;
}
getBlockManager().removeBlockFromMap(block);
@@ -1134,24 +1134,14 @@ public class FSDirectory implements Closeable {
// Do not check quota if edit log is still being processed
return;
}
- final long diff = file.computeQuotaDeltaForTruncate(newLength);
- final short repl = file.getBlockReplication();
- delta.addStorageSpace(diff * repl);
final BlockStoragePolicy policy = getBlockStoragePolicySuite()
.getPolicy(file.getStoragePolicyID());
- List<StorageType> types = policy.chooseStorageTypes(repl);
- for (StorageType t : types) {
- if (t.supportTypeQuota()) {
- delta.addTypeSpace(t, diff);
- }
- }
- if (diff > 0) {
- readLock();
- try {
- verifyQuota(iip, iip.length() - 1, delta, null);
- } finally {
- readUnlock();
- }
+ file.computeQuotaDeltaForTruncate(newLength, policy, delta);
+ readLock();
+ try {
+ verifyQuota(iip, iip.length() - 1, delta, null);
+ } finally {
+ readUnlock();
}
}
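For intuition about the delta bookkeeping the removed code spelled out above (delta.addStorageSpace(diff * repl) plus one addTypeSpace(t, diff) per chosen storage type), here is a worked example with made-up numbers, ignoring block boundaries and snapshot complications:

    public class TruncateQuotaDeltaSketch {
      public static void main(String[] args) {
        // Hypothetical file: 200 MB, replication 3, truncated to 50 MB.
        long oldLength = 200L * 1024 * 1024;
        long newLength = 50L * 1024 * 1024;
        short replication = 3;
        long diff = newLength - oldLength;            // -150 MB per replica
        long storageSpaceDelta = diff * replication;  // -450 MB of raw storage
        // Each storage type chosen by the policy is charged diff (-150 MB).
        System.out.println(diff + " bytes/replica, " +
            storageSpaceDelta + " bytes total");
      }
    }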
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c44b3070/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ef069d6..9e30812 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4330,7 +4330,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
if (deleteblock) {
Block blockToDel = ExtendedBlock.getLocalBlock(oldBlock);
- boolean remove = iFile.removeLastBlock(blockToDel);
+ boolean remove = iFile.removeLastBlock(blockToDel) != null;
if (remove) {
blockManager.removeBlock(storedBlock);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c44b3070/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 1d9c0ad..14fc7b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -251,22 +251,24 @@ public class INodeFile extends INodeWithAdditionalFields
* Remove a block from the block list. This block should be
* the last one on the list.
*/
- boolean removeLastBlock(Block oldblock) {
+ BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) {
Preconditions.checkState(isUnderConstruction(),
"file is no longer under construction");
if (blocks == null || blocks.length == 0) {
- return false;
+ return null;
}
int size_1 = blocks.length - 1;
if (!blocks[size_1].equals(oldblock)) {
- return false;
+ return null;
}
+ BlockInfoContiguousUnderConstruction uc =
+ (BlockInfoContiguousUnderConstruction)blocks[size_1];
//copy to a new list
BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
System.arraycopy(blocks, 0, newlist, 0, size_1);
setBlocks(newlist);
- return true;
+ return uc;
}
/* End of Under-Construction Feature */
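
For call sites that only need the old boolean answer, the adaptation is
mechanical; a minimal sketch mirroring the FSNamesystem hunk above:

    // null now means "block was not removed"; a non-null result also hands
    // the caller the under-construction block for further bookkeeping.
    BlockInfoContiguousUnderConstruction uc = file.removeLastBlock(blockToDel);
    boolean removed = (uc != null);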
@@ -416,11 +418,6 @@ public class INodeFile extends INodeWithAdditionalFields
return header;
}
- /** @return the storagespace required for a full block. */
- final long getPreferredBlockStoragespace() {
- return getPreferredBlockSize() * getBlockReplication();
- }
-
/** @return the blocks of the file. */
@Override
public BlockInfoContiguous[] getBlocks() {
@@ -567,34 +564,41 @@ public class INodeFile extends INodeWithAdditionalFields
QuotaCounts counts, boolean useCache,
int lastSnapshotId) {
long nsDelta = 1;
+ counts.addNameSpace(nsDelta);
+
+ BlockStoragePolicy bsp = null;
+ if (blockStoragePolicyId != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
+ bsp = bsps.getPolicy(blockStoragePolicyId);
+ }
+
+ FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
+ if (sf == null) {
+ counts.add(storagespaceConsumed(bsp));
+ return counts;
+ }
+
+ FileDiffList fileDiffList = sf.getDiffs();
+ int last = fileDiffList.getLastSnapshotId();
+
+ if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
+ || last == Snapshot.CURRENT_STATE_ID) {
+ counts.add(storagespaceConsumed(bsp));
+ return counts;
+ }
+
final long ssDeltaNoReplication;
short replication;
- FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
- if (sf != null) {
- FileDiffList fileDiffList = sf.getDiffs();
- int last = fileDiffList.getLastSnapshotId();
-
- if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
- || last == Snapshot.CURRENT_STATE_ID) {
- ssDeltaNoReplication = storagespaceConsumedNoReplication();
- replication = getBlockReplication();
- } else if (last < lastSnapshotId) {
- ssDeltaNoReplication = computeFileSize(true, false);
- replication = getFileReplication();
- } else {
- int sid = fileDiffList.getSnapshotById(lastSnapshotId);
- ssDeltaNoReplication = storagespaceConsumedNoReplication(sid);
- replication = getReplication(sid);
- }
+ if (last < lastSnapshotId) {
+ ssDeltaNoReplication = computeFileSize(true, false);
+ replication = getFileReplication();
} else {
- ssDeltaNoReplication = storagespaceConsumedNoReplication();
- replication = getBlockReplication();
+ int sid = fileDiffList.getSnapshotById(lastSnapshotId);
+ ssDeltaNoReplication = computeFileSize(sid);
+ replication = getFileReplication(sid);
}
- counts.addNameSpace(nsDelta);
- counts.addStorageSpace(ssDeltaNoReplication * replication);
- if (blockStoragePolicyId != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
- BlockStoragePolicy bsp = bsps.getPolicy(blockStoragePolicyId);
+ counts.addStorageSpace(ssDeltaNoReplication * replication);
+ if (bsp != null) {
List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
for (StorageType t : storageTypes) {
if (!t.supportTypeQuota()) {
@@ -626,7 +630,8 @@ public class INodeFile extends INodeWithAdditionalFields
}
}
counts.addContent(Content.LENGTH, fileLen);
- counts.addContent(Content.DISKSPACE, storagespaceConsumed());
+ counts.addContent(Content.DISKSPACE, storagespaceConsumed(null)
+ .getStorageSpace());
if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
@@ -709,61 +714,40 @@ public class INodeFile extends INodeWithAdditionalFields
* including blocks in its snapshots.
* Use preferred block size for the last block if it is under construction.
*/
- public final long storagespaceConsumed() {
- return storagespaceConsumedNoReplication() * getBlockReplication();
- }
-
- public final long storagespaceConsumedNoReplication() {
+ public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) {
+ QuotaCounts counts = new QuotaCounts.Builder().build();
+ final Iterable<BlockInfoContiguous> blocks;
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
- if(sf == null) {
- return computeFileSize(true, true);
- }
-
- // Collect all distinct blocks
- long size = 0;
- Set<Block> allBlocks = new HashSet<Block>(Arrays.asList(getBlocks()));
- List<FileDiff> diffs = sf.getDiffs().asList();
- for(FileDiff diff : diffs) {
- BlockInfoContiguous[] diffBlocks = diff.getBlocks();
- if (diffBlocks != null) {
- allBlocks.addAll(Arrays.asList(diffBlocks));
- }
- }
- for(Block block : allBlocks) {
- size += block.getNumBytes();
- }
- // check if the last block is under construction
- BlockInfoContiguous lastBlock = getLastBlock();
- if(lastBlock != null &&
- lastBlock instanceof BlockInfoContiguousUnderConstruction) {
- size += getPreferredBlockSize() - lastBlock.getNumBytes();
- }
- return size;
- }
-
- public final long storagespaceConsumed(int lastSnapshotId) {
- if (lastSnapshotId != CURRENT_STATE_ID) {
- return computeFileSize(lastSnapshotId)
- * getFileReplication(lastSnapshotId);
- } else {
- return storagespaceConsumed();
- }
- }
-
- public final short getReplication(int lastSnapshotId) {
- if (lastSnapshotId != CURRENT_STATE_ID) {
- return getFileReplication(lastSnapshotId);
- } else {
- return getBlockReplication();
- }
- }
-
- public final long storagespaceConsumedNoReplication(int lastSnapshotId) {
- if (lastSnapshotId != CURRENT_STATE_ID) {
- return computeFileSize(lastSnapshotId);
+ if (sf == null) {
+ blocks = Arrays.asList(getBlocks());
} else {
- return storagespaceConsumedNoReplication();
+ // Collect all distinct blocks
+ Set<BlockInfoContiguous> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
+ List<FileDiff> diffs = sf.getDiffs().asList();
+ for(FileDiff diff : diffs) {
+ BlockInfoContiguous[] diffBlocks = diff.getBlocks();
+ if (diffBlocks != null) {
+ allBlocks.addAll(Arrays.asList(diffBlocks));
+ }
+ }
+ blocks = allBlocks;
+ }
+
+ final short replication = getBlockReplication();
+ for (BlockInfoContiguous b : blocks) {
+ long blockSize = b.isComplete() ? b.getNumBytes() :
+ getPreferredBlockSize();
+ counts.addStorageSpace(blockSize * replication);
+ if (bsp != null) {
+ List<StorageType> types = bsp.chooseStorageTypes(replication);
+ for (StorageType t : types) {
+ if (t.supportTypeQuota()) {
+ counts.addTypeSpace(t, blockSize);
+ }
+ }
+ }
}
+ return counts;
}
/**
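Worked example of the unified storagespaceConsumed(bsp) accounting above
(hypothetical sizes and policy, not from the patch):

    // file: one complete 128MB block plus one under-construction block,
    // preferred block size 128MB, replication 3, policy choosing the
    // per-replica types [SSD, DISK, DISK]:
    //   the UC block is counted at the preferred size, so
    //   storage space   = (128MB + 128MB) * 3 = 768MB
    //   SSD type space  = 128MB + 128MB = 256MB  (one SSD replica per block)
    //   DISK type space = 2 * (128MB + 128MB) = 512MB (two DISK replicas)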
@@ -832,38 +816,56 @@ public class INodeFile extends INodeWithAdditionalFields
/**
* compute the quota usage change for a truncate op
* @param newLength the length for truncation
- * @return the quota usage delta (not considering replication factor)
- */
- long computeQuotaDeltaForTruncate(final long newLength) {
+ */
+ void computeQuotaDeltaForTruncate(
+ long newLength, BlockStoragePolicy bsps,
+ QuotaCounts delta) {
final BlockInfoContiguous[] blocks = getBlocks();
if (blocks == null || blocks.length == 0) {
- return 0;
+ return;
}
- int n = 0;
long size = 0;
- for (; n < blocks.length && newLength > size; n++) {
- size += blocks[n].getNumBytes();
- }
- final boolean onBoundary = size == newLength;
-
- long truncateSize = 0;
- for (int i = (onBoundary ? n : n - 1); i < blocks.length; i++) {
- truncateSize += blocks[i].getNumBytes();
+ for (BlockInfoContiguous b : blocks) {
+ size += b.getNumBytes();
}
+ BlockInfoContiguous[] sblocks = null;
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) {
FileDiff diff = sf.getDiffs().getLast();
- BlockInfoContiguous[] sblocks = diff != null ? diff.getBlocks() : null;
- if (sblocks != null) {
- for (int i = (onBoundary ? n : n-1); i < blocks.length
- && i < sblocks.length && blocks[i].equals(sblocks[i]); i++) {
- truncateSize -= blocks[i].getNumBytes();
+ sblocks = diff != null ? diff.getBlocks() : null;
+ }
+
+ for (int i = blocks.length - 1; i >= 0 && size > newLength;
+ size -= blocks[i].getNumBytes(), --i) {
+ BlockInfoContiguous bi = blocks[i];
+ long truncatedBytes;
+ if (size - newLength < bi.getNumBytes()) {
+ // Record a full block as the last block will be copied during
+ // recovery
+ truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
+ } else {
+ truncatedBytes = bi.getNumBytes();
+ }
+
+ // The block exists in the snapshot, so add back the truncated bytes
+ // that the snapshot still references
+ if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
+ truncatedBytes -= bi.getNumBytes();
+ }
+
+ delta.addStorageSpace(-truncatedBytes * getBlockReplication());
+ if (bsps != null) {
+ List<StorageType> types = bsps.chooseStorageTypes(
+ getBlockReplication());
+ for (StorageType t : types) {
+ if (t.supportTypeQuota()) {
+ delta.addTypeSpace(t, -truncatedBytes);
+ }
}
}
}
- return onBoundary ? -truncateSize : (getPreferredBlockSize() - truncateSize);
}
void truncateBlocksTo(int n) {
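
Tracing the loop above on the numbers used by the rewritten test further
down makes the delta concrete (no snapshot, replication R, BLOCKSIZE-sized
blocks):

    // file = [B, B, B/2] (2.5 blocks), truncate to newLength = 1.5 * B
    //   i=2: size - newLength = B >= B/2, the half block goes entirely:
    //        delta -= (B/2) * R
    //   i=1: size - newLength = B/2 < B, truncation lands inside this block;
    //        it will be copied during recovery, so a full preferred block
    //        stays charged: truncatedBytes = B - B = 0
    //   i=0: remaining size B <= newLength, loop ends
    // net storage-space delta: -(B/2) * R, matching case 1 of the test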
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c44b3070/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index b42b745..7d884d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.util.List;
-import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.util.EnumCounters;
/**
* Feature for file with snapshot-related information.
@@ -145,50 +144,36 @@ public class FileWithSnapshotFeature implements INode.Feature {
public QuotaCounts updateQuotaAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile file,
FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes) {
- long oldStoragespace = file.storagespaceConsumed();
byte storagePolicyID = file.getStoragePolicyID();
BlockStoragePolicy bsp = null;
- EnumCounters<StorageType> typeSpaces =
- new EnumCounters<StorageType>(StorageType.class);
if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
bsp = bsps.getPolicy(file.getStoragePolicyID());
}
+
+ QuotaCounts oldCounts = file.storagespaceConsumed(null);
+ long oldStoragespace;
if (removed.snapshotINode != null) {
short replication = removed.snapshotINode.getFileReplication();
short currentRepl = file.getBlockReplication();
- if (currentRepl == 0) {
- long oldFileSizeNoRep = file.computeFileSize(true, true);
- oldStoragespace = oldFileSizeNoRep * replication;
-
- if (bsp != null) {
- List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
- for (StorageType t : oldTypeChosen) {
- if (t.supportTypeQuota()) {
- typeSpaces.add(t, -oldFileSizeNoRep);
- }
- }
- }
- } else if (replication > currentRepl) {
- long oldFileSizeNoRep = file.storagespaceConsumedNoReplication();
+ if (replication > currentRepl) {
+ long oldFileSizeNoRep = currentRepl == 0
+ ? file.computeFileSize(true, true)
+ : oldCounts.getStorageSpace() / file.getBlockReplication();
oldStoragespace = oldFileSizeNoRep * replication;
+ oldCounts.setStorageSpace(oldStoragespace);
if (bsp != null) {
List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
for (StorageType t : oldTypeChosen) {
if (t.supportTypeQuota()) {
- typeSpaces.add(t, -oldFileSizeNoRep);
- }
- }
- List<StorageType> newTypeChosen = bsp.chooseStorageTypes(currentRepl);
- for (StorageType t: newTypeChosen) {
- if (t.supportTypeQuota()) {
- typeSpaces.add(t, oldFileSizeNoRep);
+ oldCounts.addTypeSpace(t, oldFileSizeNoRep);
}
}
}
}
+
AclFeature aclFeature = removed.getSnapshotINode().getAclFeature();
if (aclFeature != null) {
AclStorage.removeAclFeature(aclFeature);
@@ -198,11 +183,9 @@ public class FileWithSnapshotFeature implements INode.Feature {
getDiffs().combineAndCollectSnapshotBlocks(
bsps, file, removed, collectedBlocks, removedINodes);
- long ssDelta = oldStoragespace - file.storagespaceConsumed();
- return new QuotaCounts.Builder().
- storageSpace(ssDelta).
- typeSpaces(typeSpaces).
- build();
+ QuotaCounts current = file.storagespaceConsumed(bsp);
+ oldCounts.subtract(current);
+ return oldCounts;
}
/**
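The replacement above collapses the hand-maintained deltas into a single
subtraction; a compressed sketch of the new flow (names from the diff):

    QuotaCounts oldCounts = file.storagespaceConsumed(null); // before collect
    // ... adjust oldCounts for a higher replication recorded in the
    // snapshot, then combineAndCollectSnapshotBlocks(...) mutates the file
    oldCounts.subtract(file.storagespaceConsumed(bsp));      // old - current
    return oldCounts;                                        // space freed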
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c44b3070/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index b7e8c25..3049612 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -69,7 +69,7 @@ public class TestCommitBlockSynchronization {
blockInfo.setBlockCollection(file);
blockInfo.setGenerationStamp(genStamp);
blockInfo.initializeBlockRecovery(genStamp);
- doReturn(true).when(file).removeLastBlock(any(Block.class));
+ doReturn(blockInfo).when(file).removeLastBlock(any(Block.class));
doReturn(true).when(file).isUnderConstruction();
doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
@@ -152,7 +152,7 @@ public class TestCommitBlockSynchronization {
true, newTargets, null);
// Simulate removing the last block from the file.
- doReturn(false).when(file).removeLastBlock(any(Block.class));
+ doReturn(null).when(file).removeLastBlock(any(Block.class));
// Repeat the call to make sure it does not throw
namesystemSpy.commitBlockSynchronization(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c44b3070/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
index 49d01c1..f6b18e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
@@ -17,19 +17,21 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
-import org.junit.After;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
+
+import java.util.ArrayList;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
/**
* Make sure we correctly update the quota usage for truncate.
@@ -45,204 +47,119 @@ import org.junit.Test;
public class TestTruncateQuotaUpdate {
private static final int BLOCKSIZE = 1024;
private static final short REPLICATION = 4;
- private static final long DISKQUOTA = BLOCKSIZE * 20;
- static final long seed = 0L;
- private static final Path dir = new Path("/TestTruncateQuotaUpdate");
- private static final Path file = new Path(dir, "file");
-
- private MiniDFSCluster cluster;
- private FSDirectory fsdir;
- private DistributedFileSystem dfs;
-
- @Before
- public void setUp() throws Exception {
- final Configuration conf = new Configuration();
- conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
- .build();
- cluster.waitActive();
-
- fsdir = cluster.getNamesystem().getFSDirectory();
- dfs = cluster.getFileSystem();
-
- dfs.mkdirs(dir);
- dfs.setQuota(dir, Long.MAX_VALUE - 1, DISKQUOTA);
- dfs.setQuotaByStorageType(dir, StorageType.DISK, DISKQUOTA);
- dfs.setStoragePolicy(dir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
- }
-
- @After
- public void tearDown() throws Exception {
- if (cluster != null) {
- cluster.shutdown();
- }
- }
+ private long nextMockBlockId;
+ private long nextMockGenstamp;
+ private long nextMockINodeId;
@Test
- public void testTruncateQuotaUpdate() throws Exception {
-
+ public void testTruncateWithoutSnapshot() {
+ INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
+ // case 1: first truncate to 1.5 blocks
+ // we truncate one block, but not on the boundary, thus the diff should
+ // be -block + (block - 0.5 block) = -0.5 block
+ QuotaCounts count = new QuotaCounts.Builder().build();
+ file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
+ Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
+
+ // case 2: truncate to 1 block
+ count = new QuotaCounts.Builder().build();
+ file.computeQuotaDeltaForTruncate(BLOCKSIZE, null, count);
+ Assert.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION,
+ count.getStorageSpace());
+
+ // case 3: truncate to 0
+ count = new QuotaCounts.Builder().build();
+ file.computeQuotaDeltaForTruncate(0, null, count);
+ Assert.assertEquals(-(BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION,
+ count.getStorageSpace());
}
- public interface TruncateCase {
- public void prepare() throws Exception;
- public void run() throws Exception;
- }
-
- private void testTruncate(long newLength, long expectedDiff,
- long expectedUsage) throws Exception {
- // before doing the real truncation, make sure the computation is correct
- final INodesInPath iip = fsdir.getINodesInPath4Write(file.toString());
- final INodeFile fileNode = iip.getLastINode().asFile();
- fileNode.recordModification(iip.getLatestSnapshotId(), true);
- final long diff = fileNode.computeQuotaDeltaForTruncate(newLength);
- Assert.assertEquals(expectedDiff, diff);
-
- // do the real truncation
- dfs.truncate(file, newLength);
- // wait for truncate to finish
- TestFileTruncate.checkBlockRecovery(file, dfs);
- final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
- .asDirectory();
- final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
- .getSpaceConsumed().getStorageSpace();
- final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
- .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
- Assert.assertEquals(expectedUsage, spaceUsed);
- Assert.assertEquals(expectedUsage, diskUsed);
- }
-
- /**
- * case 1~3
- */
- private class TruncateWithoutSnapshot implements TruncateCase {
- @Override
- public void prepare() throws Exception {
- // original file size: 2.5 block
- DFSTestUtil.createFile(dfs, file, BLOCKSIZE * 2 + BLOCKSIZE / 2,
- REPLICATION, 0L);
- }
-
- @Override
- public void run() throws Exception {
- // case 1: first truncate to 1.5 blocks
- long newLength = BLOCKSIZE + BLOCKSIZE / 2;
- // we truncate 1 blocks, but not on the boundary, thus the diff should
- // be -block + (block - 0.5 block) = -0.5 block
- long diff = -BLOCKSIZE / 2;
- // the new quota usage should be BLOCKSIZE * 1.5 * replication
- long usage = (BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION;
- testTruncate(newLength, diff, usage);
-
- // case 2: truncate to 1 block
- newLength = BLOCKSIZE;
- // the diff should be -0.5 block since this is not on boundary
- diff = -BLOCKSIZE / 2;
- // after truncation the quota usage should be BLOCKSIZE * replication
- usage = BLOCKSIZE * REPLICATION;
- testTruncate(newLength, diff, usage);
-
- // case 3: truncate to 0
- testTruncate(0, -BLOCKSIZE, 0);
- }
+ @Test
+ public void testTruncateWithSnapshotNoDivergence() {
+ INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
+ addSnapshotFeature(file, file.getBlocks());
+
+ // case 4: truncate to 1.5 blocks
+ // all the blocks are in the snapshot. truncate needs to allocate a new block
+ // diff should be +BLOCKSIZE
+ QuotaCounts count = new QuotaCounts.Builder().build();
+ file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
+ Assert.assertEquals(BLOCKSIZE * REPLICATION, count.getStorageSpace());
+
+ // case 2: truncate to 1 block
+ count = new QuotaCounts.Builder().build();
+ file.computeQuotaDeltaForTruncate(BLOCKSIZE, null, count);
+ Assert.assertEquals(0, count.getStorageSpace());
+
+ // case 3: truncate to 0
+ count = new QuotaCounts.Builder().build();
+ file.computeQuotaDeltaForTruncate(0, null, count);
+ Assert.assertEquals(0, count.getStorageSpace());
}
- /**
- * case 4~6
- */
- private class TruncateWithSnapshot implements TruncateCase {
- @Override
- public void prepare() throws Exception {
- DFSTestUtil.createFile(dfs, file, BLOCKSIZE * 2 + BLOCKSIZE / 2,
- REPLICATION, 0L);
- SnapshotTestHelper.createSnapshot(dfs, dir, "s1");
- }
-
- @Override
- public void run() throws Exception {
- // case 4: truncate to 1.5 blocks
- long newLength = BLOCKSIZE + BLOCKSIZE / 2;
- // all the blocks are in snapshot. truncate need to allocate a new block
- // diff should be +BLOCKSIZE
- long diff = BLOCKSIZE;
- // the new quota usage should be BLOCKSIZE * 3 * replication
- long usage = BLOCKSIZE * 3 * REPLICATION;
- testTruncate(newLength, diff, usage);
-
- // case 5: truncate to 1 block
- newLength = BLOCKSIZE;
- // the block for truncation is not in snapshot, diff should be -0.5 block
- diff = -BLOCKSIZE / 2;
- // after truncation the quota usage should be 2.5 block * repl
- usage = (BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION;
- testTruncate(newLength, diff, usage);
-
- // case 6: truncate to 0
- testTruncate(0, 0, usage);
- }
+ @Test
+ public void testTruncateWithSnapshotAndDivergence() {
+ INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
+ BlockInfoContiguous[] blocks = new BlockInfoContiguous
+ [file.getBlocks().length];
+ System.arraycopy(file.getBlocks(), 0, blocks, 0, blocks.length);
+ addSnapshotFeature(file, blocks);
+ // Update the last two blocks in the current inode
+ file.getBlocks()[1] = newBlock(BLOCKSIZE, REPLICATION);
+ file.getBlocks()[2] = newBlock(BLOCKSIZE / 2, REPLICATION);
+
+ // case 7: truncate to 1.5 block
+ // the block for truncation is not in snapshot, diff should be the same
+ // as case 1
+ QuotaCounts count = new QuotaCounts.Builder().build();
+ file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
+ Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
+
+ // case 8: truncate to 2 blocks
+ // the original 2.5 blocks are in snapshot. the block truncated is not
+ // in snapshot. diff should be -0.5 block
+ count = new QuotaCounts.Builder().build();
+ file.computeQuotaDeltaForTruncate(BLOCKSIZE * 2, null, count);
+ Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
+
+ // case 9: truncate to 0
+ count = new QuotaCounts.Builder().build();
+ file.computeQuotaDeltaForTruncate(0, null, count);
+ Assert.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, count
+ .getStorageSpace());
}
- /**
- * case 7~9
- */
- private class TruncateWithSnapshot2 implements TruncateCase {
- @Override
- public void prepare() throws Exception {
- // original size: 2.5 blocks
- DFSTestUtil.createFile(dfs, file, BLOCKSIZE * 2 + BLOCKSIZE / 2,
- REPLICATION, 0L);
- SnapshotTestHelper.createSnapshot(dfs, dir, "s1");
-
- // truncate to 1.5 block
- dfs.truncate(file, BLOCKSIZE + BLOCKSIZE / 2);
- TestFileTruncate.checkBlockRecovery(file, dfs);
-
- // append another 1 BLOCK
- DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
- }
-
- @Override
- public void run() throws Exception {
- // case 8: truncate to 2 blocks
- long newLength = BLOCKSIZE * 2;
- // the original 2.5 blocks are in snapshot. the block truncated is not
- // in snapshot. diff should be -0.5 block
- long diff = -BLOCKSIZE / 2;
- // the new quota usage should be BLOCKSIZE * 3.5 * replication
- long usage = (BLOCKSIZE * 3 + BLOCKSIZE / 2) * REPLICATION;
- testTruncate(newLength, diff, usage);
-
- // case 7: truncate to 1.5 block
- newLength = BLOCKSIZE + BLOCKSIZE / 2;
- // the block for truncation is not in snapshot, diff should be
- // -0.5 block + (block - 0.5block) = 0
- diff = 0;
- // after truncation the quota usage should be 3 block * repl
- usage = (BLOCKSIZE * 3) * REPLICATION;
- testTruncate(newLength, diff, usage);
-
- // case 9: truncate to 0
- testTruncate(0, -BLOCKSIZE / 2,
- (BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION);
+ private INodeFile createMockFile(long size, short replication) {
+ ArrayList<BlockInfoContiguous> blocks = new ArrayList<>();
+ long createdSize = 0;
+ while (createdSize < size) {
+ long blockSize = Math.min(BLOCKSIZE, size - createdSize);
+ BlockInfoContiguous bi = newBlock(blockSize, replication);
+ blocks.add(bi);
+ createdSize += BLOCKSIZE;
}
+ PermissionStatus perm = new PermissionStatus("foo", "bar", FsPermission
+ .createImmutable((short) 0x1ff));
+ return new INodeFile(
+ ++nextMockINodeId, new byte[0], perm, 0, 0,
+ blocks.toArray(new BlockInfoContiguous[blocks.size()]), replication,
+ BLOCKSIZE);
}
- private void testTruncateQuotaUpdate(TruncateCase t) throws Exception {
- t.prepare();
- t.run();
+ private BlockInfoContiguous newBlock(long size, short replication) {
+ Block b = new Block(++nextMockBlockId, size, ++nextMockGenstamp);
+ return new BlockInfoContiguous(b, replication);
}
- @Test
- public void testQuotaNoSnapshot() throws Exception {
- testTruncateQuotaUpdate(new TruncateWithoutSnapshot());
- }
-
- @Test
- public void testQuotaWithSnapshot() throws Exception {
- testTruncateQuotaUpdate(new TruncateWithSnapshot());
- }
-
- @Test
- public void testQuotaWithSnapshot2() throws Exception {
- testTruncateQuotaUpdate(new TruncateWithSnapshot2());
+ private static void addSnapshotFeature(INodeFile file, BlockInfoContiguous[] blocks) {
+ FileDiff diff = mock(FileDiff.class);
+ when(diff.getBlocks()).thenReturn(blocks);
+ FileDiffList diffList = new FileDiffList();
+ @SuppressWarnings("unchecked")
+ ArrayList<FileDiff> diffs = ((ArrayList<FileDiff>)Whitebox.getInternalState
+ (diffList, "diffs"));
+ diffs.add(diff);
+ FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffList);
+ file.addFeature(sf);
}
}
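
The rewritten tests pass a null policy, so only the storage-space delta is
asserted; the per-type branch could be exercised the same way. A hypothetical
extension (the policy mock, the Arrays/StorageType/BlockStoragePolicy imports,
and the chosen type list are assumptions, not part of the commit):

    BlockStoragePolicy policy = mock(BlockStoragePolicy.class);
    when(policy.chooseStorageTypes(REPLICATION)).thenReturn(
        Arrays.asList(StorageType.SSD, StorageType.DISK,
            StorageType.DISK, StorageType.DISK));
    INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
    QuotaCounts count = new QuotaCounts.Builder().build();
    file.computeQuotaDeltaForTruncate(0, policy, count);
    // each truncated byte is charged back once per replica of each type
    Assert.assertEquals(-(BLOCKSIZE * 2 + BLOCKSIZE / 2),
        count.getTypeSpaces().get(StorageType.SSD));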
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c44b3070/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
new file mode 100644
index 0000000..977b07c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import com.google.common.collect.Lists;
+import junit.framework.Assert;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;
+import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
+
+import java.util.ArrayList;
+
+import static org.apache.hadoop.fs.StorageType.DISK;
+import static org.apache.hadoop.fs.StorageType.SSD;
+import static org.mockito.Mockito.anyByte;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestFileWithSnapshotFeature {
+ private static final int BLOCK_SIZE = 1024;
+ private static final short REPL_3 = 3;
+ private static final short REPL_1 = 1;
+
+ @Test
+ public void testUpdateQuotaAndCollectBlocks() {
+ FileDiffList diffs = new FileDiffList();
+ FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
+ FileDiff diff = mock(FileDiff.class);
+ BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
+ BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
+ BlockInfoContiguous[] blocks = new BlockInfoContiguous[] {
+ new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1)
+ };
+
+ // No snapshot
+ INodeFile file = mock(INodeFile.class);
+ when(file.getFileWithSnapshotFeature()).thenReturn(sf);
+ when(file.getBlocks()).thenReturn(blocks);
+ when(file.getStoragePolicyID()).thenReturn((byte) 1);
+ when(bsps.getPolicy(anyByte())).thenReturn(bsp);
+ INode.BlocksMapUpdateInfo collectedBlocks = mock(
+ INode.BlocksMapUpdateInfo.class);
+ ArrayList<INode> removedINodes = new ArrayList<>();
+ QuotaCounts counts = sf.updateQuotaAndCollectBlocks(
+ bsps, file, diff, collectedBlocks, removedINodes);
+ Assert.assertEquals(0, counts.getStorageSpace());
+ Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
+
+ // INode only exists in the snapshot
+ INodeFile snapshotINode = mock(INodeFile.class);
+ when(file.getBlockReplication()).thenReturn(REPL_1);
+ Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
+ Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
+ when(diff.getSnapshotINode()).thenReturn(snapshotINode);
+
+ when(bsp.chooseStorageTypes(REPL_1))
+ .thenReturn(Lists.newArrayList(SSD));
+ when(bsp.chooseStorageTypes(REPL_3))
+ .thenReturn(Lists.newArrayList(DISK));
+ counts = sf.updateQuotaAndCollectBlocks(
+ bsps, file, diff, collectedBlocks, removedINodes);
+ Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE,
+ counts.getStorageSpace());
+ Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
+ Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
+ }
+
+}
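
A note on the "(long) REPL_3 << 48" trick in the test above: in this era of
the code, INodeFile packs preferred block size (the low 48 bits), replication,
and storage policy id into a single long header (exact layout assumed here),
so writing the replication factor at bit 48 makes the mocked snapshot inode
report a file replication of 3.

    long header = (long) REPL_3 << 48;               // replication field
    short repl = (short) ((header >>> 48) & 0xFFF);  // reads back 3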
[17/36] hadoop git commit: HADOOP-11948. test-patch's issue matching
regex should be configurable. (Sean Busbey via aw)
Posted by zj...@apache.org.
HADOOP-11948. test-patch's issue matching regex should be configurable. (Sean Busbey via aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/708e2ca3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/708e2ca3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/708e2ca3
Branch: refs/heads/YARN-2928
Commit: 708e2ca3a9f319de6f7cbd2a8972a819d5e90c81
Parents: 74e8340
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon May 11 11:53:54 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:13 2015 -0700
----------------------------------------------------------------------
dev-support/test-patch.sh | 4 ++++
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
2 files changed, 7 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/708e2ca3/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index ad3d22c..ccadfb7 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -581,6 +581,7 @@ function hadoop_usage
echo "--debug If set, then output some extra stuff to stderr"
echo "--dirty-workspace Allow the local git workspace to have uncommitted changes"
echo "--findbugs-home=<path> Findbugs home directory (default FINDBUGS_HOME environment variable)"
+ echo "--issue-re=<expr> Bash regular expression to use when trying to find a jira ref in the patch name (default '^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$')"
echo "--modulelist=<list> Specify additional modules to test (comma delimited)"
echo "--offline Avoid connecting to the Internet"
echo "--patch-dir=<dir> The directory for working and output files (default '/tmp/${PROJECT_NAME}-test-patch/pid')"
@@ -668,6 +669,9 @@ function parse_args
hadoop_usage
exit 0
;;
+ --issue-re=*)
+ ISSUE_RE=${i#*=}
+ ;;
--java-home)
JAVA_HOME=${i#*=}
;;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/708e2ca3/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ee7f09b..44e78ba 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -557,6 +557,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-11950. Add cli option to test-patch to set the project-under-test
(Sean Busbey via aw)
+ HADOOP-11948. test-patch's issue matching regex should be configurable.
+ (Sean Busbey via aw)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
[32/36] hadoop git commit: HADOOP-11962. Sasl message with MD5
challenge text shouldn't be logged even at debug level. Contributed by
Junping Du.
Posted by zj...@apache.org.
HADOOP-11962. Sasl message with MD5 challenge text shouldn't be logged even at debug level. Contributed by Junping Du.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27746bd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27746bd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27746bd5
Branch: refs/heads/YARN-2928
Commit: 27746bd5c9dfe091f8c05bd233d23f4d851c2bae
Parents: aa03f3f
Author: Haohui Mai <wh...@apache.org>
Authored: Tue May 12 10:30:32 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:27 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java | 3 ---
.../src/main/java/org/apache/hadoop/security/SaslRpcClient.java | 3 ---
.../java/org/apache/hadoop/security/UserGroupInformation.java | 3 ---
4 files changed, 3 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27746bd5/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 47731fb..a15444e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -668,6 +668,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-11947. test-patch should return early from determine-issue when
run in jenkins mode. (Sean Busbey via aw)
+ HADOOP-11962. Sasl message with MD5 challenge text shouldn't be logged
+ even at debug level. (Junping Du via wheat9)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27746bd5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 5f1809a..ac32ac9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1488,9 +1488,6 @@ public abstract class Server {
}
private void doSaslReply(Message message) throws IOException {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Sending sasl message "+message);
- }
setupResponse(saslResponse, saslCall,
RpcStatusProto.SUCCESS, null,
new RpcResponseWrapper(message), null, null);
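
The fix simply drops the statements, since the stringified message embeds the
MD5 challenge. If a trace were still wanted, a safer variant would log only
non-sensitive metadata; a hypothetical sketch, not what this patch does:

    if (LOG.isDebugEnabled()) {
      // log the message type, never the challenge/response payload
      LOG.debug("Sending sasl reply of type "
          + message.getClass().getSimpleName());
    }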
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27746bd5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
index 4a1a397..7d3afa8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
@@ -385,9 +385,6 @@ public class SaslRpcClient {
}
RpcSaslProto saslMessage =
RpcSaslProto.parseFrom(responseWrapper.getMessageBytes());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Received SASL message "+saslMessage);
- }
// handle sasl negotiation process
RpcSaslProto.Builder response = null;
switch (saslMessage.getState()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27746bd5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 4b0b5f3..be3d60d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -865,9 +865,6 @@ public class UserGroupInformation {
.getPrivateCredentials(KerberosTicket.class);
for (KerberosTicket ticket : tickets) {
if (SecurityUtil.isOriginalTGT(ticket)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Found tgt " + ticket);
- }
return ticket;
}
}
[09/36] hadoop git commit: YARN-1287. Consolidate MockClocks.
(Sebastian Wong and Anubhav Dhoot via kasha)
Posted by zj...@apache.org.
YARN-1287. Consolidate MockClocks. (Sebastian Wong and Anubhav Dhoot via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca59e771
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca59e771
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca59e771
Branch: refs/heads/YARN-2928
Commit: ca59e771062da7e857fd4a5da8cd14982221b36e
Parents: 33ae623
Author: Karthik Kambatla <ka...@apache.org>
Authored: Sat May 9 14:34:54 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:11 2015 -0700
----------------------------------------------------------------------
.../mapreduce/v2/app/TestRuntimeEstimators.java | 26 ++--------
hadoop-yarn-project/CHANGES.txt | 3 ++
.../hadoop/yarn/util/ControlledClock.java | 15 ++++++
.../util/TestCgroupsLCEResourcesHandler.java | 18 ++-----
.../scheduler/fair/FairSchedulerTestBase.java | 12 -----
.../fair/TestAllocationFileLoaderService.java | 19 ++-----
.../fair/TestContinuousScheduling.java | 15 ++----
.../scheduler/fair/TestFSAppAttempt.java | 26 +++-------
.../scheduler/fair/TestFairScheduler.java | 53 ++++++++++----------
.../fair/TestFairSchedulerPreemption.java | 12 +++--
.../fair/TestMaxRunningAppsEnforcer.java | 7 +--
11 files changed, 82 insertions(+), 124 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
index fe0f341..69f2709 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
@@ -69,13 +69,13 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Assert;
import org.junit.Test;
@@ -90,7 +90,7 @@ public class TestRuntimeEstimators {
private static int MAP_TASKS = 200;
private static int REDUCE_TASKS = 150;
- MockClock clock;
+ ControlledClock clock;
Job myJob;
@@ -120,7 +120,7 @@ public class TestRuntimeEstimators {
private void coreTestEstimator
(TaskRuntimeEstimator testedEstimator, int expectedSpeculations) {
estimator = testedEstimator;
- clock = new MockClock();
+ clock = new ControlledClock();
dispatcher = new AsyncDispatcher();
myJob = null;
slotsInUse.set(0);
@@ -129,7 +129,7 @@ public class TestRuntimeEstimators {
successfulSpeculations.set(0);
taskTimeSavedBySpeculation.set(0);
- clock.advanceTime(1000);
+ clock.tickMsec(1000);
Configuration conf = new Configuration();
@@ -230,7 +230,7 @@ public class TestRuntimeEstimators {
}
}
- clock.advanceTime(1000L);
+ clock.tickMsec(1000L);
if (clock.getTime() % 10000L == 0L) {
speculator.scanForSpeculations();
@@ -777,22 +777,6 @@ public class TestRuntimeEstimators {
}
}
- static class MockClock implements Clock {
- private long currentTime = 0;
-
- public long getTime() {
- return currentTime;
- }
-
- void setMeasuredTime(long newTime) {
- currentTime = newTime;
- }
-
- void advanceTime(long increment) {
- currentTime += increment;
- }
- }
-
class MyAppMaster extends CompositeService {
final Clock clock;
public MyAppMaster(Clock clock) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2be7604..f7c56f1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -291,6 +291,9 @@ Release 2.8.0 - UNRELEASED
YARN-1912. ResourceLocalizer started without any jvm memory control.
(Masatake Iwasaki via xgong)
+ YARN-1287. Consolidate MockClocks.
+ (Sebastian Wong and Anubhav Dhoot via kasha)
+
OPTIMIZATIONS
YARN-3339. TestDockerContainerExecutor should pull a single image and not
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/ControlledClock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/ControlledClock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/ControlledClock.java
index 16bd785..6dcb1dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/ControlledClock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/ControlledClock.java
@@ -21,6 +21,11 @@ package org.apache.hadoop.yarn.util;
public class ControlledClock implements Clock {
private long time = -1;
private final Clock actualClock;
+ // Convenience for getting a controlled clock with overridden time
+ public ControlledClock() {
+ this(new SystemClock());
+ setTime(0);
+ }
public ControlledClock(Clock actualClock) {
this.actualClock = actualClock;
}
@@ -30,6 +35,16 @@ public class ControlledClock implements Clock {
public synchronized void reset() {
time = -1;
}
+ public synchronized void tickSec(int seconds) {
+ tickMsec(seconds * 1000L);
+ }
+ public synchronized void tickMsec(long millisec) {
+ if (time == -1) {
+ throw new IllegalStateException("ControlledClock setTime should be " +
+ "called before incrementing time");
+ }
+ time = time + millisec;
+ }
@Override
public synchronized long getTime() {
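
Minimal usage sketch of the consolidated clock (illustrative only):

    ControlledClock clock = new ControlledClock(); // convenience ctor, time 0
    clock.tickSec(1);                              // advance one second
    assert clock.getTime() == 1000L;
    clock.tickMsec(500);                           // finer-grained advance
    assert clock.getTime() == 1500L;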
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
index 8e9d787..440f9ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
@@ -22,10 +22,11 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.TestCGroupsHandlerImpl;
+
+import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.junit.Assert;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.util.Clock;
import org.junit.Test;
import org.junit.After;
import org.junit.Before;
@@ -39,14 +40,6 @@ import java.util.concurrent.CountDownLatch;
public class TestCgroupsLCEResourcesHandler {
static File cgroupDir = null;
- static class MockClock implements Clock {
- long time;
- @Override
- public long getTime() {
- return time;
- }
- }
-
@Before
public void setUp() throws Exception {
cgroupDir =
@@ -93,8 +86,7 @@ public class TestCgroupsLCEResourcesHandler {
// Verify DeleteCgroup times out if "tasks" file contains data
@Test
public void testDeleteCgroup() throws Exception {
- final MockClock clock = new MockClock();
- clock.time = System.currentTimeMillis();
+ final ControlledClock clock = new ControlledClock();
CgroupsLCEResourcesHandler handler = new CgroupsLCEResourcesHandler();
handler.setConf(new YarnConfiguration());
handler.initConfig();
@@ -118,8 +110,8 @@ public class TestCgroupsLCEResourcesHandler {
} catch (InterruptedException ex) {
//NOP
}
- clock.time += YarnConfiguration.
- DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT;
+ clock.tickMsec(YarnConfiguration.
+ DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT);
}
}.start();
latch.await();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
index 0a9c389..23b708a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
@@ -49,18 +49,6 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.Clock;
public class FairSchedulerTestBase {
- protected static class MockClock implements Clock {
- private long time = 0;
- @Override
- public long getTime() {
- return time;
- }
-
- public void tick(int seconds) {
- time = time + seconds * 1000;
- }
- }
-
public final static String TEST_DIR =
new File(System.getProperty("test.build.data", "/tmp")).getAbsolutePath();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
index b09573c..202eb09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSche
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.NestedUserQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy;
-import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Test;
@@ -43,18 +43,6 @@ public class TestAllocationFileLoaderService {
final static String ALLOC_FILE = new File(TEST_DIR,
"test-queues").getAbsolutePath();
- private class MockClock implements Clock {
- private long time = 0;
- @Override
- public long getTime() {
- return time;
- }
-
- public void tick(long ms) {
- time += ms;
- }
- }
-
@Test
public void testGetAllocationFileFromClasspath() {
Configuration conf = new Configuration();
@@ -81,7 +69,8 @@ public class TestAllocationFileLoaderService {
out.println("</allocations>");
out.close();
- MockClock clock = new MockClock();
+ ControlledClock clock = new ControlledClock();
+ clock.setTime(0);
Configuration conf = new Configuration();
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
@@ -126,7 +115,7 @@ public class TestAllocationFileLoaderService {
out.println("</allocations>");
out.close();
- clock.tick(System.currentTimeMillis()
+ clock.tickMsec(System.currentTimeMillis()
+ AllocationFileLoaderService.ALLOC_RELOAD_WAIT_MS + 10000);
allocLoader.start();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
index a72e393..53382de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
@@ -19,20 +19,16 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-import org.apache.hadoop.metrics2.util.SampleStat;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
- .NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.After;
import static org.junit.Assert.assertEquals;
@@ -41,11 +37,10 @@ import org.junit.Before;
import org.junit.Test;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
public class TestContinuousScheduling extends FairSchedulerTestBase {
- private MockClock mockClock;
+ private ControlledClock mockClock;
@Override
public Configuration createConfiguration() {
@@ -59,7 +54,7 @@ public class TestContinuousScheduling extends FairSchedulerTestBase {
@Before
public void setup() {
- mockClock = new MockClock();
+ mockClock = new ControlledClock();
conf = createConfiguration();
resourceManager = new MockRM(conf);
resourceManager.start();
@@ -108,7 +103,7 @@ public class TestContinuousScheduling extends FairSchedulerTestBase {
FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
// Advance time and let continuous scheduling kick in
- mockClock.tick(1);
+ mockClock.tickSec(1);
while (1024 != app.getCurrentConsumption().getMemory()) {
Thread.sleep(100);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
index 43fe186..7aa62a8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
@@ -31,7 +31,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
-import org.apache.hadoop.yarn.util.Clock;
+
+import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Before;
import org.junit.Test;
@@ -39,19 +40,6 @@ import org.mockito.Mockito;
public class TestFSAppAttempt extends FairSchedulerTestBase {
- private class MockClock implements Clock {
- private long time = 0;
- @Override
- public long getTime() {
- return time;
- }
-
- public void tick(int seconds) {
- time = time + seconds * 1000;
- }
-
- }
-
@Before
public void setup() {
Configuration conf = createConfiguration();
@@ -125,7 +113,7 @@ public class TestFSAppAttempt extends FairSchedulerTestBase {
Priority prio = Mockito.mock(Priority.class);
Mockito.when(prio.getPriority()).thenReturn(1);
- MockClock clock = new MockClock();
+ ControlledClock clock = new ControlledClock();
scheduler.setClock(clock);
long nodeLocalityDelayMs = 5 * 1000L; // 5 seconds
@@ -143,13 +131,13 @@ public class TestFSAppAttempt extends FairSchedulerTestBase {
nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
// after 4 seconds should remain node local
- clock.tick(4);
+ clock.tickSec(4);
assertEquals(NodeType.NODE_LOCAL,
schedulerApp.getAllowedLocalityLevelByTime(prio,
nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
// after 6 seconds should switch to rack local
- clock.tick(2);
+ clock.tickSec(2);
assertEquals(NodeType.RACK_LOCAL,
schedulerApp.getAllowedLocalityLevelByTime(prio,
nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
@@ -162,12 +150,12 @@ public class TestFSAppAttempt extends FairSchedulerTestBase {
nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
// Now escalate again to rack-local, then to off-switch
- clock.tick(6);
+ clock.tickSec(6);
assertEquals(NodeType.RACK_LOCAL,
schedulerApp.getAllowedLocalityLevelByTime(prio,
nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
- clock.tick(7);
+ clock.tickSec(7);
assertEquals(NodeType.OFF_SWITCH,
schedulerApp.getAllowedLocalityLevelByTime(prio,
nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index fc69de7..a26209b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -72,7 +72,6 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -100,6 +99,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlaceme
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.ControlledClock;
+import org.apache.hadoop.yarn.util.SystemClock;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.After;
import org.junit.Assert;
@@ -1489,7 +1490,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE + ".allocation.file", ALLOC_FILE);
conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false");
- MockClock clock = new MockClock();
+ ControlledClock clock = new ControlledClock();
scheduler.setClock(clock);
PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
@@ -1587,7 +1588,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
scheduler.getSchedulerApp(app2).getPreemptionContainers()));
// Pretend 15 seconds have passed
- clock.tick(15);
+ clock.tickSec(15);
// Trigger a kill by insisting we want containers back
scheduler.preemptResources(Resources.createResource(2 * 1024));
@@ -1617,7 +1618,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
scheduler.preemptResources(Resources.createResource(2 * 1024));
// Pretend 15 seconds have passed
- clock.tick(15);
+ clock.tickSec(15);
// We should be able to claw back another container from A and B each.
// For queueA (fifo), continue preempting from app2.
@@ -1649,7 +1650,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false");
- MockClock clock = new MockClock();
+ ControlledClock clock = new ControlledClock();
scheduler.setClock(clock);
PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
@@ -1702,7 +1703,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
scheduler.update();
// Let 11 sec pass
- clock.tick(11);
+ clock.tickSec(11);
scheduler.update();
Resource toPreempt = scheduler.resToPreempt(scheduler.getQueueManager()
@@ -1722,7 +1723,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
*/
public void testPreemptionDecision() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
- MockClock clock = new MockClock();
+ ControlledClock clock = new ControlledClock();
scheduler.setClock(clock);
PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
@@ -1833,7 +1834,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
Resources.none(), scheduler.resToPreempt(schedD, clock.getTime())));
// After minSharePreemptionTime has passed, they should want to preempt min
// share.
- clock.tick(6);
+ clock.tickSec(6);
assertEquals(
1024, scheduler.resToPreempt(schedC, clock.getTime()).getMemory());
assertEquals(
@@ -1842,7 +1843,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
// After fairSharePreemptionTime has passed, they should want to preempt
// fair share.
scheduler.update();
- clock.tick(6);
+ clock.tickSec(6);
assertEquals(
1536 , scheduler.resToPreempt(schedC, clock.getTime()).getMemory());
assertEquals(
@@ -1855,7 +1856,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
*/
public void testPreemptionDecisionWithVariousTimeout() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
- MockClock clock = new MockClock();
+ ControlledClock clock = new ControlledClock();
scheduler.setClock(clock);
PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
@@ -1971,7 +1972,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
// After 5 seconds, queueB1 wants to preempt min share
scheduler.update();
- clock.tick(6);
+ clock.tickSec(6);
assertEquals(
1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory());
assertEquals(
@@ -1981,7 +1982,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
// After 10 seconds, queueB2 wants to preempt min share
scheduler.update();
- clock.tick(5);
+ clock.tickSec(5);
assertEquals(
1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory());
assertEquals(
@@ -1991,7 +1992,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
// After 15 seconds, queueC wants to preempt min share
scheduler.update();
- clock.tick(5);
+ clock.tickSec(5);
assertEquals(
1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory());
assertEquals(
@@ -2001,7 +2002,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
// After 20 seconds, queueB2 should want to preempt fair share
scheduler.update();
- clock.tick(5);
+ clock.tickSec(5);
assertEquals(
1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory());
assertEquals(
@@ -2011,7 +2012,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
// After 25 seconds, queueB1 should want to preempt fair share
scheduler.update();
- clock.tick(5);
+ clock.tickSec(5);
assertEquals(
1536, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory());
assertEquals(
@@ -2021,7 +2022,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
// After 30 seconds, queueC should want to preempt fair share
scheduler.update();
- clock.tick(5);
+ clock.tickSec(5);
assertEquals(
1536, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory());
assertEquals(
@@ -3703,7 +3704,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
@Test
public void testMaxRunningAppsHierarchicalQueues() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
- MockClock clock = new MockClock();
+ ControlledClock clock = new ControlledClock();
scheduler.setClock(clock);
PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
@@ -3728,28 +3729,28 @@ public class TestFairScheduler extends FairSchedulerTestBase {
ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1.sub1", "user1");
verifyAppRunnable(attId1, true);
verifyQueueNumRunnable("queue1.sub1", 1, 0);
- clock.tick(10);
+ clock.tickSec(10);
// exceeds no limits
ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1.sub3", "user1");
verifyAppRunnable(attId2, true);
verifyQueueNumRunnable("queue1.sub3", 1, 0);
- clock.tick(10);
+ clock.tickSec(10);
// exceeds no limits
ApplicationAttemptId attId3 = createSchedulingRequest(1024, "queue1.sub2", "user1");
verifyAppRunnable(attId3, true);
verifyQueueNumRunnable("queue1.sub2", 1, 0);
- clock.tick(10);
+ clock.tickSec(10);
// exceeds queue1 limit
ApplicationAttemptId attId4 = createSchedulingRequest(1024, "queue1.sub2", "user1");
verifyAppRunnable(attId4, false);
verifyQueueNumRunnable("queue1.sub2", 1, 1);
- clock.tick(10);
+ clock.tickSec(10);
// exceeds sub3 limit
ApplicationAttemptId attId5 = createSchedulingRequest(1024, "queue1.sub3", "user1");
verifyAppRunnable(attId5, false);
verifyQueueNumRunnable("queue1.sub3", 1, 1);
- clock.tick(10);
-
+ clock.tickSec(10);
+
// Even though the app was removed from sub3, the app from sub2 gets to go
// because it came in first
AppAttemptRemovedSchedulerEvent appRemovedEvent1 =
@@ -3923,7 +3924,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
public void testRecoverRequestAfterPreemption() throws Exception {
conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10);
- MockClock clock = new MockClock();
+ ControlledClock clock = new ControlledClock();
scheduler.setClock(clock);
scheduler.init(conf);
scheduler.start();
@@ -3974,8 +3975,8 @@ public class TestFairScheduler extends FairSchedulerTestBase {
scheduler.warnOrKillContainer(rmContainer);
// Wait for few clock ticks
- clock.tick(5);
-
+ clock.tickSec(5);
+
// preempt now
scheduler.warnOrKillContainer(rmContainer);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
index 458b06d..6f759ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+
+import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.After;
import org.junit.Before;
@@ -43,7 +45,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
private final static String ALLOC_FILE = new File(TEST_DIR,
TestFairSchedulerPreemption.class.getName() + ".xml").getAbsolutePath();
- private MockClock clock;
+ private ControlledClock clock;
private static class StubbedFairScheduler extends FairScheduler {
public int lastPreemptMemory = -1;
@@ -70,7 +72,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
@Before
public void setup() throws IOException {
conf = createConfiguration();
- clock = new MockClock();
+ clock = new ControlledClock();
}
@After
@@ -148,7 +150,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
// Verify submitting another request triggers preemption
createSchedulingRequest(1024, "queueB", "user1", 1, 1);
scheduler.update();
- clock.tick(6);
+ clock.tickSec(6);
((StubbedFairScheduler) scheduler).resetLastPreemptResources();
scheduler.preemptTasksIfNecessary();
@@ -164,7 +166,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
// Verify submitting another request doesn't trigger preemption
createSchedulingRequest(1024, "queueB", "user1", 1, 1);
scheduler.update();
- clock.tick(6);
+ clock.tickSec(6);
((StubbedFairScheduler) scheduler).resetLastPreemptResources();
scheduler.preemptTasksIfNecessary();
@@ -180,7 +182,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
// Verify submitting another request triggers preemption
createSchedulingRequest(1024, "queueB", "user1", 1, 1);
scheduler.update();
- clock.tick(6);
+ clock.tickSec(6);
((StubbedFairScheduler) scheduler).resetLastPreemptResources();
scheduler.preemptTasksIfNecessary();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca59e771/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java
index ac5748f..6cca19a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.util.ControlledClock;
import org.junit.Before;
import org.junit.Test;
@@ -40,14 +41,14 @@ public class TestMaxRunningAppsEnforcer {
private Map<String, Integer> userMaxApps;
private MaxRunningAppsEnforcer maxAppsEnforcer;
private int appNum;
- private TestFairScheduler.MockClock clock;
+ private ControlledClock clock;
private RMContext rmContext;
private FairScheduler scheduler;
@Before
public void setup() throws Exception {
Configuration conf = new Configuration();
- clock = new TestFairScheduler.MockClock();
+ clock = new ControlledClock();
scheduler = mock(FairScheduler.class);
when(scheduler.getConf()).thenReturn(
new FairSchedulerConfiguration(conf));
@@ -151,7 +152,7 @@ public class TestMaxRunningAppsEnforcer {
FSAppAttempt app1 = addApp(leaf1, "user");
addApp(leaf2, "user");
addApp(leaf2, "user");
- clock.tick(20);
+ clock.tickSec(20);
addApp(leaf1, "user");
assertEquals(1, leaf1.getNumRunnableApps());
assertEquals(1, leaf2.getNumRunnableApps());
[36/36] hadoop git commit: YARN-3613. TestContainerManagerSecurity
should init and start Yarn cluster in setup instead of individual methods.
(nijel via kasha)
YARN-3613. TestContainerManagerSecurity should init and start Yarn cluster in setup instead of individual methods. (nijel via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6da88e37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6da88e37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6da88e37
Branch: refs/heads/YARN-2928
Commit: 6da88e371a19432878d3eb7cfaf3401507b277a1
Parents: 27746bd
Author: Karthik Kambatla <ka...@apache.org>
Authored: Tue May 12 10:45:33 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:28 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 ++
.../server/TestContainerManagerSecurity.java | 46 +++++---------------
2 files changed, 15 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da88e37/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 49c6a78..dce2103 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -303,6 +303,9 @@ Release 2.8.0 - UNRELEASED
YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug
log for overall resource usage by all containers. (Naganarasimha G R via devaraj)
+ YARN-3613. TestContainerManagerSecurity should init and start Yarn cluster in
+ setup instead of individual methods. (nijel via kasha)
+
OPTIMIZATIONS
YARN-3339. TestDockerContainerExecutor should pull a single image and not
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da88e37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index f0dcb56..59bb6aa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -82,8 +82,6 @@ import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
-import com.google.common.io.ByteArrayDataInput;
-import com.google.common.io.ByteStreams;
@RunWith(Parameterized.class)
public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
@@ -105,10 +103,20 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
testRootDir.mkdirs();
httpSpnegoKeytabFile.deleteOnExit();
getKdc().createPrincipal(httpSpnegoKeytabFile, httpSpnegoPrincipal);
+
+ yarnCluster =
+ new MiniYARNCluster(TestContainerManagerSecurity.class.getName(), 1, 1,
+ 1);
+ yarnCluster.init(conf);
+ yarnCluster.start();
}
@After
public void tearDown() {
+ if (yarnCluster != null) {
+ yarnCluster.stop();
+ yarnCluster = null;
+ }
testRootDir.delete();
}
@@ -144,11 +152,6 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
@Test (timeout = 120000)
public void testContainerManager() throws Exception {
- try {
- yarnCluster = new MiniYARNCluster(TestContainerManagerSecurity.class
- .getName(), 1, 1, 1);
- yarnCluster.init(conf);
- yarnCluster.start();
// TestNMTokens.
testNMTokens(conf);
@@ -156,36 +159,11 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
// Testing for container token tampering
testContainerToken(conf);
- } catch (Exception e) {
- e.printStackTrace();
- throw e;
- } finally {
- if (yarnCluster != null) {
- yarnCluster.stop();
- yarnCluster = null;
- }
- }
- }
-
- @Test (timeout = 120000)
- public void testContainerManagerWithEpoch() throws Exception {
- try {
- yarnCluster = new MiniYARNCluster(TestContainerManagerSecurity.class
- .getName(), 1, 1, 1);
- yarnCluster.init(conf);
- yarnCluster.start();
-
- // Testing for container token tampering
+ // Testing for container token tampering with epoch
testContainerTokenWithEpoch(conf);
- } finally {
- if (yarnCluster != null) {
- yarnCluster.stop();
- yarnCluster = null;
- }
- }
}
-
+
private void testNMTokens(Configuration conf) throws Exception {
NMTokenSecretManagerInRM nmTokenSecretManagerRM =
yarnCluster.getResourceManager().getRMContext()
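
Taken together, the change leaves one MiniYARNCluster lifecycle per test class instead of one per method. A hedged sketch of the resulting fixture shape, mirroring only the MiniYARNCluster calls that appear in the hunk above (the class name and test body are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class ClusterLifecycleSketch {
  private Configuration conf;
  private MiniYARNCluster yarnCluster;

  @Before
  public void setUp() {
    conf = new Configuration();
    // 1 NodeManager, 1 local dir, 1 log dir, matching the hunk above
    yarnCluster = new MiniYARNCluster(
        ClusterLifecycleSketch.class.getName(), 1, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();
  }

  @After
  public void tearDown() {
    if (yarnCluster != null) {
      yarnCluster.stop();
      yarnCluster = null;
    }
  }

  @Test
  public void testsRunAgainstSharedCluster() {
    // each test now assumes a running cluster rather than building its own
  }
}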
[13/36] hadoop git commit: HDFS-8357. Consolidate parameters of
INode.cleanSubtree() into a parameter object. Contributed by Li Lu.
HDFS-8357. Consolidate parameters of INode.cleanSubtree() into a parameter object. Contributed by Li Lu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51812970
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51812970
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51812970
Branch: refs/heads/YARN-2928
Commit: 5181297087f71d3978d87d8cec542c0a3ae6c5e9
Parents: 86ff073
Author: Haohui Mai <wh...@apache.org>
Authored: Sat May 9 22:51:58 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:12 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hdfs/server/namenode/FSDirDeleteOp.java | 11 +-
.../hdfs/server/namenode/FSDirRenameOp.java | 12 +-
.../hadoop/hdfs/server/namenode/INode.java | 89 ++++++++------
.../hdfs/server/namenode/INodeDirectory.java | 44 +++----
.../hadoop/hdfs/server/namenode/INodeFile.java | 35 +++---
.../hadoop/hdfs/server/namenode/INodeMap.java | 9 +-
.../hdfs/server/namenode/INodeReference.java | 59 ++++-----
.../hdfs/server/namenode/INodeSymlink.java | 15 +--
.../namenode/snapshot/AbstractINodeDiff.java | 15 +--
.../snapshot/AbstractINodeDiffList.java | 20 ++-
.../snapshot/DirectorySnapshottableFeature.java | 12 +-
.../snapshot/DirectoryWithSnapshotFeature.java | 121 ++++++++-----------
.../hdfs/server/namenode/snapshot/FileDiff.java | 18 ++-
.../server/namenode/snapshot/FileDiffList.java | 11 +-
.../snapshot/FileWithSnapshotFeature.java | 35 +++---
.../namenode/snapshot/SnapshotManager.java | 6 +-
.../snapshot/TestFileWithSnapshotFeature.java | 8 +-
18 files changed, 232 insertions(+), 291 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 88503fb..f733f22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -537,6 +537,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8327. Simplify quota calculations for snapshots and truncate. (wheat9)
+ HDFS-8357. Consolidate parameters of INode.cleanSubtree() into a parameter
+ object. (Li Lu via wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index c31d75f..f99e50c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -250,13 +250,14 @@ class FSDirDeleteOp {
}
// collect block and update quota
+ INode.ReclaimContext reclaimContext = new INode.ReclaimContext(
+ fsd.getBlockStoragePolicySuite(), collectedBlocks,
+ removedINodes, removedUCFiles);
if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
- targetNode.destroyAndCollectBlocks(fsd.getBlockStoragePolicySuite(),
- collectedBlocks, removedINodes, removedUCFiles);
+ targetNode.destroyAndCollectBlocks(reclaimContext);
} else {
- QuotaCounts counts = targetNode.cleanSubtree(
- fsd.getBlockStoragePolicySuite(), CURRENT_STATE_ID,
- latestSnapshot, collectedBlocks, removedINodes, removedUCFiles);
+ QuotaCounts counts = targetNode.cleanSubtree(reclaimContext,
+ CURRENT_STATE_ID, latestSnapshot);
removed = counts.getNameSpace();
fsd.updateCountNoQuotaCheck(iip, iip.length() -1, counts.negation());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index d5faa78..7675907 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -732,14 +732,16 @@ class FSDirRenameOp {
List<Long> removedUCFiles = new ChunkedArrayList<>();
final boolean filesDeleted;
if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
- oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- removedUCFiles);
+ oldDstChild.destroyAndCollectBlocks(
+ new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, removedUCFiles));
filesDeleted = true;
} else {
filesDeleted = oldDstChild.cleanSubtree(
- bsps, Snapshot.CURRENT_STATE_ID,
- dstIIP.getLatestSnapshotId(), collectedBlocks,
- removedINodes, removedUCFiles).getNameSpace() >= 0;
+ new INode.ReclaimContext(bsps, collectedBlocks, removedINodes,
+ removedUCFiles),
+ Snapshot.CURRENT_STATE_ID,
+ dstIIP.getLatestSnapshotId())
+ .getNameSpace() >= 0;
}
fsd.getFSNamesystem().removeLeasesAndINodes(
removedUCFiles, removedINodes, false);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index b65879f..64af76f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -387,30 +387,20 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* snapshot in its diff list. Recursively clean its children.
* </pre>
*
- * @param bsps
- * block storage policy suite to calculate intended storage type usage
+ * @param reclaimContext
+ * Record blocks and inodes that need to be reclaimed.
* @param snapshotId
- * The id of the snapshot to delete.
- * {@link Snapshot#CURRENT_STATE_ID} means to delete the current
- * file/directory.
+ * The id of the snapshot to delete.
+ * {@link Snapshot#CURRENT_STATE_ID} means to delete the current
+ * file/directory.
* @param priorSnapshotId
- * The id of the latest snapshot before the to-be-deleted snapshot.
- * When deleting a current inode, this parameter captures the latest
- * snapshot.
- * @param collectedBlocks
- * blocks collected from the descents for further block
- * deletion/update will be added to the given map.
- * @param removedINodes
- * INodes collected from the descents for further cleaning up of
- * inodeMap
- * @param removedUCFiles
- * INodes whose leases need to be released
+ * The id of the latest snapshot before the to-be-deleted snapshot.
+ * When deleting a current inode, this parameter captures the latest
+ * snapshot.
* @return quota usage delta when deleting a snapshot
*/
public abstract QuotaCounts cleanSubtree(
- final BlockStoragePolicySuite bsps, final int snapshotId,
- int priorSnapshotId, BlocksMapUpdateInfo collectedBlocks,
- List<INode> removedINodes, List<Long> removedUCFiles);
+ ReclaimContext reclaimContext, final int snapshotId, int priorSnapshotId);
/**
* Destroy self and clear everything! If the INode is a file, this method
@@ -418,22 +408,11 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* directory, the method goes down the subtree and collects blocks from the
* descents, and clears its parent/children references as well. The method
* also clears the diff list if the INode contains snapshot diff list.
- * @param bsps
- * block storage policy suite to calculate intended storage type usage
- * This is needed because INodeReference#destroyAndCollectBlocks() needs
- * to call INode#cleanSubtree(), which calls INode#computeQuotaUsage().
- * @param collectedBlocks
- * blocks collected from the descents for further block
- * deletion/update will be added to this map.
- * @param removedINodes
- * INodes collected from the descents for further cleaning up of
- * inodeMap
- * @param removedUCFiles
- * INodes whose leases need to be released
- */
- public abstract void destroyAndCollectBlocks(
- BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
- List<INode> removedINodes, List<Long> removedUCFiles);
+ *
+ * @param reclaimContext
+ * Record blocks and inodes that need to be reclaimed.
+ */
+ public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
/** Compute {@link ContentSummary}. Blocking call */
public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
@@ -823,7 +802,45 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
out.print(getParentString());
out.print(", " + getPermissionStatus(snapshotId));
}
-
+
+ /**
+ * Context object to record blocks and inodes that need to be reclaimed
+ */
+ public static class ReclaimContext {
+ protected final BlockStoragePolicySuite bsps;
+ protected final BlocksMapUpdateInfo collectedBlocks;
+ protected final List<INode> removedINodes;
+ protected final List<Long> removedUCFiles;
+ /**
+ * @param bsps
+ * block storage policy suite to calculate intended storage type
+ * usage
+ * @param collectedBlocks
+ * blocks collected from the descents for further block
+ * deletion/update will be added to the given map.
+ * @param removedINodes
+ * INodes collected from the descents for further cleaning up of
+ * the inodeMap
+ * @param removedUCFiles
+ * files whose leases the NN needs to release
+ */
+ public ReclaimContext(
+ BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
+ List<INode> removedINodes, List<Long> removedUCFiles) {
+ this.bsps = bsps;
+ this.collectedBlocks = collectedBlocks;
+ this.removedINodes = removedINodes;
+ this.removedUCFiles = removedUCFiles;
+ }
+
+ public BlockStoragePolicySuite storagePolicySuite() {
+ return bsps;
+ }
+
+ public BlocksMapUpdateInfo collectedBlocks() {
+ return collectedBlocks;
+ }
+ }
+
/**
* Information used for updating the blocksMap when deleting files.
*/
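
The signature churn above is the classic parameter-object refactoring: the (bsps, collectedBlocks, removedINodes, removedUCFiles) tuple that every cleanSubtree/destroyAndCollectBlocks override had to repeat now travels as a single ReclaimContext. A generic sketch of the shape, with illustrative names rather than the HDFS types:

import java.util.ArrayList;
import java.util.List;

public class ParameterObjectSketch {
  /** The shared state that used to be four separate parameters. */
  static class ReclaimContext {
    final List<String> collectedBlocks = new ArrayList<>();
    final List<String> removedINodes = new ArrayList<>();
  }

  // Before: destroy(Policy p, List<Block> blocks, List<INode> inodes, List<Long> uc)
  // After: one context object flows through the whole recursive traversal.
  static void destroyAndCollect(ReclaimContext ctx, String inode) {
    ctx.collectedBlocks.add(inode + ":blk");
    ctx.removedINodes.add(inode);
  }

  public static void main(String[] args) {
    ReclaimContext ctx = new ReclaimContext();
    destroyAndCollect(ctx, "file1");
    destroyAndCollect(ctx, "file2");
    System.out.println(ctx.removedINodes); // [file1, file2]
  }
}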
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index fa63889..106c9f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -264,11 +264,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
return getDirectorySnapshottableFeature().addSnapshot(this, id, name);
}
- public Snapshot removeSnapshot(BlockStoragePolicySuite bsps, String snapshotName,
- BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+ public Snapshot removeSnapshot(
+ ReclaimContext reclaimContext, String snapshotName)
throws SnapshotException {
- return getDirectorySnapshottableFeature().removeSnapshot(bsps, this,
- snapshotName, collectedBlocks, removedINodes);
+ return getDirectorySnapshottableFeature().removeSnapshot(
+ reclaimContext, this, snapshotName);
}
public void renameSnapshot(String path, String oldName, String newName)
@@ -754,9 +754,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
/** Call cleanSubtree(..) recursively down the subtree. */
public QuotaCounts cleanSubtreeRecursively(
- final BlockStoragePolicySuite bsps, final int snapshot, int prior,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles,
+ ReclaimContext reclaimContext, final int snapshot, int prior,
final Map<INode, INode> excludedNodes) {
QuotaCounts counts = new QuotaCounts.Builder().build();
// in case of deletion snapshot, since this call happens after we modify
@@ -771,8 +769,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
&& excludedNodes.containsKey(child)) {
continue;
} else {
- QuotaCounts childCounts = child.cleanSubtree(bsps, snapshot, prior,
- collectedBlocks, removedINodes, removedUCFiles);
+ QuotaCounts childCounts = child.cleanSubtree(reclaimContext, snapshot, prior);
counts.add(childCounts);
}
}
@@ -780,49 +777,42 @@ public class INodeDirectory extends INodeWithAdditionalFields
}
@Override
- public void destroyAndCollectBlocks(
- final BlockStoragePolicySuite bsps,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null) {
- sf.clear(bsps, this, collectedBlocks, removedINodes, removedUCFiles);
+ sf.clear(reclaimContext, this);
}
for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) {
- child.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- removedUCFiles);
+ child.destroyAndCollectBlocks(reclaimContext);
}
if (getAclFeature() != null) {
AclStorage.removeAclFeature(getAclFeature());
}
clear();
- removedINodes.add(this);
+ reclaimContext.removedINodes.add(this);
}
@Override
public QuotaCounts cleanSubtree(
- final BlockStoragePolicySuite bsps, final int snapshotId, int priorSnapshotId,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ ReclaimContext reclaimContext, final int snapshotId, int priorSnapshotId) {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
// there is snapshot data
if (sf != null) {
- return sf.cleanDirectory(bsps, this, snapshotId, priorSnapshotId,
- collectedBlocks, removedINodes, removedUCFiles);
+ return sf.cleanDirectory(reclaimContext, this, snapshotId,
+ priorSnapshotId);
}
// there is no snapshot data
if (priorSnapshotId == Snapshot.NO_SNAPSHOT_ID
&& snapshotId == Snapshot.CURRENT_STATE_ID) {
// destroy the whole subtree and collect blocks that should be deleted
QuotaCounts counts = new QuotaCounts.Builder().build();
- this.computeQuotaUsage(bsps, counts, true);
- destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- removedUCFiles);
+ this.computeQuotaUsage(reclaimContext.bsps, counts, true);
+ destroyAndCollectBlocks(reclaimContext);
return counts;
} else {
// process recursively down the subtree
- QuotaCounts counts = cleanSubtreeRecursively(bsps, snapshotId, priorSnapshotId,
- collectedBlocks, removedINodes, removedUCFiles, null);
+ QuotaCounts counts = cleanSubtreeRecursively(
+ reclaimContext, snapshotId, priorSnapshotId, null);
if (isQuotaSet()) {
getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(counts.negation());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 14fc7b0..3790c74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -492,13 +492,10 @@ public class INodeFile extends INodeWithAdditionalFields
@Override
public QuotaCounts cleanSubtree(
- BlockStoragePolicySuite bsps, final int snapshot, int priorSnapshotId,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ ReclaimContext reclaimContext, final int snapshot, int priorSnapshotId) {
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) {
- return sf.cleanFile(bsps, this, snapshot, priorSnapshotId, collectedBlocks,
- removedINodes);
+ return sf.cleanFile(reclaimContext, this, snapshot, priorSnapshotId);
}
QuotaCounts counts = new QuotaCounts.Builder().build();
@@ -506,17 +503,16 @@ public class INodeFile extends INodeWithAdditionalFields
if (priorSnapshotId == NO_SNAPSHOT_ID) {
// this only happens when deleting the current file and the file is not
// in any snapshot
- computeQuotaUsage(bsps, counts, false);
- destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- removedUCFiles);
+ computeQuotaUsage(reclaimContext.bsps, counts, false);
+ destroyAndCollectBlocks(reclaimContext);
} else {
FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
// when deleting the current file and the file is in snapshot, we should
// clean the 0-sized block if the file is UC
if (uc != null) {
- uc.cleanZeroSizeBlock(this, collectedBlocks);
- if (removedUCFiles != null) {
- removedUCFiles.add(getId());
+ uc.cleanZeroSizeBlock(this, reclaimContext.collectedBlocks);
+ if (reclaimContext.removedUCFiles != null) {
+ reclaimContext.removedUCFiles.add(getId());
}
}
}
@@ -525,12 +521,10 @@ public class INodeFile extends INodeWithAdditionalFields
}
@Override
- public void destroyAndCollectBlocks(
- BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
- if (blocks != null && collectedBlocks != null) {
+ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
+ if (blocks != null && reclaimContext.collectedBlocks != null) {
for (BlockInfoContiguous blk : blocks) {
- collectedBlocks.addDeleteBlock(blk);
+ reclaimContext.collectedBlocks.addDeleteBlock(blk);
blk.setBlockCollection(null);
}
}
@@ -539,14 +533,15 @@ public class INodeFile extends INodeWithAdditionalFields
AclStorage.removeAclFeature(getAclFeature());
}
clear();
- removedINodes.add(this);
+ reclaimContext.removedINodes.add(this);
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) {
- sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
+ sf.getDiffs().destroyAndCollectSnapshotBlocks(
+ reclaimContext.collectedBlocks);
sf.clearDiffs();
}
- if (isUnderConstruction() && removedUCFiles != null) {
- removedUCFiles.add(getId());
+ if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
+ reclaimContext.removedUCFiles.add(getId());
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
index 5f16bd6..e1cb869 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Iterator;
-import java.util.List;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -97,9 +96,7 @@ public class INodeMap {
}
@Override
- public void destroyAndCollectBlocks(
- BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
- List<INode> removedINodes, List<Long> removedUCFiles) {
+ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
// Nothing to do
}
@@ -118,9 +115,7 @@ public class INodeMap {
@Override
public QuotaCounts cleanSubtree(
- BlockStoragePolicySuite bsps, int snapshotId, int priorSnapshotId,
- BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
- List<Long> removedUCFiles) {
+ ReclaimContext reclaimContext, int snapshotId, int priorSnapshotId) {
return null;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 5008dc0..90a2eb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -303,19 +303,15 @@ public abstract class INodeReference extends INode {
@Override // used by WithCount
public QuotaCounts cleanSubtree(
- BlockStoragePolicySuite bsps, int snapshot, int prior, BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
- return referred.cleanSubtree(bsps, snapshot, prior, collectedBlocks,
- removedINodes, removedUCFiles);
+ ReclaimContext reclaimContext, int snapshot, int prior) {
+ return referred.cleanSubtree(reclaimContext,
+ snapshot, prior);
}
@Override // used by WithCount
- public void destroyAndCollectBlocks(
- BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
if (removeReference(this) <= 0) {
- referred.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- removedUCFiles);
+ referred.destroyAndCollectBlocks(reclaimContext);
}
}
@@ -544,8 +540,7 @@ public abstract class INodeReference extends INode {
@Override
public QuotaCounts cleanSubtree(
- BlockStoragePolicySuite bsps, final int snapshot, int prior, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ ReclaimContext reclaimContext, final int snapshot, int prior) {
// since WithName node resides in deleted list acting as a snapshot copy,
// the parameter snapshot must be non-null
Preconditions.checkArgument(snapshot != Snapshot.CURRENT_STATE_ID);
@@ -560,8 +555,8 @@ public abstract class INodeReference extends INode {
return new QuotaCounts.Builder().build();
}
- QuotaCounts counts = getReferredINode().cleanSubtree(bsps, snapshot, prior,
- collectedBlocks, removedINodes, removedUCFiles);
+ QuotaCounts counts = getReferredINode().cleanSubtree(reclaimContext,
+ snapshot, prior);
INodeReference ref = getReferredINode().getParentReference();
if (ref != null) {
try {
@@ -582,13 +577,10 @@ public abstract class INodeReference extends INode {
}
@Override
- public void destroyAndCollectBlocks(
- BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
int snapshot = getSelfSnapshot();
if (removeReference(this) <= 0) {
- getReferredINode().destroyAndCollectBlocks(bsps, collectedBlocks,
- removedINodes, removedUCFiles);
+ getReferredINode().destroyAndCollectBlocks(reclaimContext);
} else {
int prior = getPriorSnapshot(this);
INode referred = getReferredINode().asReference().getReferredINode();
@@ -607,8 +599,8 @@ public abstract class INodeReference extends INode {
return;
}
try {
- QuotaCounts counts = referred.cleanSubtree(bsps, snapshot, prior,
- collectedBlocks, removedINodes, removedUCFiles);
+ QuotaCounts counts = referred.cleanSubtree(reclaimContext,
+ snapshot, prior);
INodeReference ref = getReferredINode().getParentReference();
if (ref != null) {
ref.addSpaceConsumed(counts.negation(), true);
@@ -663,15 +655,12 @@ public abstract class INodeReference extends INode {
@Override
public QuotaCounts cleanSubtree(
- BlockStoragePolicySuite bsps, int snapshot, int prior,
- BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
- List<Long> removedUCFiles) {
+ ReclaimContext reclaimContext, int snapshot, int prior) {
if (snapshot == Snapshot.CURRENT_STATE_ID
&& prior == Snapshot.NO_SNAPSHOT_ID) {
QuotaCounts counts = new QuotaCounts.Builder().build();
- this.computeQuotaUsage(bsps, counts, true);
- destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- removedUCFiles);
+ this.computeQuotaUsage(reclaimContext.bsps, counts, true);
+ destroyAndCollectBlocks(reclaimContext);
return counts;
} else {
// if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to
@@ -687,8 +676,7 @@ public abstract class INodeReference extends INode {
&& Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) {
return new QuotaCounts.Builder().build();
}
- return getReferredINode().cleanSubtree(bsps, snapshot, prior,
- collectedBlocks, removedINodes, removedUCFiles);
+ return getReferredINode().cleanSubtree(reclaimContext, snapshot, prior);
}
}
@@ -701,14 +689,12 @@ public abstract class INodeReference extends INode {
* referred node's subtree and delete everything created after the last
* rename operation, i.e., everything outside of the scope of the prior
* WithName nodes.
+ * @param reclaimContext blocks and inodes that need to be reclaimed
*/
@Override
- public void destroyAndCollectBlocks(
- BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
if (removeReference(this) <= 0) {
- getReferredINode().destroyAndCollectBlocks(bsps, collectedBlocks,
- removedINodes, removedUCFiles);
+ getReferredINode().destroyAndCollectBlocks(reclaimContext);
} else {
// we will clean everything, including files, directories, and
// snapshots, that were created after this prior snapshot
@@ -730,16 +716,15 @@ public abstract class INodeReference extends INode {
// when calling cleanSubtree of the referred node, since we
// compute quota usage updates before calling this destroy
// function, we use true for countDiffChange
- referred.cleanSubtree(bsps, snapshot, prior, collectedBlocks,
- removedINodes, removedUCFiles);
+ referred.cleanSubtree(reclaimContext, snapshot, prior);
} else if (referred.isDirectory()) {
// similarly, if referred is a directory, it must be an
// INodeDirectory with snapshot
INodeDirectory dir = referred.asDirectory();
Preconditions.checkState(dir.isWithSnapshot());
try {
- DirectoryWithSnapshotFeature.destroyDstSubtree(bsps, dir, snapshot,
- prior, collectedBlocks, removedINodes, removedUCFiles);
+ DirectoryWithSnapshotFeature.destroyDstSubtree(
+ reclaimContext, dir, snapshot, prior);
} catch (QuotaExceededException e) {
LOG.error("should not exceed quota while snapshot deletion", e);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index 7ce893f..ad696b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.PrintWriter;
-import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -74,23 +73,17 @@ public class INodeSymlink extends INodeWithAdditionalFields {
@Override
public QuotaCounts cleanSubtree(
- BlockStoragePolicySuite bsps, final int snapshotId, int priorSnapshotId,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ ReclaimContext reclaimContext, final int snapshotId, int priorSnapshotId) {
if (snapshotId == Snapshot.CURRENT_STATE_ID
&& priorSnapshotId == Snapshot.NO_SNAPSHOT_ID) {
- destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- removedUCFiles);
+ destroyAndCollectBlocks(reclaimContext);
}
return new QuotaCounts.Builder().nameSpace(1).build();
}
@Override
- public void destroyAndCollectBlocks(
- final BlockStoragePolicySuite bsps,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
- removedINodes.add(this);
+ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
+ reclaimContext.removedINodes.add(this);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java
index 691d717..89d3625 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java
@@ -116,21 +116,18 @@ abstract class AbstractINodeDiff<N extends INode,
/** Combine the posterior diff and collect blocks for deletion. */
abstract QuotaCounts combinePosteriorAndCollectBlocks(
- final BlockStoragePolicySuite bsps, final N currentINode,
- final D posterior, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes);
+ INode.ReclaimContext reclaimContext, final N currentINode,
+ final D posterior);
/**
* Delete and clear self.
- * @param bsps The block storage policy suite used to retrieve storage policy
+ * @param reclaimContext blocks and inodes that need to be reclaimed
* @param currentINode The inode where the deletion happens.
- * @param collectedBlocks Used to collect blocks for deletion.
- * @param removedINodes INodes removed
- * @return quota usage delta
+ *
+ * @return usage delta
*/
abstract QuotaCounts destroyDiffAndCollectBlocks(
- final BlockStoragePolicySuite bsps, final N currentINode,
- final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes);
+ INode.ReclaimContext reclaimContext, final N currentINode);
@Override
public String toString() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index fb13e09..43127fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -22,9 +22,7 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;
@@ -62,16 +60,14 @@ abstract class AbstractINodeDiffList<N extends INode,
* outside. If the diff to remove is not the first one in the diff list, we
* need to combine the diff with its previous one.
*
+ * @param reclaimContext blocks and inodes that need to be reclaimed
* @param snapshot The id of the snapshot to be deleted
* @param prior The id of the snapshot taken before the to-be-deleted snapshot
- * @param collectedBlocks Used to collect information for blocksMap update
* @return delta in namespace.
*/
- public final QuotaCounts deleteSnapshotDiff(BlockStoragePolicySuite bsps,
- final int snapshot,
- final int prior, final N currentINode,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ public final QuotaCounts deleteSnapshotDiff(
+ INode.ReclaimContext reclaimContext, final int snapshot, final int prior,
+ final N currentINode) {
int snapshotIndex = Collections.binarySearch(diffs, snapshot);
QuotaCounts counts = new QuotaCounts.Builder().build();
@@ -82,8 +78,8 @@ abstract class AbstractINodeDiffList<N extends INode,
diffs.get(snapshotIndex).setSnapshotId(prior);
} else { // there is no snapshot before
removed = diffs.remove(0);
- counts.add(removed.destroyDiffAndCollectBlocks(bsps, currentINode,
- collectedBlocks, removedINodes));
+ counts.add(removed.destroyDiffAndCollectBlocks(reclaimContext,
+ currentINode));
}
} else if (snapshotIndex > 0) {
final AbstractINodeDiff<N, A, D> previous = diffs.get(snapshotIndex - 1);
@@ -96,8 +92,8 @@ abstract class AbstractINodeDiffList<N extends INode,
previous.snapshotINode = removed.snapshotINode;
}
- counts.add(previous.combinePosteriorAndCollectBlocks(
- bsps, currentINode, removed, collectedBlocks, removedINodes));
+ counts.add(previous.combinePosteriorAndCollectBlocks(reclaimContext,
+ currentINode, removed));
previous.setPosterior(removed.getPosterior());
removed.setPosterior(null);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index dc58856..504a64a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -197,15 +197,15 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
* Remove the snapshot with the given name from {@link #snapshotsByNames},
* and delete all the corresponding DirectoryDiff.
*
+ * @param reclaimContext records blocks and inodes that need to be reclaimed
* @param snapshotRoot The directory where we take snapshots
* @param snapshotName The name of the snapshot to be removed
- * @param collectedBlocks Used to collect information to update blocksMap
* @return The removed snapshot. Null if no snapshot with the given name
* exists.
*/
- public Snapshot removeSnapshot(BlockStoragePolicySuite bsps, INodeDirectory snapshotRoot,
- String snapshotName, BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) throws SnapshotException {
+ public Snapshot removeSnapshot(
+ INode.ReclaimContext reclaimContext, INodeDirectory snapshotRoot,
+ String snapshotName) throws SnapshotException {
final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
if (i < 0) {
throw new SnapshotException("Cannot delete snapshot " + snapshotName
@@ -215,8 +215,8 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
final Snapshot snapshot = snapshotsByNames.get(i);
int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
try {
- QuotaCounts counts = snapshotRoot.cleanSubtree(bsps, snapshot.getId(),
- prior, collectedBlocks, removedINodes, null);
+ QuotaCounts counts = snapshotRoot.cleanSubtree(reclaimContext,
+ snapshot.getId(), prior);
INodeDirectory parent = snapshotRoot.getParent();
if (parent != null) {
// there will not be any WithName node corresponding to the deleted
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index bd2dc2d..adb012a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -97,15 +96,12 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
/** clear the created list */
private QuotaCounts destroyCreatedList(
- final BlockStoragePolicySuite bsps, final INodeDirectory currentINode,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ INode.ReclaimContext reclaimContext, final INodeDirectory currentINode) {
QuotaCounts counts = new QuotaCounts.Builder().build();
final List<INode> createdList = getList(ListType.CREATED);
for (INode c : createdList) {
- c.computeQuotaUsage(bsps, counts, true);
- c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- removedUCFiles);
+ c.computeQuotaUsage(reclaimContext.storagePolicySuite(), counts, true);
+ c.destroyAndCollectBlocks(reclaimContext);
// c should be contained in the children list, remove it
currentINode.removeChild(c);
}
@@ -114,16 +110,12 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
}
/** clear the deleted list */
- private QuotaCounts destroyDeletedList(
- final BlockStoragePolicySuite bsps,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ private QuotaCounts destroyDeletedList(INode.ReclaimContext reclaimContext) {
QuotaCounts counts = new QuotaCounts.Builder().build();
final List<INode> deletedList = getList(ListType.DELETED);
for (INode d : deletedList) {
- d.computeQuotaUsage(bsps, counts, false);
- d.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- removedUCFiles);
+ d.computeQuotaUsage(reclaimContext.storagePolicySuite(), counts, false);
+ d.destroyAndCollectBlocks(reclaimContext);
}
deletedList.clear();
return counts;
@@ -211,19 +203,17 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
- final BlockStoragePolicySuite bsps, final INodeDirectory currentDir,
- final DirectoryDiff posterior,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ final INode.ReclaimContext reclaimContext,
+ final INodeDirectory currentDir,
+ final DirectoryDiff posterior) {
final QuotaCounts counts = new QuotaCounts.Builder().build();
diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
/** Collect blocks for deleted files. */
@Override
public void process(INode inode) {
if (inode != null) {
- inode.computeQuotaUsage(bsps, counts, false);
- inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
- null);
+ inode.computeQuotaUsage(reclaimContext.storagePolicySuite(), counts, false);
+ inode.destroyAndCollectBlocks(reclaimContext);
}
}
});
@@ -322,12 +312,10 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
@Override
QuotaCounts destroyDiffAndCollectBlocks(
- BlockStoragePolicySuite bsps, INodeDirectory currentINode,
- BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+ INode.ReclaimContext reclaimContext, INodeDirectory currentINode) {
// this diff has been deleted
QuotaCounts counts = new QuotaCounts.Builder().build();
- counts.add(diff.destroyDeletedList(bsps, collectedBlocks, removedINodes,
- null));
+ counts.add(diff.destroyDeletedList(reclaimContext));
INodeDirectoryAttributes snapshotINode = getSnapshotINode();
if (snapshotINode != null && snapshotINode.getAclFeature() != null) {
AclStorage.removeAclFeature(snapshotINode.getAclFeature());
@@ -412,25 +400,24 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
* Destroy a subtree under a DstReference node.
*/
public static void destroyDstSubtree(
- final BlockStoragePolicySuite bsps, INode inode, final int snapshot,
- final int prior, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) throws QuotaExceededException {
+ INode.ReclaimContext reclaimContext, INode inode, final int snapshot,
+ final int prior) throws QuotaExceededException {
Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
if (inode.isReference()) {
if (inode instanceof INodeReference.WithName
&& snapshot != Snapshot.CURRENT_STATE_ID) {
// this inode has been renamed before the deletion of the DstReference
// subtree
- inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes,
- removedUCFiles);
+ inode.cleanSubtree(reclaimContext,
+ snapshot, prior);
} else {
// for DstReference node, continue this process to its subtree
- destroyDstSubtree(bsps, inode.asReference().getReferredINode(), snapshot,
- prior, collectedBlocks, removedINodes, removedUCFiles);
+ destroyDstSubtree(reclaimContext,
+ inode.asReference().getReferredINode(), snapshot,
+ prior);
}
} else if (inode.isFile()) {
- inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes,
- removedUCFiles);
+ inode.cleanSubtree(reclaimContext, snapshot, prior);
} else if (inode.isDirectory()) {
Map<INode, INode> excludedNodes = null;
INodeDirectory dir = inode.asDirectory();
@@ -444,21 +431,19 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
}
if (snapshot != Snapshot.CURRENT_STATE_ID) {
- diffList.deleteSnapshotDiff(bsps, snapshot, prior, dir, collectedBlocks,
- removedINodes);
+ diffList.deleteSnapshotDiff(reclaimContext,
+ snapshot, prior, dir);
}
priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
- priorDiff.diff.destroyCreatedList(bsps, dir, collectedBlocks,
- removedINodes, removedUCFiles);
+ priorDiff.diff.destroyCreatedList(reclaimContext, dir);
}
}
for (INode child : inode.asDirectory().getChildrenList(prior)) {
if (excludedNodes != null && excludedNodes.containsKey(child)) {
continue;
}
- destroyDstSubtree(bsps, child, snapshot, prior, collectedBlocks,
- removedINodes, removedUCFiles);
+ destroyDstSubtree(reclaimContext, child, snapshot, prior);
}
}
}
@@ -466,18 +451,15 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
/**
* Clean an inode while we move it from the deleted list of post to the
* deleted list of prior.
- * @param bsps The block storage policy suite.
+ * @param reclaimContext blocks and inodes that need to be reclaimed
* @param inode The inode to clean.
* @param post The post snapshot.
* @param prior The id of the prior snapshot.
- * @param collectedBlocks Used to collect blocks for later deletion.
- * @param removedUCFiles
* @return Quota usage update.
*/
private static QuotaCounts cleanDeletedINode(
- final BlockStoragePolicySuite bsps, INode inode, final int post, final int prior,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ INode.ReclaimContext reclaimContext, INode inode, final int post,
+ final int prior) {
QuotaCounts counts = new QuotaCounts.Builder().build();
Deque<INode> queue = new ArrayDeque<INode>();
queue.addLast(inode);
@@ -486,15 +468,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
if (topNode instanceof INodeReference.WithName) {
INodeReference.WithName wn = (INodeReference.WithName) topNode;
if (wn.getLastSnapshotId() >= post) {
- wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes,
- removedUCFiles);
+ wn.cleanSubtree(reclaimContext, post, prior);
}
// For DstReference node, since the node is not in the created list of
// prior, we should treat it as regular file/dir
} else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
INodeFile file = topNode.asFile();
- counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
- collectedBlocks, removedINodes));
+ counts.add(file.getDiffs().deleteSnapshotDiff(reclaimContext, post, prior, file));
} else if (topNode.isDirectory()) {
INodeDirectory dir = topNode.asDirectory();
ChildrenDiff priorChildrenDiff = null;
@@ -505,8 +485,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorChildrenDiff = priorDiff.getChildrenDiff();
- counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
- collectedBlocks, removedINodes, removedUCFiles));
+ counts.add(priorChildrenDiff.destroyCreatedList(reclaimContext,
+ dir));
}
}
@@ -636,13 +616,11 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
return child;
}
- public void clear(BlockStoragePolicySuite bsps, INodeDirectory currentINode,
- final BlocksMapUpdateInfo collectedBlocks, final List<INode>
- removedINodes, final List<Long> removedUCFiles) {
+ public void clear(
+ INode.ReclaimContext reclaimContext, INodeDirectory currentINode) {
// destroy its diff list
for (DirectoryDiff diff : diffs) {
- diff.destroyDiffAndCollectBlocks(bsps, currentINode, collectedBlocks,
- removedINodes);
+ diff.destroyDiffAndCollectBlocks(reclaimContext, currentINode);
}
diffs.clear();
}
@@ -729,9 +707,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
}
public QuotaCounts cleanDirectory(
- final BlockStoragePolicySuite bsps, final INodeDirectory currentINode,
- final int snapshot, int prior, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, List<Long> removedUCFiles) {
+ INode.ReclaimContext reclaimContext, final INodeDirectory currentINode,
+ final int snapshot, int prior) {
QuotaCounts counts = new QuotaCounts.Builder().build();
Map<INode, INode> priorCreated = null;
Map<INode, INode> priorDeleted = null;
@@ -740,11 +717,11 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
// delete everything in created list
DirectoryDiff lastDiff = diffs.getLast();
if (lastDiff != null) {
- counts.add(lastDiff.diff.destroyCreatedList(bsps, currentINode,
- collectedBlocks, removedINodes, removedUCFiles));
+ counts.add(lastDiff.diff.destroyCreatedList(reclaimContext,
+ currentINode));
}
- counts.add(currentINode.cleanSubtreeRecursively(bsps, snapshot, prior,
- collectedBlocks, removedINodes, removedUCFiles, priorDeleted));
+ counts.add(currentINode.cleanSubtreeRecursively(reclaimContext,
+ snapshot, prior, priorDeleted));
} else {
// update prior
prior = getDiffs().updatePrior(snapshot, prior);
@@ -760,10 +737,10 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
}
}
- counts.add(getDiffs().deleteSnapshotDiff(bsps, snapshot, prior,
- currentINode, collectedBlocks, removedINodes));
- counts.add(currentINode.cleanSubtreeRecursively(bsps, snapshot, prior,
- collectedBlocks, removedINodes, removedUCFiles, priorDeleted));
+ counts.add(getDiffs().deleteSnapshotDiff(reclaimContext, snapshot, prior,
+ currentINode));
+ counts.add(currentINode.cleanSubtreeRecursively(reclaimContext,
+ snapshot, prior, priorDeleted));
// check priorDiff again since it may be created during the diff deletion
if (prior != Snapshot.NO_SNAPSHOT_ID) {
@@ -779,8 +756,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
for (INode cNode : priorDiff.getChildrenDiff().getList(
ListType.CREATED)) {
if (priorCreated.containsKey(cNode)) {
- counts.add(cNode.cleanSubtree(bsps, snapshot, Snapshot.NO_SNAPSHOT_ID,
- collectedBlocks, removedINodes, removedUCFiles));
+ counts.add(cNode.cleanSubtree(reclaimContext,
+ snapshot, Snapshot.NO_SNAPSHOT_ID));
}
}
}
@@ -796,8 +773,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
for (INode dNode : priorDiff.getChildrenDiff().getList(
ListType.DELETED)) {
if (priorDeleted == null || !priorDeleted.containsKey(dNode)) {
- counts.add(cleanDeletedINode(bsps, dNode, snapshot, prior,
- collectedBlocks, removedINodes, removedUCFiles));
+ counts.add(cleanDeletedINode(reclaimContext,
+ dNode, snapshot, prior));
}
}
}
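One detail worth calling out in cleanDeletedINode above: the subtree walk is iterative, draining an ArrayDeque instead of recursing, so deleting a snapshot over a deep directory tree does not grow the Java call stack. Below is a stripped-down sketch of that control flow only; INodeSketch and its kind checks are illustrative stand-ins, not the real INode API.

import java.util.ArrayDeque;
import java.util.Deque;

class SubtreeWalkSketch {
  interface INodeSketch {
    boolean isFile();
    boolean isDirectory();
    Iterable<INodeSketch> children();
  }

  // Mirrors the shape of cleanDeletedINode: pop a node, dispatch on its
  // kind, and push directory children back onto the queue.
  static void cleanDeleted(INodeSketch root) {
    Deque<INodeSketch> queue = new ArrayDeque<>();
    queue.addLast(root);
    while (!queue.isEmpty()) {
      INodeSketch top = queue.pollFirst();
      if (top.isFile()) {
        // real code: file.getDiffs().deleteSnapshotDiff(reclaimContext, ...)
      } else if (top.isDirectory()) {
        // real code: destroy the prior diff's created list, then keep
        // walking the children without deep recursion
        for (INodeSketch child : top.children()) {
          queue.addLast(child);
        }
      }
    }
  }
}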
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
index 931f7f0..dd8be82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
@@ -20,10 +20,8 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
-import java.util.List;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -82,13 +80,12 @@ public class FileDiff extends
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
- BlockStoragePolicySuite bsps, INodeFile currentINode,
- FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ INode.ReclaimContext reclaimContext, INodeFile currentINode,
+ FileDiff posterior) {
FileWithSnapshotFeature sf = currentINode.getFileWithSnapshotFeature();
assert sf != null : "FileWithSnapshotFeature is null";
- return sf.updateQuotaAndCollectBlocks(
- bsps, currentINode, posterior, collectedBlocks, removedINodes);
+ return sf.updateQuotaAndCollectBlocks(reclaimContext,
+ currentINode, posterior);
}
@Override
@@ -112,11 +109,10 @@ public class FileDiff extends
}
@Override
- QuotaCounts destroyDiffAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile currentINode,
- BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+ QuotaCounts destroyDiffAndCollectBlocks(
+ INode.ReclaimContext reclaimContext, INodeFile currentINode) {
return currentINode.getFileWithSnapshotFeature()
- .updateQuotaAndCollectBlocks(bsps, currentINode, this, collectedBlocks,
- removedINodes);
+ .updateQuotaAndCollectBlocks(reclaimContext, currentINode, this);
}
public void destroyAndCollectSnapshotBlocks(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 5c9e121..0788e75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -23,7 +23,6 @@ import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -97,16 +96,14 @@ public class FileDiffList extends
* up to the file length of the latter.
* Collect unused blocks of the removed snapshot.
*/
- void combineAndCollectSnapshotBlocks(BlockStoragePolicySuite bsps, INodeFile file,
- FileDiff removed,
- BlocksMapUpdateInfo collectedBlocks,
- List<INode> removedINodes) {
+ void combineAndCollectSnapshotBlocks(
+ INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
BlockInfoContiguous[] removedBlocks = removed.getBlocks();
if(removedBlocks == null) {
FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
assert sf != null : "FileWithSnapshotFeature is null";
if(sf.isCurrentFileDeleted())
- sf.collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
+ sf.collectBlocksAndClear(reclaimContext, file);
return;
}
int p = getPrior(removed.getSnapshotId(), true);
@@ -139,7 +136,7 @@ public class FileDiffList extends
// Collect the remaining blocks of the file, ignoring truncate block
for(;i < removedBlocks.length; i++) {
if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
- collectedBlocks.addDeleteBlock(removedBlocks[i]);
+ reclaimContext.collectedBlocks().addDeleteBlock(removedBlocks[i]);
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 7d884d3..3bb549b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -23,10 +23,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.AclStorage;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
@@ -118,22 +116,21 @@ public class FileWithSnapshotFeature implements INode.Feature {
return (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
}
- public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
+ public QuotaCounts cleanFile(INode.ReclaimContext reclaimContext,
final INodeFile file, final int snapshotId,
- int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ int priorSnapshotId) {
if (snapshotId == Snapshot.CURRENT_STATE_ID) {
// delete the current file while the file has snapshot feature
if (!isCurrentFileDeleted()) {
file.recordModification(priorSnapshotId);
deleteCurrentFile();
}
- collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
+ collectBlocksAndClear(reclaimContext, file);
return new QuotaCounts.Builder().build();
} else { // delete the snapshot
priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
- return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
- collectedBlocks, removedINodes);
+ return diffs.deleteSnapshotDiff(reclaimContext,
+ snapshotId, priorSnapshotId, file);
}
}
@@ -141,14 +138,12 @@ public class FileWithSnapshotFeature implements INode.Feature {
this.diffs.clear();
}
- public QuotaCounts updateQuotaAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile file,
- FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
-
+ public QuotaCounts updateQuotaAndCollectBlocks(
+ INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
byte storagePolicyID = file.getStoragePolicyID();
BlockStoragePolicy bsp = null;
if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
- bsp = bsps.getPolicy(file.getStoragePolicyID());
+ bsp = reclaimContext.storagePolicySuite().getPolicy(file.getStoragePolicyID());
}
@@ -180,8 +175,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
}
}
- getDiffs().combineAndCollectSnapshotBlocks(
- bsps, file, removed, collectedBlocks, removedINodes);
+ getDiffs().combineAndCollectSnapshotBlocks(reclaimContext, file, removed);
QuotaCounts current = file.storagespaceConsumed(bsp);
oldCounts.subtract(current);
@@ -192,11 +186,11 @@ public class FileWithSnapshotFeature implements INode.Feature {
* If some blocks at the end of the block list no longer belong to
* any inode, collect them and update the block list.
*/
- public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
- final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
+ public void collectBlocksAndClear(
+ INode.ReclaimContext reclaimContext, final INodeFile file) {
// check if everything is deleted.
if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
- file.destroyAndCollectBlocks(bsps, info, removedINodes, null);
+ file.destroyAndCollectBlocks(reclaimContext);
return;
}
// find max file size.
@@ -212,8 +206,9 @@ public class FileWithSnapshotFeature implements INode.Feature {
FileDiff last = diffs.getLast();
BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
if(snapshotBlocks == null)
- file.collectBlocksBeyondMax(max, info);
+ file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks());
else
- file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
+ file.collectBlocksBeyondSnapshot(snapshotBlocks,
+ reclaimContext.collectedBlocks());
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 27d2986..bc9544b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -235,8 +235,10 @@ public class SnapshotManager implements SnapshotStatsMXBean {
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
throws IOException {
INodeDirectory srcRoot = getSnapshottableRoot(iip);
- srcRoot.removeSnapshot(fsdir.getBlockStoragePolicySuite(), snapshotName,
- collectedBlocks, removedINodes);
+ srcRoot.removeSnapshot(
+ new INode.ReclaimContext(fsdir.getBlockStoragePolicySuite(),
+ collectedBlocks, removedINodes, null),
+ snapshotName);
numSnapshots.getAndDecrement();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51812970/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
index 977b07c..831d65d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
@@ -62,8 +62,9 @@ public class TestFileWithSnapshotFeature {
INode.BlocksMapUpdateInfo collectedBlocks = mock(
INode.BlocksMapUpdateInfo.class);
ArrayList<INode> removedINodes = new ArrayList<>();
- QuotaCounts counts = sf.updateQuotaAndCollectBlocks(
- bsps, file, diff, collectedBlocks, removedINodes);
+ INode.ReclaimContext ctx = new INode.ReclaimContext(
+ bsps, collectedBlocks, removedINodes, null);
+ QuotaCounts counts = sf.updateQuotaAndCollectBlocks(ctx, file, diff);
Assert.assertEquals(0, counts.getStorageSpace());
Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
@@ -78,8 +79,7 @@ public class TestFileWithSnapshotFeature {
.thenReturn(Lists.newArrayList(SSD));
when(bsp.chooseStorageTypes(REPL_3))
.thenReturn(Lists.newArrayList(DISK));
- counts = sf.updateQuotaAndCollectBlocks(
- bsps, file, diff, collectedBlocks, removedINodes);
+ counts = sf.updateQuotaAndCollectBlocks(ctx, file, diff);
Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE,
counts.getStorageSpace());
Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
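Taken together, the hunks above are one mechanical refactoring: the four values that every snapshot-deletion method used to thread through its signature (the BlockStoragePolicySuite, the BlocksMapUpdateInfo of blocks to delete, the list of removed inodes, and the list of removed under-construction file IDs) now travel in a single INode.ReclaimContext parameter object, constructed once as the SnapshotManager and test hunks show. A minimal sketch of that shape, with placeholder types standing in for the real HDFS classes:

import java.util.ArrayList;
import java.util.List;

class ReclaimContextSketch {
  // Placeholders for the HDFS types the context aggregates.
  static class BlockStoragePolicySuite {}
  static class BlocksMapUpdateInfo {}
  static class INode {}

  static final class ReclaimContext {
    private final BlockStoragePolicySuite bsps;
    private final BlocksMapUpdateInfo collectedBlocks;
    final List<INode> removedINodes;   // accessed as a field in the patch
    final List<Long> removedUCFiles;

    ReclaimContext(BlockStoragePolicySuite bsps,
        BlocksMapUpdateInfo collectedBlocks,
        List<INode> removedINodes, List<Long> removedUCFiles) {
      this.bsps = bsps;
      this.collectedBlocks = collectedBlocks;
      this.removedINodes = removedINodes;
      this.removedUCFiles = removedUCFiles;
    }
    BlockStoragePolicySuite storagePolicySuite() { return bsps; }
    BlocksMapUpdateInfo collectedBlocks() { return collectedBlocks; }
  }

  // Before: cleanSubtree(bsps, snapshot, prior, collectedBlocks,
  //                      removedINodes, removedUCFiles)
  // After:  cleanSubtree(reclaimContext, snapshot, prior)
  static void cleanSubtree(ReclaimContext ctx, int snapshot, int prior) {
    // ... recurse with the same ctx, so every callee sees one object ...
  }

  public static void main(String[] args) {
    ReclaimContext ctx = new ReclaimContext(new BlockStoragePolicySuite(),
        new BlocksMapUpdateInfo(), new ArrayList<INode>(), null);
    cleanSubtree(ctx, /*snapshot*/ 2, /*prior*/ 1);
  }
}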
[06/36] hadoop git commit: YARN-1912. ResourceLocalizer started
without any jvm memory control. Contributed by Masatake Iwasaki
Posted by zj...@apache.org.
YARN-1912. ResourceLocalizer started without any jvm memory control.
Contributed by Masatake Iwasaki
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20471e47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20471e47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20471e47
Branch: refs/heads/YARN-2928
Commit: 20471e476bdc10fc961edf90f2885396c6281b3e
Parents: 951d7fc
Author: Xuan <xg...@apache.org>
Authored: Fri May 8 20:01:21 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:10 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../apache/hadoop/yarn/conf/YarnConfiguration.java | 7 +++++++
.../server/nodemanager/LinuxContainerExecutor.java | 1 +
.../nodemanager/WindowsSecureContainerExecutor.java | 3 ++-
.../localizer/ContainerLocalizer.java | 13 ++++++++++++-
.../TestLinuxContainerExecutorWithMocks.java | 15 ++++++++-------
6 files changed, 33 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/20471e47/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1bff9c7..2be7604 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -288,6 +288,9 @@ Release 2.8.0 - UNRELEASED
YARN-3271. FairScheduler: Move tests related to max-runnable-apps from
TestFairScheduler to TestAppRunnability. (nijel via kasha)
+ YARN-1912. ResourceLocalizer started without any jvm memory control.
+ (Masatake Iwasaki via xgong)
+
OPTIMIZATIONS
YARN-3339. TestDockerContainerExecutor should pull a single image and not
http://git-wip-us.apache.org/repos/asf/hadoop/blob/20471e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 32cc9f1..4c1dea0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1025,6 +1025,13 @@ public class YarnConfiguration extends Configuration {
public static final String NM_HEALTH_CHECK_SCRIPT_OPTS =
NM_PREFIX + "health-checker.script.opts";
+ /** The JVM options used when forking the ContainerLocalizer process
+ from the container executor. */
+ public static final String NM_CONTAINER_LOCALIZER_JAVA_OPTS_KEY =
+ NM_PREFIX + "container-localizer.java.opts";
+ public static final String NM_CONTAINER_LOCALIZER_JAVA_OPTS_DEFAULT =
+ "-Xmx256m";
+
/** The Docker image name(For DockerContainerExecutor).*/
public static final String NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME =
NM_PREFIX + "docker-container-executor.image-name";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/20471e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index f8da958..16068d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -244,6 +244,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
if (javaLibPath != null) {
command.add("-Djava.library.path=" + javaLibPath);
}
+ command.addAll(ContainerLocalizer.getJavaOpts(getConf()));
buildMainArgs(command, user, appId, locId, nmAddr, localDirs);
String[] commandArray = command.toArray(new String[command.size()]);
ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/20471e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
index b7bec5f..619b845 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
@@ -697,7 +697,8 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
if (javaLibPath != null) {
command.add("-Djava.library.path=" + javaLibPath);
}
-
+ command.addAll(ContainerLocalizer.getJavaOpts(getConf()));
+
ContainerLocalizer.buildMainArgs(command, user, appId, locId, nmAddr,
localDirs);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/20471e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index 32e3553..f82f894 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.SerializedException;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -312,7 +313,17 @@ public class ContainerLocalizer {
status.addAllResources(currentResources);
return status;
}
-
+
+ /**
+ * Returns the JVM options used to launch the resource localizer.
+ * @param conf the configuration properties to launch the resource localizer.
+ */
+ public static List<String> getJavaOpts(Configuration conf) {
+ String opts = conf.get(YarnConfiguration.NM_CONTAINER_LOCALIZER_JAVA_OPTS_KEY,
+ YarnConfiguration.NM_CONTAINER_LOCALIZER_JAVA_OPTS_DEFAULT);
+ return Arrays.asList(opts.split(" "));
+ }
+
/**
* Adds the ContainerLocalizer arguments for a {@link ShellCommandExecutor},
* as expected by ContainerLocalizer.main
http://git-wip-us.apache.org/repos/asf/hadoop/blob/20471e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index 98ab8e0..dce2cd3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -187,19 +187,20 @@ public class TestLinuxContainerExecutorWithMocks {
try {
mockExec.startLocalizer(nmPrivateCTokensPath, address, "test", "application_0", "12345", dirsHandler);
List<String> result=readMockParams();
- Assert.assertEquals(result.size(), 17);
+ Assert.assertEquals(result.size(), 18);
Assert.assertEquals(result.get(0), YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER);
Assert.assertEquals(result.get(1), "test");
Assert.assertEquals(result.get(2), "0" );
Assert.assertEquals(result.get(3),"application_0" );
Assert.assertEquals(result.get(4), "/bin/nmPrivateCTokensPath");
Assert.assertEquals(result.get(8), "-classpath" );
- Assert.assertEquals(result.get(11),"org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer" );
- Assert.assertEquals(result.get(12), "test");
- Assert.assertEquals(result.get(13), "application_0");
- Assert.assertEquals(result.get(14),"12345" );
- Assert.assertEquals(result.get(15),"localhost" );
- Assert.assertEquals(result.get(16),"8040" );
+ Assert.assertEquals(result.get(11), "-Xmx256m" );
+ Assert.assertEquals(result.get(12),"org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer" );
+ Assert.assertEquals(result.get(13), "test");
+ Assert.assertEquals(result.get(14), "application_0");
+ Assert.assertEquals(result.get(15),"12345" );
+ Assert.assertEquals(result.get(16),"localhost" );
+ Assert.assertEquals(result.get(17),"8040" );
} catch (InterruptedException e) {
LOG.error("Error:"+e.getMessage(),e);
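The net effect of this patch is one new knob, yarn.nodemanager.container-localizer.java.opts (default -Xmx256m): its value is split on spaces and spliced into the java command line that launches the ContainerLocalizer, which is why the mock test above now expects 18 arguments with -Xmx256m at index 11. A self-contained sketch of that consumption path, using a plain Map in place of the real Configuration/YarnConfiguration classes:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

class LocalizerOptsSketch {
  static final String OPTS_KEY =
      "yarn.nodemanager.container-localizer.java.opts";
  static final String OPTS_DEFAULT = "-Xmx256m";

  // Mirrors ContainerLocalizer.getJavaOpts: read the knob, split on spaces.
  static List<String> getJavaOpts(Map<String, String> conf) {
    String opts =
        conf.containsKey(OPTS_KEY) ? conf.get(OPTS_KEY) : OPTS_DEFAULT;
    return Arrays.asList(opts.split(" "));
  }

  public static void main(String[] args) {
    List<String> command = new ArrayList<>(
        Arrays.asList("java", "-classpath", "<cp>"));
    command.addAll(getJavaOpts(Collections.<String, String>emptyMap()));
    command.add("org.apache.hadoop.yarn.server.nodemanager"
        + ".containermanager.localizer.ContainerLocalizer");
    System.out.println(command);  // [java, -classpath, <cp>, -Xmx256m, ...]
  }
}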
[19/36] hadoop git commit: HADOOP-11951. test-patch should give
better info about failures to handle dev-support updates without resetrepo
option (Sean Busbey via aw)
Posted by zj...@apache.org.
HADOOP-11951. test-patch should give better info about failures to handle dev-support updates without resetrepo option (Sean Busbey via aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab597d09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab597d09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab597d09
Branch: refs/heads/YARN-2928
Commit: ab597d09e32af68e0b80d808ccdeb257450f0b35
Parents: c08d736
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon May 11 11:45:47 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:13 2015 -0700
----------------------------------------------------------------------
dev-support/test-patch.sh | 8 ++++++++
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
2 files changed, 11 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab597d09/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index a3ce567..11fd9a9 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1446,6 +1446,14 @@ function check_reexec
fi
big_console_header "dev-support patch detected"
+
+ if [[ ${RESETREPO} == false ]]; then
+ ((RESULT = RESULT + 1))
+ hadoop_debug "can't destructively change the working directory. run with '--resetrepo' please. :("
+ add_jira_table -1 dev-support "Couldn't test dev-support changes because we aren't configured to destructively change the working directory."
+ return
+ fi
+
printf "\n\nRe-executing against patched versions to test.\n\n"
apply_patch_file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab597d09/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 929ada1..f237b85 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -656,6 +656,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-11928. Test-patch check for @author tags incorrectly flags
removal of @author tags (Kengo Seki via aw)
+ HADOOP-11951. test-patch should give better info about failures to handle
+ dev-support updates without resetrepo option (Sean Busbey via aw)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
[30/36] hadoop git commit: HDFS-8362. Java Compilation Error in
TestHdfsConfigFields.java (Contributed by Arshad Mohammad)
Posted by zj...@apache.org.
HDFS-8362. Java Compilation Error in TestHdfsConfigFields.java (Contributed by Arshad Mohammad)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d433b1bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d433b1bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d433b1bf
Branch: refs/heads/YARN-2928
Commit: d433b1bfc06eac7c4604bd28f7b47715115daf87
Parents: 39b40d9
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue May 12 12:09:13 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:26 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d433b1bf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b67caed..7cff8d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -738,6 +738,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8351. Remove namenode -finalize option from document. (aajisaka)
+ HDFS-8362. Java Compilation Error in TestHdfsConfigFields.java
+ (Arshad Mohammad via vinayakumarb)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d433b1bf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index 0e75d81..a1f8a3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.tools;
+package org.apache.hadoop.tools;
import java.util.HashSet;
[23/36] hadoop git commit: Moved YARN-3434. (Interaction between
reservations and userlimit can result in significant ULF violation.) From
2.8.0 to 2.7.1
Posted by zj...@apache.org.
Moved YARN-3434. (Interaction between reservations and userlimit can result in significant ULF violation.) From 2.8.0 to 2.7.1
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa3e32df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa3e32df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa3e32df
Branch: refs/heads/YARN-2928
Commit: aa3e32dfa80f2fab6bed09ce8cadb1fa8f688dd3
Parents: d7597a2
Author: Wangda Tan <wa...@apache.org>
Authored: Mon May 11 15:21:35 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:14 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa3e32df/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6605632..0a1b853 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -380,9 +380,6 @@ Release 2.8.0 - UNRELEASED
YARN-3495. Confusing log generated by FairScheduler.
(Brahma Reddy Battula via ozawa)
- YARN-3434. Interaction between reservations and userlimit can result in
- significant ULF violation (tgraves)
-
YARN-3387. Previous AM's container completed status couldn't pass to current
AM if AM and RM restarted during the same time. (sandflee via jianhe)
@@ -535,6 +532,9 @@ Release 2.7.1 - UNRELEASED
YARN-3476. Nodemanager can fail to delete local logs if log aggregation
fails (Rohith via jlowe)
+ YARN-3434. Interaction between reservations and userlimit can result in
+ significant ULF violation (tgraves)
+
Release 2.7.0 - 2015-04-20
INCOMPATIBLE CHANGES
[31/36] hadoop git commit: HDFS-8255. Rename getBlockReplication to
getPreferredBlockReplication. (Contributed by Zhe Zhang)
Posted by zj...@apache.org.
HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication. (Contributed by Zhe Zhang)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc8c120b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc8c120b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc8c120b
Branch: refs/heads/YARN-2928
Commit: dc8c120b75e869ca7d3d24d136465bf6ad1295a7
Parents: 65935b0
Author: yliu <yl...@apache.org>
Authored: Tue May 12 21:29:22 2015 +0800
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:27 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../server/blockmanagement/BlockCollection.java | 2 +-
.../blockmanagement/BlockInfoContiguous.java | 2 +-
.../server/blockmanagement/BlockManager.java | 16 ++++++-------
.../blockmanagement/DecommissionManager.java | 10 ++++----
.../hdfs/server/namenode/FSDirAttrOp.java | 4 ++--
.../hdfs/server/namenode/FSDirConcatOp.java | 4 ++--
.../hdfs/server/namenode/FSDirectory.java | 4 ++--
.../hdfs/server/namenode/FSEditLogLoader.java | 7 +++---
.../hdfs/server/namenode/FSNamesystem.java | 4 ++--
.../hadoop/hdfs/server/namenode/INodeFile.java | 8 +++----
.../hdfs/server/namenode/NamenodeFsck.java | 9 ++++---
.../snapshot/FileWithSnapshotFeature.java | 5 ++--
.../blockmanagement/TestBlockManager.java | 6 ++---
.../blockmanagement/TestReplicationPolicy.java | 4 ++--
.../snapshot/TestFileWithSnapshotFeature.java | 2 +-
.../namenode/snapshot/TestSnapshotDeletion.java | 4 ++--
.../snapshot/TestSnapshotReplication.java | 25 ++++++++++++--------
18 files changed, 66 insertions(+), 53 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7cff8d4..cd477af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -543,6 +543,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8357. Consolidate parameters of INode.CleanSubtree() into a parameter
object. (Li Lu via wheat9)
+ HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication.
+ (Contributed by Zhe Zhang)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e9baf85..c0a959c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -58,7 +58,7 @@ public interface BlockCollection {
* Get block replication for the collection
* @return block replication value
*/
- public short getBlockReplication();
+ public short getPreferredBlockReplication();
/**
* @return the storage policy ID.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index df27882..1ba3536 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -380,7 +380,7 @@ public class BlockInfoContiguous extends Block
if(isComplete()) {
BlockInfoContiguousUnderConstruction ucBlock =
new BlockInfoContiguousUnderConstruction(this,
- getBlockCollection().getBlockReplication(), s, targets);
+ getBlockCollection().getPreferredBlockReplication(), s, targets);
ucBlock.setBlockCollection(getBlockCollection());
return ucBlock;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9d9a631..ab2607b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1173,7 +1173,7 @@ public class BlockManager {
return;
}
short expectedReplicas =
- b.corrupted.getBlockCollection().getBlockReplication();
+ b.corrupted.getBlockCollection().getPreferredBlockReplication();
// Add replica to the data-node if it is not already there
if (storageInfo != null) {
@@ -1348,7 +1348,7 @@ public class BlockManager {
continue;
}
- requiredReplication = bc.getBlockReplication();
+ requiredReplication = bc.getPreferredBlockReplication();
// get a source data-node
containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1432,7 +1432,7 @@ public class BlockManager {
rw.targets = null;
continue;
}
- requiredReplication = bc.getBlockReplication();
+ requiredReplication = bc.getPreferredBlockReplication();
// do not schedule more if enough replicas is already pending
NumberReplicas numReplicas = countNodes(block);
@@ -2584,7 +2584,7 @@ public class BlockManager {
}
// handle underReplication/overReplication
- short fileReplication = bc.getBlockReplication();
+ short fileReplication = bc.getPreferredBlockReplication();
if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
neededReplications.remove(storedBlock, numCurrentReplica,
num.decommissionedAndDecommissioning(), fileReplication);
@@ -2815,7 +2815,7 @@ public class BlockManager {
}
// calculate current replication
short expectedReplication =
- block.getBlockCollection().getBlockReplication();
+ block.getBlockCollection().getPreferredBlockReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
// add to under-replicated queue if need to be
@@ -3316,7 +3316,7 @@ public class BlockManager {
while(it.hasNext()) {
final Block block = it.next();
BlockCollection bc = blocksMap.getBlockCollection(block);
- short expectedReplication = bc.getBlockReplication();
+ short expectedReplication = bc.getPreferredBlockReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
if (numCurrentReplica > expectedReplication) {
@@ -3430,7 +3430,7 @@ public class BlockManager {
* process it as an over replicated block.
*/
public void checkReplication(BlockCollection bc) {
- final short expected = bc.getBlockReplication();
+ final short expected = bc.getPreferredBlockReplication();
for (Block block : bc.getBlocks()) {
final NumberReplicas n = countNodes(block);
if (isNeededReplication(block, expected, n.liveReplicas())) {
@@ -3469,7 +3469,7 @@ public class BlockManager {
*/
private int getReplication(Block block) {
final BlockCollection bc = blocksMap.getBlockCollection(block);
- return bc == null? 0: bc.getBlockReplication();
+ return bc == null? 0: bc.getPreferredBlockReplication();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 5c9aec7..5f7366e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -251,7 +251,7 @@ public class DecommissionManager {
private boolean isSufficientlyReplicated(BlockInfoContiguous block,
BlockCollection bc,
NumberReplicas numberReplicas) {
- final int numExpected = bc.getBlockReplication();
+ final int numExpected = bc.getPreferredBlockReplication();
final int numLive = numberReplicas.liveReplicas();
if (!blockManager.isNeededReplication(block, numExpected, numLive)) {
// Block doesn't need replication. Skip.
@@ -288,7 +288,7 @@ public class DecommissionManager {
DatanodeDescriptor srcNode, NumberReplicas num,
Iterable<DatanodeStorageInfo> storages) {
int curReplicas = num.liveReplicas();
- int curExpectedReplicas = bc.getBlockReplication();
+ int curExpectedReplicas = bc.getPreferredBlockReplication();
StringBuilder nodeList = new StringBuilder();
for (DatanodeStorageInfo storage : storages) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
@@ -564,8 +564,8 @@ public class DecommissionManager {
// Schedule under-replicated blocks for replication if not already
// pending
- if (blockManager.isNeededReplication(block, bc.getBlockReplication(),
- liveReplicas)) {
+ if (blockManager.isNeededReplication(block,
+ bc.getPreferredBlockReplication(), liveReplicas)) {
if (!blockManager.neededReplications.contains(block) &&
blockManager.pendingReplications.getNumReplicas(block) == 0 &&
namesystem.isPopulatingReplQueues()) {
@@ -573,7 +573,7 @@ public class DecommissionManager {
blockManager.neededReplications.add(block,
curReplicas,
num.decommissionedAndDecommissioning(),
- bc.getBlockReplication());
+ bc.getPreferredBlockReplication());
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index d01e2c8..879738d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -387,7 +387,7 @@ public class FSDirAttrOp {
return null;
}
INodeFile file = inode.asFile();
- final short oldBR = file.getBlockReplication();
+ final short oldBR = file.getPreferredBlockReplication();
// before setFileReplication, check for increasing block replication.
// if replication > oldBR, then newBR == replication.
@@ -399,7 +399,7 @@ public class FSDirAttrOp {
file.setFileReplication(replication, iip.getLatestSnapshotId());
- final short newBR = file.getBlockReplication();
+ final short newBR = file.getPreferredBlockReplication();
// check newBR < oldBR case.
if (newBR < oldBR) {
long dsDelta = file.storagespaceConsumed(null).getStorageSpace() / newBR;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 31a6af7..3f22f51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -167,9 +167,9 @@ class FSDirConcatOp {
private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
INodeFile target, INodeFile[] srcList) {
QuotaCounts deltas = new QuotaCounts.Builder().build();
- final short targetRepl = target.getBlockReplication();
+ final short targetRepl = target.getPreferredBlockReplication();
for (INodeFile src : srcList) {
- short srcRepl = src.getBlockReplication();
+ short srcRepl = src.getPreferredBlockReplication();
long fileSize = src.computeFileSize();
if (targetRepl != srcRepl) {
deltas.addStorageSpace(fileSize * (targetRepl - srcRepl));
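
The quota arithmetic in computeQuotaDeltas is worth spelling out: concat changes the storage-space quota only for sources whose preferred replication differs from the target's, and each delta is the source's file size times the replication difference. A small self-contained illustration with hypothetical numbers, not values from the commit:

// One 128 MB source at replication 2 concatenated into a replication-3 target
// consumes 128 MB more raw storage; a source already at replication 3 adds none.
public class ConcatQuotaDeltaDemo {
  public static void main(String[] args) {
    final short targetRepl = 3;                      // target.getPreferredBlockReplication()
    final short[] srcRepls = {2, 3};                 // per-source preferred replication
    final long[] srcSizes = {128L << 20, 64L << 20}; // per-source computeFileSize()
    long delta = 0;
    for (int i = 0; i < srcRepls.length; i++) {
      if (srcRepls[i] != targetRepl) {
        delta += srcSizes[i] * (targetRepl - srcRepls[i]);
      }
    }
    System.out.println("storage-space quota delta = " + delta + " bytes"); // 134217728
  }
}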
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index b289c39..c981626 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -489,7 +489,7 @@ public class FSDirectory implements Closeable {
// check quota limits and updated space consumed
updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
- fileINode.getBlockReplication(), true);
+ fileINode.getPreferredBlockReplication(), true);
// associate new last block for the file
BlockInfoContiguousUnderConstruction blockInfo =
@@ -546,7 +546,7 @@ public class FSDirectory implements Closeable {
// update space consumed
updateCount(iip, 0, -fileNode.getPreferredBlockSize(),
- fileNode.getBlockReplication(), true);
+ fileNode.getPreferredBlockReplication(), true);
return true;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 7964188..b7a4870 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -971,7 +971,7 @@ public class FSEditLogLoader {
}
// add the new block
BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
- newBlock, file.getBlockReplication());
+ newBlock, file.getPreferredBlockReplication());
fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);
fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
@@ -1050,13 +1050,14 @@ public class FSEditLogLoader {
// what about an old-version fsync() where fsync isn't called
// until several blocks in?
newBI = new BlockInfoContiguousUnderConstruction(
- newBlock, file.getBlockReplication());
+ newBlock, file.getPreferredBlockReplication());
} else {
// OP_CLOSE should add finalized blocks. This code path
// is only executed when loading edits written by prior
// versions of Hadoop. Current versions always log
// OP_ADD operations as each block is allocated.
- newBI = new BlockInfoContiguous(newBlock, file.getBlockReplication());
+ newBI = new BlockInfoContiguous(newBlock,
+ file.getPreferredBlockReplication());
}
fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9e30812..33aaa72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2106,7 +2106,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
// Add new truncateBlock into blocksMap and
// use oldBlock as a source for copy-on-truncate recovery
truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock,
- file.getBlockReplication());
+ file.getPreferredBlockReplication());
truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
truncatedBlockUC.setTruncateBlock(oldBlock);
file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
@@ -2807,7 +2807,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
final BlockInfoContiguous lastBlock = file.getLastBlock();
if (lastBlock != null) {
final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
- final short repl = file.getBlockReplication();
+ final short repl = file.getPreferredBlockReplication();
delta.addStorageSpace(diff * repl);
final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
.getPolicy(file.getStoragePolicyID());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3790c74..44f23bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -355,7 +355,7 @@ public class INodeFile extends INodeWithAdditionalFields
}
@Override // BlockCollection
- public short getBlockReplication() {
+ public short getPreferredBlockReplication() {
short max = getFileReplication(CURRENT_STATE_ID);
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) {
@@ -728,7 +728,7 @@ public class INodeFile extends INodeWithAdditionalFields
blocks = allBlocks;
}
- final short replication = getBlockReplication();
+ final short replication = getPreferredBlockReplication();
for (BlockInfoContiguous b : blocks) {
long blockSize = b.isComplete() ? b.getNumBytes() :
getPreferredBlockSize();
@@ -850,10 +850,10 @@ public class INodeFile extends INodeWithAdditionalFields
truncatedBytes -= bi.getNumBytes();
}
- delta.addStorageSpace(-truncatedBytes * getBlockReplication());
+ delta.addStorageSpace(-truncatedBytes * getPreferredBlockReplication());
if (bsps != null) {
List<StorageType> types = bsps.chooseStorageTypes(
- getBlockReplication());
+ getPreferredBlockReplication());
for (StorageType t : types) {
if (t.supportTypeQuota()) {
delta.addTypeSpace(t, -truncatedBytes);
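
This override is the heart of the rename: a file's preferred replication is the maximum of its current setting and any setting captured in a live snapshot, because snapshot readers still expect the replication that was in effect when the snapshot was taken. A standalone sketch of that max-over-snapshots rule, with hypothetical names standing in for FileWithSnapshotFeature:

import java.util.Arrays;
import java.util.List;

public class PreferredReplicationDemo {
  // current file replication, widened by any higher snapshot-recorded value
  static short preferredBlockReplication(short current, List<Short> snapshotRepls) {
    short max = current;
    for (short r : snapshotRepls) {
      if (r > max) {
        max = r;   // a snapshot captured a higher replication setting
      }
    }
    return max;
  }

  public static void main(String[] args) {
    // file created with replication 3, snapshotted, then reduced to 1:
    // blocks still keep 3 replicas until the snapshot is deleted
    System.out.println(
        preferredBlockReplication((short) 1, Arrays.asList((short) 3))); // 3
  }
}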
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 11e89c9..61f8fdb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -257,15 +257,18 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
NumberReplicas numberReplicas= bm.countNodes(block);
out.println("Block Id: " + blockId);
out.println("Block belongs to: "+iNode.getFullPathName());
- out.println("No. of Expected Replica: " + bc.getBlockReplication());
+ out.println("No. of Expected Replica: " +
+ bc.getPreferredBlockReplication());
out.println("No. of live Replica: " + numberReplicas.liveReplicas());
out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
- out.println("No. of stale Replica: " + numberReplicas.replicasOnStaleNodes());
+ out.println("No. of stale Replica: " +
+ numberReplicas.replicasOnStaleNodes());
out.println("No. of decommissioned Replica: "
+ numberReplicas.decommissioned());
out.println("No. of decommissioning Replica: "
+ numberReplicas.decommissioning());
- out.println("No. of corrupted Replica: " + numberReplicas.corruptReplicas());
+ out.println("No. of corrupted Replica: " +
+ numberReplicas.corruptReplicas());
//record datanodes that have corrupted block replica
Collection<DatanodeDescriptor> corruptionRecord = null;
if (bm.getCorruptReplicas(block) != null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 3bb549b..213c186 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -151,11 +151,12 @@ public class FileWithSnapshotFeature implements INode.Feature {
long oldStoragespace;
if (removed.snapshotINode != null) {
short replication = removed.snapshotINode.getFileReplication();
- short currentRepl = file.getBlockReplication();
+ short currentRepl = file.getPreferredBlockReplication();
if (replication > currentRepl) {
long oldFileSizeNoRep = currentRepl == 0
? file.computeFileSize(true, true)
- : oldCounts.getStorageSpace() / file.getBlockReplication();
+ : oldCounts.getStorageSpace() /
+ file.getPreferredBlockReplication();
oldStoragespace = oldFileSizeNoRep * replication;
oldCounts.setStorageSpace(oldStoragespace);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 1e09e19..58210c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -433,7 +433,7 @@ public class TestBlockManager {
private BlockInfoContiguous addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
BlockCollection bc = Mockito.mock(BlockCollection.class);
- Mockito.doReturn((short)3).when(bc).getBlockReplication();
+ Mockito.doReturn((short)3).when(bc).getPreferredBlockReplication();
BlockInfoContiguous blockInfo = blockOnNodes(blockId, nodes);
bm.blocksMap.addBlockCollection(blockInfo, bc);
@@ -740,7 +740,7 @@ public class TestBlockManager {
BlockInfoContiguous blockInfo =
new BlockInfoContiguous(block, (short) 3);
BlockCollection bc = Mockito.mock(BlockCollection.class);
- Mockito.doReturn((short) 3).when(bc).getBlockReplication();
+ Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
bm.blocksMap.addBlockCollection(blockInfo, bc);
return blockInfo;
}
@@ -750,7 +750,7 @@ public class TestBlockManager {
BlockInfoContiguousUnderConstruction blockInfo =
new BlockInfoContiguousUnderConstruction(block, (short) 3);
BlockCollection bc = Mockito.mock(BlockCollection.class);
- Mockito.doReturn((short) 3).when(bc).getBlockReplication();
+ Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
bm.blocksMap.addBlockCollection(blockInfo, bc);
return blockInfo;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 1d6dad8..f117ef7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1171,7 +1171,7 @@ public class TestReplicationPolicy {
// queue.
BlockInfoContiguousUnderConstruction info = new BlockInfoContiguousUnderConstruction(block1, (short) 1);
BlockCollection bc = mock(BlockCollection.class);
- when(bc.getBlockReplication()).thenReturn((short)1);
+ when(bc.getPreferredBlockReplication()).thenReturn((short)1);
bm.addBlockCollection(info, bc);
// Adding this block will increase its current replication, and that will
@@ -1215,7 +1215,7 @@ public class TestReplicationPolicy {
final BlockCollection mbc = mock(BlockCollection.class);
when(mbc.getLastBlock()).thenReturn(info);
when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
- when(mbc.getBlockReplication()).thenReturn((short)1);
+ when(mbc.getPreferredBlockReplication()).thenReturn((short)1);
when(mbc.isUnderConstruction()).thenReturn(true);
ContentSummary cs = mock(ContentSummary.class);
when(cs.getLength()).thenReturn((long)1);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
index 831d65d..1fc0628 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
@@ -70,7 +70,7 @@ public class TestFileWithSnapshotFeature {
// INode only exists in the snapshot
INodeFile snapshotINode = mock(INodeFile.class);
- when(file.getBlockReplication()).thenReturn(REPL_1);
+ when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
when(diff.getSnapshotINode()).thenReturn(snapshotINode);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index a679183..97a77ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -841,12 +841,12 @@ public class TestSnapshotDeletion {
}
INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
- assertEquals(REPLICATION_1, nodeFile13.getBlockReplication());
+ assertEquals(REPLICATION_1, nodeFile13.getPreferredBlockReplication());
TestSnapshotBlocksMap.assertBlockCollection(file13.toString(), 1, fsdir,
blockmanager);
INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
- assertEquals(REPLICATION_1, nodeFile12.getBlockReplication());
+ assertEquals(REPLICATION_1, nodeFile12.getPreferredBlockReplication());
}
/** Test deleting snapshots with modification on the metadata of directory */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8c120b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
index 5264cb7..4eac634 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
@@ -40,7 +40,7 @@ import org.junit.Test;
/**
* This class tests the replication handling/calculation of snapshots. In
* particular, {@link INodeFile#getFileReplication()} and
- * {@link INodeFile#getBlockReplication()} are tested to make sure
+ * {@link INodeFile#getPreferredBlockReplication()} are tested to make sure
* the number of replication is calculated correctly with/without snapshots.
*/
public class TestSnapshotReplication {
@@ -81,7 +81,7 @@ public class TestSnapshotReplication {
/**
* Check the replication of a given file. We test both
* {@link INodeFile#getFileReplication()} and
- * {@link INodeFile#getBlockReplication()}.
+ * {@link INodeFile#getPreferredBlockReplication()}.
*
* @param file The given file
* @param replication The expected replication number
@@ -95,10 +95,11 @@ public class TestSnapshotReplication {
// INodeFile#getFileReplication().
short fileReplication = hdfs.getFileStatus(file1).getReplication();
assertEquals(replication, fileReplication);
- // Check the correctness of getBlockReplication()
+ // Check the correctness of getPreferredBlockReplication()
INode inode = fsdir.getINode(file1.toString());
assertTrue(inode instanceof INodeFile);
- assertEquals(blockReplication, ((INodeFile) inode).getBlockReplication());
+ assertEquals(blockReplication,
+ ((INodeFile) inode).getPreferredBlockReplication());
}
/**
@@ -137,16 +138,19 @@ public class TestSnapshotReplication {
*/
private void checkSnapshotFileReplication(Path currentFile,
Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
- // First check the getBlockReplication for the INode of the currentFile
+ // First check the getPreferredBlockReplication for the INode of
+ // the currentFile
final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
- assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
+ assertEquals(expectedBlockRep,
+ inodeOfCurrentFile.getPreferredBlockReplication());
// Then check replication for every snapshot
for (Path ss : snapshotRepMap.keySet()) {
final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
final INodeFile ssInode = iip.getLastINode().asFile();
// The replication number derived from the
- // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
- assertEquals(expectedBlockRep, ssInode.getBlockReplication());
+ // INodeFileWithLink#getPreferredBlockReplication should
+ // always == expectedBlockRep
+ assertEquals(expectedBlockRep, ssInode.getPreferredBlockReplication());
// Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(),
ssInode.getFileReplication(iip.getPathSnapshotId()));
@@ -218,8 +222,9 @@ public class TestSnapshotReplication {
for (Path ss : snapshotRepMap.keySet()) {
final INodeFile ssInode = getINodeFile(ss);
// The replication number derived from the
- // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
- assertEquals(REPLICATION, ssInode.getBlockReplication());
+ // INodeFileWithLink#getPreferredBlockReplication should
+ // always == expectedBlockRep
+ assertEquals(REPLICATION, ssInode.getPreferredBlockReplication());
// Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(),
ssInode.getFileReplication());
[34/36] hadoop git commit: YARN-3629. NodeID is always printed as
"null" in node manager initialization log. Contributed by nijel.
Posted by zj...@apache.org.
YARN-3629. NodeID is always printed as "null" in node manager
initialization log. Contributed by nijel.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa03f3f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa03f3f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa03f3f9
Branch: refs/heads/YARN-2928
Commit: aa03f3f9e61c8dc5ad985e5c33a6dcf4b5b66b4e
Parents: 40e8064
Author: Devaraj K <de...@apache.org>
Authored: Tue May 12 22:20:25 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:27 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java | 3 ++-
2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa03f3f9/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 78bda68..49c6a78 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -467,6 +467,9 @@ Release 2.8.0 - UNRELEASED
YARN-3602. TestResourceLocalizationService.testPublicResourceInitializesLocalDir
fails Intermittently due to IOException from cleanup. (zhihai xu via xgong)
+ YARN-3629. NodeID is always printed as "null" in node manager initialization log.
+ (nijel via devaraj)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa03f3f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 033e780..88348a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -201,7 +201,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
+ durationToTrackStoppedContainers);
}
super.serviceInit(conf);
- LOG.info("Initialized nodemanager for " + nodeId + ":" +
+ LOG.info("Initialized nodemanager with :" +
" physical-memory=" + memoryMb + " virtual-memory=" + virtualMemoryMb +
" virtual-cores=" + virtualCores);
@@ -215,6 +215,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
// NodeManager is the last service to start, so NodeId is available.
this.nodeId = this.context.getNodeId();
+ LOG.info("Node ID assigned is : " + this.nodeId);
this.httpPort = this.context.getHttpPort();
this.nodeManagerVersionId = YarnVersionInfo.getVersion();
try {
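
The root cause is service lifecycle ordering: NodeStatusUpdaterImpl#nodeId is only assigned in serviceStart(), so a serviceInit() log line that mentions it can only print null. A self-contained sketch of the ordering bug and of the fix's approach of logging after assignment (hypothetical class, not the real service API):

public class LifecycleLogDemo {
  private String nodeId;               // assigned late, like NodeStatusUpdaterImpl#nodeId

  void serviceInit() {
    // before the fix: always prints "Initialized nodemanager for null"
    System.out.println("Initialized nodemanager for " + nodeId);
  }

  void serviceStart() {
    nodeId = "host-1:45454";           // the NodeId only becomes available here
    // after the fix: the id is logged once it actually exists
    System.out.println("Node ID assigned is : " + nodeId);
  }

  public static void main(String[] args) {
    LifecycleLogDemo d = new LifecycleLogDemo();
    d.serviceInit();
    d.serviceStart();
  }
}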
[27/36] hadoop git commit: YARN-3489.
RMServerUtils.validateResourceRequests should only obtain queue info once.
(Varun Saxena via wangda)
Posted by zj...@apache.org.
YARN-3489. RMServerUtils.validateResourceRequests should only obtain queue info once. (Varun Saxena via wangda)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca82dc48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca82dc48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca82dc48
Branch: refs/heads/YARN-2928
Commit: ca82dc4858ee6e12fffe8924ec3f242713b4aeb8
Parents: 5625ac4
Author: Wangda Tan <wa...@apache.org>
Authored: Mon May 11 17:31:15 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:26 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 ++
.../server/resourcemanager/RMServerUtils.java | 10 ++++++-
.../scheduler/SchedulerUtils.java | 30 +++++++++++++++-----
3 files changed, 35 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca82dc48/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0a1b853..e02a564 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -478,6 +478,9 @@ Release 2.7.1 - UNRELEASED
YARN-3243. CapacityScheduler should pass headroom from parent to children
to make sure ParentQueue obey its capacity limits. (Wangda Tan via jianhe)
+ YARN-3489. RMServerUtils.validateResourceRequests should only obtain queue
+ info once. (Varun Saxena via wangda)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca82dc48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 4669a28..4d2e41c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -94,9 +95,16 @@ public class RMServerUtils {
Resource maximumResource, String queueName, YarnScheduler scheduler,
RMContext rmContext)
throws InvalidResourceRequestException {
+ // Get queue from scheduler
+ QueueInfo queueInfo = null;
+ try {
+ queueInfo = scheduler.getQueueInfo(queueName, false, false);
+ } catch (IOException e) {
+ }
+
for (ResourceRequest resReq : ask) {
SchedulerUtils.normalizeAndvalidateRequest(resReq, maximumResource,
- queueName, scheduler, rmContext);
+ queueName, scheduler, rmContext, queueInfo);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca82dc48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index 0ef5c1e..8047d0b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -213,13 +213,21 @@ public class SchedulerUtils {
Resource maximumResource, String queueName, YarnScheduler scheduler,
boolean isRecovery, RMContext rmContext)
throws InvalidResourceRequestException {
+ normalizeAndValidateRequest(resReq, maximumResource, queueName, scheduler,
+ isRecovery, rmContext, null);
+ }
- QueueInfo queueInfo = null;
- try {
- queueInfo = scheduler.getQueueInfo(queueName, false, false);
- } catch (IOException e) {
- // it is possible queue cannot get when queue mapping is set, just ignore
- // the queueInfo here, and move forward
+ public static void normalizeAndValidateRequest(ResourceRequest resReq,
+ Resource maximumResource, String queueName, YarnScheduler scheduler,
+ boolean isRecovery, RMContext rmContext, QueueInfo queueInfo)
+ throws InvalidResourceRequestException {
+ if (null == queueInfo) {
+ try {
+ queueInfo = scheduler.getQueueInfo(queueName, false, false);
+ } catch (IOException e) {
+ // the queue may not be retrievable when queue mapping is set; just ignore
+ // the queueInfo here and move forward
+ }
}
SchedulerUtils.normalizeNodeLabelExpressionInRequest(resReq, queueInfo);
if (!isRecovery) {
@@ -231,8 +239,16 @@ public class SchedulerUtils {
Resource maximumResource, String queueName, YarnScheduler scheduler,
RMContext rmContext)
throws InvalidResourceRequestException {
+ normalizeAndvalidateRequest(resReq, maximumResource, queueName, scheduler,
+ rmContext, null);
+ }
+
+ public static void normalizeAndvalidateRequest(ResourceRequest resReq,
+ Resource maximumResource, String queueName, YarnScheduler scheduler,
+ RMContext rmContext, QueueInfo queueInfo)
+ throws InvalidResourceRequestException {
normalizeAndValidateRequest(resReq, maximumResource, queueName, scheduler,
- false, rmContext);
+ false, rmContext, queueInfo);
}
/**
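
The pattern applied here is a simple hoist: validateResourceRequests fetches the QueueInfo once per batch and threads it through, while a null parameter lets older single-request callers keep the lazy lookup. A standalone sketch of that fetch-once-and-pass-down shape, with stand-in methods instead of the real YarnScheduler:

import java.util.Arrays;
import java.util.List;

public class FetchOnceDemo {
  static String getQueueInfo(String queue) {       // stands in for scheduler.getQueueInfo(...)
    System.out.println("expensive lookup for " + queue);
    return "info:" + queue;
  }

  static void validate(String req, String queue, String queueInfo) {
    if (queueInfo == null) {                       // old callers pass null and pay per call
      queueInfo = getQueueInfo(queue);
    }
    System.out.println("validated " + req + " against " + queueInfo);
  }

  public static void main(String[] args) {
    List<String> asks = Arrays.asList("req-1", "req-2", "req-3");
    String queueInfo = getQueueInfo("default");    // fetched once, as in RMServerUtils
    for (String ask : asks) {
      validate(ask, "default", queueInfo);         // the lookup is no longer repeated
    }
  }
}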
[16/36] hadoop git commit: HADOOP-11947. test-patch should return
early from determine-issue when run in jenkins mode. (Sean Busbey via aw)
Posted by zj...@apache.org.
HADOOP-11947. test-patch should return early from determine-issue when run in jenkins mode. (Sean Busbey via aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c00884e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c00884e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c00884e0
Branch: refs/heads/YARN-2928
Commit: c00884e0e410590948bb3bd081a12a2a89de9c6d
Parents: 708e2ca
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon May 11 12:07:48 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:13 2015 -0700
----------------------------------------------------------------------
dev-support/test-patch.sh | 1 +
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
2 files changed, 4 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c00884e0/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index ccadfb7..ae91b8e 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1181,6 +1181,7 @@ function determine_issue
# we can shortcut jenkins
if [[ ${JENKINS} == true ]]; then
ISSUE=${PATCH_OR_ISSUE}
+ return 0
fi
# shellcheck disable=SC2016
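
Since the fix itself is a single line of shell, a Java analog may make the control flow easier to see: without an early return, the jenkins shortcut assigns the issue and then falls through into the slower detection path anyway. This is an illustrative translation only, not code from the patch:

public class EarlyReturnDemo {
  static String determineIssue(boolean jenkins, String patchOrIssue) {
    if (jenkins) {
      return patchOrIssue;                   // plays the role of the added "return 0"
    }
    // expensive pattern-matching path that jenkins mode should skip
    return parseIssueFromPatchName(patchOrIssue);
  }

  static String parseIssueFromPatchName(String name) {
    System.out.println("slow detection path ran");
    return name.replaceAll("\\.patch$", "");
  }

  public static void main(String[] args) {
    // prints "HADOOP-11947" without the slow-path message
    System.out.println(determineIssue(true, "HADOOP-11947"));
  }
}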
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c00884e0/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 44e78ba..47731fb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -665,6 +665,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-11951. test-patch should give better info about failures to handle
dev-support updates without resetrepo option (Sean Busbey via aw)
+ HADOOP-11947. test-patch should return early from determine-issue when
+ run in jenkins mode. (Sean Busbey via aw)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
[24/36] hadoop git commit: MAPREDUCE-5465. Tasks are often killed
before they exit on their own. Contributed by Ming Ma
Posted by zj...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
index 1807c1c..79b88d8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.ClusterInfo;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
@@ -407,6 +408,7 @@ public class TestTaskAttempt{
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
+ setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
@@ -464,6 +466,7 @@ public class TestTaskAttempt{
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
+ setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
@@ -524,6 +527,7 @@ public class TestTaskAttempt{
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
+ setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
@@ -546,7 +550,7 @@ public class TestTaskAttempt{
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,
- TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
@@ -593,6 +597,7 @@ public class TestTaskAttempt{
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
+ setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
jobFile, 1, splits, jobConf, taListener,
@@ -641,6 +646,7 @@ public class TestTaskAttempt{
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
+ setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
@@ -663,7 +669,7 @@ public class TestTaskAttempt{
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,
- TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
@@ -708,6 +714,7 @@ public class TestTaskAttempt{
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
+ setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
jobFile, 1, splits, jobConf, taListener,
@@ -753,6 +760,7 @@ public class TestTaskAttempt{
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
+ setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
@@ -774,7 +782,7 @@ public class TestTaskAttempt{
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,
- TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
@@ -967,6 +975,255 @@ public class TestTaskAttempt{
taImpl.getInternalState());
}
+
+ @Test
+ public void testKillMapTaskWhileSuccessFinishing() throws Exception {
+ MockEventHandler eventHandler = new MockEventHandler();
+ TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ assertEquals("Task attempt is not in SUCCEEDED state", taImpl.getState(),
+ TaskAttemptState.SUCCEEDED);
+ assertEquals("Task attempt's internal state is not " +
+ "SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
+ TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);
+
+ // If the map task is killed when it is in SUCCESS_FINISHING_CONTAINER
+ // state, the state will move to KILL_CONTAINER_CLEANUP
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_KILL));
+ assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
+ TaskAttemptState.KILLED);
+ assertEquals("Task attempt's internal state is not KILL_CONTAINER_CLEANUP",
+ taImpl.getInternalState(),
+ TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ assertEquals("Task attempt's internal state is not KILL_TASK_CLEANUP",
+ taImpl.getInternalState(),
+ TaskAttemptStateInternal.KILL_TASK_CLEANUP);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_CLEANUP_DONE));
+
+ assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
+ TaskAttemptState.KILLED);
+
+ assertFalse("InternalError occurred", eventHandler.internalError);
+ }
+
+ @Test
+ public void testKillMapTaskWhileFailFinishing() throws Exception {
+ MockEventHandler eventHandler = new MockEventHandler();
+ TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_FAILMSG));
+
+ assertEquals("Task attempt is not in FAILED state", taImpl.getState(),
+ TaskAttemptState.FAILED);
+ assertEquals("Task attempt's internal state is not " +
+ "FAIL_FINISHING_CONTAINER", taImpl.getInternalState(),
+ TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER);
+
+ // If the map task is killed when it is in FAIL_FINISHING_CONTAINER state,
+ // the state will stay in FAIL_FINISHING_CONTAINER.
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_KILL));
+ assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
+ TaskAttemptState.FAILED);
+ assertEquals("Task attempt's internal state is not " +
+ "FAIL_FINISHING_CONTAINER", taImpl.getInternalState(),
+ TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_TIMED_OUT));
+ assertEquals("Task attempt's internal state is not FAIL_CONTAINER_CLEANUP",
+ taImpl.getInternalState(),
+ TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ assertEquals("Task attempt's internal state is not FAIL_TASK_CLEANUP",
+ taImpl.getInternalState(),
+ TaskAttemptStateInternal.FAIL_TASK_CLEANUP);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_CLEANUP_DONE));
+
+ assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
+ TaskAttemptState.FAILED);
+
+ assertFalse("InternalError occurred", eventHandler.internalError);
+ }
+
+ @Test
+ public void testFailMapTaskByClient() throws Exception {
+ MockEventHandler eventHandler = new MockEventHandler();
+ TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_FAILMSG_BY_CLIENT));
+
+ assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
+ TaskAttemptState.FAILED);
+ assertEquals("Task attempt's internal state is not " +
+ "FAIL_CONTAINER_CLEANUP", taImpl.getInternalState(),
+ TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ assertEquals("Task attempt's internal state is not FAIL_TASK_CLEANUP",
+ taImpl.getInternalState(),
+ TaskAttemptStateInternal.FAIL_TASK_CLEANUP);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_CLEANUP_DONE));
+
+ assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
+ TaskAttemptState.FAILED);
+
+ assertFalse("InternalError occurred", eventHandler.internalError);
+ }
+
+ @Test
+ public void testTaskAttemptDiagnosticEventOnFinishing() throws Exception {
+ MockEventHandler eventHandler = new MockEventHandler();
+ TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
+ TaskAttemptState.SUCCEEDED);
+ assertEquals("Task attempt's internal state is not " +
+ "SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
+ TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);
+
+ // TA_DIAGNOSTICS_UPDATE doesn't change state
+ taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(taImpl.getID(),
+ "Task got updated"));
+ assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
+ TaskAttemptState.SUCCEEDED);
+ assertEquals("Task attempt's internal state is not " +
+ "SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
+ TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);
+
+ assertFalse("InternalError occurred", eventHandler.internalError);
+ }
+
+ @Test
+ public void testTimeoutWhileSuccessFinishing() throws Exception {
+ MockEventHandler eventHandler = new MockEventHandler();
+ TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
+ TaskAttemptState.SUCCEEDED);
+ assertEquals("Task attempt's internal state is not " +
+ "SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
+ TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);
+
+ // If the task stays in SUCCESS_FINISHING_CONTAINER for too long,
+ // TaskAttemptListenerImpl will time out the attempt.
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_TIMED_OUT));
+ assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
+ TaskAttemptState.SUCCEEDED);
+ assertEquals("Task attempt's internal state is not " +
+ "SUCCESS_CONTAINER_CLEANUP", taImpl.getInternalState(),
+ TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP);
+
+ assertFalse("InternalError occurred", eventHandler.internalError);
+ }
+
+ @Test
+ public void testTimeoutWhileFailFinishing() throws Exception {
+ MockEventHandler eventHandler = new MockEventHandler();
+ TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
+
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_FAILMSG));
+
+ assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
+ TaskAttemptState.FAILED);
+ assertEquals("Task attempt's internal state is not " +
+ "FAIL_FINISHING_CONTAINER", taImpl.getInternalState(),
+ TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER);
+
+ // If the task stays in FAIL_FINISHING_CONTAINER for too long,
+ // TaskAttemptListenerImpl will time out the attempt.
+ taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
+ TaskAttemptEventType.TA_TIMED_OUT));
+ assertEquals("Task attempt's internal state is not FAIL_CONTAINER_CLEANUP",
+ taImpl.getInternalState(),
+ TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP);
+
+ assertFalse("InternalError occurred", eventHandler.internalError);
+ }
+
+ private void setupTaskAttemptFinishingMonitor(
+ EventHandler eventHandler, JobConf jobConf, AppContext appCtx) {
+ TaskAttemptFinishingMonitor taskAttemptFinishingMonitor =
+ new TaskAttemptFinishingMonitor(eventHandler);
+ taskAttemptFinishingMonitor.init(jobConf);
+ when(appCtx.getTaskAttemptFinishingMonitor()).
+ thenReturn(taskAttemptFinishingMonitor);
+ }
+
+ private TaskAttemptImpl createTaskAttemptImpl(
+ MockEventHandler eventHandler) {
+ ApplicationId appId = ApplicationId.newInstance(1, 2);
+ ApplicationAttemptId appAttemptId =
+ ApplicationAttemptId.newInstance(appId, 0);
+ JobId jobId = MRBuilderUtils.newJobId(appId, 1);
+ TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
+ TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
+ Path jobFile = mock(Path.class);
+
+ TaskAttemptListener taListener = mock(TaskAttemptListener.class);
+ when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
+
+ JobConf jobConf = new JobConf();
+ jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
+ jobConf.setBoolean("fs.file.impl.disable.cache", true);
+ jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
+ jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
+
+ TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
+ when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
+
+ AppContext appCtx = mock(AppContext.class);
+ ClusterInfo clusterInfo = mock(ClusterInfo.class);
+ when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
+ setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
+
+ TaskAttemptImpl taImpl =
+ new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
+ splits, jobConf, taListener,
+ mock(Token.class), new Credentials(),
+ new SystemClock(), appCtx);
+
+ NodeId nid = NodeId.newInstance("127.0.0.1", 0);
+ ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ Container container = mock(Container.class);
+ when(container.getId()).thenReturn(contId);
+ when(container.getNodeId()).thenReturn(nid);
+ when(container.getNodeHttpAddress()).thenReturn("localhost:0");
+
+ taImpl.handle(new TaskAttemptEvent(attemptId,
+ TaskAttemptEventType.TA_SCHEDULE));
+ taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
+ container, mock(Map.class)));
+ taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
+ return taImpl;
+ }
+
public static class MockEventHandler implements EventHandler {
public boolean internalError;
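
The tests above drive the new *_FINISHING_CONTAINER states one event at a time. As a reading aid, here is a condensed, hypothetical encoding of just the transitions those tests assert; the real TaskAttemptImpl state machine has many more states and events:

public class FinishingStateDemo {
  enum S { RUNNING, SUCCESS_FINISHING_CONTAINER, SUCCESS_CONTAINER_CLEANUP,
           FAIL_FINISHING_CONTAINER, FAIL_CONTAINER_CLEANUP, KILL_CONTAINER_CLEANUP }

  static S step(S s, String event) {
    switch (s) {
      case RUNNING:
        if (event.equals("TA_DONE"))    return S.SUCCESS_FINISHING_CONTAINER;
        if (event.equals("TA_FAILMSG")) return S.FAIL_FINISHING_CONTAINER;
        break;
      case SUCCESS_FINISHING_CONTAINER:
        if (event.equals("TA_TIMED_OUT")) return S.SUCCESS_CONTAINER_CLEANUP;
        if (event.equals("TA_KILL"))      return S.KILL_CONTAINER_CLEANUP;
        break;
      case FAIL_FINISHING_CONTAINER:
        if (event.equals("TA_TIMED_OUT")) return S.FAIL_CONTAINER_CLEANUP;
        if (event.equals("TA_KILL"))      return S.FAIL_FINISHING_CONTAINER; // kill ignored
        break;
      default:
        break;
    }
    return s;                              // e.g. diagnostics updates leave the state alone
  }

  public static void main(String[] args) {
    S s = step(S.RUNNING, "TA_DONE");      // SUCCESS_FINISHING_CONTAINER
    s = step(s, "TA_TIMED_OUT");
    System.out.println(s); // SUCCESS_CONTAINER_CLEANUP, per testTimeoutWhileSuccessFinishing
  }
}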
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index bc31bb5..1c40632 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -235,7 +235,15 @@ public interface MRJobConfig {
public static final String TASK_TIMEOUT = "mapreduce.task.timeout";
public static final String TASK_TIMEOUT_CHECK_INTERVAL_MS = "mapreduce.task.timeout.check-interval-ms";
-
+
+ public static final String TASK_EXIT_TIMEOUT = "mapreduce.task.exit.timeout";
+
+ public static final int TASK_EXIT_TIMEOUT_DEFAULT = 60 * 1000;
+
+ public static final String TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS = "mapreduce.task.exit.timeout.check-interval-ms";
+
+ public static final int TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS_DEFAULT = 20 * 1000;
+
public static final String TASK_ID = "mapreduce.task.id";
public static final String TASK_OUTPUT_DIR = "mapreduce.task.output.dir";
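For illustration, a minimal sketch (not part of the patch) of how client or AM code might read the new task-exit settings added above; the wrapper class name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class TaskExitTimeoutSketch {  // hypothetical wrapper for the sketch
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Both lookups fall back to the defaults introduced in this hunk.
    int exitTimeoutMs = conf.getInt(MRJobConfig.TASK_EXIT_TIMEOUT,
        MRJobConfig.TASK_EXIT_TIMEOUT_DEFAULT);
    int checkIntervalMs = conf.getInt(
        MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS,
        MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS_DEFAULT);
    System.out.println(exitTimeoutMs + " / " + checkIntervalMs);
  }
}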
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index fb761ba..a9e7618 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1678,4 +1678,24 @@
app master.
</description>
</property>
+
+<property>
+ <name>mapreduce.task.exit.timeout</name>
+ <value>60000</value>
+ <description>The number of milliseconds a task attempt may stay in the
+ finishing state before it is terminated. After a task attempt completes
+ from TaskUmbilicalProtocol's point of view, it transitions to the
+ finishing state, which gives the task a chance to exit by itself.
+ </description>
+</property>
+
+<property>
+ <name>mapreduce.task.exit.timeout.check-interval-ms</name>
+ <value>20000</value>
+ <description>The interval in milliseconds at which the MR framework
+ checks whether task attempts have stayed in the finishing state for too long.
+ </description>
+</property>
+
</configuration>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
index 194b85a..41bc90a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.ClusterInfo;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
@@ -399,4 +400,9 @@ public class JobHistory extends AbstractService implements HistoryContext {
// bogus - Not Required
return null;
}
+
+ @Override
+ public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() {
+ return null;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5625ac46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecutionWithMRApp.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecutionWithMRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecutionWithMRApp.java
index d2edd19..5ce2761 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecutionWithMRApp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecutionWithMRApp.java
@@ -102,7 +102,7 @@ public class TestSpeculativeExecutionWithMRApp {
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),
TaskAttemptEventType.TA_DONE));
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),
- TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
app.waitForState(taskAttempt.getValue(), TaskAttemptState.SUCCEEDED);
}
}
@@ -170,7 +170,7 @@ public class TestSpeculativeExecutionWithMRApp {
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),
TaskAttemptEventType.TA_DONE));
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),
- TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
numTasksToFinish--;
app.waitForState(taskAttempt.getValue(), TaskAttemptState.SUCCEEDED);
} else {
@@ -228,7 +228,7 @@ public class TestSpeculativeExecutionWithMRApp {
appEventHandler.handle(
new TaskAttemptEvent(ta[0].getID(), TaskAttemptEventType.TA_DONE));
appEventHandler.handle(new TaskAttemptEvent(ta[0].getID(),
- TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
return ta;
}
[18/36] hadoop git commit: HADOOP-11928. Test-patch check for @author tags incorrectly flags removal of @author tags (Kengo Seki via aw)
Posted by zj...@apache.org.
HADOOP-11928. Test-patch check for @author tags incorrectly flags removal of @author tags (Kengo Seki via aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c08d7368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c08d7368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c08d7368
Branch: refs/heads/YARN-2928
Commit: c08d73684c4b70714fc9771d500b9e40c5072bc6
Parents: f652371
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon May 11 11:35:02 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:13 2015 -0700
----------------------------------------------------------------------
dev-support/test-patch.sh | 2 +-
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c08d7368/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 788178b..a3ce567 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1495,7 +1495,7 @@ function check_author
return 0
fi
- authorTags=$("${GREP}" -c -i '@author' "${PATCH_DIR}/patch")
+ authorTags=$("${GREP}" -c -i '^[^-].*@author' "${PATCH_DIR}/patch")
echo "There appear to be ${authorTags} @author tags in the patch."
if [[ ${authorTags} != 0 ]] ; then
add_jira_table -1 @author \
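To see why the anchored pattern fixes the false positive, a small standalone sketch (not part of the patch; the sample lines are hypothetical): in a unified diff a removed line starts with '-', so '^[^-].*@author' skips removals while still flagging added or context lines.

import java.util.regex.Pattern;

public class AuthorTagCheckSketch {  // hypothetical wrapper for the sketch
  public static void main(String[] args) {
    Pattern fixed = Pattern.compile("^[^-].*@author", Pattern.CASE_INSENSITIVE);
    String added = "+ * @author someone";    // hypothetical added line
    String removed = "- * @author someone";  // hypothetical removed line
    System.out.println(fixed.matcher(added).find());    // true: still flagged
    System.out.println(fixed.matcher(removed).find());  // false: no longer flagged
  }
}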
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c08d7368/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e850a55..929ada1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-9729. The example code of org.apache.hadoop.util.Tool is incorrect
(hellojinjie via jlowe)
+ HADOOP-11928. Test-patch check for @author tags incorrectly flags
+ removal of @author tags (Kengo Seki via aw)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
[04/36] hadoop git commit: HDFS-6757. Simplify lease manager with INodeID. Contributed by Haohui Mai.
Posted by zj...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index 184cd87..255a607 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -213,11 +213,12 @@ public class TestDiskspaceQuotaUpdate {
// ignore
}
+ LeaseManager lm = cluster.getNamesystem().getLeaseManager();
// check that the file exists, isn't UC, and has no dangling lease
INodeFile inode = fsdir.getINode(file.toString()).asFile();
Assert.assertNotNull(inode);
Assert.assertFalse("should not be UC", inode.isUnderConstruction());
- Assert.assertNull("should not have a lease", cluster.getNamesystem().getLeaseManager().getLeaseByPath(file.toString()));
+ Assert.assertNull("should not have a lease", lm.getLease(inode));
// make sure the quota usage is unchanged
final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getStorageSpace();
@@ -256,11 +257,11 @@ public class TestDiskspaceQuotaUpdate {
}
// check that the file exists, isn't UC, and has no dangling lease
+ LeaseManager lm = cluster.getNamesystem().getLeaseManager();
INodeFile inode = fsdir.getINode(file.toString()).asFile();
Assert.assertNotNull(inode);
Assert.assertFalse("should not be UC", inode.isUnderConstruction());
- Assert.assertNull("should not have a lease", cluster.getNamesystem()
- .getLeaseManager().getLeaseByPath(file.toString()));
+ Assert.assertNull("should not have a lease", lm.getLease(inode));
// make sure the quota usage is unchanged
final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getStorageSpace();
@@ -296,11 +297,11 @@ public class TestDiskspaceQuotaUpdate {
}
// check that the file exists, isn't UC, and has no dangling lease
+ LeaseManager lm = cluster.getNamesystem().getLeaseManager();
INodeFile inode = fsdir.getINode(file.toString()).asFile();
Assert.assertNotNull(inode);
Assert.assertFalse("should not be UC", inode.isUnderConstruction());
- Assert.assertNull("should not have a lease", cluster.getNamesystem()
- .getLeaseManager().getLeaseByPath(file.toString()));
+ Assert.assertNull("should not have a lease", lm.getLease(inode));
// make sure the quota usage is unchanged
final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getStorageSpace();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 7b9ea93..5653df5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -109,7 +109,7 @@ public class TestFSImage {
assertEquals(1, blks.length);
assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
// check lease manager
- Lease lease = fsn.leaseManager.getLeaseByPath(file2.toString());
+ Lease lease = fsn.leaseManager.getLease(file2Node);
Assert.assertNotNull(lease);
} finally {
if (cluster != null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 3d0259e..8b0662c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -76,7 +76,7 @@ public class TestFSNamesystem {
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
LeaseManager leaseMan = fsn.getLeaseManager();
- leaseMan.addLease("client1", "importantFile");
+ leaseMan.addLease("client1", fsn.getFSDirectory().allocateNewInodeId());
assertEquals(1, leaseMan.countLease());
fsn.clear();
leaseMan = fsn.getLeaseManager();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
index a19eb1d..4d0f994 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
@@ -70,7 +70,8 @@ public class TestGetBlockLocations {
public Void answer(InvocationOnMock invocation) throws Throwable {
INodesInPath iip = fsd.getINodesInPath(FILE_PATH, true);
FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
- new ArrayList<INode>(), now());
+ new ArrayList<INode>(), new ArrayList<Long>(),
+ now());
invocation.callRealMethod();
return null;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
index 2f114a7..96907f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
@@ -21,39 +21,31 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
+import com.google.common.collect.Lists;
import org.junit.Test;
import org.mockito.Mockito;
+import java.util.ArrayList;
+
+import static org.mockito.Mockito.*;
public class TestLeaseManager {
- final Configuration conf = new HdfsConfiguration();
-
@Test
- public void testRemoveLeaseWithPrefixPath() throws Exception {
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
- cluster.waitActive();
-
- LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
- lm.addLease("holder1", "/a/b");
- lm.addLease("holder2", "/a/c");
- assertNotNull(lm.getLeaseByPath("/a/b"));
- assertNotNull(lm.getLeaseByPath("/a/c"));
-
- lm.removeLeaseWithPrefixPath("/a");
-
- assertNull(lm.getLeaseByPath("/a/b"));
- assertNull(lm.getLeaseByPath("/a/c"));
-
- lm.addLease("holder1", "/a/b");
- lm.addLease("holder2", "/a/c");
-
- lm.removeLeaseWithPrefixPath("/a/");
+ public void testRemoveLeases() throws Exception {
+ FSNamesystem fsn = mock(FSNamesystem.class);
+ LeaseManager lm = new LeaseManager(fsn);
+ ArrayList<Long> ids = Lists.newArrayList(INodeId.ROOT_INODE_ID + 1,
+ INodeId.ROOT_INODE_ID + 2, INodeId.ROOT_INODE_ID + 3,
+ INodeId.ROOT_INODE_ID + 4);
+ for (long id : ids) {
+ lm.addLease("foo", id);
+ }
- assertNull(lm.getLeaseByPath("/a/b"));
- assertNull(lm.getLeaseByPath("/a/c"));
+ assertEquals(4, lm.getINodeIdWithLeases().size());
+ synchronized (lm) {
+ lm.removeLeases(ids);
+ }
+ assertEquals(0, lm.getINodeIdWithLeases().size());
}
/** Check that even if LeaseManager.checkLease is not able to relinquish
@@ -70,13 +62,13 @@ public class TestLeaseManager {
LeaseManager lm = new LeaseManager(fsn);
//Make sure the leases we are going to add exceed the hard limit
- lm.setLeasePeriod(0,0);
+ lm.setLeasePeriod(0, 0);
//Add some leases to the LeaseManager
- lm.addLease("holder1", "src1");
- lm.addLease("holder2", "src2");
- lm.addLease("holder3", "src3");
- assertEquals(lm.getNumSortedLeases(), 3);
+ lm.addLease("holder1", INodeId.ROOT_INODE_ID + 1);
+ lm.addLease("holder2", INodeId.ROOT_INODE_ID + 2);
+ lm.addLease("holder3", INodeId.ROOT_INODE_ID + 3);
+ assertEquals(lm.countLease(), 3);
//Initiate a call to checkLease. This should exit within the test timeout
lm.checkLeases();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index f43edfb..5be1cef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -615,7 +615,8 @@ public class TestSaveNamespace {
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
try {
- cluster.getNamesystem().leaseManager.addLease("me", "/non-existent");
+ cluster.getNamesystem().leaseManager.addLease("me",
+ INodeId.ROOT_INODE_ID + 1);
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.getNameNodeRpc().saveNamespace(0, 0);
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
index aba3bb3..e716d6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
@@ -40,11 +40,13 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.log4j.Level;
import org.junit.After;
@@ -279,4 +281,31 @@ public class TestINodeFileUnderConstructionWithSnapshot {
assertEquals(BLOCKSIZE - 1, lastBlock.getBlockSize());
out.close();
}
+
+ @Test
+ public void testLease() throws Exception {
+ try {
+ NameNodeAdapter.setLeasePeriod(fsn, 100, 200);
+ final Path foo = new Path(dir, "foo");
+ final Path bar = new Path(foo, "bar");
+ DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0);
+ HdfsDataOutputStream out = appendFileWithoutClosing(bar, 100);
+ out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+ SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
+
+ hdfs.delete(foo, true);
+ Thread.sleep(1000);
+ try {
+ fsn.writeLock();
+ NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
+ } finally {
+ fsn.writeUnlock();
+ }
+ } finally {
+ NameNodeAdapter.setLeasePeriod(
+ fsn,
+ HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
+ HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
+ }
+ }
}
\ No newline at end of file
[12/36] hadoop git commit: HDFS-8351. Remove namenode -finalize option from document. (aajisaka)
Posted by zj...@apache.org.
HDFS-8351. Remove namenode -finalize option from document. (aajisaka)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/821f5b45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/821f5b45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/821f5b45
Branch: refs/heads/YARN-2928
Commit: 821f5b45aa5f47cab69413245ca5743413c1e90b
Parents: 5181297
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon May 11 15:34:44 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:12 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/821f5b45/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f733f22..6b53e88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -733,6 +733,8 @@ Release 2.8.0 - UNRELEASED
HDFS-8097. TestFileTruncate is failing intermittently. (Rakesh R via
Arpit Agarwal)
+ HDFS-8351. Remove namenode -finalize option from document. (aajisaka)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/821f5b45/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 8f5306b..534d63a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -432,7 +432,7 @@ Usage:
| `-upgradeOnly` `[-clusterid cid]` [`-renameReserved` \<k-v pairs\>] | Upgrade the specified NameNode and then shutdown it. |
| `-rollback` | Rollback the NameNode to the previous version. This should be used after stopping the cluster and distributing the old Hadoop version. |
| `-rollingUpgrade` \<rollback\|started\> | See [Rolling Upgrade document](./HdfsRollingUpgrade.html#NameNode_Startup_Options) for the detail. |
-| `-finalize` | Finalize will remove the previous state of the files system. Recent upgrade will become permanent. Rollback option will not be available anymore. After finalization it shuts the NameNode down. |
+| `-finalize` | No longer supported. Use `dfsadmin -finalizeUpgrade` instead. |
| `-importCheckpoint` | Loads image from a checkpoint directory and save it into the current one. Checkpoint dir is read from property fs.checkpoint.dir |
| `-initializeSharedEdits` | Format a new shared edits dir and copy in enough edit log segments so that the standby NameNode can start up. |
| `-bootstrapStandby` | Allows the standby NameNode's storage directories to be bootstrapped by copying the latest namespace snapshot from the active NameNode. This is used when first configuring an HA cluster. |
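For reference, the replacement workflow named in the updated row is the `hdfs dfsadmin -finalizeUpgrade` command, which makes the most recent upgrade permanent without a dedicated NameNode startup option.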
[14/36] hadoop git commit: YARN-3587. Fix the javadoc of DelegationTokenSecretManager in yarn, etc. projects. Contributed by Gabor Liptak.
Posted by zj...@apache.org.
YARN-3587. Fix the javadoc of DelegationTokenSecretManager in yarn, etc. projects. Contributed by Gabor Liptak.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af512626
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af512626
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af512626
Branch: refs/heads/YARN-2928
Commit: af5126266340c86e0847668020af159c1f9bb098
Parents: 821f5b4
Author: Junping Du <ju...@apache.org>
Authored: Mon May 11 05:27:07 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:12 2015 -0700
----------------------------------------------------------------------
.../AbstractDelegationTokenSecretManager.java | 11 +++++++++++
.../token/delegation/DelegationTokenSecretManager.java | 9 +++++----
.../token/delegation/DelegationTokenSecretManager.java | 9 +++++----
.../v2/hs/JHSDelegationTokenSecretManager.java | 9 +++++----
hadoop-yarn-project/CHANGES.txt | 3 +++
.../TimelineDelegationTokenSecretManagerService.java | 12 ++++++++----
.../security/RMDelegationTokenSecretManager.java | 10 ++++++----
7 files changed, 43 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/af512626/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 52e6a01..1d7f2f5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -99,6 +99,17 @@ extends AbstractDelegationTokenIdentifier>
*/
protected Object noInterruptsLock = new Object();
+ /**
+ * Create a secret manager
+ * @param delegationKeyUpdateInterval the number of milliseconds for rolling
+ * new secret keys.
+ * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+ * tokens in milliseconds
+ * @param delegationTokenRenewInterval how often the tokens must be renewed
+ * in milliseconds
+ * @param delegationTokenRemoverScanInterval how often the tokens are scanned
+ * for expired tokens in milliseconds
+ */
public AbstractDelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/af512626/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index 8af7eba..b7f89a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -79,13 +79,14 @@ public class DelegationTokenSecretManager
/**
* Create a secret manager
- * @param delegationKeyUpdateInterval the number of seconds for rolling new
- * secret keys.
+ * @param delegationKeyUpdateInterval the number of milliseconds for rolling
+ * new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
- * tokens
+ * tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
+ * in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are scanned
- * for expired tokens
+ * for expired tokens in milliseconds
* @param storeTokenTrackingId whether to store the token's tracking id
*/
public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/af512626/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java
index b42e0c9..2a109b6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java
@@ -34,13 +34,14 @@ public class DelegationTokenSecretManager
/**
* Create a secret manager
- * @param delegationKeyUpdateInterval the number of seconds for rolling new
- * secret keys.
+ * @param delegationKeyUpdateInterval the number of milliseconds for rolling
+ * new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
- * tokens
+ * tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
+ * in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are scanned
- * for expired tokens
+ * for expired tokens in milliseconds
*/
public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/af512626/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JHSDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JHSDelegationTokenSecretManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JHSDelegationTokenSecretManager.java
index 7fac44d..98d13a4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JHSDelegationTokenSecretManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JHSDelegationTokenSecretManager.java
@@ -47,13 +47,14 @@ public class JHSDelegationTokenSecretManager
/**
* Create a secret manager
- * @param delegationKeyUpdateInterval the number of seconds for rolling new
- * secret keys.
+ * @param delegationKeyUpdateInterval the number of milliseconds for rolling
+ * new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
- * tokens
+ * tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
+ * in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are scanned
- * for expired tokens
+ * for expired tokens in milliseconds
* @param store history server state store for persisting state
*/
public JHSDelegationTokenSecretManager(long delegationKeyUpdateInterval,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/af512626/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0a6d2cc..6605632 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -297,6 +297,9 @@ Release 2.8.0 - UNRELEASED
YARN-3395. FairScheduler: Trim whitespaces when using username for
queuename. (Zhihai Xu via kasha)
+ YARN-3587. Fix the javadoc of DelegationTokenSecretManager in yarn, etc.
+ projects. (Gabor Liptak via junping_du)
+
OPTIMIZATIONS
YARN-3339. TestDockerContainerExecutor should pull a single image and not
http://git-wip-us.apache.org/repos/asf/hadoop/blob/af512626/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
index c940eea..60a0348 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
@@ -125,11 +125,15 @@ public class TimelineDelegationTokenSecretManagerService extends
/**
* Create a timeline secret manager
- *
- * @param delegationKeyUpdateInterval the number of seconds for rolling new secret keys.
- * @param delegationTokenMaxLifetime the maximum lifetime of the delegation tokens
+ * @param delegationKeyUpdateInterval the number of milliseconds for rolling
+ * new secret keys.
+ * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+ * tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
- * @param delegationTokenRemoverScanInterval how often the tokens are scanned for expired tokens
+ * in milliseconds
+ * @param delegationTokenRemoverScanInterval how often the tokens are
+ * scanned for expired tokens in milliseconds
+ * @param stateStore timeline service state store
*/
public TimelineDelegationTokenSecretManager(
long delegationKeyUpdateInterval,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/af512626/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
index 83defc5..631ca9d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
@@ -56,13 +56,15 @@ public class RMDelegationTokenSecretManager extends
/**
* Create a secret manager
- * @param delegationKeyUpdateInterval the number of seconds for rolling new
- * secret keys.
+ * @param delegationKeyUpdateInterval the number of milliseconds for rolling
+ * new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
- * tokens
+ * tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
+ * in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are scanned
- * for expired tokens
+ * for expired tokens in milliseconds
+ * @param rmContext current context of the ResourceManager
*/
public RMDelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
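A minimal construction sketch (not part of the patch) showing the millisecond units the corrected javadoc describes; the interval values are illustrative and rmContext is assumed to be in scope:

// Sketch only: all four intervals are milliseconds after this clarification.
RMDelegationTokenSecretManager mgr = new RMDelegationTokenSecretManager(
    24L * 60 * 60 * 1000,      // roll master keys daily
    7L * 24 * 60 * 60 * 1000,  // tokens live at most seven days
    24L * 60 * 60 * 1000,      // tokens must be renewed at least daily
    60L * 60 * 1000,           // scan for expired tokens hourly
    rmContext);                // assumed in scope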
[02/36] hadoop git commit: Add missing entry in CHANGES.txt for HDFS-6757.
Posted by zj...@apache.org.
Add missing entry in CHANGES.txt for HDFS-6757.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c350985c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c350985c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c350985c
Branch: refs/heads/YARN-2928
Commit: c350985cc2147dfdd8bc801a625bfb26c42e0292
Parents: 12fdc44
Author: Haohui Mai <wh...@apache.org>
Authored: Fri May 8 23:07:58 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:10 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
1 file changed, 2 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c350985c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0b721ee..1dbf9f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -533,6 +533,8 @@ Release 2.8.0 - UNRELEASED
HDFS-8113. Add check for null BlockCollection pointers in
BlockInfoContiguous structures (Chengbing Liu via Colin P. McCabe)
+ HDFS-6757. Simplify lease manager with INodeID. (wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
[08/36] hadoop git commit: MAPREDUCE-6353. Divide by zero error in MR AM when calculating available containers. (Anubhav Dhoot via kasha)
Posted by zj...@apache.org.
MAPREDUCE-6353. Divide by zero error in MR AM when calculating available containers. (Anubhav Dhoot via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9a43fa6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9a43fa6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9a43fa6
Branch: refs/heads/YARN-2928
Commit: a9a43fa69c61719c1541688211bd5148121788a7
Parents: ca59e77
Author: Karthik Kambatla <ka...@apache.org>
Authored: Sat May 9 14:43:18 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:11 2015 -0700
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../v2/app/rm/ResourceCalculatorUtils.java | 16 ++++-
.../v2/app/rm/TestResourceCalculatorUtils.java | 75 ++++++++++++++++++++
3 files changed, 91 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9a43fa6/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index ea1a148..2152be0 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -423,6 +423,9 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6359. In RM HA setup, "Cluster" tab links populated with AM
hostname instead of RM. (zhaoyunjiong via junping_du)
+ MAPREDUCE-6353. Divide by zero error in MR AM when calculating available
+ containers. (Anubhav Dhoot via kasha)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9a43fa6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java
index b9bc8b5..39cb22e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java
@@ -35,10 +35,13 @@ public class ResourceCalculatorUtils {
public static int computeAvailableContainers(Resource available,
Resource required, EnumSet<SchedulerResourceTypes> resourceTypes) {
if (resourceTypes.contains(SchedulerResourceTypes.CPU)) {
- return Math.min(available.getMemory() / required.getMemory(),
- available.getVirtualCores() / required.getVirtualCores());
+ return Math.min(
+ calculateRatioOrMaxValue(available.getMemory(), required.getMemory()),
+ calculateRatioOrMaxValue(available.getVirtualCores(), required
+ .getVirtualCores()));
}
- return available.getMemory() / required.getMemory();
+ return calculateRatioOrMaxValue(
+ available.getMemory(), required.getMemory());
}
public static int divideAndCeilContainers(Resource required, Resource factor,
@@ -49,4 +52,11 @@ public class ResourceCalculatorUtils {
}
return divideAndCeil(required.getMemory(), factor.getMemory());
}
+
+ private static int calculateRatioOrMaxValue(int numerator, int denominator) {
+ if (denominator == 0) {
+ return Integer.MAX_VALUE;
+ }
+ return numerator / denominator;
+ }
}
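A quick illustration of the behavior change (a sketch, not part of the patch; it mirrors the zero-vcore case exercised by the new test below):

import java.util.EnumSet;
import org.apache.hadoop.mapreduce.v2.app.rm.ResourceCalculatorUtils;
import org.apache.hadoop.yarn.api.records.Resource;
import static org.apache.hadoop.yarn.proto.YarnServiceProtos.*;

public class DivideByZeroSketch {  // hypothetical wrapper for the sketch
  public static void main(String[] args) {
    // 81920 MB / 40 vcores available; the request asks for 1024 MB, 0 vcores.
    // The old integer division threw ArithmeticException on the vcore ratio;
    // the zero-demand dimension is now treated as unconstrained, so the
    // result is bounded by memory alone.
    int n = ResourceCalculatorUtils.computeAvailableContainers(
        Resource.newInstance(81920, 40),
        Resource.newInstance(1024, 0),
        EnumSet.of(SchedulerResourceTypes.CPU, SchedulerResourceTypes.MEMORY));
    System.out.println(n);  // 80 == 81920 / 1024
  }
}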
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9a43fa6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java
new file mode 100644
index 0000000..d87f6db
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.EnumSet;
+
+import static org.apache.hadoop.yarn.proto.YarnServiceProtos.*;
+
+public class TestResourceCalculatorUtils {
+ @Test
+ public void testComputeAvailableContainers() throws Exception {
+ Resource clusterAvailableResources = Resource.newInstance(81920, 40);
+
+ Resource nonZeroResource = Resource.newInstance(1024, 2);
+
+ int expectedNumberOfContainersForMemory = 80;
+ int expectedNumberOfContainersForCPU = 20;
+
+ verifyDifferentResourceTypes(clusterAvailableResources, nonZeroResource,
+ expectedNumberOfContainersForMemory,
+ expectedNumberOfContainersForCPU);
+
+ Resource zeroMemoryResource = Resource.newInstance(0,
+ nonZeroResource.getVirtualCores());
+
+ verifyDifferentResourceTypes(clusterAvailableResources, zeroMemoryResource,
+ Integer.MAX_VALUE,
+ expectedNumberOfContainersForCPU);
+
+ Resource zeroCpuResource = Resource.newInstance(nonZeroResource.getMemory(),
+ 0);
+
+ verifyDifferentResourceTypes(clusterAvailableResources, zeroCpuResource,
+ expectedNumberOfContainersForMemory,
+ expectedNumberOfContainersForMemory);
+ }
+
+ private void verifyDifferentResourceTypes(Resource clusterAvailableResources,
+ Resource nonZeroResource, int expectedNumberOfContainersForMemoryOnly,
+ int expectedNumberOfContainersOverall) {
+
+ Assert.assertEquals("Incorrect number of available containers for Memory",
+ expectedNumberOfContainersForMemoryOnly,
+ ResourceCalculatorUtils.computeAvailableContainers(
+ clusterAvailableResources, nonZeroResource,
+ EnumSet.of(SchedulerResourceTypes.MEMORY)));
+
+ Assert.assertEquals("Incorrect number of available containers overall",
+ expectedNumberOfContainersOverall,
+ ResourceCalculatorUtils.computeAvailableContainers(
+ clusterAvailableResources, nonZeroResource,
+ EnumSet.of(SchedulerResourceTypes.CPU,
+ SchedulerResourceTypes.MEMORY)));
+ }
+}
[35/36] hadoop git commit: MAPREDUCE-6251. Added a new config for JobClient to retry JobStatus calls so that they don't fail on history-server backed by DFSes with not so strong guarantees. Contributed by Craig Welch.
Posted by zj...@apache.org.
MAPREDUCE-6251. Added a new config for JobClient to retry JobStatus calls so that they don't fail on history-server backed by DFSes with not so strong guarantees. Contributed by Craig Welch.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2f589c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2f589c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2f589c7
Branch: refs/heads/YARN-2928
Commit: b2f589c7f14d32360cb55b1cfb756bfd12113c18
Parents: 6da88e3
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Tue May 12 12:11:42 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:28 2015 -0700
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 5 ++
.../org/apache/hadoop/mapred/JobClient.java | 51 +++++++++++----
.../apache/hadoop/mapreduce/MRJobConfig.java | 15 +++++
.../src/main/resources/mapred-default.xml | 17 +++++
.../apache/hadoop/mapred/JobClientUnitTest.java | 65 ++++++++++++++++++++
5 files changed, 142 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2f589c7/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 01b0881..9912b6d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -346,6 +346,7 @@ Release 2.8.0 - UNRELEASED
OPTIMIZATIONS
BUG FIXES
+
MAPREDUCE-6314. TestPipeApplication fails on trunk.
(Varun Vasudev via harsh)
@@ -468,6 +469,10 @@ Release 2.7.1 - UNRELEASED
MAPREDUCE-6259. IllegalArgumentException due to missing job submit time
(zhihai xu via jlowe)
+ MAPREDUCE-6251. Added a new config for JobClient to retry JobStatus calls so
+ that they don't fail on history-server backed by DFSes with not so strong
+ guarantees. (Craig Welch via vinodkv)
+
Release 2.7.0 - 2015-04-20
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2f589c7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index e91fbfe..cf123c7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.mapred.ClusterStatus.BlackListInfo;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
@@ -154,6 +155,10 @@ public class JobClient extends CLI {
public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED;
+ private int maxRetry = MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES;
+ private long retryInterval =
+ MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL;
+
static{
ConfigUtil.loadResources();
}
@@ -469,6 +474,14 @@ public class JobClient extends CLI {
setConf(conf);
cluster = new Cluster(conf);
clientUgi = UserGroupInformation.getCurrentUser();
+
+ maxRetry = conf.getInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES,
+ MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);
+
+ retryInterval =
+ conf.getLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL,
+ MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL);
+
}
/**
@@ -581,16 +594,8 @@ public class JobClient extends CLI {
}
});
}
- /**
- * Get an {@link RunningJob} object to track an ongoing job. Returns
- * null if the id does not correspond to any known job.
- *
- * @param jobid the jobid of the job.
- * @return the {@link RunningJob} handle to track the job, null if the
- * <code>jobid</code> doesn't correspond to any known job.
- * @throws IOException
- */
- public RunningJob getJob(final JobID jobid) throws IOException {
+
+ protected RunningJob getJobInner(final JobID jobid) throws IOException {
try {
Job job = getJobUsingCluster(jobid);
@@ -607,7 +612,31 @@ public class JobClient extends CLI {
return null;
}
- /**@deprecated Applications should rather use {@link #getJob(JobID)}.
+ /**
+ * Get an {@link RunningJob} object to track an ongoing job. Returns
+ * null if the id does not correspond to any known job.
+ *
+ * @param jobid the jobid of the job.
+ * @return the {@link RunningJob} handle to track the job, null if the
+ * <code>jobid</code> doesn't correspond to any known job.
+ * @throws IOException
+ */
+ public RunningJob getJob(final JobID jobid) throws IOException {
+ for (int i = 0; i <= maxRetry; i++) {
+ if (i > 0) {
+ try {
+ Thread.sleep(retryInterval);
+ } catch (Exception e) { }
+ }
+ RunningJob job = getJobInner(jobid);
+ if (job != null) {
+ return job;
+ }
+ }
+ return null;
+ }
+
+ /**@deprecated Applications should rather use {@link #getJob(JobID)}.
*/
@Deprecated
public RunningJob getJob(String jobid) throws IOException {
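A minimal usage sketch for the new retry knobs (not part of the patch; the job id is hypothetical):

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class GetJobRetrySketch {  // hypothetical wrapper for the sketch
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();
    // Opt in to retries, e.g. for a non-HDFS DFS such as WASB.
    conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 30);
    conf.setLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL, 2000L);
    JobClient client = new JobClient(conf);
    RunningJob job = client.getJob(JobID.forName("job_1431468000000_0001"));
    System.out.println(job == null ? "unknown job" : job.getJobName());
  }
}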
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2f589c7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 1c40632..cc8e586 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -474,6 +474,21 @@ public interface MRJobConfig {
MR_PREFIX + "client.max-retries";
public static final int DEFAULT_MR_CLIENT_MAX_RETRIES = 3;
+ /**
+ * How many times to retry jobclient calls (via getjob)
+ */
+ public static final String MR_CLIENT_JOB_MAX_RETRIES =
+ MR_PREFIX + "client.job.max-retries";
+ public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 0;
+
+ /**
+ * How long to wait between jobclient retries on failure
+ */
+ public static final String MR_CLIENT_JOB_RETRY_INTERVAL =
+ MR_PREFIX + "client.job.retry-interval";
+ public static final long DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL =
+ 2000;
+
/** The staging directory for map reduce.*/
public static final String MR_AM_STAGING_DIR =
MR_AM_PREFIX+"staging-dir";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2f589c7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index a9e7618..821b475 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1398,6 +1398,23 @@
</property>
<property>
+ <name>yarn.app.mapreduce.client.job.max-retries</name>
+ <value>0</value>
+ <description>The number of retries the client will make for getJob and
+ dependent calls. The default is 0, as this is generally only needed for
+ non-HDFS DFSes where additional, high-level retries are required to avoid
+ spurious failures during the getJob call. 30 is a good value for
+ WASB.</description>
+</property>
+
+<property>
+ <name>yarn.app.mapreduce.client.job.retry-interval</name>
+ <value>2000</value>
+ <description>The delay between getJob retries in ms for retries configured
+ with yarn.app.mapreduce.client.job.max-retries.</description>
+</property>
+
+<property>
<description>CLASSPATH for MR applications. A comma-separated list
of CLASSPATH entries. If mapreduce.application.framework is set then this
must specify the appropriate classpath for that archive, and the name of
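----------------------------------------------------------------------
For illustration only: a minimal sketch, not part of the patch, of how a
client could opt into the new retry behavior. It assumes the MRJobConfig
constants introduced above; the class name and the job id argument are
placeholders.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class GetJobRetryExample {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();
    // retry getJob up to 30 times, 2000 ms apart, per the WASB guidance above
    conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 30);
    conf.setLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL, 2000L);
    JobClient client = new JobClient(conf);
    // getJob now retries internally before giving up and returning null
    RunningJob job = client.getJob(JobID.forName(args[0]));
    System.out.println(job == null ? "job not found" : job.getJobName());
  }
}
----------------------------------------------------------------------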
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2f589c7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
index 8dfac89..84b76bf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.mock;
@@ -35,6 +36,7 @@ import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobPriority;
import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskType;
import org.junit.Assert;
@@ -52,6 +54,42 @@ public class JobClientUnitTest {
void setCluster(Cluster cluster) {
this.cluster = cluster;
}
+
+ }
+
+ public class TestJobClientGetJob extends TestJobClient {
+
+ int lastGetJobRetriesCounter = 0;
+ int getJobRetriesCounter = 0;
+ int getJobRetries = 0;
+ RunningJob runningJob;
+
+ TestJobClientGetJob(JobConf jobConf) throws IOException {
+ super(jobConf);
+ }
+
+ public int getLastGetJobRetriesCounter() {
+ return lastGetJobRetriesCounter;
+ }
+
+ public void setGetJobRetries(int getJobRetries) {
+ this.getJobRetries = getJobRetries;
+ }
+
+ public void setRunningJob(RunningJob runningJob) {
+ this.runningJob = runningJob;
+ }
+
+ protected RunningJob getJobInner(final JobID jobid) throws IOException {
+ if (getJobRetriesCounter >= getJobRetries) {
+ lastGetJobRetriesCounter = getJobRetriesCounter;
+ getJobRetriesCounter = 0;
+ return runningJob;
+ }
+ getJobRetriesCounter++;
+ return null;
+ }
+
}
@Test
@@ -124,6 +162,7 @@ public class JobClientUnitTest {
JobStatus mockJobStatus = mock(JobStatus.class);
when(mockJobStatus.getJobID()).thenReturn(jobID);
+ when(mockJobStatus.getJobName()).thenReturn(jobID.toString());
when(mockJobStatus.getState()).thenReturn(JobStatus.State.RUNNING);
when(mockJobStatus.getStartTime()).thenReturn(startTime);
when(mockJobStatus.getUsername()).thenReturn("mockuser");
@@ -181,4 +220,30 @@ public class JobClientUnitTest {
assertNull(client.getJob(id));
}
+ @Test
+ public void testGetJobRetry() throws Exception {
+
+ // To prevent the test from running for a very long time, lower the retry count
+ JobConf conf = new JobConf();
+ conf.set(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, "3");
+
+ TestJobClientGetJob client = new TestJobClientGetJob(conf);
+ JobID id = new JobID("ajob",1);
+ RunningJob rj = mock(RunningJob.class);
+ client.setRunningJob(rj);
+
+ // no retries: the job is returned on the first attempt
+ assertNotNull(client.getJob(id));
+ assertEquals(client.getLastGetJobRetriesCounter(), 0);
+
+ // 3 retries before the job is returned
+ client.setGetJobRetries(3);
+ assertNotNull(client.getJob(id));
+ assertEquals(client.getLastGetJobRetriesCounter(), 3);
+
+ // beyond MR_CLIENT_JOB_MAX_RETRIES, getJob will return null
+ client.setGetJobRetries(5);
+ assertNull(client.getJob(id));
+ }
+
}
[10/36] hadoop git commit: MAPREDUCE-6359. In RM HA setup,
Cluster tab links populated with AM hostname instead of RM.
Contributed by zhaoyunjiong.
Posted by zj...@apache.org.
MAPREDUCE-6359. In RM HA setup, Cluster tab links populated with AM hostname instead of RM. Contributed by zhaoyunjiong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33ae623b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33ae623b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33ae623b
Branch: refs/heads/YARN-2928
Commit: 33ae623bc8d7421dcb29902cae52a90c1d4e8a95
Parents: c44b307
Author: Junping Du <ju...@apache.org>
Authored: Sat May 9 06:11:13 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:11 2015 -0700
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ae623b/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 3aed17d..ea1a148 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -420,6 +420,9 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-4750. Enable NNBenchWithoutMR in MapredTestDriver (Liang Xie
and Jason Lowe via raviprak)
+ MAPREDUCE-6359. In RM HA setup, "Cluster" tab links populated with AM
+ hostname instead of RM. (zhaoyunjiong via junping_du)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ae623b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 0f528e4..305ec7e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -62,7 +62,7 @@ public class AppController extends Controller implements AMParams {
set(APP_ID, app.context.getApplicationID().toString());
set(RM_WEB,
JOINER.join(MRWebAppUtil.getYARNWebappScheme(),
- WebAppUtils.getResolvedRMWebAppURLWithoutScheme(conf,
+ WebAppUtils.getResolvedRemoteRMWebAppURLWithoutScheme(conf,
MRWebAppUtil.getYARNHttpPolicy())));
}
[33/36] hadoop git commit: MAPREDUCE-6361. NPE issue in shuffle
caused by concurrent issue between copySucceeded() in one thread and
copyFailed() in another thread on the same host. Contributed by Junping Du.
Posted by zj...@apache.org.
MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between copySucceeded() in one thread and copyFailed() in another thread on the same host. Contributed by Junping Du.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40e80647
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40e80647
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40e80647
Branch: refs/heads/YARN-2928
Commit: 40e8064716c39746b3a0512755850f372e071f88
Parents: dc8c120
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed May 13 00:28:17 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:44:27 2015 -0700
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 4 ++
.../task/reduce/ShuffleSchedulerImpl.java | 14 +++-
.../task/reduce/TestShuffleScheduler.java | 70 ++++++++++++++++++++
3 files changed, 85 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e80647/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index d53974d..01b0881 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -432,6 +432,10 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir,
introducing compile error (Arshad Mohammad via vinayakumarb)
+ MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
+ copySucceeded() in one thread and copyFailed() in another thread on the
+ same host. (Junping Du via ozawa)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e80647/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
index 8317672..ff0bb4f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
@@ -239,7 +239,7 @@ public class ShuffleSchedulerImpl<K,V> implements ShuffleScheduler<K,V> {
}
private void updateStatus() {
- updateStatus(null);
+ updateStatus(null);
}
public synchronized void hostFailed(String hostname) {
@@ -263,9 +263,17 @@ public class ShuffleSchedulerImpl<K,V> implements ShuffleScheduler<K,V> {
failureCounts.put(mapId, new IntWritable(1));
}
String hostname = host.getHostName();
+ IntWritable hostFailedNum = hostFailures.get(hostname);
+ // MAPREDUCE-6361: the hostname entry may have been removed from
+ // hostFailures by copySucceeded() running in another thread.
+ // In that case, add the hostname back to hostFailures to avoid an NPE.
+ if (hostFailedNum == null) {
+ hostFailures.put(hostname, new IntWritable(1));
+ }
//report failure if already retried maxHostFailures times
- boolean hostFail = hostFailures.get(hostname).get() > getMaxHostFailures() ? true : false;
-
+ boolean hostFail = hostFailures.get(hostname).get() >
+ getMaxHostFailures() ? true : false;
+
if (failures >= abortFailureLimit) {
try {
throw new IOException(failures + " failures downloading " + mapId);
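----------------------------------------------------------------------
A standalone sketch (illustrative names, not the patch itself) of the
interleaving guarded against here: hostFailed() seeds the host's entry,
copySucceeded() for another fetch from the same host removes it, and a
later copyFailed() must re-seed the entry instead of dereferencing null.

import java.util.HashMap;
import java.util.Map;

class HostFailureSketch {
  private final Map<String, Integer> hostFailures =
      new HashMap<String, Integer>();

  synchronized void hostFailed(String host) {
    Integer n = hostFailures.get(host);
    hostFailures.put(host, n == null ? 1 : n + 1);
  }

  synchronized void copySucceeded(String host) {
    // the host served a fetch, so clear its failure state
    hostFailures.remove(host);
  }

  synchronized boolean copyFailed(String host, int maxHostFailures) {
    Integer failed = hostFailures.get(host);
    if (failed == null) {
      // a copySucceeded() between hostFailed() and this call removed the
      // entry; re-seed it rather than hit a NullPointerException
      failed = 1;
      hostFailures.put(host, failed);
    }
    // report the host as failed if it has failed too many times
    return failed > maxHostFailures;
  }
}
----------------------------------------------------------------------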
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e80647/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
index 6ac2320..654b748 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
@@ -213,6 +213,76 @@ public class TestShuffleScheduler {
Assert.assertEquals(copyMessage(10, 1, 2), progress.toString());
}
+ @SuppressWarnings("rawtypes")
+ @Test
+ public <K, V> void TestSucceedAndFailedCopyMap() throws Exception {
+ JobConf job = new JobConf();
+ job.setNumMapTasks(2);
+ //mock creation
+ TaskUmbilicalProtocol mockUmbilical = mock(TaskUmbilicalProtocol.class);
+ Reporter mockReporter = mock(Reporter.class);
+ FileSystem mockFileSystem = mock(FileSystem.class);
+ Class<? extends org.apache.hadoop.mapred.Reducer> combinerClass = job.getCombinerClass();
+ @SuppressWarnings("unchecked") // needed for mock with generic
+ CombineOutputCollector<K, V> mockCombineOutputCollector =
+ (CombineOutputCollector<K, V>) mock(CombineOutputCollector.class);
+ org.apache.hadoop.mapreduce.TaskAttemptID mockTaskAttemptID =
+ mock(org.apache.hadoop.mapreduce.TaskAttemptID.class);
+ LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class);
+ CompressionCodec mockCompressionCodec = mock(CompressionCodec.class);
+ Counter mockCounter = mock(Counter.class);
+ TaskStatus mockTaskStatus = mock(TaskStatus.class);
+ Progress mockProgress = mock(Progress.class);
+ MapOutputFile mockMapOutputFile = mock(MapOutputFile.class);
+ Task mockTask = mock(Task.class);
+ @SuppressWarnings("unchecked")
+ MapOutput<K, V> output = mock(MapOutput.class);
+
+ ShuffleConsumerPlugin.Context<K, V> context =
+ new ShuffleConsumerPlugin.Context<K, V>(
+ mockTaskAttemptID, job, mockFileSystem,
+ mockUmbilical, mockLocalDirAllocator,
+ mockReporter, mockCompressionCodec,
+ combinerClass, mockCombineOutputCollector,
+ mockCounter, mockCounter, mockCounter,
+ mockCounter, mockCounter, mockCounter,
+ mockTaskStatus, mockProgress, mockProgress,
+ mockTask, mockMapOutputFile, null);
+ TaskStatus status = new TaskStatus() {
+ @Override
+ public boolean getIsMap() {
+ return false;
+ }
+ @Override
+ public void addFetchFailedMap(TaskAttemptID mapTaskId) {
+ }
+ };
+ Progress progress = new Progress();
+ ShuffleSchedulerImpl<K, V> scheduler = new ShuffleSchedulerImpl<K, V>(job,
+ status, null, null, progress, context.getShuffledMapsCounter(),
+ context.getReduceShuffleBytes(), context.getFailedShuffleCounter());
+
+ MapHost host1 = new MapHost("host1", null);
+ TaskAttemptID failedAttemptID = new TaskAttemptID(
+ new org.apache.hadoop.mapred.TaskID(
+ new JobID("test",0), TaskType.MAP, 0), 0);
+
+ TaskAttemptID succeedAttemptID = new TaskAttemptID(
+ new org.apache.hadoop.mapred.TaskID(
+ new JobID("test",0), TaskType.MAP, 1), 1);
+
+ // handle output fetch failure for failedAttemptID, part I
+ scheduler.hostFailed(host1.getHostName());
+
+ // handle output fetch succeed for succeedAttemptID
+ long bytes = (long)500 * 1024 * 1024;
+ scheduler.copySucceeded(succeedAttemptID, host1, bytes, 0, 500000, output);
+
+ // handle output fetch failure for failedAttemptID, part II
+ // for MAPREDUCE-6361: verify that no NPE is thrown
+ scheduler.copyFailed(failedAttemptID, host1, true, false);
+ }
+
private static String copyMessage(int attemptNo, double rate1, double rate2) {
int attemptZero = attemptNo - 1;
return String.format("copy task(attempt_test_0000_m_%06d_%d succeeded at %1.2f MB/s)"
[22/36] hadoop git commit: HDFS-7916. 'reportBadBlocks' from
datanodes to standby Node BPServiceActor goes for infinite loop. Contributed
by Rushabh Shah.
Posted by zj...@apache.org.
HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop. Contributed by Rushabh Shah.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7597a2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7597a2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7597a2d
Branch: refs/heads/YARN-2928
Commit: d7597a2d2138530b94c847236e582dbfd69d8078
Parents: ed685f1
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon May 11 14:30:35 2015 -0500
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:14 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../hdfs/server/datanode/ErrorReportAction.java | 4 ++
.../server/datanode/ReportBadBlockAction.java | 4 ++
.../server/datanode/TestBPOfferService.java | 54 ++++++++++++++++++++
4 files changed, 65 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7597a2d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8060644..b67caed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -814,6 +814,9 @@ Release 2.7.1 - UNRELEASED
HDFS-8254. Standby namenode doesn't process DELETED_BLOCK if the add block
request is in edit log. (Rushabh S Shah via kihwal)
+ HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
+ goes for infinite loop (Rushabh S Shah via kihwal)
+
Release 2.7.0 - 2015-04-20
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7597a2d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java
index 331822a..b7a9dae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.ipc.RemoteException;
/**
@@ -43,6 +44,9 @@ public class ErrorReportAction implements BPServiceActorAction {
DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
try {
bpNamenode.errorReport(bpRegistration, errorCode, errorMessage);
+ } catch (RemoteException re) {
+ DataNode.LOG.info("trySendErrorReport encountered RemoteException "
+ + "errorMessage: " + errorMessage + " errorCode: " + errorCode, re);
} catch(IOException e) {
throw new BPServiceActorActionException("Error reporting "
+ "an error to namenode: ");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7597a2d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
index 7155eae..671a1fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.ipc.RemoteException;
/**
* ReportBadBlockAction is an instruction issued by {{BPOfferService}} to
@@ -59,6 +60,9 @@ public class ReportBadBlockAction implements BPServiceActorAction {
try {
bpNamenode.reportBadBlocks(locatedBlock);
+ } catch (RemoteException re) {
+ DataNode.LOG.info("reportBadBlock encountered RemoteException for "
+ + "block: " + block , re);
} catch (IOException e) {
throw new BPServiceActorActionException("Failed to report bad block "
+ block + " to namenode: ");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7597a2d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 3aa9a7b..64cc78b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -55,6 +55,9 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Time;
@@ -621,4 +624,55 @@ public class TestBPOfferService {
bpos.stop();
}
}
+
+ /**
+ * Verifies that the reportBadBlock request is not re-queued in
+ * {@link BPServiceActor#bpThreadEnqueue} when the Standby namenode throws
+ * a {@link StandbyException}.
+ * @throws Exception
+ */
+ @Test
+ public void testReportBadBlocksWhenNNThrowsStandbyException()
+ throws Exception {
+ BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
+ bpos.start();
+ try {
+ waitForInitialization(bpos);
+ // Should start with neither NN as active.
+ assertNull(bpos.getActiveNN());
+ // Have NN1 claim active at txid 1
+ mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
+ bpos.triggerHeartbeatForTests();
+ // Now mockNN1 acts as the active namenode and mockNN2 as the standby
+ assertSame(mockNN1, bpos.getActiveNN());
+ // Return nothing when the active Namenode calls reportBadBlocks
+ Mockito.doNothing().when(mockNN1).reportBadBlocks
+ (Mockito.any(LocatedBlock[].class));
+
+ RemoteException re = new RemoteException(StandbyException.class.
+ getName(), "Operation category WRITE is not supported in state "
+ + "standby", RpcErrorCodeProto.ERROR_APPLICATION);
+ // Return StandbyException wrapped in RemoteException when Standby NN
+ // calls reportBadBlocks
+ Mockito.doThrow(re).when(mockNN2).reportBadBlocks
+ (Mockito.any(LocatedBlock[].class));
+
+ bpos.reportBadBlocks(FAKE_BLOCK, mockFSDataset.getVolume(FAKE_BLOCK)
+ .getStorageID(), mockFSDataset.getVolume(FAKE_BLOCK)
+ .getStorageType());
+ // Send heartbeat so that the BpServiceActor can report bad block to
+ // namenode
+ bpos.triggerHeartbeatForTests();
+ Mockito.verify(mockNN2, Mockito.times(1))
+ .reportBadBlocks(Mockito.any(LocatedBlock[].class));
+
+ // Trigger another heartbeat; this will send reportBadBlock again if it
+ // is still present in the queue.
+ bpos.triggerHeartbeatForTests();
+ Mockito.verify(mockNN2, Mockito.times(1))
+ .reportBadBlocks(Mockito.any(LocatedBlock[].class));
+ } finally {
+ bpos.stop();
+ }
+ }
}
[11/36] hadoop git commit: HADOOP-11663. Remove description about
Java 6 from docs. Contributed by Masatake Iwasaki.
Posted by zj...@apache.org.
HADOOP-11663. Remove description about Java 6 from docs. Contributed by Masatake Iwasaki.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6523717
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6523717
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6523717
Branch: refs/heads/YARN-2928
Commit: f65237175c251907c88b5e0858d44af082cd763c
Parents: c1d0160
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 12 00:30:59 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:12 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-auth/src/site/markdown/BuildingIt.md | 2 +-
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6523717/hadoop-common-project/hadoop-auth/src/site/markdown/BuildingIt.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/site/markdown/BuildingIt.md b/hadoop-common-project/hadoop-auth/src/site/markdown/BuildingIt.md
index 53a49d4..52d9ddc 100644
--- a/hadoop-common-project/hadoop-auth/src/site/markdown/BuildingIt.md
+++ b/hadoop-common-project/hadoop-auth/src/site/markdown/BuildingIt.md
@@ -18,7 +18,7 @@ Hadoop Auth, Java HTTP SPNEGO - Building It
Requirements
------------
-* Java 6+
+* Java 7+
* Maven 3+
* Kerberos KDC (for running Kerberos test cases)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6523717/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ff79910..e850a55 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -679,6 +679,9 @@ Release 2.7.1 - UNRELEASED
HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when
native code is not loaded. (Zhijie Shen via ozawa)
+ HADOOP-11663. Remove description about Java 6 from docs.
+ (Masatake Iwasaki via aajisaka)
+
Release 2.7.0 - 2015-04-20
INCOMPATIBLE CHANGES
[21/36] hadoop git commit: re-commit of HADOOP-11881
Posted by zj...@apache.org.
re-commit of HADOOP-11881
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed685f1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed685f1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed685f1f
Branch: refs/heads/YARN-2928
Commit: ed685f1fe546550ae95da8e3b652baa96715f53a
Parents: c00884e
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon May 11 12:13:22 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:14 2015 -0700
----------------------------------------------------------------------
dev-support/test-patch.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed685f1f/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index ae91b8e..9cc5bb0 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1731,7 +1731,7 @@ function check_javac
> "${PATCH_DIR}/diffJavacWarnings.txt"
add_jira_table -1 javac "The applied patch generated "\
- "$((patchJavacWarnings-${PATCH_BRANCH}JavacWarnings))" \
+ "$((patchJavacWarnings-branchJavacWarnings))" \
" additional warning messages."
add_jira_footer javac "@@BASE@@/diffJavacWarnings.txt"
[05/36] hadoop git commit: HDFS-6757. Simplify lease manager with
INodeID. Contributed by Haohui Mai.
Posted by zj...@apache.org.
HDFS-6757. Simplify lease manager with INodeID. Contributed by Haohui Mai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12fdc447
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12fdc447
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12fdc447
Branch: refs/heads/YARN-2928
Commit: 12fdc447c1b0ef1ac47e95e6aa5e27558effb818
Parents: 20471e4
Author: Haohui Mai <wh...@apache.org>
Authored: Fri May 8 23:04:31 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue May 12 13:24:10 2015 -0700
----------------------------------------------------------------------
.../hdfs/server/namenode/FSDirDeleteOp.java | 22 +-
.../hdfs/server/namenode/FSDirRenameOp.java | 16 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 13 +-
.../hdfs/server/namenode/FSImageFormat.java | 46 ++-
.../server/namenode/FSImageFormatPBINode.java | 23 +-
.../hdfs/server/namenode/FSNamesystem.java | 87 ++----
.../hadoop/hdfs/server/namenode/INode.java | 19 +-
.../hdfs/server/namenode/INodeDirectory.java | 34 ++-
.../hadoop/hdfs/server/namenode/INodeFile.java | 23 +-
.../hadoop/hdfs/server/namenode/INodeMap.java | 12 +-
.../hdfs/server/namenode/INodeReference.java | 55 ++--
.../hdfs/server/namenode/INodeSymlink.java | 14 +-
.../hdfs/server/namenode/LeaseManager.java | 300 ++++++-------------
.../snapshot/AbstractINodeDiffList.java | 2 +-
.../snapshot/DirectorySnapshottableFeature.java | 2 +-
.../snapshot/DirectoryWithSnapshotFeature.java | 69 +++--
.../snapshot/FileWithSnapshotFeature.java | 2 +-
.../namenode/snapshot/SnapshotManager.java | 4 +-
.../java/org/apache/hadoop/hdfs/TestLease.java | 4 +-
.../hdfs/server/namenode/NameNodeAdapter.java | 21 +-
.../namenode/TestDiskspaceQuotaUpdate.java | 11 +-
.../hdfs/server/namenode/TestFSImage.java | 2 +-
.../hdfs/server/namenode/TestFSNamesystem.java | 2 +-
.../server/namenode/TestGetBlockLocations.java | 3 +-
.../hdfs/server/namenode/TestLeaseManager.java | 54 ++--
.../hdfs/server/namenode/TestSaveNamespace.java | 3 +-
...tINodeFileUnderConstructionWithSnapshot.java | 29 ++
27 files changed, 414 insertions(+), 458 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 2192c24..c31d75f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -41,7 +41,8 @@ class FSDirDeleteOp {
*/
static long delete(
FSDirectory fsd, INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
- List<INode> removedINodes, long mtime) throws IOException {
+ List<INode> removedINodes, List<Long> removedUCFiles,
+ long mtime) throws IOException {
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + iip.getPath());
}
@@ -54,7 +55,7 @@ class FSDirDeleteOp {
List<INodeDirectory> snapshottableDirs = new ArrayList<>();
FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
filesRemoved = unprotectedDelete(fsd, iip, collectedBlocks,
- removedINodes, mtime);
+ removedINodes, removedUCFiles, mtime);
fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs);
}
} finally {
@@ -118,6 +119,7 @@ class FSDirDeleteOp {
FSNamesystem fsn = fsd.getFSNamesystem();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<>();
+ List<Long> removedUCFiles = new ChunkedArrayList<>();
final INodesInPath iip = fsd.getINodesInPath4Write(
FSDirectory.normalizePath(src), false);
@@ -127,11 +129,11 @@ class FSDirDeleteOp {
List<INodeDirectory> snapshottableDirs = new ArrayList<>();
FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
long filesRemoved = unprotectedDelete(
- fsd, iip, collectedBlocks, removedINodes, mtime);
+ fsd, iip, collectedBlocks, removedINodes, removedUCFiles, mtime);
fsn.removeSnapshottableDirs(snapshottableDirs);
if (filesRemoved >= 0) {
- fsn.removeLeasesAndINodes(src, removedINodes, false);
+ fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
fsn.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}
}
@@ -163,18 +165,19 @@ class FSDirDeleteOp {
FSDirectory fsd = fsn.getFSDirectory();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<>();
+ List<Long> removedUCFiles = new ChunkedArrayList<>();
long mtime = now();
// Unlink the target directory from directory tree
long filesRemoved = delete(
- fsd, iip, collectedBlocks, removedINodes, mtime);
+ fsd, iip, collectedBlocks, removedINodes, removedUCFiles, mtime);
if (filesRemoved < 0) {
return null;
}
fsd.getEditLog().logDelete(src, mtime, logRetryCache);
incrDeletedFileCount(filesRemoved);
- fsn.removeLeasesAndINodes(src, removedINodes, true);
+ fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, true);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
@@ -212,12 +215,13 @@ class FSDirDeleteOp {
* @param iip the inodes resolved from the path
* @param collectedBlocks blocks collected from the deleted path
* @param removedINodes inodes that should be removed from inodeMap
+ * @param removedUCFiles inodes whose leases need to be released
* @param mtime the time the inode is removed
* @return the number of inodes deleted; 0 if no inodes are deleted.
*/
private static long unprotectedDelete(
FSDirectory fsd, INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
- List<INode> removedINodes, long mtime) {
+ List<INode> removedINodes, List<Long> removedUCFiles, long mtime) {
assert fsd.hasWriteLock();
// check if target node exists
@@ -248,11 +252,11 @@ class FSDirDeleteOp {
// collect block and update quota
if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
targetNode.destroyAndCollectBlocks(fsd.getBlockStoragePolicySuite(),
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, removedUCFiles);
} else {
QuotaCounts counts = targetNode.cleanSubtree(
fsd.getBlockStoragePolicySuite(), CURRENT_STATE_ID,
- latestSnapshot, collectedBlocks, removedINodes);
+ latestSnapshot, collectedBlocks, removedINodes, removedUCFiles);
removed = counts.getNameSpace();
fsd.updateCountNoQuotaCheck(iip, iip.length() -1, counts.negation());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 4a20a62..d5faa78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -680,8 +680,6 @@ class FSDirRenameOp {
srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId());
final INode dstParent = dstParentIIP.getLastINode();
dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId());
- // update moved lease with new filename
- fsd.getFSNamesystem().unprotectedChangeLease(src, dst);
}
void restoreSource() throws QuotaExceededException {
@@ -731,16 +729,20 @@ class FSDirRenameOp {
throws QuotaExceededException {
Preconditions.checkState(oldDstChild != null);
List<INode> removedINodes = new ChunkedArrayList<>();
+ List<Long> removedUCFiles = new ChunkedArrayList<>();
final boolean filesDeleted;
if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
- oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ removedUCFiles);
filesDeleted = true;
} else {
- filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
- dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
- .getNameSpace() >= 0;
+ filesDeleted = oldDstChild.cleanSubtree(
+ bsps, Snapshot.CURRENT_STATE_ID,
+ dstIIP.getLatestSnapshotId(), collectedBlocks,
+ removedINodes, removedUCFiles).getNameSpace() >= 0;
}
- fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
+ fsd.getFSNamesystem().removeLeasesAndINodes(
+ removedUCFiles, removedINodes, false);
return filesDeleted;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index eaa2e77..7964188 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -29,6 +29,7 @@ import java.util.EnumMap;
import java.util.EnumSet;
import java.util.List;
+import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -374,7 +375,7 @@ public class FSEditLogLoader {
addCloseOp.clientMachine,
addCloseOp.storagePolicyId);
iip = INodesInPath.replace(iip, iip.length() - 1, newFile);
- fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
+ fsNamesys.leaseManager.addLease(addCloseOp.clientName, newFile.getId());
// add the op into retry cache if necessary
if (toAddRetryCache) {
@@ -446,9 +447,9 @@ public class FSEditLogLoader {
"File is not under construction: " + path);
}
// One might expect that you could use removeLease(holder, path) here,
- // but OP_CLOSE doesn't serialize the holder. So, remove by path.
+ // but OP_CLOSE doesn't serialize the holder. So, remove the inode.
if (file.isUnderConstruction()) {
- fsNamesys.leaseManager.removeLeaseWithPrefixPath(path);
+ fsNamesys.leaseManager.removeLeases(Lists.newArrayList(file.getId()));
file.toCompleteFile(file.getModificationTime());
}
break;
@@ -701,8 +702,8 @@ public class FSEditLogLoader {
renameReservedPathsOnUpgrade(reassignLeaseOp.path, logVersion);
INodeFile pendingFile = fsDir.getINode(path).asFile();
Preconditions.checkState(pendingFile.isUnderConstruction());
- fsNamesys.reassignLeaseInternal(lease,
- path, reassignLeaseOp.newHolder, pendingFile);
+ fsNamesys.reassignLeaseInternal(lease, reassignLeaseOp.newHolder,
+ pendingFile);
break;
}
case OP_START_LOG_SEGMENT:
@@ -739,7 +740,7 @@ public class FSEditLogLoader {
collectedBlocks.clear();
fsNamesys.dir.removeFromInodeMap(removedINodes);
removedINodes.clear();
-
+
if (toAddRetryCache) {
fsNamesys.addCacheEntry(deleteSnapshotOp.rpcClientId,
deleteSnapshotOp.rpcCallId);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index d1d007f..ec2babd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -966,8 +966,7 @@ public class FSImageFormat {
}
if (!inSnapshot) {
- namesystem.leaseManager.addLease(cons
- .getFileUnderConstructionFeature().getClientName(), path);
+ namesystem.leaseManager.addLease(uc.getClientName(), oldnode.getId());
}
}
}
@@ -1297,7 +1296,7 @@ public class FSImageFormat {
// paths, so that when loading fsimage we do not put them into the lease
// map. In the future, we can remove this hack when we can bump the
// layout version.
- sourceNamesystem.saveFilesUnderConstruction(out, snapshotUCMap);
+ saveFilesUnderConstruction(sourceNamesystem, out, snapshotUCMap);
context.checkCancelled();
sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
@@ -1448,5 +1447,46 @@ public class FSImageFormat {
counter.increment();
}
}
+
+ /**
+ * Serializes leases.
+ */
+ void saveFilesUnderConstruction(FSNamesystem fsn, DataOutputStream out,
+ Map<Long, INodeFile> snapshotUCMap) throws IOException {
+ // This is run by an inferior thread of saveNamespace, which holds a read
+ // lock on our behalf. If we took the read lock here, we could block
+ // for fairness if a writer is waiting on the lock.
+ final LeaseManager leaseManager = fsn.getLeaseManager();
+ final FSDirectory dir = fsn.getFSDirectory();
+ synchronized (leaseManager) {
+ Collection<Long> filesWithUC = leaseManager.getINodeIdWithLeases();
+ for (Long id : filesWithUC) {
+ // TODO: for HDFS-5428, because of rename operations, some
+ // under-construction files that are
+ // in the current fs directory can also be captured in the
+ // snapshotUCMap. We should remove them from the snapshotUCMap.
+ snapshotUCMap.remove(id);
+ }
+ out.writeInt(filesWithUC.size() + snapshotUCMap.size()); // write the size
+
+ for (Long id : filesWithUC) {
+ INodeFile file = dir.getInode(id).asFile();
+ String path = file.getFullPathName();
+ FSImageSerialization.writeINodeUnderConstruction(
+ out, file, path);
+ }
+
+ for (Map.Entry<Long, INodeFile> entry : snapshotUCMap.entrySet()) {
+ // for those snapshot INodeFileUC, we use "/.reserved/.inodes/<inodeid>"
+ // as their paths
+ StringBuilder b = new StringBuilder();
+ b.append(FSDirectory.DOT_RESERVED_PATH_PREFIX)
+ .append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING)
+ .append(Path.SEPARATOR).append(entry.getValue().getId());
+ FSImageSerialization.writeINodeUnderConstruction(
+ out, entry.getValue(), b.toString());
+ }
+ }
+ }
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index b758458..d966c69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -279,7 +280,8 @@ public final class FSImageFormatPBINode {
INodeFile file = dir.getInode(entry.getInodeId()).asFile();
FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
Preconditions.checkState(uc != null); // file must be under-construction
- fsn.leaseManager.addLease(uc.getClientName(), entry.getFullPath());
+ fsn.leaseManager.addLease(uc.getClientName(),
+ entry.getInodeId());
}
}
@@ -576,10 +578,21 @@ public final class FSImageFormatPBINode {
}
void serializeFilesUCSection(OutputStream out) throws IOException {
- Map<String, INodeFile> ucMap = fsn.getFilesUnderConstruction();
- for (Map.Entry<String, INodeFile> entry : ucMap.entrySet()) {
- String path = entry.getKey();
- INodeFile file = entry.getValue();
+ Collection<Long> filesWithUC = fsn.getLeaseManager()
+ .getINodeIdWithLeases();
+ for (Long id : filesWithUC) {
+ INode inode = fsn.getFSDirectory().getInode(id);
+ if (inode == null) {
+ LOG.warn("Fail to find inode " + id + " when saving the leases.");
+ continue;
+ }
+ INodeFile file = inode.asFile();
+ if (!file.isUnderConstruction()) {
+ LOG.warn("Fail to save the lease for inode id " + id
+ + " as the file is not under construction");
+ continue;
+ }
+ String path = file.getFullPathName();
FileUnderConstructionEntry.Builder b = FileUnderConstructionEntry
.newBuilder().setInodeId(file.getId()).setFullPath(path);
FileUnderConstructionEntry e = b.build();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 60495af..ef069d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2087,12 +2087,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
Block newBlock)
throws IOException {
INodeFile file = iip.getLastINode().asFile();
- String src = iip.getPath();
file.recordModification(iip.getLatestSnapshotId());
file.toUnderConstruction(leaseHolder, clientMachine);
assert file.isUnderConstruction() : "inode should be under construction.";
leaseManager.addLease(
- file.getFileUnderConstructionFeature().getClientName(), src);
+ file.getFileUnderConstructionFeature().getClientName(), file.getId());
boolean shouldRecoverNow = (newBlock == null);
BlockInfoContiguous oldBlock = file.getLastBlock();
boolean shouldCopyOnTruncate = shouldCopyOnTruncate(file, oldBlock);
@@ -2568,13 +2567,15 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
} else {
if (overwrite) {
toRemoveBlocks = new BlocksMapUpdateInfo();
- List<INode> toRemoveINodes = new ChunkedArrayList<INode>();
- long ret = FSDirDeleteOp.delete(dir, iip, toRemoveBlocks,
- toRemoveINodes, now());
+ List<INode> toRemoveINodes = new ChunkedArrayList<>();
+ List<Long> toRemoveUCFiles = new ChunkedArrayList<>();
+ long ret = FSDirDeleteOp.delete(
+ dir, iip, toRemoveBlocks, toRemoveINodes,
+ toRemoveUCFiles, now());
if (ret >= 0) {
iip = INodesInPath.replace(iip, iip.length() - 1, null);
FSDirDeleteOp.incrDeletedFileCount(ret);
- removeLeasesAndINodes(src, toRemoveINodes, true);
+ removeLeasesAndINodes(toRemoveUCFiles, toRemoveINodes, true);
}
} else {
// If lease soft limit time is expired, recover the lease
@@ -2601,7 +2602,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
throw new IOException("Unable to add " + src + " to namespace");
}
leaseManager.addLease(newNode.getFileUnderConstructionFeature()
- .getClientName(), src);
+ .getClientName(), newNode.getId());
// Set encryption attributes if necessary
if (feInfo != null) {
@@ -2745,7 +2746,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
file.toUnderConstruction(leaseHolder, clientMachine);
leaseManager.addLease(
- file.getFileUnderConstructionFeature().getClientName(), src);
+ file.getFileUnderConstructionFeature().getClientName(), file.getId());
LocatedBlock ret = null;
if (!newBlock) {
@@ -2897,7 +2898,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
Lease lease = leaseManager.getLease(holder);
if (!force && lease != null) {
- Lease leaseFile = leaseManager.getLeaseByPath(src);
+ Lease leaseFile = leaseManager.getLease(file);
if (leaseFile != null && leaseFile.equals(lease)) {
// We found the lease for this file but the original
// holder is trying to obtain it again.
@@ -3758,15 +3759,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/**
* Remove leases and inodes related to a given path
- * @param src The given path
+ * @param removedUCFiles INodes whose leases need to be released
* @param removedINodes Containing the list of inodes to be removed from
* inodesMap
* @param acquireINodeMapLock Whether to acquire the lock for inode removal
*/
- void removeLeasesAndINodes(String src, List<INode> removedINodes,
+ void removeLeasesAndINodes(List<Long> removedUCFiles,
+ List<INode> removedINodes,
final boolean acquireINodeMapLock) {
assert hasWriteLock();
- leaseManager.removeLeaseWithPrefixPath(src);
+ leaseManager.removeLeases(removedUCFiles);
// remove inodes from inodesMap
if (removedINodes != null) {
if (acquireINodeMapLock) {
@@ -4156,14 +4158,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return lease;
// The following transaction is not synced. Make sure it's sync'ed later.
logReassignLease(lease.getHolder(), src, newHolder);
- return reassignLeaseInternal(lease, src, newHolder, pendingFile);
+ return reassignLeaseInternal(lease, newHolder, pendingFile);
}
- Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
- INodeFile pendingFile) {
+ Lease reassignLeaseInternal(Lease lease, String newHolder, INodeFile pendingFile) {
assert hasWriteLock();
pendingFile.getFileUnderConstructionFeature().setClientName(newHolder);
- return leaseManager.reassignLease(lease, src, newHolder);
+ return leaseManager.reassignLease(lease, pendingFile, newHolder);
}
private void commitOrCompleteLastBlock(final INodeFile fileINode,
@@ -4191,7 +4192,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
Preconditions.checkArgument(uc != null);
- leaseManager.removeLease(uc.getClientName(), src);
+ leaseManager.removeLease(uc.getClientName(), pendingFile);
pendingFile.recordModification(latestSnapshot);
@@ -6401,58 +6402,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
persistBlocks(src, pendingFile, logRetryCache);
}
- // rename was successful. If any part of the renamed subtree had
- // files that were being written to, update with new filename.
- void unprotectedChangeLease(String src, String dst) {
- assert hasWriteLock();
- leaseManager.changeLease(src, dst);
- }
-
- /**
- * Serializes leases.
- */
- void saveFilesUnderConstruction(DataOutputStream out,
- Map<Long, INodeFile> snapshotUCMap) throws IOException {
- // This is run by an inferior thread of saveNamespace, which holds a read
- // lock on our behalf. If we took the read lock here, we could block
- // for fairness if a writer is waiting on the lock.
- synchronized (leaseManager) {
- Map<String, INodeFile> nodes = leaseManager.getINodesUnderConstruction();
- for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
- // TODO: for HDFS-5428, because of rename operations, some
- // under-construction files that are
- // in the current fs directory can also be captured in the
- // snapshotUCMap. We should remove them from the snapshotUCMap.
- snapshotUCMap.remove(entry.getValue().getId());
- }
-
- out.writeInt(nodes.size() + snapshotUCMap.size()); // write the size
- for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
- FSImageSerialization.writeINodeUnderConstruction(
- out, entry.getValue(), entry.getKey());
- }
- for (Map.Entry<Long, INodeFile> entry : snapshotUCMap.entrySet()) {
- // for those snapshot INodeFileUC, we use "/.reserved/.inodes/<inodeid>"
- // as their paths
- StringBuilder b = new StringBuilder();
- b.append(FSDirectory.DOT_RESERVED_PATH_PREFIX)
- .append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING)
- .append(Path.SEPARATOR).append(entry.getValue().getId());
- FSImageSerialization.writeINodeUnderConstruction(
- out, entry.getValue(), b.toString());
- }
- }
- }
-
- /**
- * @return all the under-construction files in the lease map
- */
- Map<String, INodeFile> getFilesUnderConstruction() {
- synchronized (leaseManager) {
- return leaseManager.getINodesUnderConstruction();
- }
- }
-
/**
* Register a Backup name-node, verifying that it belongs
* to the correct namespace, and adding it to the set of
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index f8efd76..b65879f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -390,7 +390,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* @param bsps
* block storage policy suite to calculate intended storage type usage
* @param snapshotId
- * The id of the snapshot to delete.
+ * The id of the snapshot to delete.
* {@link Snapshot#CURRENT_STATE_ID} means to delete the current
* file/directory.
* @param priorSnapshotId
@@ -401,14 +401,16 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* blocks collected from the descents for further block
* deletion/update will be added to the given map.
* @param removedINodes
- * INodes collected from the descents for further cleaning up of
+ * INodes collected from the descents for further cleaning up of
* inodeMap
+ * @param removedUCFiles
+ * INodes whose leases need to be released
* @return quota usage delta when deleting a snapshot
*/
- public abstract QuotaCounts cleanSubtree(final BlockStoragePolicySuite bsps,
- final int snapshotId,
+ public abstract QuotaCounts cleanSubtree(
+ final BlockStoragePolicySuite bsps, final int snapshotId,
int priorSnapshotId, BlocksMapUpdateInfo collectedBlocks,
- List<INode> removedINodes);
+ List<INode> removedINodes, List<Long> removedUCFiles);
/**
* Destroy self and clear everything! If the INode is a file, this method
@@ -416,7 +418,6 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* directory, the method goes down the subtree and collects blocks from the
* descents, and clears its parent/children references as well. The method
* also clears the diff list if the INode contains snapshot diff list.
- *
* @param bsps
* block storage policy suite to calculate intended storage type usage
* This is needed because INodeReference#destroyAndCollectBlocks() needs
@@ -427,10 +428,12 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* @param removedINodes
* INodes collected from the descents for further cleaning up of
* inodeMap
+ * @param removedUCFiles
+ * INodes whose leases need to be released
*/
public abstract void destroyAndCollectBlocks(
- BlockStoragePolicySuite bsps,
- BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes);
+ BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
+ List<INode> removedINodes, List<Long> removedUCFiles);
/** Compute {@link ContentSummary}. Blocking call */
public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 098594d..fa63889 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -753,10 +753,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
}
/** Call cleanSubtree(..) recursively down the subtree. */
- public QuotaCounts cleanSubtreeRecursively(final BlockStoragePolicySuite bsps,
- final int snapshot,
- int prior, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, final Map<INode, INode> excludedNodes) {
+ public QuotaCounts cleanSubtreeRecursively(
+ final BlockStoragePolicySuite bsps, final int snapshot, int prior,
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, List<Long> removedUCFiles,
+ final Map<INode, INode> excludedNodes) {
QuotaCounts counts = new QuotaCounts.Builder().build();
// in case of deletion snapshot, since this call happens after we modify
// the diff list, the snapshot to be deleted has been combined or renamed
@@ -771,7 +772,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
continue;
} else {
QuotaCounts childCounts = child.cleanSubtree(bsps, snapshot, prior,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, removedUCFiles);
counts.add(childCounts);
}
}
@@ -779,15 +780,17 @@ public class INodeDirectory extends INodeWithAdditionalFields
}
@Override
- public void destroyAndCollectBlocks(final BlockStoragePolicySuite bsps,
+ public void destroyAndCollectBlocks(
+ final BlockStoragePolicySuite bsps,
final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null) {
- sf.clear(bsps, this, collectedBlocks, removedINodes);
+ sf.clear(bsps, this, collectedBlocks, removedINodes, removedUCFiles);
}
for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) {
- child.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ child.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ removedUCFiles);
}
if (getAclFeature() != null) {
AclStorage.removeAclFeature(getAclFeature());
@@ -797,15 +800,15 @@ public class INodeDirectory extends INodeWithAdditionalFields
}
@Override
- public QuotaCounts cleanSubtree(final BlockStoragePolicySuite bsps,
- final int snapshotId, int priorSnapshotId,
+ public QuotaCounts cleanSubtree(
+ final BlockStoragePolicySuite bsps, final int snapshotId, int priorSnapshotId,
final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
// there is snapshot data
if (sf != null) {
return sf.cleanDirectory(bsps, this, snapshotId, priorSnapshotId,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, removedUCFiles);
}
// there is no snapshot data
if (priorSnapshotId == Snapshot.NO_SNAPSHOT_ID
@@ -813,12 +816,13 @@ public class INodeDirectory extends INodeWithAdditionalFields
// destroy the whole subtree and collect blocks that should be deleted
QuotaCounts counts = new QuotaCounts.Builder().build();
this.computeQuotaUsage(bsps, counts, true);
- destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ removedUCFiles);
return counts;
} else {
// process recursively down the subtree
QuotaCounts counts = cleanSubtreeRecursively(bsps, snapshotId, priorSnapshotId,
- collectedBlocks, removedINodes, null);
+ collectedBlocks, removedINodes, removedUCFiles, null);
if (isQuotaSet()) {
getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(counts.negation());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 110bd71..1d9c0ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -494,28 +494,33 @@ public class INodeFile extends INodeWithAdditionalFields
}
@Override
- public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps, final int snapshot,
- int priorSnapshotId,
+ public QuotaCounts cleanSubtree(
+ BlockStoragePolicySuite bsps, final int snapshot, int priorSnapshotId,
final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) {
return sf.cleanFile(bsps, this, snapshot, priorSnapshotId, collectedBlocks,
removedINodes);
}
QuotaCounts counts = new QuotaCounts.Builder().build();
+
if (snapshot == CURRENT_STATE_ID) {
if (priorSnapshotId == NO_SNAPSHOT_ID) {
// this only happens when deleting the current file and the file is not
// in any snapshot
computeQuotaUsage(bsps, counts, false);
- destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ removedUCFiles);
} else {
+ FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
// when deleting the current file and the file is in snapshot, we should
// clean the 0-sized block if the file is UC
- FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
if (uc != null) {
uc.cleanZeroSizeBlock(this, collectedBlocks);
+ if (removedUCFiles != null) {
+ removedUCFiles.add(getId());
+ }
}
}
}
@@ -523,8 +528,9 @@ public class INodeFile extends INodeWithAdditionalFields
}
@Override
- public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
- BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+ public void destroyAndCollectBlocks(
+ BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
if (blocks != null && collectedBlocks != null) {
for (BlockInfoContiguous blk : blocks) {
collectedBlocks.addDeleteBlock(blk);
@@ -542,6 +548,9 @@ public class INodeFile extends INodeWithAdditionalFields
sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
sf.clearDiffs();
}
+ if (isUnderConstruction() && removedUCFiles != null) {
+ removedUCFiles.add(getId());
+ }
}
@Override
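[Editor's note] Two collection points in INodeFile drive the new bookkeeping: deleting the current state of a file that is still under construction but kept in a snapshot records its id next to the zero-size-block cleanup, and destroyAndCollectBlocks records any UC file it destroys outright. Both reduce to the same null-guarded pattern, since callers that only prune snapshot state pass null and skip lease tracking entirely:

    // Guard used by the hunks above; a null list means the caller does not
    // track leases (e.g. the snapshot-diff cleanup paths later in this diff).
    if (isUnderConstruction() && removedUCFiles != null) {
      removedUCFiles.add(getId());
    }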
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
index 7b1332b..5f16bd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
@@ -97,8 +97,9 @@ public class INodeMap {
}
@Override
- public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
- BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes) {
+ public void destroyAndCollectBlocks(
+ BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
+ List<INode> removedINodes, List<Long> removedUCFiles) {
// Nothing to do
}
@@ -116,9 +117,10 @@ public class INodeMap {
}
@Override
- public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps,
- int snapshotId, int priorSnapshotId,
- BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes) {
+ public QuotaCounts cleanSubtree(
+ BlockStoragePolicySuite bsps, int snapshotId, int priorSnapshotId,
+ BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
+ List<Long> removedUCFiles) {
return null;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index b33a93c..5008dc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -302,19 +302,20 @@ public abstract class INodeReference extends INode {
}
@Override // used by WithCount
- public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps, int snapshot,
- int prior, BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ public QuotaCounts cleanSubtree(
+ BlockStoragePolicySuite bsps, int snapshot, int prior, BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
return referred.cleanSubtree(bsps, snapshot, prior, collectedBlocks,
- removedINodes);
+ removedINodes, removedUCFiles);
}
@Override // used by WithCount
public void destroyAndCollectBlocks(
- BlockStoragePolicySuite bsps,
- BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+ BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
if (removeReference(this) <= 0) {
- referred.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ referred.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ removedUCFiles);
}
}
@@ -542,9 +543,9 @@ public abstract class INodeReference extends INode {
}
@Override
- public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps,
- final int snapshot, int prior, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ public QuotaCounts cleanSubtree(
+ BlockStoragePolicySuite bsps, final int snapshot, int prior, final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
// since WithName node resides in deleted list acting as a snapshot copy,
// the parameter snapshot must be non-null
Preconditions.checkArgument(snapshot != Snapshot.CURRENT_STATE_ID);
@@ -560,7 +561,7 @@ public abstract class INodeReference extends INode {
}
QuotaCounts counts = getReferredINode().cleanSubtree(bsps, snapshot, prior,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, removedUCFiles);
INodeReference ref = getReferredINode().getParentReference();
if (ref != null) {
try {
@@ -581,13 +582,13 @@ public abstract class INodeReference extends INode {
}
@Override
- public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
- BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ public void destroyAndCollectBlocks(
+ BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
int snapshot = getSelfSnapshot();
if (removeReference(this) <= 0) {
getReferredINode().destroyAndCollectBlocks(bsps, collectedBlocks,
- removedINodes);
+ removedINodes, removedUCFiles);
} else {
int prior = getPriorSnapshot(this);
INode referred = getReferredINode().asReference().getReferredINode();
@@ -607,7 +608,7 @@ public abstract class INodeReference extends INode {
}
try {
QuotaCounts counts = referred.cleanSubtree(bsps, snapshot, prior,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, removedUCFiles);
INodeReference ref = getReferredINode().getParentReference();
if (ref != null) {
ref.addSpaceConsumed(counts.negation(), true);
@@ -661,13 +662,16 @@ public abstract class INodeReference extends INode {
}
@Override
- public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps, int snapshot, int prior,
- BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes) {
+ public QuotaCounts cleanSubtree(
+ BlockStoragePolicySuite bsps, int snapshot, int prior,
+ BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
+ List<Long> removedUCFiles) {
if (snapshot == Snapshot.CURRENT_STATE_ID
&& prior == Snapshot.NO_SNAPSHOT_ID) {
QuotaCounts counts = new QuotaCounts.Builder().build();
this.computeQuotaUsage(bsps, counts, true);
- destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ removedUCFiles);
return counts;
} else {
// if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to
@@ -684,7 +688,7 @@ public abstract class INodeReference extends INode {
return new QuotaCounts.Builder().build();
}
return getReferredINode().cleanSubtree(bsps, snapshot, prior,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, removedUCFiles);
}
}
@@ -699,11 +703,12 @@ public abstract class INodeReference extends INode {
* WithName nodes.
*/
@Override
- public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
- BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+ public void destroyAndCollectBlocks(
+ BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
if (removeReference(this) <= 0) {
getReferredINode().destroyAndCollectBlocks(bsps, collectedBlocks,
- removedINodes);
+ removedINodes, removedUCFiles);
} else {
// we will clean everything, including files, directories, and
// snapshots, that were created after this prior snapshot
@@ -726,7 +731,7 @@ public abstract class INodeReference extends INode {
// compute quota usage updates before calling this destroy
// function, we use true for countDiffChange
referred.cleanSubtree(bsps, snapshot, prior, collectedBlocks,
- removedINodes);
+ removedINodes, removedUCFiles);
} else if (referred.isDirectory()) {
// similarly, if referred is a directory, it must be an
// INodeDirectory with snapshot
@@ -734,7 +739,7 @@ public abstract class INodeReference extends INode {
Preconditions.checkState(dir.isWithSnapshot());
try {
DirectoryWithSnapshotFeature.destroyDstSubtree(bsps, dir, snapshot,
- prior, collectedBlocks, removedINodes);
+ prior, collectedBlocks, removedINodes, removedUCFiles);
} catch (QuotaExceededException e) {
LOG.error("should not exceed quota while snapshot deletion", e);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index 21a9e4f..7ce893f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -73,21 +73,23 @@ public class INodeSymlink extends INodeWithAdditionalFields {
}
@Override
- public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps,
- final int snapshotId, int priorSnapshotId,
+ public QuotaCounts cleanSubtree(
+ BlockStoragePolicySuite bsps, final int snapshotId, int priorSnapshotId,
final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
if (snapshotId == Snapshot.CURRENT_STATE_ID
&& priorSnapshotId == Snapshot.NO_SNAPSHOT_ID) {
- destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ removedUCFiles);
}
return new QuotaCounts.Builder().nameSpace(1).build();
}
@Override
- public void destroyAndCollectBlocks(final BlockStoragePolicySuite bsps,
+ public void destroyAndCollectBlocks(
+ final BlockStoragePolicySuite bsps,
final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
removedINodes.add(this);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index c6a92be..ade2312 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -22,20 +22,17 @@ import static org.apache.hadoop.util.Time.monotonicNow;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Comparator;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.NoSuchElementException;
+import java.util.PriorityQueue;
import java.util.SortedMap;
import java.util.TreeMap;
-import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.util.Daemon;
@@ -78,15 +75,17 @@ public class LeaseManager {
// Used for handling lock-leases
// Mapping: leaseHolder -> Lease
//
- private final SortedMap<String, Lease> leases = new TreeMap<String, Lease>();
+ private final SortedMap<String, Lease> leases = new TreeMap<>();
// Set of: Lease
- private final NavigableSet<Lease> sortedLeases = new TreeSet<Lease>();
-
- //
- // Map path names to leases. It is protected by the sortedLeases lock.
- // The map stores pathnames in lexicographical order.
- //
- private final SortedMap<String, Lease> sortedLeasesByPath = new TreeMap<String, Lease>();
+ private final PriorityQueue<Lease> sortedLeases = new PriorityQueue<>(512,
+ new Comparator<Lease>() {
+ @Override
+ public int compare(Lease o1, Lease o2) {
+ return Long.signum(o1.getLastUpdate() - o2.getLastUpdate());
+ }
+ });
+ // INodeID -> Lease
+ private final HashMap<Long, Lease> leasesById = new HashMap<>();
private Daemon lmthread;
private volatile boolean shouldRunMonitor;
@@ -97,60 +96,44 @@ public class LeaseManager {
return leases.get(holder);
}
- @VisibleForTesting
- int getNumSortedLeases() {return sortedLeases.size();}
-
/**
* This method iterates through all the leases and counts the number of blocks
* which are not COMPLETE. The FSNamesystem read lock MUST be held before
* calling this method.
- * @return
*/
synchronized long getNumUnderConstructionBlocks() {
assert this.fsnamesystem.hasReadLock() : "The FSNamesystem read lock wasn't"
+ "acquired before counting under construction blocks";
long numUCBlocks = 0;
- for (Lease lease : sortedLeases) {
- for (String path : lease.getPaths()) {
- final INodeFile cons;
- try {
- cons = this.fsnamesystem.getFSDirectory().getINode(path).asFile();
- Preconditions.checkState(cons.isUnderConstruction());
- } catch (UnresolvedLinkException e) {
- throw new AssertionError("Lease files should reside on this FS");
- }
- BlockInfoContiguous[] blocks = cons.getBlocks();
- if(blocks == null)
- continue;
- for(BlockInfoContiguous b : blocks) {
- if(!b.isComplete())
- numUCBlocks++;
- }
+ for (Long id : getINodeIdWithLeases()) {
+ final INodeFile cons = fsnamesystem.getFSDirectory().getInode(id).asFile();
+ Preconditions.checkState(cons.isUnderConstruction());
+ BlockInfoContiguous[] blocks = cons.getBlocks();
+ if(blocks == null) {
+ continue;
+ }
+ for(BlockInfoContiguous b : blocks) {
+ if(!b.isComplete())
+ numUCBlocks++;
}
}
LOG.info("Number of blocks under construction: " + numUCBlocks);
return numUCBlocks;
}
+ Collection<Long> getINodeIdWithLeases() {return leasesById.keySet();}
+
/** @return the lease containing src */
- public Lease getLeaseByPath(String src) {return sortedLeasesByPath.get(src);}
+ public synchronized Lease getLease(INodeFile src) {return leasesById.get(src.getId());}
/** @return the number of leases currently in the system */
+ @VisibleForTesting
public synchronized int countLease() {return sortedLeases.size();}
- /** @return the number of paths contained in all leases */
- synchronized int countPath() {
- int count = 0;
- for(Lease lease : sortedLeases) {
- count += lease.getPaths().size();
- }
- return count;
- }
-
/**
* Adds (or re-adds) the lease for the specified file.
*/
- synchronized Lease addLease(String holder, String src) {
+ synchronized Lease addLease(String holder, long inodeId) {
Lease lease = getLease(holder);
if (lease == null) {
lease = new Lease(holder);
@@ -159,23 +142,24 @@ public class LeaseManager {
} else {
renewLease(lease);
}
- sortedLeasesByPath.put(src, lease);
- lease.paths.add(src);
+ leasesById.put(inodeId, lease);
+ lease.files.add(inodeId);
return lease;
}
/**
* Remove the specified lease and src.
*/
- synchronized void removeLease(Lease lease, String src) {
- sortedLeasesByPath.remove(src);
- if (!lease.removePath(src)) {
+ private synchronized void removeLease(Lease lease, long inodeId) {
+ leasesById.remove(inodeId);
+ if (!lease.removeFile(inodeId)) {
if (LOG.isDebugEnabled()) {
- LOG.debug(src + " not found in lease.paths (=" + lease.paths + ")");
+ LOG.debug("inode " + inodeId + " not found in lease.files (=" + lease
+ + ")");
}
}
- if (!lease.hasPath()) {
+ if (!lease.hasFiles()) {
leases.remove(lease.holder);
if (!sortedLeases.remove(lease)) {
LOG.error(lease + " not found in sortedLeases");
@@ -186,31 +170,32 @@ public class LeaseManager {
/**
* Remove the lease for the specified holder and src
*/
- synchronized void removeLease(String holder, String src) {
+ synchronized void removeLease(String holder, INodeFile src) {
Lease lease = getLease(holder);
if (lease != null) {
- removeLease(lease, src);
+ removeLease(lease, src.getId());
} else {
LOG.warn("Removing non-existent lease! holder=" + holder +
- " src=" + src);
+ " src=" + src.getFullPathName());
}
}
synchronized void removeAllLeases() {
sortedLeases.clear();
- sortedLeasesByPath.clear();
+ leasesById.clear();
leases.clear();
}
/**
* Reassign lease for file src to the new holder.
*/
- synchronized Lease reassignLease(Lease lease, String src, String newHolder) {
+ synchronized Lease reassignLease(Lease lease, INodeFile src,
+ String newHolder) {
assert newHolder != null : "new lease holder is null";
if (lease != null) {
- removeLease(lease, src);
+ removeLease(lease, src.getId());
}
- return addLease(newHolder, src);
+ return addLease(newHolder, src.getId());
}
/**
@@ -243,10 +228,10 @@ public class LeaseManager {
* checks in. If the client dies and allows its lease to
* expire, all the corresponding locks can be released.
*************************************************************/
- class Lease implements Comparable<Lease> {
+ class Lease {
private final String holder;
private long lastUpdate;
- private final Collection<String> paths = new TreeSet<String>();
+ private final HashSet<Long> files = new HashSet<>();
/** Only LeaseManager object can create a lease */
private Lease(String holder) {
@@ -269,127 +254,43 @@ public class LeaseManager {
}
/** Does this lease contain any files? */
- boolean hasPath() {return !paths.isEmpty();}
+ boolean hasFiles() {return !files.isEmpty();}
- boolean removePath(String src) {
- return paths.remove(src);
+ boolean removeFile(long inodeId) {
+ return files.remove(inodeId);
}
@Override
public String toString() {
return "[Lease. Holder: " + holder
- + ", pendingcreates: " + paths.size() + "]";
+ + ", pending creates: " + files.size() + "]";
}
-
- @Override
- public int compareTo(Lease o) {
- Lease l1 = this;
- Lease l2 = o;
- long lu1 = l1.lastUpdate;
- long lu2 = l2.lastUpdate;
- if (lu1 < lu2) {
- return -1;
- } else if (lu1 > lu2) {
- return 1;
- } else {
- return l1.holder.compareTo(l2.holder);
- }
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof Lease)) {
- return false;
- }
- Lease obj = (Lease) o;
- if (lastUpdate == obj.lastUpdate &&
- holder.equals(obj.holder)) {
- return true;
- }
- return false;
- }
-
+
@Override
public int hashCode() {
return holder.hashCode();
}
- Collection<String> getPaths() {
- return paths;
- }
+ private Collection<Long> getFiles() { return files; }
String getHolder() {
return holder;
}
- void replacePath(String oldpath, String newpath) {
- paths.remove(oldpath);
- paths.add(newpath);
- }
-
@VisibleForTesting
long getLastUpdate() {
return lastUpdate;
}
}
- synchronized void changeLease(String src, String dst) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(getClass().getSimpleName() + ".changelease: " +
- " src=" + src + ", dest=" + dst);
- }
-
- final int len = src.length();
- for(Map.Entry<String, Lease> entry
- : findLeaseWithPrefixPath(src, sortedLeasesByPath).entrySet()) {
- final String oldpath = entry.getKey();
- final Lease lease = entry.getValue();
- // replace stem of src with new destination
- final String newpath = dst + oldpath.substring(len);
- if (LOG.isDebugEnabled()) {
- LOG.debug("changeLease: replacing " + oldpath + " with " + newpath);
- }
- lease.replacePath(oldpath, newpath);
- sortedLeasesByPath.remove(oldpath);
- sortedLeasesByPath.put(newpath, lease);
- }
- }
-
- synchronized void removeLeaseWithPrefixPath(String prefix) {
- for(Map.Entry<String, Lease> entry
- : findLeaseWithPrefixPath(prefix, sortedLeasesByPath).entrySet()) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(LeaseManager.class.getSimpleName()
- + ".removeLeaseWithPrefixPath: entry=" + entry);
- }
- removeLease(entry.getValue(), entry.getKey());
- }
- }
-
- static private Map<String, Lease> findLeaseWithPrefixPath(
- String prefix, SortedMap<String, Lease> path2lease) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(LeaseManager.class.getSimpleName() + ".findLease: prefix=" + prefix);
- }
-
- final Map<String, Lease> entries = new HashMap<String, Lease>();
- int srclen = prefix.length();
-
- // prefix may ended with '/'
- if (prefix.charAt(srclen - 1) == Path.SEPARATOR_CHAR) {
- srclen -= 1;
- }
-
- for(Map.Entry<String, Lease> entry : path2lease.tailMap(prefix).entrySet()) {
- final String p = entry.getKey();
- if (!p.startsWith(prefix)) {
- return entries;
- }
- if (p.length() == srclen || p.charAt(srclen) == Path.SEPARATOR_CHAR) {
- entries.put(entry.getKey(), entry.getValue());
+ @VisibleForTesting
+ synchronized void removeLeases(Collection<Long> inodes) {
+ for (long inode : inodes) {
+ Lease lease = leasesById.get(inode);
+ if (lease != null) {
+ removeLease(lease, inode);
}
}
- return entries;
}
public void setLeasePeriod(long softLimit, long hardLimit) {
@@ -428,30 +329,13 @@ public class LeaseManager {
if (LOG.isDebugEnabled()) {
LOG.debug(name + " is interrupted", ie);
}
+ } catch(Throwable e) {
+ LOG.warn("Unexpected throwable: ", e);
}
}
}
}
- /**
- * Get the list of inodes corresponding to valid leases.
- * @return list of inodes
- */
- Map<String, INodeFile> getINodesUnderConstruction() {
- Map<String, INodeFile> inodes = new TreeMap<String, INodeFile>();
- for (String p : sortedLeasesByPath.keySet()) {
- // verify that path exists in namespace
- try {
- INodeFile node = INodeFile.valueOf(fsnamesystem.dir.getINode(p), p);
- Preconditions.checkState(node.isUnderConstruction());
- inodes.put(p, node);
- } catch (IOException ioe) {
- LOG.error(ioe);
- }
- }
- return inodes;
- }
-
/** Check the leases beginning from the oldest.
* @return true if sync is needed.
*/
@@ -459,34 +343,35 @@ public class LeaseManager {
synchronized boolean checkLeases() {
boolean needSync = false;
assert fsnamesystem.hasWriteLock();
- Lease leaseToCheck = null;
- try {
- leaseToCheck = sortedLeases.first();
- } catch(NoSuchElementException e) {}
-
- while(leaseToCheck != null) {
- if (!leaseToCheck.expiredHardLimit()) {
- break;
- }
+ while(!sortedLeases.isEmpty() && sortedLeases.peek().expiredHardLimit()) {
+ Lease leaseToCheck = sortedLeases.poll();
LOG.info(leaseToCheck + " has expired hard limit");
- final List<String> removing = new ArrayList<String>();
- // need to create a copy of the oldest lease paths, because
- // internalReleaseLease() removes paths corresponding to empty files,
+ final List<Long> removing = new ArrayList<>();
+ // need to create a copy of the oldest lease files, because
+ // internalReleaseLease() removes the entries for empty files,
// i.e. it needs to modify the collection being iterated over
// causing ConcurrentModificationException
- String[] leasePaths = new String[leaseToCheck.getPaths().size()];
- leaseToCheck.getPaths().toArray(leasePaths);
- for(String p : leasePaths) {
+ Collection<Long> files = leaseToCheck.getFiles();
+ Long[] leaseINodeIds = files.toArray(new Long[files.size()]);
+ FSDirectory fsd = fsnamesystem.getFSDirectory();
+ String p = null;
+ for(Long id : leaseINodeIds) {
try {
- INodesInPath iip = fsnamesystem.getFSDirectory().getINodesInPath(p,
- true);
- boolean completed = fsnamesystem.internalReleaseLease(leaseToCheck, p,
- iip, HdfsServerConstants.NAMENODE_LEASE_HOLDER);
+ INodesInPath iip = INodesInPath.fromINode(fsd.getInode(id));
+ p = iip.getPath();
+ // Sanity check to make sure the path is correct
+ if (!p.startsWith("/")) {
+ throw new IOException("Invalid path in the lease " + p);
+ }
+ boolean completed = fsnamesystem.internalReleaseLease(
+ leaseToCheck, p, iip,
+ HdfsServerConstants.NAMENODE_LEASE_HOLDER);
if (LOG.isDebugEnabled()) {
if (completed) {
- LOG.debug("Lease recovery for " + p + " is complete. File closed.");
+ LOG.debug("Lease recovery for inode " + id + " is complete. " +
+ "File closed.");
} else {
LOG.debug("Started block recovery " + p + " lease " + leaseToCheck);
}
@@ -498,22 +383,15 @@ public class LeaseManager {
} catch (IOException e) {
LOG.error("Cannot release the path " + p + " in the lease "
+ leaseToCheck, e);
- removing.add(p);
+ removing.add(id);
}
}
- for(String p : removing) {
- removeLease(leaseToCheck, p);
+ for(Long id : removing) {
+ removeLease(leaseToCheck, id);
}
- leaseToCheck = sortedLeases.higher(leaseToCheck);
}
- try {
- if(leaseToCheck != sortedLeases.first()) {
- LOG.warn("Unable to release hard-limit expired lease: "
- + sortedLeases.first());
- }
- } catch(NoSuchElementException e) {}
return needSync;
}
@@ -522,7 +400,7 @@ public class LeaseManager {
return getClass().getSimpleName() + "= {"
+ "\n leases=" + leases
+ "\n sortedLeases=" + sortedLeases
- + "\n sortedLeasesByPath=" + sortedLeasesByPath
+ + "\n leasesById=" + leasesById
+ "\n}";
}
@@ -552,9 +430,15 @@ public class LeaseManager {
* its leases immediately. This is for use by unit tests.
*/
@VisibleForTesting
- void triggerMonitorCheckNow() {
+ public void triggerMonitorCheckNow() {
Preconditions.checkState(lmthread != null,
"Lease monitor is not running");
lmthread.interrupt();
}
+
+ @VisibleForTesting
+ public void runLeaseChecks() {
+ checkLeases();
+ }
+
}
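[Editor's note] The LeaseManager rewrite swaps two structures at once: the lexicographically sorted sortedLeasesByPath map becomes an id-keyed HashMap, and the TreeSet<Lease> becomes a PriorityQueue ordered by last renewal time. Since inode ids are stable across renames, the changeLease/removeLeaseWithPrefixPath/findLeaseWithPrefixPath prefix-rewriting machinery can be deleted, and since a PriorityQueue takes a plain Comparator, Lease no longer needs its compareTo/equals overrides. A standalone sketch of the resulting shape, with all names and values illustrative rather than HDFS code:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.HashMap;
    import java.util.List;
    import java.util.PriorityQueue;

    // Minimal model of the reworked lease bookkeeping.
    public class LeaseBookkeepingSketch {
      static final class Lease {
        final String holder;
        long lastUpdate;
        Lease(String holder, long lastUpdate) {
          this.holder = holder;
          this.lastUpdate = lastUpdate;
        }
      }

      public static void main(String[] args) {
        // inode id -> lease; the key survives renames, unlike a path.
        HashMap<Long, Lease> leasesById = new HashMap<>();
        // Oldest renewal at the head, so expired leases can be poll()ed.
        PriorityQueue<Lease> sortedLeases = new PriorityQueue<>(512,
            new Comparator<Lease>() {
              @Override
              public int compare(Lease o1, Lease o2) {
                return Long.signum(o1.lastUpdate - o2.lastUpdate);
              }
            });

        Lease lease = new Lease("DFSClient_1", 100L);
        leasesById.put(16386L, lease);  // hypothetical inode id
        sortedLeases.add(lease);

        // checkLeases()-style scan: drain expired leases from the head,
        // then drop the id-keyed entries for anything that was removed.
        long hardLimitCutoff = 150L;    // hypothetical expiry point
        List<Long> removing = new ArrayList<>();
        while (!sortedLeases.isEmpty()
            && sortedLeases.peek().lastUpdate < hardLimitCutoff) {
          Lease expired = sortedLeases.poll();
          System.out.println(expired.holder + " has expired hard limit");
          removing.add(16386L);
        }
        for (Long id : removing) {
          leasesById.remove(id);
        }
      }
    }

One caveat the queue shares with any priority queue: it does not re-sort when lastUpdate is mutated in place, so a renewal presumably has to remove and re-insert the lease (renewLease is not shown in this hunk).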
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 5bd4ed5..fb13e09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -65,7 +65,7 @@ abstract class AbstractINodeDiffList<N extends INode,
* @param snapshot The id of the snapshot to be deleted
* @param prior The id of the snapshot taken before the to-be-deleted snapshot
* @param collectedBlocks Used to collect information for blocksMap update
- * @return delta in namespace.
+ * @return delta in namespace.
*/
public final QuotaCounts deleteSnapshotDiff(BlockStoragePolicySuite bsps,
final int snapshot,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index fa1bf94..dc58856 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -216,7 +216,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
try {
QuotaCounts counts = snapshotRoot.cleanSubtree(bsps, snapshot.getId(),
- prior, collectedBlocks, removedINodes);
+ prior, collectedBlocks, removedINodes, null);
INodeDirectory parent = snapshotRoot.getParent();
if (parent != null) {
// there will not be any WithName node corresponding to the deleted
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 95f9d8a..bd2dc2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -97,15 +97,15 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
/** clear the created list */
private QuotaCounts destroyCreatedList(
- final BlockStoragePolicySuite bsps,
- final INodeDirectory currentINode,
+ final BlockStoragePolicySuite bsps, final INodeDirectory currentINode,
final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
QuotaCounts counts = new QuotaCounts.Builder().build();
final List<INode> createdList = getList(ListType.CREATED);
for (INode c : createdList) {
c.computeQuotaUsage(bsps, counts, true);
- c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ removedUCFiles);
// c should be contained in the children list, remove it
currentINode.removeChild(c);
}
@@ -117,12 +117,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
private QuotaCounts destroyDeletedList(
final BlockStoragePolicySuite bsps,
final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
QuotaCounts counts = new QuotaCounts.Builder().build();
final List<INode> deletedList = getList(ListType.DELETED);
for (INode d : deletedList) {
d.computeQuotaUsage(bsps, counts, false);
- d.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ d.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ removedUCFiles);
}
deletedList.clear();
return counts;
@@ -210,8 +211,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
- final BlockStoragePolicySuite bsps,
- final INodeDirectory currentDir, final DirectoryDiff posterior,
+ final BlockStoragePolicySuite bsps, final INodeDirectory currentDir,
+ final DirectoryDiff posterior,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes) {
final QuotaCounts counts = new QuotaCounts.Builder().build();
@@ -221,7 +222,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
public void process(INode inode) {
if (inode != null) {
inode.computeQuotaUsage(bsps, counts, false);
- inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
+ inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes,
+ null);
}
}
});
@@ -324,7 +326,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
// this diff has been deleted
QuotaCounts counts = new QuotaCounts.Builder().build();
- counts.add(diff.destroyDeletedList(bsps, collectedBlocks, removedINodes));
+ counts.add(diff.destroyDeletedList(bsps, collectedBlocks, removedINodes,
+ null));
INodeDirectoryAttributes snapshotINode = getSnapshotINode();
if (snapshotINode != null && snapshotINode.getAclFeature() != null) {
AclStorage.removeAclFeature(snapshotINode.getAclFeature());
@@ -411,21 +414,23 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
public static void destroyDstSubtree(
final BlockStoragePolicySuite bsps, INode inode, final int snapshot,
final int prior, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) throws QuotaExceededException {
+ final List<INode> removedINodes, List<Long> removedUCFiles) throws QuotaExceededException {
Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
if (inode.isReference()) {
if (inode instanceof INodeReference.WithName
&& snapshot != Snapshot.CURRENT_STATE_ID) {
// this inode has been renamed before the deletion of the DstReference
// subtree
- inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
+ inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes,
+ removedUCFiles);
} else {
// for DstReference node, continue this process to its subtree
destroyDstSubtree(bsps, inode.asReference().getReferredINode(), snapshot,
- prior, collectedBlocks, removedINodes);
+ prior, collectedBlocks, removedINodes, removedUCFiles);
}
} else if (inode.isFile()) {
- inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
+ inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes,
+ removedUCFiles);
} else if (inode.isDirectory()) {
Map<INode, INode> excludedNodes = null;
INodeDirectory dir = inode.asDirectory();
@@ -445,7 +450,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorDiff.diff.destroyCreatedList(bsps, dir, collectedBlocks,
- removedINodes);
+ removedINodes, removedUCFiles);
}
}
for (INode child : inode.asDirectory().getChildrenList(prior)) {
@@ -453,7 +458,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
continue;
}
destroyDstSubtree(bsps, child, snapshot, prior, collectedBlocks,
- removedINodes);
+ removedINodes, removedUCFiles);
}
}
}
@@ -466,13 +471,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
* @param post The post snapshot.
* @param prior The id of the prior snapshot.
* @param collectedBlocks Used to collect blocks for later deletion.
+ * @param removedUCFiles INodes whose leases need to be released
* @return Quota usage update.
*/
private static QuotaCounts cleanDeletedINode(
- final BlockStoragePolicySuite bsps, INode inode,
- final int post, final int prior,
+ final BlockStoragePolicySuite bsps, INode inode, final int post, final int prior,
final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
QuotaCounts counts = new QuotaCounts.Builder().build();
Deque<INode> queue = new ArrayDeque<INode>();
queue.addLast(inode);
@@ -481,7 +486,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
if (topNode instanceof INodeReference.WithName) {
INodeReference.WithName wn = (INodeReference.WithName) topNode;
if (wn.getLastSnapshotId() >= post) {
- wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
+ wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes,
+ removedUCFiles);
}
// For DstReference node, since the node is not in the created list of
// prior, we should treat it as regular file/dir
@@ -500,7 +506,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorChildrenDiff = priorDiff.getChildrenDiff();
counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
- collectedBlocks, removedINodes));
+ collectedBlocks, removedINodes, removedUCFiles));
}
}
@@ -631,7 +637,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
}
public void clear(BlockStoragePolicySuite bsps, INodeDirectory currentINode,
- final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+ final BlocksMapUpdateInfo collectedBlocks, final List<INode>
+ removedINodes, final List<Long> removedUCFiles) {
// destroy its diff list
for (DirectoryDiff diff : diffs) {
diff.destroyDiffAndCollectBlocks(bsps, currentINode, collectedBlocks,
@@ -721,10 +728,10 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
}
}
- public QuotaCounts cleanDirectory(final BlockStoragePolicySuite bsps, final INodeDirectory currentINode,
- final int snapshot, int prior,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) {
+ public QuotaCounts cleanDirectory(
+ final BlockStoragePolicySuite bsps, final INodeDirectory currentINode,
+ final int snapshot, int prior, final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, List<Long> removedUCFiles) {
QuotaCounts counts = new QuotaCounts.Builder().build();
Map<INode, INode> priorCreated = null;
Map<INode, INode> priorDeleted = null;
@@ -734,10 +741,10 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
DirectoryDiff lastDiff = diffs.getLast();
if (lastDiff != null) {
counts.add(lastDiff.diff.destroyCreatedList(bsps, currentINode,
- collectedBlocks, removedINodes));
+ collectedBlocks, removedINodes, removedUCFiles));
}
counts.add(currentINode.cleanSubtreeRecursively(bsps, snapshot, prior,
- collectedBlocks, removedINodes, priorDeleted));
+ collectedBlocks, removedINodes, removedUCFiles, priorDeleted));
} else {
// update prior
prior = getDiffs().updatePrior(snapshot, prior);
@@ -756,7 +763,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
counts.add(getDiffs().deleteSnapshotDiff(bsps, snapshot, prior,
currentINode, collectedBlocks, removedINodes));
counts.add(currentINode.cleanSubtreeRecursively(bsps, snapshot, prior,
- collectedBlocks, removedINodes, priorDeleted));
+ collectedBlocks, removedINodes, removedUCFiles, priorDeleted));
// check priorDiff again since it may be created during the diff deletion
if (prior != Snapshot.NO_SNAPSHOT_ID) {
@@ -773,7 +780,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
ListType.CREATED)) {
if (priorCreated.containsKey(cNode)) {
counts.add(cNode.cleanSubtree(bsps, snapshot, Snapshot.NO_SNAPSHOT_ID,
- collectedBlocks, removedINodes));
+ collectedBlocks, removedINodes, removedUCFiles));
}
}
}
@@ -790,7 +797,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
ListType.DELETED)) {
if (priorDeleted == null || !priorDeleted.containsKey(dNode)) {
counts.add(cleanDeletedINode(bsps, dNode, snapshot, prior,
- collectedBlocks, removedINodes));
+ collectedBlocks, removedINodes, removedUCFiles));
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index c4406a0..b42b745 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -213,7 +213,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
// check if everything is deleted.
if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
- file.destroyAndCollectBlocks(bsps, info, removedINodes);
+ file.destroyAndCollectBlocks(bsps, info, removedINodes, null);
return;
}
// find max file size.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 802d64a..27d2986 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -228,7 +228,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
/**
* Delete a snapshot for a snapshottable directory
* @param snapshotName Name of the snapshot to be deleted
- * @param collectedBlocks Used to collect information to update blocksMap
+ * @param collectedBlocks Used to collect information to update blocksMap
* @throws IOException
*/
public void deleteSnapshot(final INodesInPath iip, final String snapshotName,
@@ -266,7 +266,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
public int getNumSnapshots() {
return numSnapshots.get();
}
-
+
void setNumSnapshots(int num) {
numSnapshots.set(num);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index 90dc0a7..985f43e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -59,8 +59,8 @@ import org.mockito.Mockito;
public class TestLease {
static boolean hasLease(MiniDFSCluster cluster, Path src) {
- return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()
- ).getLeaseByPath(src.toString()) != null;
+ return NameNodeAdapter.getLeaseForPath(cluster.getNameNode(),
+ src.toString()) != null;
}
static int leaseCount(MiniDFSCluster cluster) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12fdc447/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 2540834..4ca5eda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -135,8 +135,19 @@ public class NameNodeAdapter {
namesystem.leaseManager.triggerMonitorCheckNow();
}
+ public static Lease getLeaseForPath(NameNode nn, String path) {
+ final FSNamesystem fsn = nn.getNamesystem();
+ INode inode;
+ try {
+ inode = fsn.getFSDirectory().getINode(path, false);
+ } catch (UnresolvedLinkException e) {
+ throw new RuntimeException("Lease manager should not support symlinks");
+ }
+ return inode == null ? null : fsn.leaseManager.getLease((INodeFile) inode);
+ }
+
public static String getLeaseHolderForPath(NameNode namenode, String path) {
- Lease l = namenode.getNamesystem().leaseManager.getLeaseByPath(path);
+ Lease l = getLeaseForPath(namenode, path);
return l == null? null: l.getHolder();
}
@@ -145,12 +156,8 @@ public class NameNodeAdapter {
* or -1 in the case that the lease doesn't exist.
*/
public static long getLeaseRenewalTime(NameNode nn, String path) {
- LeaseManager lm = nn.getNamesystem().leaseManager;
- Lease l = lm.getLeaseByPath(path);
- if (l == null) {
- return -1;
- }
- return l.getLastUpdate();
+ Lease l = getLeaseForPath(nn, path);
+ return l == null ? -1 : l.getLastUpdate();
}
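[Editor's note] On the test side, path-based lookups now go through a resolve-then-query helper: getLeaseForPath resolves the path to its INode and asks the LeaseManager for that file's lease. A short usage sketch, assuming a running MiniDFSCluster named cluster (the path is illustrative):

    NameNode nn = cluster.getNameNode();
    boolean leased = NameNodeAdapter.getLeaseForPath(nn, "/user/foo/bar") != null;
    long renewedAt = NameNodeAdapter.getLeaseRenewalTime(nn, "/user/foo/bar");
    // renewedAt is -1 when no lease exists for the path.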
/**