Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/12/11 21:10:13 UTC
svn commit: r1420375 [1/3] - in
/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project:
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/
hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/ hadoop-hdfs/
hadoop-hdfs/src/contrib...
Author: suresh
Date: Tue Dec 11 20:08:00 2012
New Revision: 1420375
URL: http://svn.apache.org/viewvc?rev=1420375&view=rev
Log:
Merging trunk to branch-trunk-win
Added:
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuditLogger.java
- copied unchanged from r1420366, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuditLogger.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
- copied unchanged from r1420366, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java
- copied unchanged from r1420366, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
- copied unchanged from r1420366, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
Modified:
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1415787-1420366
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java Tue Dec 11 20:08:00 2012
@@ -53,10 +53,9 @@ public class UserProvider extends Abstra
public String parseParam(String str) {
if (str != null) {
int len = str.length();
- if (len < 1 || len > 31) {
+ if (len < 1) {
throw new IllegalArgumentException(MessageFormat.format(
- "Parameter [{0}], invalid value [{1}], it's length must be between 1 and 31",
- getName(), str));
+ "Parameter [{0}], it's length must be at least 1", getName()));
}
}
return super.parseParam(str);
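The change drops the 31-character ceiling so that only empty usernames are rejected (HDFS-4236 in CHANGES.txt below removes the artificial limit). A minimal standalone sketch of the relaxed check; the class name here is illustrative and only the validation logic mirrors the diff, with usage matching the TestUserProvider tests that follow:

    import java.text.MessageFormat;

    public class ParseParamSketch {
      // Illustrative rewrite of the relaxed check in UserProvider.UserParam.
      static String parseParam(String name, String str) {
        if (str != null && str.length() < 1) {
          throw new IllegalArgumentException(MessageFormat.format(
              "Parameter [{0}], its length must be at least 1", name));
        }
        return str;
      }

      public static void main(String[] args) {
        // 32 characters: rejected before this change, accepted after it.
        System.out.println(parseParam("username",
            "a123456789012345678901234567890x"));
      }
    }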
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java Tue Dec 11 20:08:00 2012
@@ -110,13 +110,6 @@ public class TestUserProvider {
@Test
@TestException(exception = IllegalArgumentException.class)
- public void userNameTooLong() {
- UserProvider.UserParam userParam = new UserProvider.UserParam("username");
- userParam.parseParam("a123456789012345678901234567890x");
- }
-
- @Test
- @TestException(exception = IllegalArgumentException.class)
public void userNameInvalidStart() {
UserProvider.UserParam userParam = new UserProvider.UserParam("username");
userParam.parseParam("1x");
@@ -136,12 +129,6 @@ public class TestUserProvider {
}
@Test
- public void userNameMaxLength() {
- UserProvider.UserParam userParam = new UserProvider.UserParam("username");
- assertNotNull(userParam.parseParam("a123456789012345678901234567890"));
- }
-
- @Test
public void userNameValidDollarSign() {
UserProvider.UserParam userParam = new UserProvider.UserParam("username");
assertNotNull(userParam.parseParam("a$"));
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Dec 11 20:08:00 2012
@@ -92,17 +92,12 @@ Trunk (Unreleased)
HDFS-3040. TestMulitipleNNDataBlockScanner is misspelled. (Madhukara Phatak
via atm)
- HDFS-3049. During the normal NN startup process, fall back on a different
- edit log if we see one that is corrupt (Colin Patrick McCabe via todd)
-
HDFS-3478. Test quotas with Long.Max_Value. (Sujay Rau via eli)
HDFS-3498. Support replica removal in BlockPlacementPolicy and make
BlockPlacementPolicyDefault extensible for reusing code in subclasses.
(Junping Du via szetszwo)
- HDFS-3571. Allow EditLogFileInputStream to read from a remote URL (todd)
-
HDFS-3510. Editlog pre-allocation is performed prior to writing edits
to avoid partial edits case disk out of space.(Colin McCabe via suresh)
@@ -146,8 +141,6 @@ Trunk (Unreleased)
HDFS-4052. BlockManager#invalidateWork should print log outside the lock.
(Jing Zhao via suresh)
- HDFS-4110. Refine a log printed in JNStorage. (Liang Xie via suresh)
-
HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable
returning more than INode array. (Jing Zhao via suresh)
@@ -160,10 +153,6 @@ Trunk (Unreleased)
HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in
INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo)
- HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
-
- HDFS-3935. Add JournalNode to the start/stop scripts (Andy Isaacson via todd)
-
HDFS-4206. Change the fields in INode and its subclasses to private.
(szetszwo)
@@ -176,6 +165,11 @@ Trunk (Unreleased)
HDFS-4209. Clean up the addNode/addChild/addChildNoQuotaCheck methods in
FSDirectory and INodeDirectory. (szetszwo)
+ HDFS-3358. Specify explicitly that the NN UI status total is talking
+ of persistent objects on heap. (harsh)
+
+ HDFS-4234. Use generic code for choosing datanode in Balancer. (szetszwo)
+
OPTIMIZATIONS
BUG FIXES
@@ -268,107 +262,12 @@ Trunk (Unreleased)
HDFS-4105. The SPNEGO user for secondary namenode should use the web
keytab. (Arpit Gupta via jitendra)
- BREAKDOWN OF HDFS-3077 SUBTASKS
+ HDFS-4240. For nodegroup-aware block placement, when a node is excluded,
+ the nodes in the same nodegroup should also be excluded. (Junping Du
+ via szetszwo)
- HDFS-3077. Quorum-based protocol for reading and writing edit logs.
- (todd, Brandon Li, and Hari Mankude via todd)
-
- HDFS-3694. Fix getEditLogManifest to fetch httpPort if necessary (todd)
-
- HDFS-3692. Support purgeEditLogs() call to remotely purge logs on JNs
- (todd)
-
- HDFS-3693. JNStorage should read its storage info even before a writer
- becomes active (todd)
-
- HDFS-3725. Fix QJM startup when individual JNs have gaps (todd)
-
- HDFS-3741. Exhaustive failure injection test for skipped RPCs (todd)
-
- HDFS-3773. TestNNWithQJM fails after HDFS-3741. (atm)
-
- HDFS-3793. Implement genericized format() in QJM (todd)
-
- HDFS-3795. QJM: validate journal dir at startup (todd)
-
- HDFS-3798. Avoid throwing NPE when finalizeSegment() is called on invalid
- segment (todd)
-
- HDFS-3799. QJM: handle empty log segments during recovery (todd)
-
- HDFS-3797. QJM: add segment txid as a parameter to journal() RPC (todd)
-
- HDFS-3800. improvements to QJM fault testing (todd)
-
- HDFS-3823. QJM: TestQJMWithFaults fails occasionally because of missed
- setting of HTTP port. (todd and atm)
-
- HDFS-3826. QJM: Some trivial logging / exception text improvements. (todd
- and atm)
-
- HDFS-3839. QJM: hadoop-daemon.sh should be updated to accept "journalnode"
- (eli)
-
- HDFS-3845. Fixes for edge cases in QJM recovery protocol (todd)
-
- HDFS-3877. QJM: Provide defaults for dfs.journalnode.*address (eli)
-
- HDFS-3863. Track last "committed" txid in QJM (todd)
-
- HDFS-3869. Expose non-file journal manager details in web UI (todd)
-
- HDFS-3884. Journal format() should reset cached values (todd)
-
- HDFS-3870. Add metrics to JournalNode (todd)
-
- HDFS-3891. Make selectInputStreams throw IOE instead of RTE (todd)
-
- HDFS-3726. If a logger misses an RPC, don't retry that logger until next
- segment (todd)
-
- HDFS-3893. QJM: Make QJM work with security enabled. (atm)
-
- HDFS-3897. QJM: TestBlockToken fails after HDFS-3893. (atm)
-
- HDFS-3898. QJM: enable TCP_NODELAY for IPC (todd)
-
- HDFS-3885. QJM: optimize log sync when JN is lagging behind (todd)
-
- HDFS-3900. QJM: avoid validating log segments on log rolls (todd)
-
- HDFS-3901. QJM: send 'heartbeat' messages to JNs even when they are
- out-of-sync (todd)
-
- HDFS-3899. QJM: Add client-side metrics (todd)
-
- HDFS-3914. QJM: acceptRecovery should abort current segment (todd)
-
- HDFS-3915. QJM: Failover fails with auth error in secure cluster (todd)
-
- HDFS-3906. QJM: quorum timeout on failover with large log segment (todd)
-
- HDFS-3840. JournalNodes log JournalNotFormattedException backtrace error
- before being formatted (todd)
-
- HDFS-3894. QJM: testRecoverAfterDoubleFailures can be flaky due to IPC
- client caching (todd)
-
- HDFS-3926. QJM: Add user documentation for QJM. (atm)
-
- HDFS-3943. QJM: remove currently-unused md5sum field (todd)
-
- HDFS-3950. QJM: misc TODO cleanup, improved log messages, etc. (todd)
-
- HDFS-3955. QJM: Make acceptRecovery() atomic. (todd)
-
- HDFS-3956. QJM: purge temporary files when no longer within retention
- period (todd)
-
- HDFS-4004. TestJournalNode#testJournal fails because of test case execution
- order (Chao Shi via todd)
-
- HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
- (Chao Shi via todd)
+ HDFS-4260. Fix HDFS tests to set test dir to a valid HDFS path as opposed
+ to the local build path (Chris Nauroth via Sanjay)
Release 2.0.3-alpha - Unreleased
@@ -389,6 +288,9 @@ Release 2.0.3-alpha - Unreleased
HDFS-4155. libhdfs implementation of hsync API (Liang Xie via todd)
+ HDFS-4213. Add an API to hsync for updating the last block length at the
+ namenode. (Jing Zhao via szetszwo)
+
IMPROVEMENTS
HDFS-3925. Prettify PipelineAck#toString() for printing to a log
@@ -490,6 +392,27 @@ Release 2.0.3-alpha - Unreleased
HDFS-4214. OfflineEditsViewer should print out the offset at which it
encountered an error. (Colin Patrick McCabe via atm)
+ HDFS-4199. Provide test for HdfsVolumeId. (Ivan A. Veselovsky via atm)
+
+ HDFS-3049. During the normal NN startup process, fall back on a different
+ edit log if we see one that is corrupt (Colin Patrick McCabe via todd)
+
+ HDFS-3571. Allow EditLogFileInputStream to read from a remote URL (todd)
+
+ HDFS-4110. Refine a log printed in JNStorage. (Liang Xie via suresh)
+
+ HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
+
+ HDFS-3935. Add JournalNode to the start/stop scripts (Andy Isaacson via todd)
+
+ HDFS-4268. Remove redundant enum NNHAStatusHeartbeat.State. (shv)
+
+ HDFS-3680. Allow customized audit logging in HDFS FSNamesystem. (Marcelo
+ Vanzin via atm)
+
+ HDFS-4130. BKJM: The reading for editlog at NN starting using bkjm is not efficient.
+ (Han Xiao via umamahesh)
+
OPTIMIZATIONS
BUG FIXES
@@ -643,6 +566,142 @@ Release 2.0.3-alpha - Unreleased
of it is undefined after the iteration or modifications of the map.
(szetszwo)
+ HDFS-4231. BackupNode: Introduce BackupState. (shv)
+
+ HDFS-4243. When replacing an INodeDirectory, the parent pointers of the
+ children of the child have to be updated to the new child. (Jing Zhao
+ via szetszwo)
+
+ HDFS-4238. Standby namenode should not do purging of shared
+ storage edits. (todd)
+
+ HDFS-4282. TestEditLog.testFuzzSequences FAILED in all pre-commit test
+ (todd)
+
+ HDFS-4236. Remove artificial limit on username length introduced in
+ HDFS-4171. (tucu via suresh)
+
+ HDFS-4279. NameNode does not initialize generic conf keys when started
+ with -recover. (Colin Patrick McCabe via atm)
+
+ HDFS-4291. edit log unit tests leave stray test_edit_log_file around
+ (Colin Patrick McCabe via todd)
+
+ HDFS-4292. Sanity check not correct in RemoteBlockReader2.newBlockReader
+ (Binglin Chang via todd)
+
+ HDFS-4295. Using port 1023 should be valid when starting Secure DataNode
+ (Stephen Chu via todd)
+
+ HDFS-4294. Backwards compatibility is not maintained for TestVolumeId.
+ (Ivan A. Veselovsky and Robert Parker via atm)
+
+ HDFS-2264. NamenodeProtocol has the wrong value for clientPrincipal in
+ KerberosInfo annotation. (atm)
+
+ BREAKDOWN OF HDFS-3077 SUBTASKS
+
+ HDFS-3077. Quorum-based protocol for reading and writing edit logs.
+ (todd, Brandon Li, and Hari Mankude via todd)
+
+ HDFS-3694. Fix getEditLogManifest to fetch httpPort if necessary (todd)
+
+ HDFS-3692. Support purgeEditLogs() call to remotely purge logs on JNs
+ (todd)
+
+ HDFS-3693. JNStorage should read its storage info even before a writer
+ becomes active (todd)
+
+ HDFS-3725. Fix QJM startup when individual JNs have gaps (todd)
+
+ HDFS-3741. Exhaustive failure injection test for skipped RPCs (todd)
+
+ HDFS-3773. TestNNWithQJM fails after HDFS-3741. (atm)
+
+ HDFS-3793. Implement genericized format() in QJM (todd)
+
+ HDFS-3795. QJM: validate journal dir at startup (todd)
+
+ HDFS-3798. Avoid throwing NPE when finalizeSegment() is called on invalid
+ segment (todd)
+
+ HDFS-3799. QJM: handle empty log segments during recovery (todd)
+
+ HDFS-3797. QJM: add segment txid as a parameter to journal() RPC (todd)
+
+ HDFS-3800. improvements to QJM fault testing (todd)
+
+ HDFS-3823. QJM: TestQJMWithFaults fails occasionally because of missed
+ setting of HTTP port. (todd and atm)
+
+ HDFS-3826. QJM: Some trivial logging / exception text improvements. (todd
+ and atm)
+
+ HDFS-3839. QJM: hadoop-daemon.sh should be updated to accept "journalnode"
+ (eli)
+
+ HDFS-3845. Fixes for edge cases in QJM recovery protocol (todd)
+
+ HDFS-3877. QJM: Provide defaults for dfs.journalnode.*address (eli)
+
+ HDFS-3863. Track last "committed" txid in QJM (todd)
+
+ HDFS-3869. Expose non-file journal manager details in web UI (todd)
+
+ HDFS-3884. Journal format() should reset cached values (todd)
+
+ HDFS-3870. Add metrics to JournalNode (todd)
+
+ HDFS-3891. Make selectInputStreams throw IOE instead of RTE (todd)
+
+ HDFS-3726. If a logger misses an RPC, don't retry that logger until next
+ segment (todd)
+
+ HDFS-3893. QJM: Make QJM work with security enabled. (atm)
+
+ HDFS-3897. QJM: TestBlockToken fails after HDFS-3893. (atm)
+
+ HDFS-3898. QJM: enable TCP_NODELAY for IPC (todd)
+
+ HDFS-3885. QJM: optimize log sync when JN is lagging behind (todd)
+
+ HDFS-3900. QJM: avoid validating log segments on log rolls (todd)
+
+ HDFS-3901. QJM: send 'heartbeat' messages to JNs even when they are
+ out-of-sync (todd)
+
+ HDFS-3899. QJM: Add client-side metrics (todd)
+
+ HDFS-3914. QJM: acceptRecovery should abort current segment (todd)
+
+ HDFS-3915. QJM: Failover fails with auth error in secure cluster (todd)
+
+ HDFS-3906. QJM: quorum timeout on failover with large log segment (todd)
+
+ HDFS-3840. JournalNodes log JournalNotFormattedException backtrace error
+ before being formatted (todd)
+
+ HDFS-3894. QJM: testRecoverAfterDoubleFailures can be flaky due to IPC
+ client caching (todd)
+
+ HDFS-3926. QJM: Add user documentation for QJM. (atm)
+
+ HDFS-3943. QJM: remove currently-unused md5sum field (todd)
+
+ HDFS-3950. QJM: misc TODO cleanup, improved log messages, etc. (todd)
+
+ HDFS-3955. QJM: Make acceptRecovery() atomic. (todd)
+
+ HDFS-3956. QJM: purge temporary files when no longer within retention
+ period (todd)
+
+ HDFS-4004. TestJournalNode#testJournal fails because of test case execution
+ order (Chao Shi via todd)
+
+ HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
+ (Chao Shi via todd)
+
+
Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES
@@ -2032,6 +2091,11 @@ Release 0.23.6 - UNRELEASED
BUG FIXES
+ HDFS-4247. saveNamespace should be tolerant of dangling lease (daryn)
+
+ HDFS-4248. Renaming directories may incorrectly remove the paths in leases
+ under the tree. (daryn via szetszwo)
+
Release 0.23.5 - UNRELEASED
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java Tue Dec 11 20:08:00 2012
@@ -500,16 +500,18 @@ public class BookKeeperJournalManager im
}
}
- EditLogInputStream getInputStream(long fromTxId, boolean inProgressOk)
- throws IOException {
- for (EditLogLedgerMetadata l : getLedgerList(inProgressOk)) {
- long lastTxId = l.getLastTxId();
- if (l.isInProgress()) {
- lastTxId = recoverLastTxId(l, false);
- }
-
- if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
- try {
+ @Override
+ public void selectInputStreams(Collection<EditLogInputStream> streams,
+ long fromTxId, boolean inProgressOk) throws IOException {
+ List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(inProgressOk);
+ try {
+ BookKeeperEditLogInputStream elis = null;
+ for (EditLogLedgerMetadata l : currentLedgerList) {
+ long lastTxId = l.getLastTxId();
+ if (l.isInProgress()) {
+ lastTxId = recoverLastTxId(l, false);
+ }
+ if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
LedgerHandle h;
if (l.isInProgress()) { // we don't want to fence the current journal
h = bkc.openLedgerNoRecovery(l.getLedgerId(),
@@ -518,42 +520,22 @@ public class BookKeeperJournalManager im
h = bkc.openLedger(l.getLedgerId(), BookKeeper.DigestType.MAC,
digestpw.getBytes());
}
- BookKeeperEditLogInputStream s = new BookKeeperEditLogInputStream(h,
- l);
- s.skipTo(fromTxId);
- return s;
- } catch (BKException e) {
- throw new IOException("Could not open ledger for " + fromTxId, e);
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- throw new IOException("Interrupted opening ledger for "
- + fromTxId, ie);
+ elis = new BookKeeperEditLogInputStream(h, l);
+ elis.skipTo(fromTxId);
+ } else {
+ return;
}
+ streams.add(elis);
+ if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
+ return;
+ }
+ fromTxId = elis.getLastTxId() + 1;
}
- }
- return null;
- }
-
- @Override
- public void selectInputStreams(Collection<EditLogInputStream> streams,
- long fromTxId, boolean inProgressOk) {
- // NOTE: could probably be rewritten more efficiently
- while (true) {
- EditLogInputStream elis;
- try {
- elis = getInputStream(fromTxId, inProgressOk);
- } catch (IOException e) {
- LOG.error(e);
- return;
- }
- if (elis == null) {
- return;
- }
- streams.add(elis);
- if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
- return;
- }
- fromTxId = elis.getLastTxId() + 1;
+ } catch (BKException e) {
+ throw new IOException("Could not open ledger for " + fromTxId, e);
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ throw new IOException("Interrupted opening ledger for " + fromTxId, ie);
}
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java Tue Dec 11 20:08:00 2012
@@ -28,6 +28,7 @@ import org.mockito.Mockito;
import java.io.IOException;
import java.net.URI;
+import java.util.ArrayList;
import java.util.List;
import java.util.ArrayList;
import java.util.Random;
@@ -315,13 +316,13 @@ public class TestBookKeeperJournalManage
out.close();
bkjm.finalizeLogSegment(1, numTransactions);
-
- EditLogInputStream in = bkjm.getInputStream(1, true);
+ List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
+ bkjm.selectInputStreams(in, 1, true);
try {
assertEquals(numTransactions,
- FSEditLogTestUtil.countTransactionsInStream(in));
+ FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
} finally {
- in.close();
+ in.get(0).close();
}
}
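Since selectInputStreams can populate more than one stream when the requested range spans ledger segments, a caller summing transactions would iterate the whole collection. A rough fragment in the style of the test above, assuming bkjm is an initialized BookKeeperJournalManager:

    List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
    bkjm.selectInputStreams(streams, 1, true); // now throws IOException on BK errors
    long total = 0;
    try {
      for (EditLogInputStream elis : streams) {
        total += FSEditLogTestUtil.countTransactionsInStream(elis);
      }
    } finally {
      for (EditLogInputStream elis : streams) {
        elis.close();
      }
    }

Note that the refactored method surfaces BKException/InterruptedException as IOException instead of logging and returning, so callers see ledger-open failures directly.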
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1415787-1420366
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java Tue Dec 11 20:08:00 2012
@@ -27,26 +27,38 @@ import org.apache.hadoop.classification.
* HDFS-specific volume identifier which implements {@link VolumeId}. Can be
* used to differentiate between the data directories on a single datanode. This
* identifier is only unique on a per-datanode basis.
+ *
+ * Note that invalid IDs are represented by {@link VolumeId#INVALID_VOLUME_ID}.
*/
@InterfaceStability.Unstable
@InterfaceAudience.Public
public class HdfsVolumeId implements VolumeId {
-
+
private final byte[] id;
- private final boolean isValid;
- public HdfsVolumeId(byte[] id, boolean isValid) {
+ public HdfsVolumeId(byte[] id) {
+ if (id == null) {
+ throw new NullPointerException("A valid Id can only be constructed " +
+ "with a non-null byte array.");
+ }
this.id = id;
- this.isValid = isValid;
}
@Override
- public boolean isValid() {
- return isValid;
+ public final boolean isValid() {
+ return true;
}
@Override
public int compareTo(VolumeId arg0) {
+ if (arg0 == null) {
+ return 1;
+ }
+ if (!arg0.isValid()) {
+ // any valid ID is greater
+ // than any invalid ID:
+ return 1;
+ }
return hashCode() - arg0.hashCode();
}
@@ -63,8 +75,10 @@ public class HdfsVolumeId implements Vol
if (obj == this) {
return true;
}
-
HdfsVolumeId that = (HdfsVolumeId) obj;
+ // NB: if (!obj.isValid()) { return false; } check is not necessary
+ // because we have class identity checking above, and for this class
+ // isValid() is always true.
return new EqualsBuilder().append(this.id, that.id).isEquals();
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java Tue Dec 11 20:08:00 2012
@@ -29,6 +29,48 @@ import org.apache.hadoop.classification.
public interface VolumeId extends Comparable<VolumeId> {
/**
+ * Represents an invalid Volume ID (ID for unknown content).
+ */
+ public static final VolumeId INVALID_VOLUME_ID = new VolumeId() {
+
+ @Override
+ public int compareTo(VolumeId arg0) {
+ // This object is equal only to itself;
+ // It is greater than null, and
+ // is always less than any other VolumeId:
+ if (arg0 == null) {
+ return 1;
+ }
+ if (arg0 == this) {
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ // this object is equal only to itself:
+ return (obj == this);
+ }
+
+ @Override
+ public int hashCode() {
+ return Integer.MIN_VALUE;
+ }
+
+ @Override
+ public boolean isValid() {
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "Invalid VolumeId";
+ }
+ };
+
+ /**
* Indicates if the disk identifier is valid. Invalid identifiers indicate
* that the block was not present, or the location could otherwise not be
* determined.
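Together with the always-valid HdfsVolumeId above, the sentinel yields an ordering in which any invalid ID sorts before any valid one. A small sketch of that contract (the byte value is arbitrary):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.fs.HdfsVolumeId;
    import org.apache.hadoop.fs.VolumeId;

    public class VolumeIdOrderSketch {
      public static void main(String[] args) {
        List<VolumeId> ids = new ArrayList<VolumeId>();
        ids.add(new HdfsVolumeId(new byte[] { 7 })); // valid; null is now rejected
        ids.add(VolumeId.INVALID_VOLUME_ID);         // shared invalid sentinel
        Collections.sort(ids);
        System.out.println(ids.get(0).isValid());    // false: invalid sorts first
        System.out.println(ids.get(1).isValid());    // true
      }
    }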
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java Tue Dec 11 20:08:00 2012
@@ -202,7 +202,7 @@ class BlockStorageLocationUtil {
ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
// Start off all IDs as invalid, fill it in later with results from RPCs
for (int i = 0; i < b.getLocations().length; i++) {
- l.add(new HdfsVolumeId(null, false));
+ l.add(VolumeId.INVALID_VOLUME_ID);
}
blockVolumeIds.put(b, l);
}
@@ -236,7 +236,7 @@ class BlockStorageLocationUtil {
// Get the VolumeId by indexing into the list of VolumeIds
// provided by the datanode
byte[] volumeId = metaVolumeIds.get(volumeIndex);
- HdfsVolumeId id = new HdfsVolumeId(volumeId, true);
+ HdfsVolumeId id = new HdfsVolumeId(volumeId);
// Find out which index we are in the LocatedBlock's replicas
LocatedBlock locBlock = extBlockToLocBlock.get(extBlock);
DatanodeInfo[] dnInfos = locBlock.getLocations();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Tue Dec 11 20:08:00 2012
@@ -246,6 +246,8 @@ public class DFSConfigKeys extends Commo
public static final String DFS_HOSTS = "dfs.hosts";
public static final String DFS_HOSTS_EXCLUDE = "dfs.hosts.exclude";
public static final String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
+ public static final String DFS_NAMENODE_AUDIT_LOGGERS_KEY = "dfs.namenode.audit.loggers";
+ public static final String DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME = "default";
// Much code in hdfs is not yet updated to use these keys.
public static final String DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
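The new key backs the pluggable audit logging added by HDFS-3680 (see CHANGES.txt above). A hedged sketch of setting it programmatically; "default" keeps the built-in logger, and per HDFS-3680 a comma-separated list of AuditLogger implementation class names may be supplied instead:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class AuditLoggerConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Keep the built-in audit logger; custom AuditLogger class names
        // are also accepted here per HDFS-3680.
        conf.set(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY,
            DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME);
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY));
      }
    }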
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Tue Dec 11 20:08:00 2012
@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -1487,9 +1488,14 @@ public class DFSOutputStream extends FSO
*/
@Override
public void hflush() throws IOException {
- flushOrSync(false);
+ flushOrSync(false, EnumSet.noneOf(SyncFlag.class));
}
+ @Override
+ public void hsync() throws IOException {
+ hsync(EnumSet.noneOf(SyncFlag.class));
+ }
+
/**
* The expected semantics is all data have flushed out to all replicas
* and all replicas have done posix fsync equivalent - ie the OS has
@@ -1498,17 +1504,35 @@ public class DFSOutputStream extends FSO
* Note that only the current block is flushed to the disk device.
* To guarantee durable sync across block boundaries the stream should
* be created with {@link CreateFlag#SYNC_BLOCK}.
+ *
+ * @param syncFlags
+ * Indicate the semantic of the sync. Currently used to specify
+ * whether or not to update the block length in NameNode.
*/
- @Override
- public void hsync() throws IOException {
- flushOrSync(true);
+ public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
+ flushOrSync(true, syncFlags);
}
- private void flushOrSync(boolean isSync) throws IOException {
+ /**
+ * Flush/Sync buffered data to DataNodes.
+ *
+ * @param isSync
+ * Whether or not to require all replicas to flush data to the disk
+ * device
+ * @param syncFlags
+ * Indicate extra detailed semantic of the flush/sync. Currently
+ * mainly used to specify whether or not to update the file length in
+ * the NameNode
+ * @throws IOException
+ */
+ private void flushOrSync(boolean isSync, EnumSet<SyncFlag> syncFlags)
+ throws IOException {
dfsClient.checkOpen();
isClosed();
try {
long toWaitFor;
+ long lastBlockLength = -1L;
+ boolean updateLength = syncFlags.contains(SyncFlag.UPDATE_LENGTH);
synchronized (this) {
/* Record current blockOffset. This might be changed inside
* flushBuffer() where a partial checksum chunk might be flushed.
@@ -1572,13 +1596,20 @@ public class DFSOutputStream extends FSO
} // end synchronized
waitForAckedSeqno(toWaitFor);
-
- // If any new blocks were allocated since the last flush,
- // then persist block locations on namenode.
- //
- if (persistBlocks.getAndSet(false)) {
+
+ if (updateLength) {
+ synchronized (this) {
+ if (streamer != null && streamer.block != null) {
+ lastBlockLength = streamer.block.getNumBytes();
+ }
+ }
+ }
+ // If 1) any new blocks were allocated since the last flush, or 2) to
+ // update length in NN is required, then persist block locations on
+ // namenode.
+ if (persistBlocks.getAndSet(false) || updateLength) {
try {
- dfsClient.namenode.fsync(src, dfsClient.clientName);
+ dfsClient.namenode.fsync(src, dfsClient.clientName, lastBlockLength);
} catch (IOException ioe) {
DFSClient.LOG.warn("Unable to persist blocks in hflush for " + src, ioe);
// If we got an error here, it might be because some other thread called
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java Tue Dec 11 20:08:00 2012
@@ -404,7 +404,7 @@ public class RemoteBlockReader extends F
long firstChunkOffset = checksumInfo.getChunkOffset();
if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
- firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
+ firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
throw new IOException("BlockReader: error in first chunk offset (" +
firstChunkOffset + ") startOffset is " +
startOffset + " for file " + file);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java Tue Dec 11 20:08:00 2012
@@ -413,7 +413,7 @@ public class RemoteBlockReader2 impleme
long firstChunkOffset = checksumInfo.getChunkOffset();
if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
- firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
+ firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
throw new IOException("BlockReader: error in first chunk offset (" +
firstChunkOffset + ") startOffset is " +
startOffset + " for file " + file);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java Tue Dec 11 20:08:00 2012
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.client;
import java.io.IOException;
+import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -56,4 +57,24 @@ public class HdfsDataOutputStream extend
public synchronized int getCurrentBlockReplication() throws IOException {
return ((DFSOutputStream)getWrappedStream()).getCurrentBlockReplication();
}
+
+ /**
+ * Sync buffered data to DataNodes (flush to disk devices).
+ *
+ * @param syncFlags
+ * Indicate the detailed semantic and actions of the hsync.
+ * @throws IOException
+ * @see FSDataOutputStream#hsync()
+ */
+ public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
+ ((DFSOutputStream) getWrappedStream()).hsync(syncFlags);
+ }
+
+ public static enum SyncFlag {
+ /**
+ * When doing sync to DataNodes, also update the metadata (block
+ * length) in the NameNode
+ */
+ UPDATE_LENGTH;
+ }
}
\ No newline at end of file
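A hedged usage sketch of the new flag (HDFS-4213): the path is illustrative, and the cast assumes the stream comes from a DistributedFileSystem so the HdfsDataOutputStream wrapper applies:

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    public class HsyncUpdateLengthSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        HdfsDataOutputStream out =
            (HdfsDataOutputStream) fs.create(new Path("/tmp/hsync-demo"));
        out.write("some bytes".getBytes());
        // Flush replicas to disk and also report the last block's length to
        // the NameNode, so readers observe an up-to-date file length.
        out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        out.close();
      }
    }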
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Tue Dec 11 20:08:00 2012
@@ -815,14 +815,15 @@ public interface ClientProtocol {
* The file must be currently open for writing.
* @param src The string representation of the path
* @param client The string representation of the client
- *
+ * @param lastBlockLength The length of the last block (under construction)
+ * to be reported to NameNode
* @throws AccessControlException permission denied
* @throws FileNotFoundException file <code>src</code> is not found
* @throws UnresolvedLinkException if <code>src</code> contains a symlink.
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public void fsync(String src, String client)
+ public void fsync(String src, String client, long lastBlockLength)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Tue Dec 11 20:08:00 2012
@@ -688,7 +688,7 @@ public class ClientNamenodeProtocolServe
public FsyncResponseProto fsync(RpcController controller,
FsyncRequestProto req) throws ServiceException {
try {
- server.fsync(req.getSrc(), req.getClient());
+ server.fsync(req.getSrc(), req.getClient(), req.getLastBlockLength());
return VOID_FSYNC_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Tue Dec 11 20:08:00 2012
@@ -659,12 +659,11 @@ public class ClientNamenodeProtocolTrans
}
@Override
- public void fsync(String src, String client) throws AccessControlException,
- FileNotFoundException, UnresolvedLinkException, IOException {
- FsyncRequestProto req = FsyncRequestProto.newBuilder()
- .setSrc(src)
- .setClient(client)
- .build();
+ public void fsync(String src, String client, long lastBlockLength)
+ throws AccessControlException, FileNotFoundException,
+ UnresolvedLinkException, IOException {
+ FsyncRequestProto req = FsyncRequestProto.newBuilder().setSrc(src)
+ .setClient(client).setLastBlockLength(lastBlockLength).build();
try {
rpcProxy.fsync(null, req);
} catch (ServiceException e) {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Tue Dec 11 20:08:00 2012
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.ContentSumma
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -1232,9 +1233,9 @@ public class PBHelper {
if (s == null) return null;
switch (s.getState()) {
case ACTIVE:
- return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.ACTIVE, s.getTxid());
+ return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
case STANDBY:
- return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.STANDBY, s.getTxid());
+ return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
default:
throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + s.getState());
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Tue Dec 11 20:08:00 2012
@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
@@ -557,7 +558,7 @@ public class Balancer {
}
/** Decide if still need to move more bytes */
- protected boolean isMoveQuotaFull() {
+ protected boolean hasSpaceForScheduling() {
return scheduledSize<maxSize2Move;
}
@@ -922,23 +923,53 @@ public class Balancer {
LOG.info(nodes.size() + " " + name + ": " + nodes);
}
- /* Decide all <source, target> pairs and
+ /** A matcher interface for matching nodes. */
+ private interface Matcher {
+ /** Given the cluster topology, does the left node match the right node? */
+ boolean match(NetworkTopology cluster, Node left, Node right);
+ }
+
+ /** Match datanodes in the same node group. */
+ static final Matcher SAME_NODE_GROUP = new Matcher() {
+ @Override
+ public boolean match(NetworkTopology cluster, Node left, Node right) {
+ return cluster.isOnSameNodeGroup(left, right);
+ }
+ };
+
+ /** Match datanodes in the same rack. */
+ static final Matcher SAME_RACK = new Matcher() {
+ @Override
+ public boolean match(NetworkTopology cluster, Node left, Node right) {
+ return cluster.isOnSameRack(left, right);
+ }
+ };
+
+ /** Match any datanode with any other datanode. */
+ static final Matcher ANY_OTHER = new Matcher() {
+ @Override
+ public boolean match(NetworkTopology cluster, Node left, Node right) {
+ return left != right;
+ }
+ };
+
+ /**
+ * Decide all <source, target> pairs and
* the number of bytes to move from a source to a target
* Maximum bytes to be moved per node is
* Min(1 Band worth of bytes, MAX_SIZE_TO_MOVE).
* Return total number of bytes to move in this iteration
*/
private long chooseNodes() {
- // First, match nodes on the same node group if cluster has nodegroup
- // awareness
+ // First, match nodes on the same node group if cluster is node group aware
if (cluster.isNodeGroupAware()) {
- chooseNodesOnSameNodeGroup();
+ chooseNodes(SAME_NODE_GROUP);
}
// Then, match nodes on the same rack
- chooseNodes(true);
- // At last, match nodes on different racks
- chooseNodes(false);
+ chooseNodes(SAME_RACK);
+ // At last, match all remaining nodes
+ chooseNodes(ANY_OTHER);
assert (datanodes.size() >= sources.size()+targets.size())
: "Mismatched number of datanodes (" +
@@ -952,57 +983,55 @@ public class Balancer {
}
return bytesToMove;
}
-
- /**
- * Decide all <source, target> pairs where source and target are
- * on the same NodeGroup
- */
- private void chooseNodesOnSameNodeGroup() {
+ /** Decide all <source, target> pairs according to the matcher. */
+ private void chooseNodes(final Matcher matcher) {
/* first step: match each overUtilized datanode (source) to
- * one or more underUtilized datanodes within same NodeGroup(targets).
+ * one or more underUtilized datanodes (targets).
*/
- chooseOnSameNodeGroup(overUtilizedDatanodes, underUtilizedDatanodes);
-
- /* match each remaining overutilized datanode (source) to below average
- * utilized datanodes within the same NodeGroup(targets).
+ chooseDatanodes(overUtilizedDatanodes, underUtilizedDatanodes, matcher);
+
+ /* match each remaining overutilized datanode (source) to
+ * below average utilized datanodes (targets).
* Note that only overutilized datanodes that haven't had their max bytes
* to move satisfied in step 1 are selected.
*/
- chooseOnSameNodeGroup(overUtilizedDatanodes, belowAvgUtilizedDatanodes);
+ chooseDatanodes(overUtilizedDatanodes, belowAvgUtilizedDatanodes, matcher);
- /* match each remaining underutilized datanode to above average utilized
- * datanodes within the same NodeGroup.
+ /* match each remaining underutilized datanode (target) to
+ * above average utilized datanodes (source).
* Note that only underutilized datanodes that have not had their max bytes
* to move satisfied in step 1 are selected.
*/
- chooseOnSameNodeGroup(underUtilizedDatanodes, aboveAvgUtilizedDatanodes);
+ chooseDatanodes(underUtilizedDatanodes, aboveAvgUtilizedDatanodes, matcher);
}
-
+
/**
- * Match two sets of nodes within the same NodeGroup, one should be source
- * nodes (utilization > Avg), and the other should be destination nodes
- * (utilization < Avg).
- * @param datanodes
- * @param candidates
+ * For each datanode, choose matching nodes from the candidates. Either the
+ * datanodes or the candidates are source nodes with (utilization > Avg), and
+ * the others are target nodes with (utilization < Avg).
*/
private <D extends BalancerDatanode, C extends BalancerDatanode> void
- chooseOnSameNodeGroup(Collection<D> datanodes, Collection<C> candidates) {
+ chooseDatanodes(Collection<D> datanodes, Collection<C> candidates,
+ Matcher matcher) {
for (Iterator<D> i = datanodes.iterator(); i.hasNext();) {
final D datanode = i.next();
- for(; chooseOnSameNodeGroup(datanode, candidates.iterator()); );
- if (!datanode.isMoveQuotaFull()) {
+ for(; chooseForOneDatanode(datanode, candidates, matcher); );
+ if (!datanode.hasSpaceForScheduling()) {
i.remove();
}
}
}
-
+
/**
- * Match one datanode with a set of candidates nodes within the same NodeGroup.
+ * For the given datanode, choose a candidate and then schedule it.
+ * @return true if a candidate is chosen; false if no candidate is chosen.
*/
- private <T extends BalancerDatanode> boolean chooseOnSameNodeGroup(
- BalancerDatanode dn, Iterator<T> candidates) {
- final T chosen = chooseCandidateOnSameNodeGroup(dn, candidates);
+ private <C extends BalancerDatanode> boolean chooseForOneDatanode(
+ BalancerDatanode dn, Collection<C> candidates, Matcher matcher) {
+ final Iterator<C> i = candidates.iterator();
+ final C chosen = chooseCandidate(dn, i, matcher);
+
if (chosen == null) {
return false;
}
@@ -1011,8 +1040,8 @@ public class Balancer {
} else {
matchSourceWithTargetToMove((Source)chosen, dn);
}
- if (!chosen.isMoveQuotaFull()) {
- candidates.remove();
+ if (!chosen.hasSpaceForScheduling()) {
+ i.remove();
}
return true;
}
@@ -1029,19 +1058,15 @@ public class Balancer {
+source.datanode.getName() + " to " + target.datanode.getName());
}
- /** choose a datanode from <code>candidates</code> within the same NodeGroup
- * of <code>dn</code>.
- */
- private <T extends BalancerDatanode> T chooseCandidateOnSameNodeGroup(
- BalancerDatanode dn, Iterator<T> candidates) {
- if (dn.isMoveQuotaFull()) {
+ /** Choose a candidate for the given datanode. */
+ private <D extends BalancerDatanode, C extends BalancerDatanode>
+ C chooseCandidate(D dn, Iterator<C> candidates, Matcher matcher) {
+ if (dn.hasSpaceForScheduling()) {
for(; candidates.hasNext(); ) {
- final T c = candidates.next();
- if (!c.isMoveQuotaFull()) {
+ final C c = candidates.next();
+ if (!c.hasSpaceForScheduling()) {
candidates.remove();
- continue;
- }
- if (cluster.isOnSameNodeGroup(dn.getDatanode(), c.getDatanode())) {
+ } else if (matcher.match(cluster, dn.getDatanode(), c.getDatanode())) {
return c;
}
}
@@ -1049,148 +1074,6 @@ public class Balancer {
return null;
}
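Note how chooseCandidate() interleaves selection with pruning: a candidate with no scheduling space left is removed through the iterator the moment it is seen, so later passes never revisit it. A small stand-alone sketch of that iterate-and-prune idiom; the integer "capacity" values are illustrative stand-ins for BalancerDatanode state:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class PruneSketch {
      /**
       * Return the first candidate at least as large as minValue, removing
       * exhausted (non-positive) candidates from the list as a side effect.
       */
      static Integer chooseCandidate(List<Integer> candidates, int minValue) {
        for (Iterator<Integer> i = candidates.iterator(); i.hasNext();) {
          final Integer c = i.next();
          if (c <= 0) {
            i.remove();           // exhausted: prune so later passes skip it
          } else if (c >= minValue) {
            return c;             // first acceptable candidate wins
          }
        }
        return null;              // nothing matched in this pass
      }

      public static void main(String[] args) {
        List<Integer> candidates =
            new ArrayList<Integer>(Arrays.asList(0, 3, -1, 7));
        System.out.println(chooseCandidate(candidates, 5)); // 7
        System.out.println(candidates); // [3, 7]: 0 and -1 were pruned
      }
    }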
- /* if onRack is true, decide all <source, target> pairs
- * where source and target are on the same rack; Otherwise
- * decide all <source, target> pairs where source and target are
- * on different racks
- */
- private void chooseNodes(boolean onRack) {
- /* first step: match each overUtilized datanode (source) to
- * one or more underUtilized datanodes (targets).
- */
- chooseTargets(underUtilizedDatanodes, onRack);
-
- /* match each remaining overutilized datanode (source) to
- * below average utilized datanodes (targets).
- * Note only overutilized datanodes that haven't had that max bytes to move
- * satisfied in step 1 are selected
- */
- chooseTargets(belowAvgUtilizedDatanodes, onRack);
-
- /* match each remaining underutilized datanode (target) to
- * above average utilized datanodes (source).
- * Note only underutilized datanodes that have not had that max bytes to
- * move satisfied in step 1 are selected.
- */
- chooseSources(aboveAvgUtilizedDatanodes, onRack);
- }
-
- /* choose targets from the target candidate list for each over utilized
- * source datanode. OnRackTarget determines if the chosen target
- * should be on the same rack as the source
- */
- private void chooseTargets(
- Collection<BalancerDatanode> targetCandidates, boolean onRackTarget ) {
- for (Iterator<Source> srcIterator = overUtilizedDatanodes.iterator();
- srcIterator.hasNext();) {
- Source source = srcIterator.next();
- while (chooseTarget(source, targetCandidates.iterator(), onRackTarget)) {
- }
- if (!source.isMoveQuotaFull()) {
- srcIterator.remove();
- }
- }
- return;
- }
-
- /* choose sources from the source candidate list for each under utilized
- * target datanode. onRackSource determines if the chosen source
- * should be on the same rack as the target
- */
- private void chooseSources(
- Collection<Source> sourceCandidates, boolean onRackSource) {
- for (Iterator<BalancerDatanode> targetIterator =
- underUtilizedDatanodes.iterator(); targetIterator.hasNext();) {
- BalancerDatanode target = targetIterator.next();
- while (chooseSource(target, sourceCandidates.iterator(), onRackSource)) {
- }
- if (!target.isMoveQuotaFull()) {
- targetIterator.remove();
- }
- }
- return;
- }
-
- /* For the given source, choose targets from the target candidate list.
- * OnRackTarget determines if the chosen target
- * should be on the same rack as the source
- */
- private boolean chooseTarget(Source source,
- Iterator<BalancerDatanode> targetCandidates, boolean onRackTarget) {
- if (!source.isMoveQuotaFull()) {
- return false;
- }
- boolean foundTarget = false;
- BalancerDatanode target = null;
- while (!foundTarget && targetCandidates.hasNext()) {
- target = targetCandidates.next();
- if (!target.isMoveQuotaFull()) {
- targetCandidates.remove();
- continue;
- }
- if (onRackTarget) {
- // choose from on-rack nodes
- if (cluster.isOnSameRack(source.datanode, target.datanode)) {
- foundTarget = true;
- }
- } else {
- // choose from off-rack nodes
- if (!cluster.isOnSameRack(source.datanode, target.datanode)) {
- foundTarget = true;
- }
- }
- }
- if (foundTarget) {
- assert(target != null):"Choose a null target";
- matchSourceWithTargetToMove(source, target);
- if (!target.isMoveQuotaFull()) {
- targetCandidates.remove();
- }
- return true;
- }
- return false;
- }
-
- /* For the given target, choose sources from the source candidate list.
- * OnRackSource determines if the chosen source
- * should be on the same rack as the target
- */
- private boolean chooseSource(BalancerDatanode target,
- Iterator<Source> sourceCandidates, boolean onRackSource) {
- if (!target.isMoveQuotaFull()) {
- return false;
- }
- boolean foundSource = false;
- Source source = null;
- while (!foundSource && sourceCandidates.hasNext()) {
- source = sourceCandidates.next();
- if (!source.isMoveQuotaFull()) {
- sourceCandidates.remove();
- continue;
- }
- if (onRackSource) {
- // choose from on-rack nodes
- if ( cluster.isOnSameRack(source.getDatanode(), target.getDatanode())) {
- foundSource = true;
- }
- } else {
- // choose from off-rack nodes
- if (!cluster.isOnSameRack(source.datanode, target.datanode)) {
- foundSource = true;
- }
- }
- }
- if (foundSource) {
- assert(source != null):"Choose a null source";
- matchSourceWithTargetToMove(source, target);
- if ( !source.isMoveQuotaFull()) {
- sourceCandidates.remove();
- }
- return true;
- }
- return false;
- }
-
private static class BytesMoved {
private long bytesMoved = 0L;
private synchronized void inc( long bytes ) {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Tue Dec 11 20:08:00 2012
@@ -152,8 +152,9 @@ public class BlockPlacementPolicyDefault
List<DatanodeDescriptor> results =
new ArrayList<DatanodeDescriptor>(chosenNodes);
- for (Node node:chosenNodes) {
- excludedNodes.put(node, node);
+ for (DatanodeDescriptor node:chosenNodes) {
+ // add localMachine and related nodes to excludedNodes
+ addToExcludedNodes(node, excludedNodes);
adjustExcludedNodes(excludedNodes, node);
}
@@ -235,7 +236,7 @@ public class BlockPlacementPolicyDefault
+ totalReplicasExpected + "\n"
+ e.getMessage());
if (avoidStaleNodes) {
- // ecxludedNodes now has - initial excludedNodes, any nodes that were
+ // excludedNodes now has - initial excludedNodes, any nodes that were
// chosen and nodes that were tried but were not chosen because they
// were stale, decommissioned or for any other reason a node is not
// chosen for write. Retry again now not avoiding stale node
@@ -273,6 +274,8 @@ public class BlockPlacementPolicyDefault
if (isGoodTarget(localMachine, blocksize, maxNodesPerRack, false,
results, avoidStaleNodes)) {
results.add(localMachine);
+ // add localMachine and related nodes to excludedNode
+ addToExcludedNodes(localMachine, excludedNodes);
return localMachine;
}
}
@@ -281,7 +284,19 @@ public class BlockPlacementPolicyDefault
return chooseLocalRack(localMachine, excludedNodes, blocksize,
maxNodesPerRack, results, avoidStaleNodes);
}
-
+
+ /**
+ * Add <i>localMachine</i> and related nodes to <i>excludedNodes</i>
+ * for the next replica choice. A subclass may add more nodes within
+ * the same failure domain as localMachine.
+ * @return the number of newly excluded nodes
+ */
+ protected int addToExcludedNodes(DatanodeDescriptor localMachine,
+ HashMap<Node, Node> excludedNodes) {
+ Node node = excludedNodes.put(localMachine, localMachine);
+ return node == null ? 1 : 0;
+ }
+
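The return value above leans on HashMap.put(), which returns the previous mapping for the key, or null if the key was absent; counting null returns therefore counts only newly excluded nodes. A quick stand-alone check of the idiom:

    import java.util.HashMap;

    public class PutCountSketch {
      public static void main(String[] args) {
        HashMap<String, String> excluded = new HashMap<String, String>();
        int newlyExcluded = 0;
        for (String node : new String[] {"dn1", "dn2", "dn1"}) {
          // put() returns null only when the key was not already present,
          // so duplicates are not double-counted.
          if (excluded.put(node, node) == null) {
            newlyExcluded++;
          }
        }
        System.out.println(newlyExcluded); // 2: dn1 is counted once
      }
    }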
/* choose one node from the rack that <i>localMachine</i> is on.
* if no such node is available, choose one node from the rack where
* a second replica is on.
@@ -392,6 +407,8 @@ public class BlockPlacementPolicyDefault
if (isGoodTarget(chosenNode, blocksize,
maxNodesPerRack, results, avoidStaleNodes)) {
results.add(chosenNode);
+ // add chosenNode and related nodes to excludedNode
+ addToExcludedNodes(chosenNode, excludedNodes);
adjustExcludedNodes(excludedNodes, chosenNode);
return chosenNode;
} else {
@@ -441,6 +458,9 @@ public class BlockPlacementPolicyDefault
maxNodesPerRack, results, avoidStaleNodes)) {
numOfReplicas--;
results.add(chosenNode);
+ // add chosenNode and related nodes to excludedNode
+ int newExcludedNodes = addToExcludedNodes(chosenNode, excludedNodes);
+ numOfAvailableNodes -= newExcludedNodes;
adjustExcludedNodes(excludedNodes, chosenNode);
} else {
badTarget = true;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java Tue Dec 11 20:08:00 2012
@@ -240,6 +240,27 @@ public class BlockPlacementPolicyWithNod
String nodeGroupString = cur.getNetworkLocation();
return NetworkTopology.getFirstHalf(nodeGroupString);
}
+
+ /**
+ * Find other nodes in the same nodegroup as <i>localMachine</i> and add
+ * them to <i>excludedNodes</i>, since replicas should not be duplicated
+ * on nodes within the same nodegroup.
+ * @return the number of newly excluded nodes
+ */
+ protected int addToExcludedNodes(DatanodeDescriptor localMachine,
+ HashMap<Node, Node> excludedNodes) {
+ int countOfExcludedNodes = 0;
+ String nodeGroupScope = localMachine.getNetworkLocation();
+ List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope);
+ for (Node leafNode : leafNodes) {
+ Node node = excludedNodes.put(leafNode, leafNode);
+ if (node == null) {
+ // not an existing node in excludedNodes
+ countOfExcludedNodes++;
+ }
+ }
+ return countOfExcludedNodes;
+ }
/**
* Pick up replica node set for deleting replica as over-replicated.
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java Tue Dec 11 20:08:00 2012
@@ -26,6 +26,7 @@ import java.util.concurrent.CopyOnWriteA
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -411,7 +412,7 @@ class BPOfferService {
final long txid = nnHaState.getTxId();
final boolean nnClaimsActive =
- nnHaState.getState() == NNHAStatusHeartbeat.State.ACTIVE;
+ nnHaState.getState() == HAServiceState.ACTIVE;
final boolean bposThinksActive = bpServiceToActive == actor;
final boolean isMoreRecentClaim = txid > lastActiveClaimTxId;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java Tue Dec 11 20:08:00 2012
@@ -140,7 +140,7 @@ public class SecureDataNodeStarter imple
System.err.println("Successfully obtained privileged resources (streaming port = "
+ ss + " ) (http listener port = " + listener.getConnection() +")");
- if ((ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) &&
+ if ((ss.getLocalPort() > 1023 || listener.getPort() > 1023) &&
UserGroupInformation.isSecurityEnabled()) {
throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
}
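The boundary change matters for port 1023: on Unix the privileged range is 0-1023 inclusive, so the old >= 1023 test wrongly rejected a datanode bound to port 1023 even though binding it already required privilege. A one-line predicate capturing the corrected boundary:

    public class PortCheckSketch {
      /** Ports 0-1023 are privileged (root-only to bind) on Unix. */
      static boolean isPrivileged(int port) {
        return port <= 1023;
      }

      public static void main(String[] args) {
        System.out.println(isPrivileged(1023)); // true: still privileged
        System.out.println(isPrivileged(1024)); // false: first unprivileged port
      }
    }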
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Tue Dec 11 20:08:00 2012
@@ -24,6 +24,7 @@ import java.net.SocketTimeoutException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hdfs.protocolPB
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
@@ -414,14 +416,23 @@ public class BackupNode extends NameNode
+ HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
return nsInfo;
}
-
+
@Override
+ protected String getNameServiceId(Configuration conf) {
+ return DFSUtil.getBackupNameServiceId(conf);
+ }
+
+ protected HAState createHAState() {
+ return new BackupState();
+ }
+
+ @Override // NameNode
protected NameNodeHAContext createHAContext() {
return new BNHAContext();
}
-
+
private class BNHAContext extends NameNodeHAContext {
- @Override // NameNode
+ @Override // NameNodeHAContext
public void checkOperation(OperationCategory op)
throws StandbyException {
if (op == OperationCategory.UNCHECKED ||
@@ -435,10 +446,42 @@ public class BackupNode extends NameNode
throw new StandbyException(msg);
}
}
- }
-
- @Override
- protected String getNameServiceId(Configuration conf) {
- return DFSUtil.getBackupNameServiceId(conf);
+
+ @Override // NameNodeHAContext
+ public void prepareToStopStandbyServices() throws ServiceFailedException {
+ }
+
+ /**
+ * Start services for BackupNode.
+ * <p>
+ * The following services should be muted
+ * (not run or not pass any control commands to DataNodes)
+ * on BackupNode:
+ * {@link LeaseManager.Monitor} protected by SafeMode.
+ * {@link BlockManager.ReplicationMonitor} protected by SafeMode.
+ * {@link HeartbeatManager.Monitor} protected by SafeMode.
+ * {@link DecommissionManager.Monitor} needs to prohibit refreshNodes().
+ * {@link PendingReplicationBlocks.PendingReplicationMonitor} is harmless,
+ * because ReplicationMonitor is muted.
+ */
+ @Override
+ public void startActiveServices() throws IOException {
+ try {
+ namesystem.startActiveServices();
+ } catch (Throwable t) {
+ doImmediateShutdown(t);
+ }
+ }
+
+ @Override
+ public void stopActiveServices() throws IOException {
+ try {
+ if (namesystem != null) {
+ namesystem.stopActiveServices();
+ }
+ } catch (Throwable t) {
+ doImmediateShutdown(t);
+ }
+ }
}
}
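The startActiveServices()/stopActiveServices() overrides above follow a fail-fast convention: any Throwable during a state transition triggers an immediate shutdown rather than leaving the node half-initialized. A generic sketch of that convention; the Service interface and the shutdown body are placeholders, not the NameNode's actual hooks:

    public class FailFastSketch {
      interface Service {
        void start() throws Exception;
      }

      static void doImmediateShutdown(Throwable t) {
        // Placeholder: a real daemon would log the cause and flush first.
        System.err.println("Fatal error during state transition: " + t);
        System.exit(1);
      }

      static void startOrDie(Service s) {
        try {
          s.start();
        } catch (Throwable t) {
          // Catch Throwable, not just Exception: after an Error the
          // process state can no longer be trusted.
          doImmediateShutdown(t);
        }
      }
    }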
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Tue Dec 11 20:08:00 2012
@@ -575,6 +575,8 @@ public class FSDirectory implements Clos
// update modification time of dst and the parent of src
srcInodes[srcInodes.length-2].setModificationTime(timestamp);
dstInodes[dstInodes.length-2].setModificationTime(timestamp);
+ // update moved leases with new filename
+ getFSNamesystem().unprotectedChangeLease(src, dst);
return true;
}
} finally {
@@ -729,6 +731,8 @@ public class FSDirectory implements Clos
}
srcInodes[srcInodes.length - 2].setModificationTime(timestamp);
dstInodes[dstInodes.length - 2].setModificationTime(timestamp);
+ // update moved lease with new filename
+ getFSNamesystem().unprotectedChangeLease(src, dst);
// Collect the blocks and remove the lease for previous dst
int filesDeleted = 0;
@@ -1071,31 +1075,39 @@ public class FSDirectory implements Clos
throws IOException, UnresolvedLinkException {
writeLock();
try {
- //
- // Remove the node from the namespace
- //
- if (!oldnode.removeNode()) {
- NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " +
- "failed to remove " + path);
- throw new IOException("FSDirectory.replaceNode: " +
- "failed to remove " + path);
- }
-
- /* Currently oldnode and newnode are assumed to contain the same
- * blocks. Otherwise, blocks need to be removed from the blocksMap.
- */
- rootDir.addINode(path, newnode);
-
- int index = 0;
- for (BlockInfo b : newnode.getBlocks()) {
- BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
- newnode.setBlock(index, info); // inode refers to the block in BlocksMap
- index++;
- }
+ unprotectedReplaceNode(path, oldnode, newnode);
} finally {
writeUnlock();
}
}
+
+ void unprotectedReplaceNode(String path, INodeFile oldnode, INodeFile newnode)
+ throws IOException, UnresolvedLinkException {
+ assert hasWriteLock();
+ INodeDirectory parent = oldnode.parent;
+ // Remove the node from the namespace
+ if (!oldnode.removeNode()) {
+ NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " +
+ "failed to remove " + path);
+ throw new IOException("FSDirectory.replaceNode: " +
+ "failed to remove " + path);
+ }
+
+ // Parent should be non-null, otherwise oldnode.removeNode() will return
+ // false
+ newnode.setLocalName(oldnode.getLocalNameBytes());
+ parent.addChild(newnode, true);
+
+ /* Currently oldnode and newnode are assumed to contain the same
+ * blocks. Otherwise, blocks need to be removed from the blocksMap.
+ */
+ int index = 0;
+ for (BlockInfo b : newnode.getBlocks()) {
+ BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
+ newnode.setBlock(index, info); // inode refers to the block in BlocksMap
+ index++;
+ }
+ }
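The replaceNode()/unprotectedReplaceNode() split follows the usual FSDirectory convention: the public method takes the write lock, while the unprotected variant only asserts that the lock is already held, letting callers that already hold it (such as edit-log replay) reuse the body without re-locking. A minimal sketch of the convention with stand-in names:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class LockSplitSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      boolean hasWriteLock() {
        return lock.isWriteLockedByCurrentThread();
      }

      /** Public entry point: wraps the unprotected body in the write lock. */
      public void replaceNode() {
        lock.writeLock().lock();
        try {
          unprotectedReplaceNode();
        } finally {
          lock.writeLock().unlock();
        }
      }

      /** Callers must already hold the write lock, e.g. edit-log replay. */
      void unprotectedReplaceNode() {
        assert hasWriteLock();
        // ... mutate shared state here ...
      }
    }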
/**
* Get a partial listing of the indicated directory
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1420375&r1=1420374&r2=1420375&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Tue Dec 11 20:08:00 2012
@@ -878,6 +878,11 @@ public class FSEditLog implements LogsPu
return journalSet;
}
+ @VisibleForTesting
+ synchronized void setJournalSetForTesting(JournalSet js) {
+ this.journalSet = js;
+ }
+
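A package-private setter guarded by @VisibleForTesting is the usual seam for injecting a mock or spy; a minimal sketch, assuming Guava's annotation and a stand-in JournalSet type:

    import com.google.common.annotations.VisibleForTesting;

    public class EditLogSketch {
      static class JournalSet { }   // stand-in for the real type

      private JournalSet journalSet = new JournalSet();

      // Production code never calls this; tests use it to substitute a spy
      // so journal failures can be simulated deterministically.
      @VisibleForTesting
      synchronized void setJournalSetForTesting(JournalSet js) {
        this.journalSet = js;
      }
    }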
/**
* Used only by tests.
*/
@@ -1031,9 +1036,18 @@ public class FSEditLog implements LogsPu
/**
* Archive any log files that are older than the given txid.
+ *
+ * If the edit log is not open for write, then this call returns with no
+ * effect.
*/
@Override
public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) {
+ // Should not purge logs unless they are open for write.
+ // This prevents the SBN from purging logs on shared storage, for example.
+ if (!isOpenForWrite()) {
+ return;
+ }
+
assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
minTxIdToKeep <= curSegmentTxId :
"cannot purge logs older than txid " + minTxIdToKeep +