You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2013/11/22 21:51:08 UTC
svn commit: r1544672 [1/2] - in
/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/ hadoop-hdfs/
hadoop-hdfs/src/main/java/ hado...
Author: arp
Date: Fri Nov 22 20:51:06 2013
New Revision: 1544672
URL: http://svn.apache.org/r1544672
Log:
Merging r1544304 through r1544665 from trunk to branch HDFS-2832
Added:
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
- copied unchanged from r1544665, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
Removed:
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
Modified:
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-helpers-1.1.1.min.js
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1544304-1544665
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java Fri Nov 22 20:51:06 2013
@@ -99,7 +99,7 @@ class DFSClientCache {
this.config = config;
this.clientCache = CacheBuilder.newBuilder()
.maximumSize(clientCache)
- .removalListener(clientRemovealListener())
+ .removalListener(clientRemovalListener())
.build(clientLoader());
this.inputstreamCache = CacheBuilder.newBuilder()
@@ -127,7 +127,7 @@ class DFSClientCache {
};
}
- private RemovalListener<String, DFSClient> clientRemovealListener() {
+ private RemovalListener<String, DFSClient> clientRemovalListener() {
return new RemovalListener<String, DFSClient>() {
@Override
public void onRemoval(RemovalNotification<String, DFSClient> notification) {
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Nov 22 20:51:06 2013
@@ -13,9 +13,6 @@ Trunk (Unreleased)
HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
- HDFS-5444. Choose default web UI based on browser capabilities. (Haohui Mai
- via jing9)
-
IMPROVEMENTS
HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.
@@ -202,8 +199,6 @@ Trunk (Unreleased)
HDFS-5511. improve CacheManipulator interface to allow better unit testing
(cmccabe)
- HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
-
HDFS-5451. Add byte and file statistics to PathBasedCacheEntry.
(Colin Patrick McCabe via Andrew Wang)
@@ -213,6 +208,10 @@ Trunk (Unreleased)
HDFS-5473. Consistent naming of user-visible caching classes and methods
(cmccabe)
+ HDFS-5285. Flatten INodeFile hierarchy: Replace INodeFileUnderConstruction
+ and INodeFileUnderConstructionWithSnapshot with FileUnderConstructionFeature.
+ (jing9 via szetszwo)
+
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
@@ -397,6 +396,9 @@ Trunk (Unreleased)
HDFS-5513. CacheAdmin commands fail when using . as the path. (wang)
+ HDFS-5543. Fix narrow race condition in TestPathBasedCacheRequests
+ (cmccabe)
+
Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -422,6 +424,9 @@ Release 2.3.0 - UNRELEASED
HDFS-3987. Support webhdfs over HTTPS. (Haohui Mai via jing9)
+ HDFS-5444. Choose default web UI based on browser capabilities. (Haohui Mai
+ via jing9)
+
IMPROVEMENTS
HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@@ -528,6 +533,8 @@ Release 2.3.0 - UNRELEASED
HDFS-5532. Enable the webhdfs by default to support new HDFS web UI. (Vinay
via jing9)
+
+ HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
OPTIMIZATIONS
@@ -602,6 +609,9 @@ Release 2.3.0 - UNRELEASED
HDFS-5428. Under construction files deletion after snapshot+checkpoint+nn restart
leads nn safemode. (jing9)
+ HDFS-5552. Fix wrong information of "Cluster summary" in dfshealth.html.
+ (Haohui Mai via jing9)
+
Release 2.2.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -622,6 +632,8 @@ Release 2.2.1 - UNRELEASED
HDFS-5344. Make LsSnapshottableDir as Tool interface implementation. (Sathish via umamahesh)
+ HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
+
OPTIMIZATIONS
BUG FIXES
@@ -711,6 +723,10 @@ Release 2.2.1 - UNRELEASED
HDFS-5014. Process register commands with out holding BPOfferService lock.
(Vinaykumar B via umamahesh)
+ HDFS-5288. Close idle connections in portmap (Haohui Mai via brandonli)
+
+ HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
+
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1544304-1544665
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java Fri Nov 22 20:51:06 2013
@@ -64,4 +64,21 @@ public interface BlockCollection {
* Get the name of the collection.
*/
public String getName();
+
+ /**
+ * Set the block at the given index.
+ */
+ public void setBlock(int index, BlockInfo blk);
+
+ /**
+ * Convert the last block of the collection to an under-construction block
+ * and set the locations.
+ */
+ public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
+ DatanodeStorageInfo[] targets) throws IOException;
+
+ /**
+ * @return whether the block collection is under construction.
+ */
+ public boolean isUnderConstruction();
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Fri Nov 22 20:51:06 2013
@@ -563,7 +563,7 @@ public class BlockManager {
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- public boolean commitOrCompleteLastBlock(MutableBlockCollection bc,
+ public boolean commitOrCompleteLastBlock(BlockCollection bc,
Block commitBlock) throws IOException {
if(commitBlock == null)
return false; // not committing, this is a block allocation retry
@@ -586,7 +586,7 @@ public class BlockManager {
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- private BlockInfo completeBlock(final MutableBlockCollection bc,
+ private BlockInfo completeBlock(final BlockCollection bc,
final int blkIndex, boolean force) throws IOException {
if(blkIndex < 0)
return null;
@@ -619,7 +619,7 @@ public class BlockManager {
return blocksMap.replaceBlock(completeBlock);
}
- private BlockInfo completeBlock(final MutableBlockCollection bc,
+ private BlockInfo completeBlock(final BlockCollection bc,
final BlockInfo block, boolean force) throws IOException {
BlockInfo[] fileBlocks = bc.getBlocks();
for(int idx = 0; idx < fileBlocks.length; idx++)
@@ -634,7 +634,7 @@ public class BlockManager {
* regardless of whether enough replicas are present. This is necessary
* when tailing edit logs as a Standby.
*/
- public BlockInfo forceCompleteBlock(final MutableBlockCollection bc,
+ public BlockInfo forceCompleteBlock(final BlockCollection bc,
final BlockInfoUnderConstruction block) throws IOException {
block.commitBlock(block);
return completeBlock(bc, block, true);
@@ -655,7 +655,7 @@ public class BlockManager {
* @return the last block locations if the block is partial or null otherwise
*/
public LocatedBlock convertLastBlockToUnderConstruction(
- MutableBlockCollection bc) throws IOException {
+ BlockCollection bc) throws IOException {
BlockInfo oldBlock = bc.getLastBlock();
if(oldBlock == null ||
bc.getPreferredBlockSize() == oldBlock.getNumBytes())
@@ -1214,7 +1214,7 @@ public class BlockManager {
// block should belong to a file
bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append
- if(bc == null || bc instanceof MutableBlockCollection) {
+ if(bc == null || bc.isUnderConstruction()) {
neededReplications.remove(block, priority); // remove from neededReplications
continue;
}
@@ -1295,7 +1295,7 @@ public class BlockManager {
// block should belong to a file
bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append
- if(bc == null || bc instanceof MutableBlockCollection) {
+ if(bc == null || bc.isUnderConstruction()) {
neededReplications.remove(block, priority); // remove from neededReplications
rw.targets = null;
continue;
@@ -2161,7 +2161,7 @@ assert storedBlock.findDatanode(dn) < 0
int numCurrentReplica = countLiveNodes(storedBlock);
if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
&& numCurrentReplica >= minReplication) {
- completeBlock((MutableBlockCollection)storedBlock.getBlockCollection(), storedBlock, false);
+ completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
} else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block
// only complete blocks are counted towards that.
@@ -2232,7 +2232,7 @@ assert storedBlock.findDatanode(dn) < 0
if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
numLiveReplicas >= minReplication) {
- storedBlock = completeBlock((MutableBlockCollection)bc, storedBlock, false);
+ storedBlock = completeBlock(bc, storedBlock, false);
} else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block
// only complete blocks are counted towards that
@@ -2243,7 +2243,7 @@ assert storedBlock.findDatanode(dn) < 0
}
// if file is under construction, then done for now
- if (bc instanceof MutableBlockCollection) {
+ if (bc.isUnderConstruction()) {
return storedBlock;
}
@@ -2856,7 +2856,7 @@ assert storedBlock.findDatanode(dn) < 0
+ ", corrupt replicas: " + num.corruptReplicas()
+ ", decommissioned replicas: " + num.decommissionedReplicas()
+ ", excess replicas: " + num.excessReplicas()
- + ", Is Open File: " + (bc instanceof MutableBlockCollection)
+ + ", Is Open File: " + bc.isUnderConstruction()
+ ", Datanodes having this block: " + nodeList + ", Current Datanode: "
+ srcNode + ", Is current datanode decommissioning: "
+ srcNode.isDecommissionInProgress());
@@ -2915,7 +2915,7 @@ assert storedBlock.findDatanode(dn) < 0
if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
decommissionOnlyReplicas++;
}
- if (bc instanceof MutableBlockCollection) {
+ if (bc.isUnderConstruction()) {
underReplicatedInOpenFiles++;
}
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Nov 22 20:51:06 2013
@@ -277,13 +277,9 @@ public class FSDirectory implements Clos
* @throws UnresolvedLinkException
* @throws SnapshotAccessControlException
*/
- INodeFileUnderConstruction addFile(String path,
- PermissionStatus permissions,
- short replication,
- long preferredBlockSize,
- String clientName,
- String clientMachine,
- DatanodeDescriptor clientNode)
+ INodeFile addFile(String path, PermissionStatus permissions,
+ short replication, long preferredBlockSize, String clientName,
+ String clientMachine, DatanodeDescriptor clientNode)
throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException {
waitForReady();
@@ -301,11 +297,11 @@ public class FSDirectory implements Clos
if (!mkdirs(parent.toString(), permissions, true, modTime)) {
return null;
}
- INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
- namesystem.allocateNewInodeId(),
- permissions,replication,
- preferredBlockSize, modTime, clientName,
- clientMachine, clientNode);
+ INodeFile newNode = new INodeFile(namesystem.allocateNewInodeId(), null,
+ permissions, modTime, modTime, BlockInfo.EMPTY_ARRAY, replication,
+ preferredBlockSize);
+ newNode.toUnderConstruction(clientName, clientMachine, clientNode);
+
boolean added = false;
writeLock();
try {
@@ -337,8 +333,11 @@ public class FSDirectory implements Clos
final INodeFile newNode;
assert hasWriteLock();
if (underConstruction) {
- newNode = new INodeFileUnderConstruction(id, permissions, replication,
- preferredBlockSize, modificationTime, clientName, clientMachine, null);
+ newNode = new INodeFile(id, null, permissions, modificationTime,
+ modificationTime, BlockInfo.EMPTY_ARRAY, replication,
+ preferredBlockSize);
+ newNode.toUnderConstruction(clientName, clientMachine, null);
+
} else {
newNode = new INodeFile(id, null, permissions, modificationTime, atime,
BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize);
@@ -367,8 +366,8 @@ public class FSDirectory implements Clos
writeLock();
try {
- final INodeFileUnderConstruction fileINode =
- INodeFileUnderConstruction.valueOf(inodesInPath.getLastINode(), path);
+ final INodeFile fileINode = inodesInPath.getLastINode().asFile();
+ Preconditions.checkState(fileINode.isUnderConstruction());
// check quota limits and updated space consumed
updateCount(inodesInPath, 0, fileINode.getBlockDiskspace(), true);
@@ -398,8 +397,8 @@ public class FSDirectory implements Clos
/**
* Persist the block list for the inode.
*/
- void persistBlocks(String path, INodeFileUnderConstruction file,
- boolean logRetryCache) {
+ void persistBlocks(String path, INodeFile file, boolean logRetryCache) {
+ Preconditions.checkArgument(file.isUnderConstruction());
waitForReady();
writeLock();
@@ -438,8 +437,9 @@ public class FSDirectory implements Clos
* Remove a block from the file.
* @return Whether the block exists in the corresponding file
*/
- boolean removeBlock(String path, INodeFileUnderConstruction fileNode,
- Block block) throws IOException {
+ boolean removeBlock(String path, INodeFile fileNode, Block block)
+ throws IOException {
+ Preconditions.checkArgument(fileNode.isUnderConstruction());
waitForReady();
writeLock();
@@ -451,7 +451,8 @@ public class FSDirectory implements Clos
}
boolean unprotectedRemoveBlock(String path,
- INodeFileUnderConstruction fileNode, Block block) throws IOException {
+ INodeFile fileNode, Block block) throws IOException {
+ Preconditions.checkArgument(fileNode.isUnderConstruction());
// modify file-> block and blocksMap
boolean removed = fileNode.removeLastBlock(block);
if (!removed) {
@@ -1479,38 +1480,6 @@ public class FSDirectory implements Clos
}
/**
- * Replaces the specified INodeFile with the specified one.
- */
- void replaceINodeFile(String path, INodeFile oldnode,
- INodeFile newnode) throws IOException {
- writeLock();
- try {
- unprotectedReplaceINodeFile(path, oldnode, newnode);
- } finally {
- writeUnlock();
- }
- }
-
- /** Replace an INodeFile and record modification for the latest snapshot. */
- void unprotectedReplaceINodeFile(final String path, final INodeFile oldnode,
- final INodeFile newnode) {
- Preconditions.checkState(hasWriteLock());
-
- oldnode.getParent().replaceChild(oldnode, newnode, inodeMap);
- oldnode.clear();
-
- /* Currently oldnode and newnode are assumed to contain the same
- * blocks. Otherwise, blocks need to be removed from the blocksMap.
- */
- int index = 0;
- for (BlockInfo b : newnode.getBlocks()) {
- BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
- newnode.setBlock(index, info); // inode refers to the block in BlocksMap
- index++;
- }
- }
-
- /**
* Get a partial listing of the indicated directory
*
* @param src the directory name
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Fri Nov 22 20:51:06 2013
@@ -680,8 +680,8 @@ public class FSEditLog implements LogsPu
* Add open lease record to edit log.
* Records the block locations of the last block.
*/
- public void logOpenFile(String path, INodeFileUnderConstruction newNode,
- boolean toLogRpcIds) {
+ public void logOpenFile(String path, INodeFile newNode, boolean toLogRpcIds) {
+ Preconditions.checkArgument(newNode.isUnderConstruction());
AddOp op = AddOp.getInstance(cache.get())
.setInodeId(newNode.getId())
.setPath(path)
@@ -691,8 +691,8 @@ public class FSEditLog implements LogsPu
.setBlockSize(newNode.getPreferredBlockSize())
.setBlocks(newNode.getBlocks())
.setPermissionStatus(newNode.getPermissionStatus())
- .setClientName(newNode.getClientName())
- .setClientMachine(newNode.getClientMachine());
+ .setClientName(newNode.getFileUnderConstructionFeature().getClientName())
+ .setClientMachine(newNode.getFileUnderConstructionFeature().getClientMachine());
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
@@ -713,8 +713,8 @@ public class FSEditLog implements LogsPu
logEdit(op);
}
- public void logUpdateBlocks(String path, INodeFileUnderConstruction file,
- boolean toLogRpcIds) {
+ public void logUpdateBlocks(String path, INodeFile file, boolean toLogRpcIds) {
+ Preconditions.checkArgument(file.isUnderConstruction());
UpdateBlocksOp op = UpdateBlocksOp.getInstance(cache.get())
.setPath(path)
.setBlocks(file.getBlocks());
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Nov 22 20:51:06 2013
@@ -85,6 +85,7 @@ import org.apache.hadoop.hdfs.util.Chunk
import org.apache.hadoop.hdfs.util.Holder;
import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -369,15 +370,15 @@ public class FSEditLogLoader {
}
final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path);
- final INodeFile oldFile = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);
+ final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);
// Update the salient file attributes.
- oldFile.setAccessTime(addCloseOp.atime, null, fsDir.getINodeMap());
- oldFile.setModificationTime(addCloseOp.mtime, null, fsDir.getINodeMap());
- updateBlocks(fsDir, addCloseOp, oldFile);
+ file.setAccessTime(addCloseOp.atime, null, fsDir.getINodeMap());
+ file.setModificationTime(addCloseOp.mtime, null, fsDir.getINodeMap());
+ updateBlocks(fsDir, addCloseOp, file);
// Now close the file
- if (!oldFile.isUnderConstruction() &&
+ if (!file.isUnderConstruction() &&
logVersion <= LayoutVersion.BUGFIX_HDFS_2991_VERSION) {
// There was a bug (HDFS-2991) in hadoop < 0.23.1 where OP_CLOSE
// could show up twice in a row. But after that version, this
@@ -387,11 +388,9 @@ public class FSEditLogLoader {
}
// One might expect that you could use removeLease(holder, path) here,
// but OP_CLOSE doesn't serialize the holder. So, remove by path.
- if (oldFile.isUnderConstruction()) {
- INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
+ if (file.isUnderConstruction()) {
fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
- INodeFile newFile = ucFile.toINodeFile(ucFile.getModificationTime());
- fsDir.unprotectedReplaceINodeFile(addCloseOp.path, ucFile, newFile);
+ file.toCompleteFile(file.getModificationTime());
}
break;
}
@@ -564,9 +563,8 @@ public class FSEditLogLoader {
Lease lease = fsNamesys.leaseManager.getLease(
reassignLeaseOp.leaseHolder);
- INodeFileUnderConstruction pendingFile =
- INodeFileUnderConstruction.valueOf(
- fsDir.getINode(reassignLeaseOp.path), reassignLeaseOp.path);
+ INodeFile pendingFile = fsDir.getINode(reassignLeaseOp.path).asFile();
+ Preconditions.checkState(pendingFile.isUnderConstruction());
fsNamesys.reassignLeaseInternal(lease,
reassignLeaseOp.path, reassignLeaseOp.newHolder, pendingFile);
break;
@@ -751,9 +749,8 @@ public class FSEditLogLoader {
if (oldBlock instanceof BlockInfoUnderConstruction &&
(!isLastBlock || op.shouldCompleteLastBlock())) {
changeMade = true;
- fsNamesys.getBlockManager().forceCompleteBlock(
- (INodeFileUnderConstruction)file,
- (BlockInfoUnderConstruction)oldBlock);
+ fsNamesys.getBlockManager().forceCompleteBlock(file,
+ (BlockInfoUnderConstruction) oldBlock);
}
if (changeMade) {
// The state or gen-stamp of the block has changed. So, we may be
@@ -774,8 +771,7 @@ public class FSEditLogLoader {
+ path);
}
Block oldBlock = oldBlocks[oldBlocks.length - 1];
- boolean removed = fsDir.unprotectedRemoveBlock(path,
- (INodeFileUnderConstruction) file, oldBlock);
+ boolean removed = fsDir.unprotectedRemoveBlock(path, file, oldBlock);
if (!removed && !(op instanceof UpdateBlocksOp)) {
throw new IOException("Trying to delete non-existant block " + oldBlock);
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Fri Nov 22 20:51:06 2013
@@ -55,7 +55,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
@@ -659,13 +658,10 @@ public class FSImageFormat {
// file
// read blocks
- BlockInfo[] blocks = null;
- if (numBlocks >= 0) {
- blocks = new BlockInfo[numBlocks];
- for (int j = 0; j < numBlocks; j++) {
- blocks[j] = new BlockInfo(replication);
- blocks[j].readFields(in);
- }
+ BlockInfo[] blocks = new BlockInfo[numBlocks];
+ for (int j = 0; j < numBlocks; j++) {
+ blocks[j] = new BlockInfo(replication);
+ blocks[j].readFields(in);
}
String clientName = "";
@@ -700,10 +696,9 @@ public class FSImageFormat {
final INodeFile file = new INodeFile(inodeId, localName, permissions,
modificationTime, atime, blocks, replication, blockSize);
if (underConstruction) {
- INodeFileUnderConstruction fileUC = new INodeFileUnderConstruction(
- file, clientName, clientMachine, null);
- return fileDiffs == null ? fileUC :
- new INodeFileUnderConstructionWithSnapshot(fileUC, fileDiffs);
+ file.toUnderConstruction(clientName, clientMachine, null);
+ return fileDiffs == null ? file : new INodeFileWithSnapshot(file,
+ fileDiffs);
} else {
return fileDiffs == null ? file :
new INodeFileWithSnapshot(file, fileDiffs);
@@ -829,8 +824,8 @@ public class FSImageFormat {
LOG.info("Number of files under construction = " + size);
for (int i = 0; i < size; i++) {
- INodeFileUnderConstruction cons = FSImageSerialization
- .readINodeUnderConstruction(in, namesystem, getLayoutVersion());
+ INodeFile cons = FSImageSerialization.readINodeUnderConstruction(in,
+ namesystem, getLayoutVersion());
counter.increment();
// verify that file exists in namespace
@@ -848,33 +843,21 @@ public class FSImageFormat {
final INodesInPath iip = fsDir.getLastINodeInPath(path);
oldnode = INodeFile.valueOf(iip.getINode(0), path);
}
-
- cons.setLocalName(oldnode.getLocalNameBytes());
- INodeReference parentRef = oldnode.getParentReference();
- if (parentRef != null) {
- cons.setParentReference(parentRef);
- } else {
- cons.setParent(oldnode.getParent());
- }
- if (oldnode instanceof INodeFileWithSnapshot) {
- cons = new INodeFileUnderConstructionWithSnapshot(cons,
- ((INodeFileWithSnapshot) oldnode).getDiffs());
+ FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
+ oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine(),
+ uc.getClientNode());
+ if (oldnode.numBlocks() > 0) {
+ BlockInfo ucBlock = cons.getLastBlock();
+ // we do not replace the inode, just replace the last block of oldnode
+ BlockInfo info = namesystem.getBlockManager().addBlockCollection(
+ ucBlock, oldnode);
+ oldnode.setBlock(oldnode.numBlocks() - 1, info);
}
if (!inSnapshot) {
- fsDir.replaceINodeFile(path, oldnode, cons);
- namesystem.leaseManager.addLease(cons.getClientName(), path);
- } else {
- if (parentRef != null) {
- // replace oldnode with cons
- parentRef.setReferredINode(cons);
- } else {
- // replace old node in its parent's children list and deleted list
- oldnode.getParent().replaceChildFileInSnapshot(oldnode, cons);
- namesystem.dir.addToInodeMap(cons);
- updateBlocksMap(cons);
- }
+ namesystem.leaseManager.addLease(cons
+ .getFileUnderConstructionFeature().getClientName(), path);
}
}
}
@@ -955,8 +938,8 @@ public class FSImageFormat {
private MD5Hash savedDigest;
private final ReferenceMap referenceMap = new ReferenceMap();
- private final Map<Long, INodeFileUnderConstruction> snapshotUCMap =
- new HashMap<Long, INodeFileUnderConstruction>();
+ private final Map<Long, INodeFile> snapshotUCMap =
+ new HashMap<Long, INodeFile>();
/** @throws IllegalStateException if the instance has not yet saved an image */
private void checkSaved() {
@@ -1096,8 +1079,7 @@ public class FSImageFormat {
dirNum++;
} else if (inSnapshot && child.isFile()
&& child.asFile().isUnderConstruction()) {
- this.snapshotUCMap.put(child.getId(),
- (INodeFileUnderConstruction) child.asFile());
+ this.snapshotUCMap.put(child.getId(), child.asFile());
}
if (i++ % 50 == 0) {
context.checkCancelled();
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Fri Nov 22 20:51:06 2013
@@ -108,7 +108,7 @@ public class FSImageSerialization {
// Helper function that reads in an INodeUnderConstruction
// from the input stream
//
- static INodeFileUnderConstruction readINodeUnderConstruction(
+ static INodeFile readINodeUnderConstruction(
DataInput in, FSNamesystem fsNamesys, int imgVersion)
throws IOException {
byte[] name = readBytes(in);
@@ -141,25 +141,17 @@ public class FSImageSerialization {
int numLocs = in.readInt();
assert numLocs == 0 : "Unexpected block locations";
- return new INodeFileUnderConstruction(inodeId,
- name,
- blockReplication,
- modificationTime,
- preferredBlockSize,
- blocks,
- perm,
- clientName,
- clientMachine,
- null);
+ INodeFile file = new INodeFile(inodeId, name, perm, modificationTime,
+ modificationTime, blocks, blockReplication, preferredBlockSize);
+ file.toUnderConstruction(clientName, clientMachine, null);
+ return file;
}
// Helper function that writes an INodeUnderConstruction
// into the input stream
//
- static void writeINodeUnderConstruction(DataOutputStream out,
- INodeFileUnderConstruction cons,
- String path)
- throws IOException {
+ static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons,
+ String path) throws IOException {
writeString(path, out);
out.writeLong(cons.getId());
out.writeShort(cons.getFileReplication());
@@ -169,8 +161,9 @@ public class FSImageSerialization {
writeBlocks(cons.getBlocks(), out);
cons.getPermissionStatus().write(out);
- writeString(cons.getClientName(), out);
- writeString(cons.getClientMachine(), out);
+ FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
+ writeString(uc.getClientName(), out);
+ writeString(uc.getClientMachine(), out);
out.writeInt(0); // do not store locations of last block
}
@@ -194,9 +187,9 @@ public class FSImageSerialization {
SnapshotFSImageFormat.saveFileDiffList(file, out);
if (writeUnderConstruction) {
- if (file instanceof INodeFileUnderConstruction) {
+ if (file.isUnderConstruction()) {
out.writeBoolean(true);
- final INodeFileUnderConstruction uc = (INodeFileUnderConstruction)file;
+ final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
writeString(uc.getClientName(), out);
writeString(uc.getClientMachine(), out);
} else {
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Nov 22 20:51:06 2013
@@ -2203,13 +2203,14 @@ public class FSNamesystem implements Nam
final DatanodeDescriptor clientNode =
blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
- INodeFileUnderConstruction newNode = dir.addFile(src, permissions,
- replication, blockSize, holder, clientMachine, clientNode);
+ INodeFile newNode = dir.addFile(src, permissions, replication, blockSize,
+ holder, clientMachine, clientNode);
if (newNode == null) {
throw new IOException("DIR* NameSystem.startFile: " +
"Unable to add file to namespace.");
}
- leaseManager.addLease(newNode.getClientName(), src);
+ leaseManager.addLease(newNode.getFileUnderConstructionFeature()
+ .getClientName(), src);
// record file record in log, record new generation stamp
getEditLog().logOpenFile(src, newNode, logRetryEntry);
@@ -2301,11 +2302,11 @@ public class FSNamesystem implements Nam
boolean writeToEditLog, Snapshot latestSnapshot, boolean logRetryCache)
throws IOException {
file = file.recordModification(latestSnapshot, dir.getINodeMap());
- final INodeFileUnderConstruction cons = file.toUnderConstruction(
- leaseHolder, clientMachine, clientNode);
+ final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine,
+ clientNode);
- dir.replaceINodeFile(src, file, cons);
- leaseManager.addLease(cons.getClientName(), src);
+ leaseManager.addLease(cons.getFileUnderConstructionFeature()
+ .getClientName(), src);
LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
if (writeToEditLog) {
@@ -2368,7 +2369,6 @@ public class FSNamesystem implements Nam
throws IOException {
assert hasWriteLock();
if (fileInode != null && fileInode.isUnderConstruction()) {
- INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) fileInode;
//
// If the file is under construction , then it must be in our
// leases. Find the appropriate lease record.
@@ -2391,7 +2391,9 @@ public class FSNamesystem implements Nam
//
// Find the original holder.
//
- lease = leaseManager.getLease(pendingFile.getClientName());
+ FileUnderConstructionFeature uc = fileInode.getFileUnderConstructionFeature();
+ String clientName = uc.getClientName();
+ lease = leaseManager.getLease(clientName);
if (lease == null) {
throw new AlreadyBeingCreatedException(
"failed to create file " + src + " for " + holder +
@@ -2402,26 +2404,26 @@ public class FSNamesystem implements Nam
// close now: no need to wait for soft lease expiration and
// close only the file src
LOG.info("recoverLease: " + lease + ", src=" + src +
- " from client " + pendingFile.getClientName());
+ " from client " + clientName);
internalReleaseLease(lease, src, holder);
} else {
- assert lease.getHolder().equals(pendingFile.getClientName()) :
+ assert lease.getHolder().equals(clientName) :
"Current lease holder " + lease.getHolder() +
- " does not match file creator " + pendingFile.getClientName();
+ " does not match file creator " + clientName;
//
// If the original holder has not renewed in the last SOFTLIMIT
// period, then start lease recovery.
//
if (lease.expiredSoftLimit()) {
LOG.info("startFile: recover " + lease + ", src=" + src + " client "
- + pendingFile.getClientName());
+ + clientName);
boolean isClosed = internalReleaseLease(lease, src, null);
if(!isClosed)
throw new RecoveryInProgressException(
"Failed to close file " + src +
". Lease recovery is in progress. Try again later.");
} else {
- final BlockInfo lastBlock = pendingFile.getLastBlock();
+ final BlockInfo lastBlock = fileInode.getLastBlock();
if (lastBlock != null
&& lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
throw new RecoveryInProgressException("Recovery in progress, file ["
@@ -2430,8 +2432,8 @@ public class FSNamesystem implements Nam
throw new AlreadyBeingCreatedException("Failed to create file ["
+ src + "] for [" + holder + "] on client [" + clientMachine
+ "], because this file is already being created by ["
- + pendingFile.getClientName() + "] on ["
- + pendingFile.getClientMachine() + "]");
+ + clientName + "] on ["
+ + uc.getClientMachine() + "]");
}
}
}
@@ -2561,8 +2563,7 @@ public class FSNamesystem implements Nam
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
final INode[] inodes = analyzeFileState(
src, fileId, clientName, previous, onRetryBlock).getINodes();
- final INodeFileUnderConstruction pendingFile =
- (INodeFileUnderConstruction) inodes[inodes.length - 1].asFile();
+ final INodeFile pendingFile = inodes[inodes.length - 1].asFile();
if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
// This is a retry. Just return the last block if having locations.
@@ -2575,7 +2576,7 @@ public class FSNamesystem implements Nam
+ maxBlocksPerFile);
}
blockSize = pendingFile.getPreferredBlockSize();
- clientNode = pendingFile.getClientNode();
+ clientNode = pendingFile.getFileUnderConstructionFeature().getClientNode();
replication = pendingFile.getFileReplication();
} finally {
readUnlock();
@@ -2599,8 +2600,7 @@ public class FSNamesystem implements Nam
INodesInPath inodesInPath =
analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
INode[] inodes = inodesInPath.getINodes();
- final INodeFileUnderConstruction pendingFile =
- (INodeFileUnderConstruction) inodes[inodes.length - 1].asFile();
+ final INodeFile pendingFile = inodes[inodes.length - 1].asFile();
if (onRetryBlock[0] != null) {
if (onRetryBlock[0].getLocations().length > 0) {
@@ -2655,7 +2655,7 @@ public class FSNamesystem implements Nam
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
final INodesInPath iip = dir.getINodesInPath4Write(src);
- final INodeFileUnderConstruction pendingFile
+ final INodeFile pendingFile
= checkLease(src, fileId, clientName, iip.getLastINode());
BlockInfo lastBlockInFile = pendingFile.getLastBlock();
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
@@ -2761,8 +2761,8 @@ public class FSNamesystem implements Nam
src = FSDirectory.resolvePath(src, pathComponents, dir);
//check lease
- final INodeFileUnderConstruction file = checkLease(src, clientName);
- clientnode = file.getClientNode();
+ final INodeFile file = checkLease(src, clientName);
+ clientnode = file.getFileUnderConstructionFeature().getClientNode();
preferredblocksize = file.getPreferredBlockSize();
//find datanode storages
@@ -2803,7 +2803,7 @@ public class FSNamesystem implements Nam
//
// Remove the block from the pending creates list
//
- INodeFileUnderConstruction file = checkLease(src, holder);
+ INodeFile file = checkLease(src, holder);
boolean removed = dir.removeBlock(src, file,
ExtendedBlock.getLocalBlock(b));
if (!removed) {
@@ -2825,16 +2825,15 @@ public class FSNamesystem implements Nam
}
/** make sure that we still have the lease on this file. */
- private INodeFileUnderConstruction checkLease(String src, String holder)
+ private INodeFile checkLease(String src, String holder)
throws LeaseExpiredException, UnresolvedLinkException,
FileNotFoundException {
return checkLease(src, INodeId.GRANDFATHER_INODE_ID, holder,
dir.getINode(src));
}
- private INodeFileUnderConstruction checkLease(String src, long fileId,
- String holder, INode inode) throws LeaseExpiredException,
- FileNotFoundException {
+ private INodeFile checkLease(String src, long fileId, String holder,
+ INode inode) throws LeaseExpiredException, FileNotFoundException {
assert hasReadLock();
if (inode == null || !inode.isFile()) {
Lease lease = leaseManager.getLease(holder);
@@ -2851,13 +2850,13 @@ public class FSNamesystem implements Nam
+ (lease != null ? lease.toString()
: "Holder " + holder + " does not have any open files."));
}
- INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)file;
- if (holder != null && !pendingFile.getClientName().equals(holder)) {
+ String clientName = file.getFileUnderConstructionFeature().getClientName();
+ if (holder != null && !clientName.equals(holder)) {
throw new LeaseExpiredException("Lease mismatch on " + src + " owned by "
- + pendingFile.getClientName() + " but is accessed by " + holder);
+ + clientName + " but is accessed by " + holder);
}
- INodeId.checkId(fileId, pendingFile);
- return pendingFile;
+ INodeId.checkId(fileId, file);
+ return file;
}
/**
@@ -2898,7 +2897,7 @@ public class FSNamesystem implements Nam
UnresolvedLinkException, IOException {
assert hasWriteLock();
final INodesInPath iip = dir.getLastINodeInPath(src);
- final INodeFileUnderConstruction pendingFile;
+ final INodeFile pendingFile;
try {
pendingFile = checkLease(src, fileId, holder, iip.getINode(0));
} catch (LeaseExpiredException lee) {
@@ -3588,9 +3587,10 @@ public class FSNamesystem implements Nam
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot fsync file " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
- INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
+ INodeFile pendingFile = checkLease(src, clientName);
if (lastBlockLength > 0) {
- pendingFile.updateLengthOfLastBlock(lastBlockLength);
+ pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock(
+ pendingFile, lastBlockLength);
}
dir.persistBlocks(src, pendingFile, false);
} finally {
@@ -3621,8 +3621,7 @@ public class FSNamesystem implements Nam
assert hasWriteLock();
final INodesInPath iip = dir.getLastINodeInPath(src);
- final INodeFileUnderConstruction pendingFile
- = INodeFileUnderConstruction.valueOf(iip.getINode(0), src);
+ final INodeFile pendingFile = iip.getINode(0).asFile();
int nrBlocks = pendingFile.numBlocks();
BlockInfo[] blocks = pendingFile.getBlocks();
@@ -3744,7 +3743,7 @@ public class FSNamesystem implements Nam
}
private Lease reassignLease(Lease lease, String src, String newHolder,
- INodeFileUnderConstruction pendingFile) {
+ INodeFile pendingFile) {
assert hasWriteLock();
if(newHolder == null)
return lease;
@@ -3754,15 +3753,16 @@ public class FSNamesystem implements Nam
}
Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
- INodeFileUnderConstruction pendingFile) {
+ INodeFile pendingFile) {
assert hasWriteLock();
- pendingFile.setClientName(newHolder);
+ pendingFile.getFileUnderConstructionFeature().setClientName(newHolder);
return leaseManager.reassignLease(lease, src, newHolder);
}
- private void commitOrCompleteLastBlock(final INodeFileUnderConstruction fileINode,
+ private void commitOrCompleteLastBlock(final INodeFile fileINode,
final Block commitBlock) throws IOException {
assert hasWriteLock();
+ Preconditions.checkArgument(fileINode.isUnderConstruction());
if (!blockManager.commitOrCompleteLastBlock(fileINode, commitBlock)) {
return;
}
@@ -3779,19 +3779,21 @@ public class FSNamesystem implements Nam
}
}
- private void finalizeINodeFileUnderConstruction(String src,
- INodeFileUnderConstruction pendingFile, Snapshot latestSnapshot)
- throws IOException, UnresolvedLinkException {
+ private void finalizeINodeFileUnderConstruction(String src,
+ INodeFile pendingFile, Snapshot latestSnapshot) throws IOException,
+ UnresolvedLinkException {
assert hasWriteLock();
- leaseManager.removeLease(pendingFile.getClientName(), src);
+ FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
+ Preconditions.checkArgument(uc != null);
+ leaseManager.removeLease(uc.getClientName(), src);
pendingFile = pendingFile.recordModification(latestSnapshot,
dir.getINodeMap());
// The file is no longer pending.
- // Create permanent INode, update blocks
- final INodeFile newFile = pendingFile.toINodeFile(now());
- dir.replaceINodeFile(src, pendingFile, newFile);
+ // Create permanent INode, update blocks. No need to replace the inode here
+ // since we just remove the uc feature from pendingFile
+ final INodeFile newFile = pendingFile.toCompleteFile(now());
// close file and persist block allocations for this file
dir.closeFile(src, newFile);
@@ -3808,12 +3810,12 @@ public class FSNamesystem implements Nam
public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) {
assert hasReadLock();
final BlockCollection bc = blockUC.getBlockCollection();
- if (bc == null || !(bc instanceof INodeFileUnderConstruction)) {
+ if (bc == null || !(bc instanceof INodeFile)
+ || !((INodeFile) bc).isUnderConstruction()) {
return false;
}
- INodeFileUnderConstruction inodeUC = (INodeFileUnderConstruction) blockUC
- .getBlockCollection();
+ INodeFile inodeUC = (INodeFile) bc;
String fullName = inodeUC.getName();
try {
if (fullName != null && fullName.startsWith(Path.SEPARATOR)
@@ -3891,11 +3893,9 @@ public class FSNamesystem implements Nam
+ recoveryId + " for block " + lastblock);
}
- INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)iFile;
-
if (deleteblock) {
Block blockToDel = ExtendedBlock.getLocalBlock(lastblock);
- boolean remove = pendingFile.removeLastBlock(blockToDel);
+ boolean remove = iFile.removeLastBlock(blockToDel);
if (remove) {
blockManager.removeBlockFromMap(storedBlock);
}
@@ -3940,14 +3940,14 @@ public class FSNamesystem implements Nam
blockManager.getDatanodeManager().getDatanodeStorageInfos(
trimmedTargets.toArray(new DatanodeID[trimmedTargets.size()]),
trimmedStorages.toArray(new String[trimmedStorages.size()]));
- pendingFile.setLastBlock(storedBlock, trimmedStorageInfos);
+ iFile.setLastBlock(storedBlock, trimmedStorageInfos);
}
if (closeFile) {
- src = closeFileCommitBlocks(pendingFile, storedBlock);
+ src = closeFileCommitBlocks(iFile, storedBlock);
} else {
// If this commit does not want to close the file, persist blocks
- src = persistBlocks(pendingFile, false);
+ src = persistBlocks(iFile, false);
}
} finally {
writeUnlock();
@@ -3972,10 +3972,8 @@ public class FSNamesystem implements Nam
* @throws IOException
*/
@VisibleForTesting
- String closeFileCommitBlocks(INodeFileUnderConstruction pendingFile,
- BlockInfo storedBlock)
+ String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
throws IOException {
-
String src = leaseManager.findPath(pendingFile);
// commit the last block and complete it if it has minimum replicas
@@ -3983,7 +3981,7 @@ public class FSNamesystem implements Nam
//remove lease, close file
finalizeINodeFileUnderConstruction(src, pendingFile,
- Snapshot.findLatestSnapshot(pendingFile, null));
+ Snapshot.findLatestSnapshot(pendingFile, null));
return src;
}
@@ -3996,8 +3994,8 @@ public class FSNamesystem implements Nam
* @throws IOException
*/
@VisibleForTesting
- String persistBlocks(INodeFileUnderConstruction pendingFile,
- boolean logRetryCache) throws IOException {
+ String persistBlocks(INodeFile pendingFile, boolean logRetryCache)
+ throws IOException {
String src = leaseManager.findPath(pendingFile);
dir.persistBlocks(src, pendingFile, logRetryCache);
return src;
@@ -5182,13 +5180,12 @@ public class FSNamesystem implements Nam
try {
for (Lease lease : leaseManager.getSortedLeases()) {
for (String path : lease.getPaths()) {
- final INodeFileUnderConstruction cons;
+ final INodeFile cons;
try {
- cons = INodeFileUnderConstruction.valueOf(dir.getINode(path), path);
+ cons = dir.getINode(path).asFile();
+ Preconditions.checkState(cons.isUnderConstruction());
} catch (UnresolvedLinkException e) {
throw new AssertionError("Lease files should reside on this FS");
- } catch (IOException e) {
- throw new RuntimeException(e);
}
BlockInfo[] blocks = cons.getBlocks();
if(blocks == null)
@@ -5764,7 +5761,7 @@ public class FSNamesystem implements Nam
return blockId;
}
- private INodeFileUnderConstruction checkUCBlock(ExtendedBlock block,
+ private INodeFile checkUCBlock(ExtendedBlock block,
String clientName) throws IOException {
assert hasWriteLock();
checkNameNodeSafeMode("Cannot get a new generation stamp and an "
@@ -5780,19 +5777,20 @@ public class FSNamesystem implements Nam
// check file inode
final INodeFile file = ((INode)storedBlock.getBlockCollection()).asFile();
- if (file==null || !file.isUnderConstruction()) {
+ if (file == null || !file.isUnderConstruction()) {
throw new IOException("The file " + storedBlock +
" belonged to does not exist or it is not under construction.");
}
// check lease
- INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)file;
- if (clientName == null || !clientName.equals(pendingFile.getClientName())) {
+ if (clientName == null
+ || !clientName.equals(file.getFileUnderConstructionFeature()
+ .getClientName())) {
throw new LeaseExpiredException("Lease mismatch: " + block +
" is accessed by a non lease holder " + clientName);
}
- return pendingFile;
+ return file;
}
/**
@@ -5903,8 +5901,7 @@ public class FSNamesystem implements Nam
throws IOException {
assert hasWriteLock();
// check the vadility of the block and lease holder name
- final INodeFileUnderConstruction pendingFile
- = checkUCBlock(oldBlock, clientName);
+ final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
final BlockInfoUnderConstruction blockinfo
= (BlockInfoUnderConstruction)pendingFile.getLastBlock();
@@ -5942,15 +5939,13 @@ public class FSNamesystem implements Nam
* Serializes leases.
*/
void saveFilesUnderConstruction(DataOutputStream out,
- Map<Long, INodeFileUnderConstruction> snapshotUCMap) throws IOException {
+ Map<Long, INodeFile> snapshotUCMap) throws IOException {
// This is run by an inferior thread of saveNamespace, which holds a read
// lock on our behalf. If we took the read lock here, we could block
// for fairness if a writer is waiting on the lock.
synchronized (leaseManager) {
- Map<String, INodeFileUnderConstruction> nodes =
- leaseManager.getINodesUnderConstruction();
- for (Map.Entry<String, INodeFileUnderConstruction> entry
- : nodes.entrySet()) {
+ Map<String, INodeFile> nodes = leaseManager.getINodesUnderConstruction();
+ for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
// TODO: for HDFS-5428, because of rename operations, some
// under-construction files that are
// in the current fs directory can also be captured in the
@@ -5959,13 +5954,11 @@ public class FSNamesystem implements Nam
}
out.writeInt(nodes.size() + snapshotUCMap.size()); // write the size
- for (Map.Entry<String, INodeFileUnderConstruction> entry
- : nodes.entrySet()) {
+ for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
FSImageSerialization.writeINodeUnderConstruction(
out, entry.getValue(), entry.getKey());
}
- for (Map.Entry<Long, INodeFileUnderConstruction> entry
- : snapshotUCMap.entrySet()) {
+ for (Map.Entry<Long, INodeFile> entry : snapshotUCMap.entrySet()) {
// for those snapshot INodeFileUC, we use "/.reserved/.inodes/<inodeid>"
// as their paths
StringBuilder b = new StringBuilder();
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Fri Nov 22 20:51:06 2013
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.S
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -205,23 +204,6 @@ public class INodeDirectory extends INod
return newDir;
}
- /**
- * Used when load fileUC from fsimage. The file to be replaced is actually
- * only in snapshot, thus may not be contained in the children list.
- * See HDFS-5428 for details.
- */
- public void replaceChildFileInSnapshot(INodeFile oldChild,
- final INodeFile newChild) {
- if (children != null) {
- final int i = searchChildren(newChild.getLocalNameBytes());
- if (i >= 0 && children.get(i).getId() == oldChild.getId()) {
- // no need to consider reference node here, since we already do the
- // replacement in FSImageFormat.Loader#loadFilesUnderConstruction
- children.set(i, newChild);
- }
- }
- }
-
/** Replace the given child with a new child. */
public void replaceChild(INode oldChild, final INode newChild,
final INodeMap inodeMap) {
@@ -291,17 +273,6 @@ public class INodeDirectory extends INod
return newChild;
}
- /** Replace a child {@link INodeFile} with an {@link INodeFileUnderConstructionWithSnapshot}. */
- INodeFileUnderConstructionWithSnapshot replaceChild4INodeFileUcWithSnapshot(
- final INodeFileUnderConstruction child, final INodeMap inodeMap) {
- Preconditions.checkArgument(!(child instanceof INodeFileUnderConstructionWithSnapshot),
- "Child file is already an INodeFileUnderConstructionWithSnapshot, child=" + child);
- final INodeFileUnderConstructionWithSnapshot newChild
- = new INodeFileUnderConstructionWithSnapshot(child, null);
- replaceChildFile(child, newChild, inodeMap);
- return newChild;
- }
-
@Override
public INodeDirectory recordModification(Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Fri Nov 22 20:51:06 2013
@@ -20,15 +20,15 @@ package org.apache.hadoop.hdfs.server.na
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
+import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.*;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
@@ -43,6 +43,22 @@ import com.google.common.base.Preconditi
@InterfaceAudience.Private
public class INodeFile extends INodeWithAdditionalFields
implements INodeFileAttributes, BlockCollection {
+ /**
+ * A feature contains specific information for a type of INodeFile. E.g.,
+ * we can have separate features for Under-Construction and Snapshot.
+ */
+ public static abstract class Feature {
+ private Feature nextFeature;
+
+ public Feature getNextFeature() {
+ return nextFeature;
+ }
+
+ public void setNextFeature(Feature next) {
+ this.nextFeature = next;
+ }
+ }
+
/** The same as valueOf(inode, path, false). */
public static INodeFile valueOf(INode inode, String path
) throws FileNotFoundException {
@@ -104,8 +120,11 @@ public class INodeFile extends INodeWith
private BlockInfo[] blocks;
- INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime, long atime,
- BlockInfo[] blklist, short replication, long preferredBlockSize) {
+ private Feature headFeature;
+
+ INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
+ long atime, BlockInfo[] blklist, short replication,
+ long preferredBlockSize) {
super(id, name, permissions, mtime, atime);
header = HeaderFormat.combineReplication(header, replication);
header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
@@ -116,6 +135,48 @@ public class INodeFile extends INodeWith
super(that);
this.header = that.header;
this.blocks = that.blocks;
+ this.headFeature = that.headFeature;
+ }
+
+ /**
+ * If the inode contains a {@link FileUnderConstructionFeature}, return it;
+ * otherwise, return null.
+ */
+ public final FileUnderConstructionFeature getFileUnderConstructionFeature() {
+ for (Feature f = this.headFeature; f != null; f = f.nextFeature) {
+ if (f instanceof FileUnderConstructionFeature) {
+ return (FileUnderConstructionFeature) f;
+ }
+ }
+ return null;
+ }
+
+ /** Is this file under construction? */
+ @Override // BlockCollection
+ public boolean isUnderConstruction() {
+ return getFileUnderConstructionFeature() != null;
+ }
+
+ void addFeature(Feature f) {
+ f.nextFeature = headFeature;
+ headFeature = f;
+ }
+
+ void removeFeature(Feature f) {
+ if (f == headFeature) {
+ headFeature = headFeature.nextFeature;
+ return;
+ } else if (headFeature != null) {
+ Feature prev = headFeature;
+ Feature curr = headFeature.nextFeature;
+ for (; curr != null && curr != f; prev = curr, curr = curr.nextFeature)
+ ;
+ if (curr != null) {
+ prev.nextFeature = curr.nextFeature;
+ return;
+ }
+ }
+ throw new IllegalStateException("Feature " + f + " not found.");
}
/** @return true unconditionally. */
@@ -130,22 +191,88 @@ public class INodeFile extends INodeWith
return this;
}
- /** Is this file under construction? */
- public boolean isUnderConstruction() {
- return false;
- }
+ /* Start of Under-Construction Feature */
/** Convert this file to an {@link INodeFileUnderConstruction}. */
- public INodeFileUnderConstruction toUnderConstruction(
- String clientName,
- String clientMachine,
+ public INodeFile toUnderConstruction(String clientName, String clientMachine,
DatanodeDescriptor clientNode) {
Preconditions.checkState(!isUnderConstruction(),
"file is already an INodeFileUnderConstruction");
- return new INodeFileUnderConstruction(this,
- clientName, clientMachine, clientNode);
+ FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
+ clientName, clientMachine, clientNode);
+ addFeature(uc);
+ return this;
}
+ /**
+ * Convert the file to a complete file, i.e., to remove the Under-Construction
+ * feature.
+ */
+ public INodeFile toCompleteFile(long mtime) {
+ FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
+ if (uc != null) {
+ assertAllBlocksComplete();
+ removeFeature(uc);
+ this.setModificationTime(mtime);
+ }
+ return this;
+ }
+
+ /** Assert all blocks are complete. */
+ private void assertAllBlocksComplete() {
+ if (blocks == null) {
+ return;
+ }
+ for (int i = 0; i < blocks.length; i++) {
+ Preconditions.checkState(blocks[i].isComplete(), "Failed to finalize"
+ + " %s %s since blocks[%s] is non-complete, where blocks=%s.",
+ getClass().getSimpleName(), this, i, Arrays.asList(blocks));
+ }
+ }
+
+ @Override //BlockCollection
+ public void setBlock(int index, BlockInfo blk) {
+ this.blocks[index] = blk;
+ }
+
+ @Override // BlockCollection
+ public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
+ DatanodeStorageInfo[] locations) throws IOException {
+ Preconditions.checkState(isUnderConstruction());
+
+ if (numBlocks() == 0) {
+ throw new IOException("Failed to set last block: File is empty.");
+ }
+ BlockInfoUnderConstruction ucBlock =
+ lastBlock.convertToBlockUnderConstruction(
+ BlockUCState.UNDER_CONSTRUCTION, locations);
+ ucBlock.setBlockCollection(this);
+ setBlock(numBlocks() - 1, ucBlock);
+ return ucBlock;
+ }
+
+ /**
+ * Remove a block from the block list. This block should be
+ * the last one on the list.
+ */
+ boolean removeLastBlock(Block oldblock) {
+ if (blocks == null || blocks.length == 0) {
+ return false;
+ }
+ int size_1 = blocks.length - 1;
+ if (!blocks[size_1].equals(oldblock)) {
+ return false;
+ }
+
+ //copy to a new list
+ BlockInfo[] newlist = new BlockInfo[size_1];
+ System.arraycopy(blocks, 0, newlist, 0, size_1);
+ setBlocks(newlist);
+ return true;
+ }
+
+ /* End of Under-Construction Feature */
+
@Override
public INodeFileAttributes getSnapshotINode(final Snapshot snapshot) {
return this;
@@ -266,11 +393,6 @@ public class INodeFile extends INodeWith
}
}
- /** Set the block of the file at the given index. */
- public void setBlock(int idx, BlockInfo blk) {
- this.blocks[idx] = blk;
- }
-
/** Set the blocks. */
public void setBlocks(BlockInfo[] blocks) {
this.blocks = blocks;
@@ -286,6 +408,11 @@ public class INodeFile extends INodeWith
// this only happens when deleting the current file
computeQuotaUsage(counts, false);
destroyAndCollectBlocks(collectedBlocks, removedINodes);
+ } else if (snapshot == null && prior != null) {
+ FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
+ if (uc != null) {
+ uc.cleanZeroSizeBlock(this, collectedBlocks);
+ }
}
return counts;
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Fri Nov 22 20:51:06 2013
@@ -182,9 +182,11 @@ public class LeaseManager {
/**
* Finds the pathname for the specified pendingFile
*/
- public synchronized String findPath(INodeFileUnderConstruction pendingFile)
+ public synchronized String findPath(INodeFile pendingFile)
throws IOException {
- Lease lease = getLease(pendingFile.getClientName());
+ FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
+ Preconditions.checkArgument(uc != null);
+ Lease lease = getLease(uc.getClientName());
if (lease != null) {
String src = lease.findPath(pendingFile);
if (src != null) {
@@ -253,7 +255,7 @@ public class LeaseManager {
/**
* @return the path associated with the pendingFile and null if not found.
*/
- private String findPath(INodeFileUnderConstruction pendingFile) {
+ private String findPath(INodeFile pendingFile) {
try {
for (String src : paths) {
INode node = fsnamesystem.dir.getINode(src);
@@ -433,14 +435,14 @@ public class LeaseManager {
* @return list of inodes
* @throws UnresolvedLinkException
*/
- Map<String, INodeFileUnderConstruction> getINodesUnderConstruction() {
- Map<String, INodeFileUnderConstruction> inodes =
- new TreeMap<String, INodeFileUnderConstruction>();
+ Map<String, INodeFile> getINodesUnderConstruction() {
+ Map<String, INodeFile> inodes = new TreeMap<String, INodeFile>();
for (String p : sortedLeasesByPath.keySet()) {
// verify that path exists in namespace
try {
- INode node = fsnamesystem.dir.getINode(p);
- inodes.put(p, INodeFileUnderConstruction.valueOf(node, p));
+ INodeFile node = INodeFile.valueOf(fsnamesystem.dir.getINode(p), p);
+ Preconditions.checkState(node.isUnderConstruction());
+ inodes.put(p, node);
} catch (IOException ioe) {
LOG.error(ioe);
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Fri Nov 22 20:51:06 2013
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.Quota;
@@ -594,14 +593,6 @@ public class INodeDirectoryWithSnapshot
}
@Override
- public void replaceChildFileInSnapshot(final INodeFile oldChild,
- final INodeFile newChild) {
- super.replaceChildFileInSnapshot(oldChild, newChild);
- diffs.replaceChild(ListType.DELETED, oldChild, newChild);
- diffs.replaceChild(ListType.CREATED, oldChild, newChild);
- }
-
- @Override
public void replaceChild(final INode oldChild, final INode newChild,
final INodeMap inodeMap) {
super.replaceChild(oldChild, newChild, inodeMap);
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java Fri Nov 22 20:51:06 2013
@@ -21,7 +21,6 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
@@ -48,15 +47,6 @@ public class INodeFileWithSnapshot exten
}
@Override
- public INodeFileUnderConstructionWithSnapshot toUnderConstruction(
- final String clientName,
- final String clientMachine,
- final DatanodeDescriptor clientNode) {
- return new INodeFileUnderConstructionWithSnapshot(this,
- clientName, clientMachine, clientNode, getDiffs());
- }
-
- @Override
public boolean isCurrentFileDeleted() {
return isCurrentFileDeleted;
}
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1543902-1544665
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html?rev=1544672&r1=1544671&r2=1544672&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html Fri Nov 22 20:51:06 2013
@@ -101,7 +101,7 @@
<p>
{#fs}
- {TotalLoad} files and directories, {BlocksTotal} blocks = {FilesTotal} total filesystem object(s).
+ {FilesTotal} files and directories, {BlocksTotal} blocks = {@math key="{FilesTotal}" method="add" operand="{BlocksTotal}"/} total filesystem object(s).
{#helper_fs_max_objects/}
{/fs}
</p>