You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/11/22 02:39:03 UTC
svn commit: r1544389 [1/2] - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/
src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/main/java/org/apache/hadoop/hdfs/server/name...
Author: szetszwo
Date: Fri Nov 22 01:39:02 2013
New Revision: 1544389
URL: http://svn.apache.org/r1544389
Log:
HDFS-5285. Flatten INodeFile hierarchy: Replace INodeFileUnderConstruction and INodeFileUnderConstructionWithSnapshot with FileUnderConstructionFeature. Contributed by jing9
Added:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
- copied, changed from r1544377, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
Removed:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Nov 22 01:39:02 2013
@@ -213,6 +213,10 @@ Trunk (Unreleased)
HDFS-5473. Consistent naming of user-visible caching classes and methods
(cmccabe)
+ HDFS-5285. Flatten INodeFile hierarchy: Replace INodeFileUnderConstruction
+ and INodeFileUnderConstructionWithSnapshot with FileUnderConstructionFeature.
+ (jing9 via szetszwo)
+
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java Fri Nov 22 01:39:02 2013
@@ -64,4 +64,21 @@ public interface BlockCollection {
* Get the name of the collection.
*/
public String getName();
+
+ /**
+ * Set the block at the given index.
+ */
+ public void setBlock(int index, BlockInfo blk);
+
+ /**
+ * Convert the last block of the collection to an under-construction block
+ * and set the locations.
+ */
+ public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
+ DatanodeDescriptor[] locations) throws IOException;
+
+ /**
+ * @return whether the block collection is under construction.
+ */
+ public boolean isUnderConstruction();
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Fri Nov 22 01:39:02 2013
@@ -560,7 +560,7 @@ public class BlockManager {
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- public boolean commitOrCompleteLastBlock(MutableBlockCollection bc,
+ public boolean commitOrCompleteLastBlock(BlockCollection bc,
Block commitBlock) throws IOException {
if(commitBlock == null)
return false; // not committing, this is a block allocation retry
@@ -583,7 +583,7 @@ public class BlockManager {
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- private BlockInfo completeBlock(final MutableBlockCollection bc,
+ private BlockInfo completeBlock(final BlockCollection bc,
final int blkIndex, boolean force) throws IOException {
if(blkIndex < 0)
return null;
@@ -616,7 +616,7 @@ public class BlockManager {
return blocksMap.replaceBlock(completeBlock);
}
- private BlockInfo completeBlock(final MutableBlockCollection bc,
+ private BlockInfo completeBlock(final BlockCollection bc,
final BlockInfo block, boolean force) throws IOException {
BlockInfo[] fileBlocks = bc.getBlocks();
for(int idx = 0; idx < fileBlocks.length; idx++)
@@ -631,7 +631,7 @@ public class BlockManager {
* regardless of whether enough replicas are present. This is necessary
* when tailing edit logs as a Standby.
*/
- public BlockInfo forceCompleteBlock(final MutableBlockCollection bc,
+ public BlockInfo forceCompleteBlock(final BlockCollection bc,
final BlockInfoUnderConstruction block) throws IOException {
block.commitBlock(block);
return completeBlock(bc, block, true);
@@ -652,7 +652,7 @@ public class BlockManager {
* @return the last block locations if the block is partial or null otherwise
*/
public LocatedBlock convertLastBlockToUnderConstruction(
- MutableBlockCollection bc) throws IOException {
+ BlockCollection bc) throws IOException {
BlockInfo oldBlock = bc.getLastBlock();
if(oldBlock == null ||
bc.getPreferredBlockSize() == oldBlock.getNumBytes())
@@ -1209,7 +1209,7 @@ public class BlockManager {
// block should belong to a file
bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append
- if(bc == null || bc instanceof MutableBlockCollection) {
+ if(bc == null || bc.isUnderConstruction()) {
neededReplications.remove(block, priority); // remove from neededReplications
continue;
}
@@ -1290,7 +1290,7 @@ public class BlockManager {
// block should belong to a file
bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append
- if(bc == null || bc instanceof MutableBlockCollection) {
+ if(bc == null || bc.isUnderConstruction()) {
neededReplications.remove(block, priority); // remove from neededReplications
rw.targets = null;
continue;
@@ -2145,7 +2145,7 @@ assert storedBlock.findDatanode(dn) < 0
int numCurrentReplica = countLiveNodes(storedBlock);
if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
&& numCurrentReplica >= minReplication) {
- completeBlock((MutableBlockCollection)storedBlock.getBlockCollection(), storedBlock, false);
+ completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
} else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block
// only complete blocks are counted towards that.
@@ -2215,7 +2215,7 @@ assert storedBlock.findDatanode(dn) < 0
if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
numLiveReplicas >= minReplication) {
- storedBlock = completeBlock((MutableBlockCollection)bc, storedBlock, false);
+ storedBlock = completeBlock(bc, storedBlock, false);
} else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block
// only complete blocks are counted towards that
@@ -2226,7 +2226,7 @@ assert storedBlock.findDatanode(dn) < 0
}
// if file is under construction, then done for now
- if (bc instanceof MutableBlockCollection) {
+ if (bc.isUnderConstruction()) {
return storedBlock;
}
@@ -2835,7 +2835,7 @@ assert storedBlock.findDatanode(dn) < 0
+ ", corrupt replicas: " + num.corruptReplicas()
+ ", decommissioned replicas: " + num.decommissionedReplicas()
+ ", excess replicas: " + num.excessReplicas()
- + ", Is Open File: " + (bc instanceof MutableBlockCollection)
+ + ", Is Open File: " + bc.isUnderConstruction()
+ ", Datanodes having this block: " + nodeList + ", Current Datanode: "
+ srcNode + ", Is current datanode decommissioning: "
+ srcNode.isDecommissionInProgress());
@@ -2894,7 +2894,7 @@ assert storedBlock.findDatanode(dn) < 0
if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
decommissionOnlyReplicas++;
}
- if (bc instanceof MutableBlockCollection) {
+ if (bc.isUnderConstruction()) {
underReplicatedInOpenFiles++;
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Nov 22 01:39:02 2013
@@ -276,13 +276,9 @@ public class FSDirectory implements Clos
* @throws UnresolvedLinkException
* @throws SnapshotAccessControlException
*/
- INodeFileUnderConstruction addFile(String path,
- PermissionStatus permissions,
- short replication,
- long preferredBlockSize,
- String clientName,
- String clientMachine,
- DatanodeDescriptor clientNode)
+ INodeFile addFile(String path, PermissionStatus permissions,
+ short replication, long preferredBlockSize, String clientName,
+ String clientMachine, DatanodeDescriptor clientNode)
throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException {
waitForReady();
@@ -300,11 +296,11 @@ public class FSDirectory implements Clos
if (!mkdirs(parent.toString(), permissions, true, modTime)) {
return null;
}
- INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
- namesystem.allocateNewInodeId(),
- permissions,replication,
- preferredBlockSize, modTime, clientName,
- clientMachine, clientNode);
+ INodeFile newNode = new INodeFile(namesystem.allocateNewInodeId(), null,
+ permissions, modTime, modTime, BlockInfo.EMPTY_ARRAY, replication,
+ preferredBlockSize);
+ newNode.toUnderConstruction(clientName, clientMachine, clientNode);
+
boolean added = false;
writeLock();
try {
@@ -336,8 +332,11 @@ public class FSDirectory implements Clos
final INodeFile newNode;
assert hasWriteLock();
if (underConstruction) {
- newNode = new INodeFileUnderConstruction(id, permissions, replication,
- preferredBlockSize, modificationTime, clientName, clientMachine, null);
+ newNode = new INodeFile(id, null, permissions, modificationTime,
+ modificationTime, BlockInfo.EMPTY_ARRAY, replication,
+ preferredBlockSize);
+ newNode.toUnderConstruction(clientName, clientMachine, null);
+
} else {
newNode = new INodeFile(id, null, permissions, modificationTime, atime,
BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize);
@@ -366,8 +365,8 @@ public class FSDirectory implements Clos
writeLock();
try {
- final INodeFileUnderConstruction fileINode =
- INodeFileUnderConstruction.valueOf(inodesInPath.getLastINode(), path);
+ final INodeFile fileINode = inodesInPath.getLastINode().asFile();
+ Preconditions.checkState(fileINode.isUnderConstruction());
// check quota limits and updated space consumed
updateCount(inodesInPath, 0, fileINode.getBlockDiskspace(), true);
@@ -397,8 +396,8 @@ public class FSDirectory implements Clos
/**
* Persist the block list for the inode.
*/
- void persistBlocks(String path, INodeFileUnderConstruction file,
- boolean logRetryCache) {
+ void persistBlocks(String path, INodeFile file, boolean logRetryCache) {
+ Preconditions.checkArgument(file.isUnderConstruction());
waitForReady();
writeLock();
@@ -437,8 +436,9 @@ public class FSDirectory implements Clos
* Remove a block from the file.
* @return Whether the block exists in the corresponding file
*/
- boolean removeBlock(String path, INodeFileUnderConstruction fileNode,
- Block block) throws IOException {
+ boolean removeBlock(String path, INodeFile fileNode, Block block)
+ throws IOException {
+ Preconditions.checkArgument(fileNode.isUnderConstruction());
waitForReady();
writeLock();
@@ -450,7 +450,8 @@ public class FSDirectory implements Clos
}
boolean unprotectedRemoveBlock(String path,
- INodeFileUnderConstruction fileNode, Block block) throws IOException {
+ INodeFile fileNode, Block block) throws IOException {
+ Preconditions.checkArgument(fileNode.isUnderConstruction());
// modify file-> block and blocksMap
boolean removed = fileNode.removeLastBlock(block);
if (!removed) {
@@ -1478,38 +1479,6 @@ public class FSDirectory implements Clos
}
/**
- * Replaces the specified INodeFile with the specified one.
- */
- void replaceINodeFile(String path, INodeFile oldnode,
- INodeFile newnode) throws IOException {
- writeLock();
- try {
- unprotectedReplaceINodeFile(path, oldnode, newnode);
- } finally {
- writeUnlock();
- }
- }
-
- /** Replace an INodeFile and record modification for the latest snapshot. */
- void unprotectedReplaceINodeFile(final String path, final INodeFile oldnode,
- final INodeFile newnode) {
- Preconditions.checkState(hasWriteLock());
-
- oldnode.getParent().replaceChild(oldnode, newnode, inodeMap);
- oldnode.clear();
-
- /* Currently oldnode and newnode are assumed to contain the same
- * blocks. Otherwise, blocks need to be removed from the blocksMap.
- */
- int index = 0;
- for (BlockInfo b : newnode.getBlocks()) {
- BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
- newnode.setBlock(index, info); // inode refers to the block in BlocksMap
- index++;
- }
- }
-
- /**
* Get a partial listing of the indicated directory
*
* @param src the directory name
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Fri Nov 22 01:39:02 2013
@@ -680,8 +680,8 @@ public class FSEditLog implements LogsPu
* Add open lease record to edit log.
* Records the block locations of the last block.
*/
- public void logOpenFile(String path, INodeFileUnderConstruction newNode,
- boolean toLogRpcIds) {
+ public void logOpenFile(String path, INodeFile newNode, boolean toLogRpcIds) {
+ Preconditions.checkArgument(newNode.isUnderConstruction());
AddOp op = AddOp.getInstance(cache.get())
.setInodeId(newNode.getId())
.setPath(path)
@@ -691,8 +691,8 @@ public class FSEditLog implements LogsPu
.setBlockSize(newNode.getPreferredBlockSize())
.setBlocks(newNode.getBlocks())
.setPermissionStatus(newNode.getPermissionStatus())
- .setClientName(newNode.getClientName())
- .setClientMachine(newNode.getClientMachine());
+ .setClientName(newNode.getFileUnderConstructionFeature().getClientName())
+ .setClientMachine(newNode.getFileUnderConstructionFeature().getClientMachine());
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
@@ -713,8 +713,8 @@ public class FSEditLog implements LogsPu
logEdit(op);
}
- public void logUpdateBlocks(String path, INodeFileUnderConstruction file,
- boolean toLogRpcIds) {
+ public void logUpdateBlocks(String path, INodeFile file, boolean toLogRpcIds) {
+ Preconditions.checkArgument(file.isUnderConstruction());
UpdateBlocksOp op = UpdateBlocksOp.getInstance(cache.get())
.setPath(path)
.setBlocks(file.getBlocks());
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Nov 22 01:39:02 2013
@@ -85,6 +85,7 @@ import org.apache.hadoop.hdfs.util.Chunk
import org.apache.hadoop.hdfs.util.Holder;
import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -369,15 +370,15 @@ public class FSEditLogLoader {
}
final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path);
- final INodeFile oldFile = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);
+ final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);
// Update the salient file attributes.
- oldFile.setAccessTime(addCloseOp.atime, null, fsDir.getINodeMap());
- oldFile.setModificationTime(addCloseOp.mtime, null, fsDir.getINodeMap());
- updateBlocks(fsDir, addCloseOp, oldFile);
+ file.setAccessTime(addCloseOp.atime, null, fsDir.getINodeMap());
+ file.setModificationTime(addCloseOp.mtime, null, fsDir.getINodeMap());
+ updateBlocks(fsDir, addCloseOp, file);
// Now close the file
- if (!oldFile.isUnderConstruction() &&
+ if (!file.isUnderConstruction() &&
logVersion <= LayoutVersion.BUGFIX_HDFS_2991_VERSION) {
// There was a bug (HDFS-2991) in hadoop < 0.23.1 where OP_CLOSE
// could show up twice in a row. But after that version, this
@@ -387,11 +388,9 @@ public class FSEditLogLoader {
}
// One might expect that you could use removeLease(holder, path) here,
// but OP_CLOSE doesn't serialize the holder. So, remove by path.
- if (oldFile.isUnderConstruction()) {
- INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
+ if (file.isUnderConstruction()) {
fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
- INodeFile newFile = ucFile.toINodeFile(ucFile.getModificationTime());
- fsDir.unprotectedReplaceINodeFile(addCloseOp.path, ucFile, newFile);
+ file.toCompleteFile(file.getModificationTime());
}
break;
}
@@ -564,9 +563,8 @@ public class FSEditLogLoader {
Lease lease = fsNamesys.leaseManager.getLease(
reassignLeaseOp.leaseHolder);
- INodeFileUnderConstruction pendingFile =
- INodeFileUnderConstruction.valueOf(
- fsDir.getINode(reassignLeaseOp.path), reassignLeaseOp.path);
+ INodeFile pendingFile = fsDir.getINode(reassignLeaseOp.path).asFile();
+ Preconditions.checkState(pendingFile.isUnderConstruction());
fsNamesys.reassignLeaseInternal(lease,
reassignLeaseOp.path, reassignLeaseOp.newHolder, pendingFile);
break;
@@ -751,9 +749,8 @@ public class FSEditLogLoader {
if (oldBlock instanceof BlockInfoUnderConstruction &&
(!isLastBlock || op.shouldCompleteLastBlock())) {
changeMade = true;
- fsNamesys.getBlockManager().forceCompleteBlock(
- (INodeFileUnderConstruction)file,
- (BlockInfoUnderConstruction)oldBlock);
+ fsNamesys.getBlockManager().forceCompleteBlock(file,
+ (BlockInfoUnderConstruction) oldBlock);
}
if (changeMade) {
// The state or gen-stamp of the block has changed. So, we may be
@@ -774,8 +771,7 @@ public class FSEditLogLoader {
+ path);
}
Block oldBlock = oldBlocks[oldBlocks.length - 1];
- boolean removed = fsDir.unprotectedRemoveBlock(path,
- (INodeFileUnderConstruction) file, oldBlock);
+ boolean removed = fsDir.unprotectedRemoveBlock(path, file, oldBlock);
if (!removed && !(op instanceof UpdateBlocksOp)) {
throw new IOException("Trying to delete non-existant block " + oldBlock);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Fri Nov 22 01:39:02 2013
@@ -55,7 +55,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
@@ -659,13 +658,10 @@ public class FSImageFormat {
// file
// read blocks
- BlockInfo[] blocks = null;
- if (numBlocks >= 0) {
- blocks = new BlockInfo[numBlocks];
- for (int j = 0; j < numBlocks; j++) {
- blocks[j] = new BlockInfo(replication);
- blocks[j].readFields(in);
- }
+ BlockInfo[] blocks = new BlockInfo[numBlocks];
+ for (int j = 0; j < numBlocks; j++) {
+ blocks[j] = new BlockInfo(replication);
+ blocks[j].readFields(in);
}
String clientName = "";
@@ -700,10 +696,9 @@ public class FSImageFormat {
final INodeFile file = new INodeFile(inodeId, localName, permissions,
modificationTime, atime, blocks, replication, blockSize);
if (underConstruction) {
- INodeFileUnderConstruction fileUC = new INodeFileUnderConstruction(
- file, clientName, clientMachine, null);
- return fileDiffs == null ? fileUC :
- new INodeFileUnderConstructionWithSnapshot(fileUC, fileDiffs);
+ file.toUnderConstruction(clientName, clientMachine, null);
+ return fileDiffs == null ? file : new INodeFileWithSnapshot(file,
+ fileDiffs);
} else {
return fileDiffs == null ? file :
new INodeFileWithSnapshot(file, fileDiffs);
@@ -829,8 +824,8 @@ public class FSImageFormat {
LOG.info("Number of files under construction = " + size);
for (int i = 0; i < size; i++) {
- INodeFileUnderConstruction cons = FSImageSerialization
- .readINodeUnderConstruction(in, namesystem, getLayoutVersion());
+ INodeFile cons = FSImageSerialization.readINodeUnderConstruction(in,
+ namesystem, getLayoutVersion());
counter.increment();
// verify that file exists in namespace
@@ -848,33 +843,21 @@ public class FSImageFormat {
final INodesInPath iip = fsDir.getLastINodeInPath(path);
oldnode = INodeFile.valueOf(iip.getINode(0), path);
}
-
- cons.setLocalName(oldnode.getLocalNameBytes());
- INodeReference parentRef = oldnode.getParentReference();
- if (parentRef != null) {
- cons.setParentReference(parentRef);
- } else {
- cons.setParent(oldnode.getParent());
- }
- if (oldnode instanceof INodeFileWithSnapshot) {
- cons = new INodeFileUnderConstructionWithSnapshot(cons,
- ((INodeFileWithSnapshot) oldnode).getDiffs());
+ FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
+ oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine(),
+ uc.getClientNode());
+ if (oldnode.numBlocks() > 0) {
+ BlockInfo ucBlock = cons.getLastBlock();
+ // we do not replace the inode, just replace the last block of oldnode
+ BlockInfo info = namesystem.getBlockManager().addBlockCollection(
+ ucBlock, oldnode);
+ oldnode.setBlock(oldnode.numBlocks() - 1, info);
}
if (!inSnapshot) {
- fsDir.replaceINodeFile(path, oldnode, cons);
- namesystem.leaseManager.addLease(cons.getClientName(), path);
- } else {
- if (parentRef != null) {
- // replace oldnode with cons
- parentRef.setReferredINode(cons);
- } else {
- // replace old node in its parent's children list and deleted list
- oldnode.getParent().replaceChildFileInSnapshot(oldnode, cons);
- namesystem.dir.addToInodeMap(cons);
- updateBlocksMap(cons);
- }
+ namesystem.leaseManager.addLease(cons
+ .getFileUnderConstructionFeature().getClientName(), path);
}
}
}
@@ -955,8 +938,8 @@ public class FSImageFormat {
private MD5Hash savedDigest;
private final ReferenceMap referenceMap = new ReferenceMap();
- private final Map<Long, INodeFileUnderConstruction> snapshotUCMap =
- new HashMap<Long, INodeFileUnderConstruction>();
+ private final Map<Long, INodeFile> snapshotUCMap =
+ new HashMap<Long, INodeFile>();
/** @throws IllegalStateException if the instance has not yet saved an image */
private void checkSaved() {
@@ -1096,8 +1079,7 @@ public class FSImageFormat {
dirNum++;
} else if (inSnapshot && child.isFile()
&& child.asFile().isUnderConstruction()) {
- this.snapshotUCMap.put(child.getId(),
- (INodeFileUnderConstruction) child.asFile());
+ this.snapshotUCMap.put(child.getId(), child.asFile());
}
if (i++ % 50 == 0) {
context.checkCancelled();
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Fri Nov 22 01:39:02 2013
@@ -108,7 +108,7 @@ public class FSImageSerialization {
// Helper function that reads in an INodeUnderConstruction
// from the input stream
//
- static INodeFileUnderConstruction readINodeUnderConstruction(
+ static INodeFile readINodeUnderConstruction(
DataInput in, FSNamesystem fsNamesys, int imgVersion)
throws IOException {
byte[] name = readBytes(in);
@@ -141,25 +141,17 @@ public class FSImageSerialization {
int numLocs = in.readInt();
assert numLocs == 0 : "Unexpected block locations";
- return new INodeFileUnderConstruction(inodeId,
- name,
- blockReplication,
- modificationTime,
- preferredBlockSize,
- blocks,
- perm,
- clientName,
- clientMachine,
- null);
+ INodeFile file = new INodeFile(inodeId, name, perm, modificationTime,
+ modificationTime, blocks, blockReplication, preferredBlockSize);
+ file.toUnderConstruction(clientName, clientMachine, null);
+ return file;
}
// Helper function that writes an INodeUnderConstruction
// into the input stream
//
- static void writeINodeUnderConstruction(DataOutputStream out,
- INodeFileUnderConstruction cons,
- String path)
- throws IOException {
+ static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons,
+ String path) throws IOException {
writeString(path, out);
out.writeLong(cons.getId());
out.writeShort(cons.getFileReplication());
@@ -169,8 +161,9 @@ public class FSImageSerialization {
writeBlocks(cons.getBlocks(), out);
cons.getPermissionStatus().write(out);
- writeString(cons.getClientName(), out);
- writeString(cons.getClientMachine(), out);
+ FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
+ writeString(uc.getClientName(), out);
+ writeString(uc.getClientMachine(), out);
out.writeInt(0); // do not store locations of last block
}
@@ -194,9 +187,9 @@ public class FSImageSerialization {
SnapshotFSImageFormat.saveFileDiffList(file, out);
if (writeUnderConstruction) {
- if (file instanceof INodeFileUnderConstruction) {
+ if (file.isUnderConstruction()) {
out.writeBoolean(true);
- final INodeFileUnderConstruction uc = (INodeFileUnderConstruction)file;
+ final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
writeString(uc.getClientName(), out);
writeString(uc.getClientMachine(), out);
} else {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Nov 22 01:39:02 2013
@@ -2208,13 +2208,14 @@ public class FSNamesystem implements Nam
final DatanodeDescriptor clientNode =
blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
- INodeFileUnderConstruction newNode = dir.addFile(src, permissions,
- replication, blockSize, holder, clientMachine, clientNode);
+ INodeFile newNode = dir.addFile(src, permissions, replication, blockSize,
+ holder, clientMachine, clientNode);
if (newNode == null) {
throw new IOException("DIR* NameSystem.startFile: " +
"Unable to add file to namespace.");
}
- leaseManager.addLease(newNode.getClientName(), src);
+ leaseManager.addLease(newNode.getFileUnderConstructionFeature()
+ .getClientName(), src);
// record file record in log, record new generation stamp
getEditLog().logOpenFile(src, newNode, logRetryEntry);
@@ -2306,11 +2307,11 @@ public class FSNamesystem implements Nam
boolean writeToEditLog, Snapshot latestSnapshot, boolean logRetryCache)
throws IOException {
file = file.recordModification(latestSnapshot, dir.getINodeMap());
- final INodeFileUnderConstruction cons = file.toUnderConstruction(
- leaseHolder, clientMachine, clientNode);
+ final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine,
+ clientNode);
- dir.replaceINodeFile(src, file, cons);
- leaseManager.addLease(cons.getClientName(), src);
+ leaseManager.addLease(cons.getFileUnderConstructionFeature()
+ .getClientName(), src);
LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
if (writeToEditLog) {
@@ -2373,7 +2374,6 @@ public class FSNamesystem implements Nam
throws IOException {
assert hasWriteLock();
if (fileInode != null && fileInode.isUnderConstruction()) {
- INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) fileInode;
//
// If the file is under construction , then it must be in our
// leases. Find the appropriate lease record.
@@ -2396,7 +2396,9 @@ public class FSNamesystem implements Nam
//
// Find the original holder.
//
- lease = leaseManager.getLease(pendingFile.getClientName());
+ FileUnderConstructionFeature uc = fileInode.getFileUnderConstructionFeature();
+ String clientName = uc.getClientName();
+ lease = leaseManager.getLease(clientName);
if (lease == null) {
throw new AlreadyBeingCreatedException(
"failed to create file " + src + " for " + holder +
@@ -2407,26 +2409,26 @@ public class FSNamesystem implements Nam
// close now: no need to wait for soft lease expiration and
// close only the file src
LOG.info("recoverLease: " + lease + ", src=" + src +
- " from client " + pendingFile.getClientName());
+ " from client " + clientName);
internalReleaseLease(lease, src, holder);
} else {
- assert lease.getHolder().equals(pendingFile.getClientName()) :
+ assert lease.getHolder().equals(clientName) :
"Current lease holder " + lease.getHolder() +
- " does not match file creator " + pendingFile.getClientName();
+ " does not match file creator " + clientName;
//
// If the original holder has not renewed in the last SOFTLIMIT
// period, then start lease recovery.
//
if (lease.expiredSoftLimit()) {
LOG.info("startFile: recover " + lease + ", src=" + src + " client "
- + pendingFile.getClientName());
+ + clientName);
boolean isClosed = internalReleaseLease(lease, src, null);
if(!isClosed)
throw new RecoveryInProgressException(
"Failed to close file " + src +
". Lease recovery is in progress. Try again later.");
} else {
- final BlockInfo lastBlock = pendingFile.getLastBlock();
+ final BlockInfo lastBlock = fileInode.getLastBlock();
if (lastBlock != null
&& lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
throw new RecoveryInProgressException("Recovery in progress, file ["
@@ -2435,8 +2437,8 @@ public class FSNamesystem implements Nam
throw new AlreadyBeingCreatedException("Failed to create file ["
+ src + "] for [" + holder + "] on client [" + clientMachine
+ "], because this file is already being created by ["
- + pendingFile.getClientName() + "] on ["
- + pendingFile.getClientMachine() + "]");
+ + clientName + "] on ["
+ + uc.getClientMachine() + "]");
}
}
}
@@ -2566,8 +2568,7 @@ public class FSNamesystem implements Nam
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
final INode[] inodes = analyzeFileState(
src, fileId, clientName, previous, onRetryBlock).getINodes();
- final INodeFileUnderConstruction pendingFile =
- (INodeFileUnderConstruction) inodes[inodes.length - 1].asFile();
+ final INodeFile pendingFile = inodes[inodes.length - 1].asFile();
if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
// This is a retry. Just return the last block if having locations.
@@ -2580,7 +2581,7 @@ public class FSNamesystem implements Nam
+ maxBlocksPerFile);
}
blockSize = pendingFile.getPreferredBlockSize();
- clientNode = pendingFile.getClientNode();
+ clientNode = pendingFile.getFileUnderConstructionFeature().getClientNode();
replication = pendingFile.getFileReplication();
} finally {
readUnlock();
@@ -2604,8 +2605,7 @@ public class FSNamesystem implements Nam
INodesInPath inodesInPath =
analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
INode[] inodes = inodesInPath.getINodes();
- final INodeFileUnderConstruction pendingFile =
- (INodeFileUnderConstruction) inodes[inodes.length - 1].asFile();
+ final INodeFile pendingFile = inodes[inodes.length - 1].asFile();
if (onRetryBlock[0] != null) {
if (onRetryBlock[0].getLocations().length > 0) {
@@ -2660,7 +2660,7 @@ public class FSNamesystem implements Nam
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
final INodesInPath iip = dir.getINodesInPath4Write(src);
- final INodeFileUnderConstruction pendingFile
+ final INodeFile pendingFile
= checkLease(src, fileId, clientName, iip.getLastINode());
BlockInfo lastBlockInFile = pendingFile.getLastBlock();
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
@@ -2766,8 +2766,8 @@ public class FSNamesystem implements Nam
src = FSDirectory.resolvePath(src, pathComponents, dir);
//check lease
- final INodeFileUnderConstruction file = checkLease(src, clientName);
- clientnode = file.getClientNode();
+ final INodeFile file = checkLease(src, clientName);
+ clientnode = file.getFileUnderConstructionFeature().getClientNode();
preferredblocksize = file.getPreferredBlockSize();
//find datanode descriptors
@@ -2813,7 +2813,7 @@ public class FSNamesystem implements Nam
//
// Remove the block from the pending creates list
//
- INodeFileUnderConstruction file = checkLease(src, holder);
+ INodeFile file = checkLease(src, holder);
boolean removed = dir.removeBlock(src, file,
ExtendedBlock.getLocalBlock(b));
if (!removed) {
@@ -2835,16 +2835,15 @@ public class FSNamesystem implements Nam
}
/** make sure that we still have the lease on this file. */
- private INodeFileUnderConstruction checkLease(String src, String holder)
+ private INodeFile checkLease(String src, String holder)
throws LeaseExpiredException, UnresolvedLinkException,
FileNotFoundException {
return checkLease(src, INodeId.GRANDFATHER_INODE_ID, holder,
dir.getINode(src));
}
- private INodeFileUnderConstruction checkLease(String src, long fileId,
- String holder, INode inode) throws LeaseExpiredException,
- FileNotFoundException {
+ private INodeFile checkLease(String src, long fileId, String holder,
+ INode inode) throws LeaseExpiredException, FileNotFoundException {
assert hasReadLock();
if (inode == null || !inode.isFile()) {
Lease lease = leaseManager.getLease(holder);
@@ -2861,13 +2860,13 @@ public class FSNamesystem implements Nam
+ (lease != null ? lease.toString()
: "Holder " + holder + " does not have any open files."));
}
- INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)file;
- if (holder != null && !pendingFile.getClientName().equals(holder)) {
+ String clientName = file.getFileUnderConstructionFeature().getClientName();
+ if (holder != null && !clientName.equals(holder)) {
throw new LeaseExpiredException("Lease mismatch on " + src + " owned by "
- + pendingFile.getClientName() + " but is accessed by " + holder);
+ + clientName + " but is accessed by " + holder);
}
- INodeId.checkId(fileId, pendingFile);
- return pendingFile;
+ INodeId.checkId(fileId, file);
+ return file;
}
/**
@@ -2908,7 +2907,7 @@ public class FSNamesystem implements Nam
UnresolvedLinkException, IOException {
assert hasWriteLock();
final INodesInPath iip = dir.getLastINodeInPath(src);
- final INodeFileUnderConstruction pendingFile;
+ final INodeFile pendingFile;
try {
pendingFile = checkLease(src, fileId, holder, iip.getINode(0));
} catch (LeaseExpiredException lee) {
@@ -3599,9 +3598,10 @@ public class FSNamesystem implements Nam
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot fsync file " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
- INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
+ INodeFile pendingFile = checkLease(src, clientName);
if (lastBlockLength > 0) {
- pendingFile.updateLengthOfLastBlock(lastBlockLength);
+ pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock(
+ pendingFile, lastBlockLength);
}
dir.persistBlocks(src, pendingFile, false);
} finally {
@@ -3632,8 +3632,7 @@ public class FSNamesystem implements Nam
assert hasWriteLock();
final INodesInPath iip = dir.getLastINodeInPath(src);
- final INodeFileUnderConstruction pendingFile
- = INodeFileUnderConstruction.valueOf(iip.getINode(0), src);
+ final INodeFile pendingFile = iip.getINode(0).asFile();
int nrBlocks = pendingFile.numBlocks();
BlockInfo[] blocks = pendingFile.getBlocks();
@@ -3755,7 +3754,7 @@ public class FSNamesystem implements Nam
}
private Lease reassignLease(Lease lease, String src, String newHolder,
- INodeFileUnderConstruction pendingFile) {
+ INodeFile pendingFile) {
assert hasWriteLock();
if(newHolder == null)
return lease;
@@ -3765,15 +3764,16 @@ public class FSNamesystem implements Nam
}
Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
- INodeFileUnderConstruction pendingFile) {
+ INodeFile pendingFile) {
assert hasWriteLock();
- pendingFile.setClientName(newHolder);
+ pendingFile.getFileUnderConstructionFeature().setClientName(newHolder);
return leaseManager.reassignLease(lease, src, newHolder);
}
- private void commitOrCompleteLastBlock(final INodeFileUnderConstruction fileINode,
+ private void commitOrCompleteLastBlock(final INodeFile fileINode,
final Block commitBlock) throws IOException {
assert hasWriteLock();
+ Preconditions.checkArgument(fileINode.isUnderConstruction());
if (!blockManager.commitOrCompleteLastBlock(fileINode, commitBlock)) {
return;
}
@@ -3790,19 +3790,21 @@ public class FSNamesystem implements Nam
}
}
- private void finalizeINodeFileUnderConstruction(String src,
- INodeFileUnderConstruction pendingFile, Snapshot latestSnapshot)
- throws IOException, UnresolvedLinkException {
+ private void finalizeINodeFileUnderConstruction(String src,
+ INodeFile pendingFile, Snapshot latestSnapshot) throws IOException,
+ UnresolvedLinkException {
assert hasWriteLock();
- leaseManager.removeLease(pendingFile.getClientName(), src);
+ FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
+ Preconditions.checkArgument(uc != null);
+ leaseManager.removeLease(uc.getClientName(), src);
pendingFile = pendingFile.recordModification(latestSnapshot,
dir.getINodeMap());
// The file is no longer pending.
- // Create permanent INode, update blocks
- final INodeFile newFile = pendingFile.toINodeFile(now());
- dir.replaceINodeFile(src, pendingFile, newFile);
+ // Create permanent INode, update blocks. No need to replace the inode here
+ // since we just remove the uc feature from pendingFile
+ final INodeFile newFile = pendingFile.toCompleteFile(now());
// close file and persist block allocations for this file
dir.closeFile(src, newFile);
@@ -3819,12 +3821,12 @@ public class FSNamesystem implements Nam
public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) {
assert hasReadLock();
final BlockCollection bc = blockUC.getBlockCollection();
- if (bc == null || !(bc instanceof INodeFileUnderConstruction)) {
+ if (bc == null || !(bc instanceof INodeFile)
+ || !((INodeFile) bc).isUnderConstruction()) {
return false;
}
- INodeFileUnderConstruction inodeUC = (INodeFileUnderConstruction) blockUC
- .getBlockCollection();
+ INodeFile inodeUC = (INodeFile) bc;
String fullName = inodeUC.getName();
try {
if (fullName != null && fullName.startsWith(Path.SEPARATOR)
@@ -3902,11 +3904,9 @@ public class FSNamesystem implements Nam
+ recoveryId + " for block " + lastblock);
}
- INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)iFile;
-
if (deleteblock) {
Block blockToDel = ExtendedBlock.getLocalBlock(lastblock);
- boolean remove = pendingFile.removeLastBlock(blockToDel);
+ boolean remove = iFile.removeLastBlock(blockToDel);
if (remove) {
blockManager.removeBlockFromMap(storedBlock);
}
@@ -3944,14 +3944,14 @@ public class FSNamesystem implements Nam
// add pipeline locations into the INodeUnderConstruction
DatanodeDescriptor[] targetArray =
new DatanodeDescriptor[targetList.size()];
- pendingFile.setLastBlock(storedBlock, targetList.toArray(targetArray));
+ iFile.setLastBlock(storedBlock, targetList.toArray(targetArray));
}
if (closeFile) {
- src = closeFileCommitBlocks(pendingFile, storedBlock);
+ src = closeFileCommitBlocks(iFile, storedBlock);
} else {
// If this commit does not want to close the file, persist blocks
- src = persistBlocks(pendingFile, false);
+ src = persistBlocks(iFile, false);
}
} finally {
writeUnlock();
@@ -3976,10 +3976,8 @@ public class FSNamesystem implements Nam
* @throws IOException
*/
@VisibleForTesting
- String closeFileCommitBlocks(INodeFileUnderConstruction pendingFile,
- BlockInfo storedBlock)
+ String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
throws IOException {
-
String src = leaseManager.findPath(pendingFile);
// commit the last block and complete it if it has minimum replicas
@@ -3987,7 +3985,7 @@ public class FSNamesystem implements Nam
//remove lease, close file
finalizeINodeFileUnderConstruction(src, pendingFile,
- Snapshot.findLatestSnapshot(pendingFile, null));
+ Snapshot.findLatestSnapshot(pendingFile, null));
return src;
}
@@ -4000,8 +3998,8 @@ public class FSNamesystem implements Nam
* @throws IOException
*/
@VisibleForTesting
- String persistBlocks(INodeFileUnderConstruction pendingFile,
- boolean logRetryCache) throws IOException {
+ String persistBlocks(INodeFile pendingFile, boolean logRetryCache)
+ throws IOException {
String src = leaseManager.findPath(pendingFile);
dir.persistBlocks(src, pendingFile, logRetryCache);
return src;
@@ -5186,13 +5184,12 @@ public class FSNamesystem implements Nam
try {
for (Lease lease : leaseManager.getSortedLeases()) {
for (String path : lease.getPaths()) {
- final INodeFileUnderConstruction cons;
+ final INodeFile cons;
try {
- cons = INodeFileUnderConstruction.valueOf(dir.getINode(path), path);
+ cons = dir.getINode(path).asFile();
+ Preconditions.checkState(cons.isUnderConstruction());
} catch (UnresolvedLinkException e) {
throw new AssertionError("Lease files should reside on this FS");
- } catch (IOException e) {
- throw new RuntimeException(e);
}
BlockInfo[] blocks = cons.getBlocks();
if(blocks == null)
@@ -5768,7 +5765,7 @@ public class FSNamesystem implements Nam
return blockId;
}
- private INodeFileUnderConstruction checkUCBlock(ExtendedBlock block,
+ private INodeFile checkUCBlock(ExtendedBlock block,
String clientName) throws IOException {
assert hasWriteLock();
checkNameNodeSafeMode("Cannot get a new generation stamp and an "
@@ -5784,19 +5781,20 @@ public class FSNamesystem implements Nam
// check file inode
final INodeFile file = ((INode)storedBlock.getBlockCollection()).asFile();
- if (file==null || !file.isUnderConstruction()) {
+ if (file == null || !file.isUnderConstruction()) {
throw new IOException("The file " + storedBlock +
" belonged to does not exist or it is not under construction.");
}
// check lease
- INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)file;
- if (clientName == null || !clientName.equals(pendingFile.getClientName())) {
+ if (clientName == null
+ || !clientName.equals(file.getFileUnderConstructionFeature()
+ .getClientName())) {
throw new LeaseExpiredException("Lease mismatch: " + block +
" is accessed by a non lease holder " + clientName);
}
- return pendingFile;
+ return file;
}
/**
@@ -5905,8 +5903,7 @@ public class FSNamesystem implements Nam
throws IOException {
assert hasWriteLock();
// check the vadility of the block and lease holder name
- final INodeFileUnderConstruction pendingFile
- = checkUCBlock(oldBlock, clientName);
+ final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
final BlockInfoUnderConstruction blockinfo
= (BlockInfoUnderConstruction)pendingFile.getLastBlock();
@@ -5950,15 +5947,13 @@ public class FSNamesystem implements Nam
* Serializes leases.
*/
void saveFilesUnderConstruction(DataOutputStream out,
- Map<Long, INodeFileUnderConstruction> snapshotUCMap) throws IOException {
+ Map<Long, INodeFile> snapshotUCMap) throws IOException {
// This is run by an inferior thread of saveNamespace, which holds a read
// lock on our behalf. If we took the read lock here, we could block
// for fairness if a writer is waiting on the lock.
synchronized (leaseManager) {
- Map<String, INodeFileUnderConstruction> nodes =
- leaseManager.getINodesUnderConstruction();
- for (Map.Entry<String, INodeFileUnderConstruction> entry
- : nodes.entrySet()) {
+ Map<String, INodeFile> nodes = leaseManager.getINodesUnderConstruction();
+ for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
// TODO: for HDFS-5428, because of rename operations, some
// under-construction files that are
// in the current fs directory can also be captured in the
@@ -5967,13 +5962,11 @@ public class FSNamesystem implements Nam
}
out.writeInt(nodes.size() + snapshotUCMap.size()); // write the size
- for (Map.Entry<String, INodeFileUnderConstruction> entry
- : nodes.entrySet()) {
+ for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
FSImageSerialization.writeINodeUnderConstruction(
out, entry.getValue(), entry.getKey());
}
- for (Map.Entry<Long, INodeFileUnderConstruction> entry
- : snapshotUCMap.entrySet()) {
+ for (Map.Entry<Long, INodeFile> entry : snapshotUCMap.entrySet()) {
// for those snapshot INodeFileUC, we use "/.reserved/.inodes/<inodeid>"
// as their paths
StringBuilder b = new StringBuilder();
Copied: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java (from r1544377, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java)
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java?p2=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java&r1=1544377&r2=1544389&rev=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java Fri Nov 22 01:39:02 2013
@@ -17,79 +17,27 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import java.io.FileNotFoundException;
import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.namenode.Quota.Counts;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-
-import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
/**
* I-node for file being written.
*/
@InterfaceAudience.Private
-public class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection {
- /** Cast INode to INodeFileUnderConstruction. */
- public static INodeFileUnderConstruction valueOf(INode inode, String path
- ) throws FileNotFoundException {
- final INodeFile file = INodeFile.valueOf(inode, path);
- if (!file.isUnderConstruction()) {
- throw new FileNotFoundException("File is not under construction: " + path);
- }
- return (INodeFileUnderConstruction)file;
- }
-
- private String clientName; // lease holder
+public class FileUnderConstructionFeature extends INodeFile.Feature {
+ private String clientName; // lease holder
private final String clientMachine;
- private final DatanodeDescriptor clientNode; // if client is a cluster node too.
-
- INodeFileUnderConstruction(long id,
- PermissionStatus permissions,
- short replication,
- long preferredBlockSize,
- long modTime,
- String clientName,
- String clientMachine,
- DatanodeDescriptor clientNode) {
- this(id, null, replication, modTime, preferredBlockSize, BlockInfo.EMPTY_ARRAY,
- permissions, clientName, clientMachine, clientNode);
- }
-
- INodeFileUnderConstruction(long id,
- byte[] name,
- short blockReplication,
- long modificationTime,
- long preferredBlockSize,
- BlockInfo[] blocks,
- PermissionStatus perm,
- String clientName,
- String clientMachine,
- DatanodeDescriptor clientNode) {
- super(id, name, perm, modificationTime, modificationTime,
- blocks, blockReplication, preferredBlockSize);
- this.clientName = clientName;
- this.clientMachine = clientMachine;
- this.clientNode = clientNode;
- }
-
- public INodeFileUnderConstruction(final INodeFile that,
- final String clientName,
+ // if client is a cluster node too.
+ private final DatanodeDescriptor clientNode;
+
+ public FileUnderConstructionFeature(final String clientName,
final String clientMachine,
final DatanodeDescriptor clientNode) {
- super(that);
this.clientName = clientName;
this.clientMachine = clientMachine;
this.clientNode = clientNode;
@@ -111,137 +59,41 @@ public class INodeFileUnderConstruction
return clientNode;
}
- /** @return true unconditionally. */
- @Override
- public final boolean isUnderConstruction() {
- return true;
- }
-
/**
- * Converts an INodeFileUnderConstruction to an INodeFile.
- * The original modification time is used as the access time.
- * The new modification is the specified mtime.
+ * Update the length for the last block
+ *
+ * @param lastBlockLength
+ * The length of the last block reported from client
+ * @throws IOException
*/
- protected INodeFile toINodeFile(long mtime) {
- assertAllBlocksComplete();
-
- final INodeFile f = new INodeFile(getId(), getLocalNameBytes(),
- getPermissionStatus(), mtime, getModificationTime(),
- getBlocks(), getFileReplication(), getPreferredBlockSize());
- f.setParent(getParent());
- return f;
- }
-
- @Override
- public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes, final boolean countDiffChange)
- throws QuotaExceededException {
- if (snapshot == null && prior != null) {
- cleanZeroSizeBlock(collectedBlocks);
- return Counts.newInstance();
- } else {
- return super.cleanSubtree(snapshot, prior, collectedBlocks,
- removedINodes, countDiffChange);
- }
+ void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
+ throws IOException {
+ BlockInfo lastBlock = f.getLastBlock();
+ assert (lastBlock != null) : "The last block for path "
+ + f.getFullPathName() + " is null when updating its length";
+ assert (lastBlock instanceof BlockInfoUnderConstruction)
+ : "The last block for path " + f.getFullPathName()
+ + " is not a BlockInfoUnderConstruction when updating its length";
+ lastBlock.setNumBytes(lastBlockLength);
}
-
+
/**
* When deleting a file in the current fs directory, and the file is contained
- * in a snapshot, we should delete the last block if it's under construction
+ * in a snapshot, we should delete the last block if it's under construction
* and its size is 0.
*/
- private void cleanZeroSizeBlock(final BlocksMapUpdateInfo collectedBlocks) {
- final BlockInfo[] blocks = getBlocks();
+ void cleanZeroSizeBlock(final INodeFile f,
+ final BlocksMapUpdateInfo collectedBlocks) {
+ final BlockInfo[] blocks = f.getBlocks();
if (blocks != null && blocks.length > 0
&& blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) {
- BlockInfoUnderConstruction lastUC =
+ BlockInfoUnderConstruction lastUC =
(BlockInfoUnderConstruction) blocks[blocks.length - 1];
if (lastUC.getNumBytes() == 0) {
// this is a 0-sized block. do not need check its UC state here
collectedBlocks.addDeleteBlock(lastUC);
- removeLastBlock(lastUC);
+ f.removeLastBlock(lastUC);
}
}
}
-
- @Override
- public INodeFileUnderConstruction recordModification(final Snapshot latest,
- final INodeMap inodeMap) throws QuotaExceededException {
- if (isInLatestSnapshot(latest)) {
- INodeFileUnderConstructionWithSnapshot newFile = getParent()
- .replaceChild4INodeFileUcWithSnapshot(this, inodeMap)
- .recordModification(latest, inodeMap);
- return newFile;
- } else {
- return this;
- }
- }
-
- /** Assert all blocks are complete. */
- protected void assertAllBlocksComplete() {
- final BlockInfo[] blocks = getBlocks();
- for (int i = 0; i < blocks.length; i++) {
- Preconditions.checkState(blocks[i].isComplete(), "Failed to finalize"
- + " %s %s since blocks[%s] is non-complete, where blocks=%s.",
- getClass().getSimpleName(), this, i, Arrays.asList(getBlocks()));
- }
- }
-
- /**
- * Remove a block from the block list. This block should be
- * the last one on the list.
- */
- boolean removeLastBlock(Block oldblock) {
- final BlockInfo[] blocks = getBlocks();
- if (blocks == null || blocks.length == 0) {
- return false;
- }
- int size_1 = blocks.length - 1;
- if (!blocks[size_1].equals(oldblock)) {
- return false;
- }
-
- //copy to a new list
- BlockInfo[] newlist = new BlockInfo[size_1];
- System.arraycopy(blocks, 0, newlist, 0, size_1);
- setBlocks(newlist);
- return true;
- }
-
- /**
- * Convert the last block of the file to an under-construction block.
- * Set its locations.
- */
- @Override
- public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
- DatanodeDescriptor[] targets) throws IOException {
- if (numBlocks() == 0) {
- throw new IOException("Failed to set last block: File is empty.");
- }
- BlockInfoUnderConstruction ucBlock =
- lastBlock.convertToBlockUnderConstruction(
- BlockUCState.UNDER_CONSTRUCTION, targets);
- ucBlock.setBlockCollection(this);
- setBlock(numBlocks()-1, ucBlock);
- return ucBlock;
- }
-
- /**
- * Update the length for the last block
- *
- * @param lastBlockLength
- * The length of the last block reported from client
- * @throws IOException
- */
- void updateLengthOfLastBlock(long lastBlockLength) throws IOException {
- BlockInfo lastBlock = this.getLastBlock();
- assert (lastBlock != null) : "The last block for path "
- + this.getFullPathName() + " is null when updating its length";
- assert (lastBlock instanceof BlockInfoUnderConstruction) : "The last block for path "
- + this.getFullPathName()
- + " is not a BlockInfoUnderConstruction when updating its length";
- lastBlock.setNumBytes(lastBlockLength);
- }
-
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Fri Nov 22 01:39:02 2013
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.S
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -205,23 +204,6 @@ public class INodeDirectory extends INod
return newDir;
}
- /**
- * Used when load fileUC from fsimage. The file to be replaced is actually
- * only in snapshot, thus may not be contained in the children list.
- * See HDFS-5428 for details.
- */
- public void replaceChildFileInSnapshot(INodeFile oldChild,
- final INodeFile newChild) {
- if (children != null) {
- final int i = searchChildren(newChild.getLocalNameBytes());
- if (i >= 0 && children.get(i).getId() == oldChild.getId()) {
- // no need to consider reference node here, since we already do the
- // replacement in FSImageFormat.Loader#loadFilesUnderConstruction
- children.set(i, newChild);
- }
- }
- }
-
/** Replace the given child with a new child. */
public void replaceChild(INode oldChild, final INode newChild,
final INodeMap inodeMap) {
@@ -291,17 +273,6 @@ public class INodeDirectory extends INod
return newChild;
}
- /** Replace a child {@link INodeFile} with an {@link INodeFileUnderConstructionWithSnapshot}. */
- INodeFileUnderConstructionWithSnapshot replaceChild4INodeFileUcWithSnapshot(
- final INodeFileUnderConstruction child, final INodeMap inodeMap) {
- Preconditions.checkArgument(!(child instanceof INodeFileUnderConstructionWithSnapshot),
- "Child file is already an INodeFileUnderConstructionWithSnapshot, child=" + child);
- final INodeFileUnderConstructionWithSnapshot newChild
- = new INodeFileUnderConstructionWithSnapshot(child, null);
- replaceChildFile(child, newChild, inodeMap);
- return newChild;
- }
-
@Override
public INodeDirectory recordModification(Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Fri Nov 22 01:39:02 2013
@@ -20,15 +20,18 @@ package org.apache.hadoop.hdfs.server.na
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
+import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
@@ -43,6 +46,22 @@ import com.google.common.base.Preconditi
@InterfaceAudience.Private
public class INodeFile extends INodeWithAdditionalFields
implements INodeFileAttributes, BlockCollection {
+ /**
+ * A feature contains specific information for a type of INodeFile. E.g.,
+ * we can have separate features for Under-Construction and Snapshot.
+ */
+ public static abstract class Feature {
+ private Feature nextFeature;
+
+ public Feature getNextFeature() {
+ return nextFeature;
+ }
+
+ public void setNextFeature(Feature next) {
+ this.nextFeature = next;
+ }
+ }
+
/** The same as valueOf(inode, path, false). */
public static INodeFile valueOf(INode inode, String path
) throws FileNotFoundException {
@@ -104,8 +123,11 @@ public class INodeFile extends INodeWith
private BlockInfo[] blocks;
- INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime, long atime,
- BlockInfo[] blklist, short replication, long preferredBlockSize) {
+ private Feature headFeature;
+
+ INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
+ long atime, BlockInfo[] blklist, short replication,
+ long preferredBlockSize) {
super(id, name, permissions, mtime, atime);
header = HeaderFormat.combineReplication(header, replication);
header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
@@ -116,6 +138,48 @@ public class INodeFile extends INodeWith
super(that);
this.header = that.header;
this.blocks = that.blocks;
+ this.headFeature = that.headFeature;
+ }
+
+ /**
+ * If the inode contains a {@link FileUnderConstructionFeature}, return it;
+ * otherwise, return null.
+ */
+ public final FileUnderConstructionFeature getFileUnderConstructionFeature() {
+ for (Feature f = this.headFeature; f != null; f = f.nextFeature) {
+ if (f instanceof FileUnderConstructionFeature) {
+ return (FileUnderConstructionFeature) f;
+ }
+ }
+ return null;
+ }
+
+ /** Is this file under construction? */
+ @Override // BlockCollection
+ public boolean isUnderConstruction() {
+ return getFileUnderConstructionFeature() != null;
+ }
+
+ void addFeature(Feature f) {
+ f.nextFeature = headFeature;
+ headFeature = f;
+ }
+
+ void removeFeature(Feature f) {
+ if (f == headFeature) {
+ headFeature = headFeature.nextFeature;
+ return;
+ } else if (headFeature != null) {
+ Feature prev = headFeature;
+ Feature curr = headFeature.nextFeature;
+ for (; curr != null && curr != f; prev = curr, curr = curr.nextFeature)
+ ;
+ if (curr != null) {
+ prev.nextFeature = curr.nextFeature;
+ return;
+ }
+ }
+ throw new IllegalStateException("Feature " + f + " not found.");
}
/** @return true unconditionally. */
@@ -130,22 +194,88 @@ public class INodeFile extends INodeWith
return this;
}
- /** Is this file under construction? */
- public boolean isUnderConstruction() {
- return false;
- }
+ /* Start of Under-Construction Feature */
/** Convert this file to an {@link INodeFileUnderConstruction}. */
- public INodeFileUnderConstruction toUnderConstruction(
- String clientName,
- String clientMachine,
+ public INodeFile toUnderConstruction(String clientName, String clientMachine,
DatanodeDescriptor clientNode) {
Preconditions.checkState(!isUnderConstruction(),
"file is already an INodeFileUnderConstruction");
- return new INodeFileUnderConstruction(this,
- clientName, clientMachine, clientNode);
+ FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
+ clientName, clientMachine, clientNode);
+ addFeature(uc);
+ return this;
}
+ /**
+ * Convert the file to a complete file, i.e., to remove the Under-Construction
+ * feature.
+ */
+ public INodeFile toCompleteFile(long mtime) {
+ FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
+ if (uc != null) {
+ assertAllBlocksComplete();
+ removeFeature(uc);
+ this.setModificationTime(mtime);
+ }
+ return this;
+ }
+
+ /** Assert all blocks are complete. */
+ private void assertAllBlocksComplete() {
+ if (blocks == null) {
+ return;
+ }
+ for (int i = 0; i < blocks.length; i++) {
+ Preconditions.checkState(blocks[i].isComplete(), "Failed to finalize"
+ + " %s %s since blocks[%s] is non-complete, where blocks=%s.",
+ getClass().getSimpleName(), this, i, Arrays.asList(blocks));
+ }
+ }
+
+ @Override //BlockCollection
+ public void setBlock(int index, BlockInfo blk) {
+ this.blocks[index] = blk;
+ }
+
+ @Override // BlockCollection
+ public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
+ DatanodeDescriptor[] locations) throws IOException {
+ Preconditions.checkState(isUnderConstruction());
+
+ if (numBlocks() == 0) {
+ throw new IOException("Failed to set last block: File is empty.");
+ }
+ BlockInfoUnderConstruction ucBlock =
+ lastBlock.convertToBlockUnderConstruction(
+ BlockUCState.UNDER_CONSTRUCTION, locations);
+ ucBlock.setBlockCollection(this);
+ setBlock(numBlocks() - 1, ucBlock);
+ return ucBlock;
+ }
+
+ /**
+ * Remove a block from the block list. This block should be
+ * the last one on the list.
+ */
+ boolean removeLastBlock(Block oldblock) {
+ if (blocks == null || blocks.length == 0) {
+ return false;
+ }
+ int size_1 = blocks.length - 1;
+ if (!blocks[size_1].equals(oldblock)) {
+ return false;
+ }
+
+ //copy to a new list
+ BlockInfo[] newlist = new BlockInfo[size_1];
+ System.arraycopy(blocks, 0, newlist, 0, size_1);
+ setBlocks(newlist);
+ return true;
+ }
+
+ /* End of Under-Construction Feature */
+
@Override
public INodeFileAttributes getSnapshotINode(final Snapshot snapshot) {
return this;
@@ -266,11 +396,6 @@ public class INodeFile extends INodeWith
}
}
- /** Set the block of the file at the given index. */
- public void setBlock(int idx, BlockInfo blk) {
- this.blocks[idx] = blk;
- }
-
/** Set the blocks. */
public void setBlocks(BlockInfo[] blocks) {
this.blocks = blocks;
@@ -286,6 +411,11 @@ public class INodeFile extends INodeWith
// this only happens when deleting the current file
computeQuotaUsage(counts, false);
destroyAndCollectBlocks(collectedBlocks, removedINodes);
+ } else if (snapshot == null && prior != null) {
+ FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
+ if (uc != null) {
+ uc.cleanZeroSizeBlock(this, collectedBlocks);
+ }
}
return counts;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Fri Nov 22 01:39:02 2013
@@ -182,9 +182,11 @@ public class LeaseManager {
/**
* Finds the pathname for the specified pendingFile
*/
- public synchronized String findPath(INodeFileUnderConstruction pendingFile)
+ public synchronized String findPath(INodeFile pendingFile)
throws IOException {
- Lease lease = getLease(pendingFile.getClientName());
+ FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
+ Preconditions.checkArgument(uc != null);
+ Lease lease = getLease(uc.getClientName());
if (lease != null) {
String src = lease.findPath(pendingFile);
if (src != null) {
@@ -253,7 +255,7 @@ public class LeaseManager {
/**
* @return the path associated with the pendingFile and null if not found.
*/
- private String findPath(INodeFileUnderConstruction pendingFile) {
+ private String findPath(INodeFile pendingFile) {
try {
for (String src : paths) {
INode node = fsnamesystem.dir.getINode(src);
@@ -433,14 +435,14 @@ public class LeaseManager {
* @return list of inodes
* @throws UnresolvedLinkException
*/
- Map<String, INodeFileUnderConstruction> getINodesUnderConstruction() {
- Map<String, INodeFileUnderConstruction> inodes =
- new TreeMap<String, INodeFileUnderConstruction>();
+ Map<String, INodeFile> getINodesUnderConstruction() {
+ Map<String, INodeFile> inodes = new TreeMap<String, INodeFile>();
for (String p : sortedLeasesByPath.keySet()) {
// verify that path exists in namespace
try {
- INode node = fsnamesystem.dir.getINode(p);
- inodes.put(p, INodeFileUnderConstruction.valueOf(node, p));
+ INodeFile node = INodeFile.valueOf(fsnamesystem.dir.getINode(p), p);
+ Preconditions.checkState(node.isUnderConstruction());
+ inodes.put(p, node);
} catch (IOException ioe) {
LOG.error(ioe);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Fri Nov 22 01:39:02 2013
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.Quota;
@@ -594,14 +593,6 @@ public class INodeDirectoryWithSnapshot
}
@Override
- public void replaceChildFileInSnapshot(final INodeFile oldChild,
- final INodeFile newChild) {
- super.replaceChildFileInSnapshot(oldChild, newChild);
- diffs.replaceChild(ListType.DELETED, oldChild, newChild);
- diffs.replaceChild(ListType.CREATED, oldChild, newChild);
- }
-
- @Override
public void replaceChild(final INode oldChild, final INode newChild,
final INodeMap inodeMap) {
super.replaceChild(oldChild, newChild, inodeMap);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java Fri Nov 22 01:39:02 2013
@@ -21,7 +21,6 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
@@ -48,15 +47,6 @@ public class INodeFileWithSnapshot exten
}
@Override
- public INodeFileUnderConstructionWithSnapshot toUnderConstruction(
- final String clientName,
- final String clientMachine,
- final DatanodeDescriptor clientNode) {
- return new INodeFileUnderConstructionWithSnapshot(this,
- clientName, clientMachine, clientNode, getDiffs());
- }
-
- @Override
public boolean isCurrentFileDeleted() {
return isCurrentFileDeleted;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Fri Nov 22 01:39:02 2013
@@ -1113,10 +1113,11 @@ public class TestReplicationPolicy {
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
final BlockInfo info = new BlockInfo(block1, 1);
- final MutableBlockCollection mbc = mock(MutableBlockCollection.class);
+ final BlockCollection mbc = mock(BlockCollection.class);
when(mbc.getLastBlock()).thenReturn(info);
when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
when(mbc.getBlockReplication()).thenReturn((short)1);
+ when(mbc.isUnderConstruction()).thenReturn(true);
ContentSummary cs = mock(ContentSummary.class);
when(cs.getLength()).thenReturn((long)1);
when(mbc.computeContentSummary()).thenReturn(cs);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1544389&r1=1544388&r2=1544389&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Fri Nov 22 01:39:02 2013
@@ -82,9 +82,10 @@ public class CreateEditsLog {
blocks[iB].setBlockId(currentBlockId++);
}
- INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
- inodeId.nextValue(), null, replication, 0, blockSize, blocks, p, "",
- "", null);
+ final INodeFile inode = new INodeFile(inodeId.nextValue(), null,
+ p, 0L, 0L, blocks, replication, blockSize);
+ inode.toUnderConstruction("", "", null);
+
// Append path to filename with information about blockIDs
String path = "_" + iF + "_B" + blocks[0].getBlockId() +
"_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
@@ -96,9 +97,10 @@ public class CreateEditsLog {
dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
editLog.logMkDir(currentDir, dirInode);
}
- editLog.logOpenFile(filePath,
- new INodeFileUnderConstruction(inodeId.nextValue(), p, replication,
- 0, blockSize, "", "", null), false);
+ INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
+ p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
+ fileUc.toUnderConstruction("", "", null);
+ editLog.logOpenFile(filePath, fileUc, false);
editLog.logCloseFile(filePath, inode);
if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks