Posted to hdfs-commits@hadoop.apache.org by sh...@apache.org on 2010/09/15 02:29:10 UTC
svn commit: r997157 - in /hadoop/hdfs/trunk: CHANGES.txt
src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
Author: shv
Date: Wed Sep 15 00:29:10 2010
New Revision: 997157
URL: http://svn.apache.org/viewvc?rev=997157&view=rev
Log:
HDFS-1363. Eliminate second synchronized sections in appendFile(). Contributed by Konstantin Shvachko.
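In outline, the commit collapses appendFile()'s two monitor acquisitions into one: startFileInternal() now builds and returns the LocatedBlock for the last partial block itself, so the second synchronized section that previously rebuilt it after the edit-log sync goes away. A minimal, self-contained Java sketch of the before/after shape (stub names, not the actual HDFS classes):

    class AppendLockingSketch {
      static class LocatedBlockStub {}           // stands in for LocatedBlock
      private LocatedBlockStub lastPartialBlock;

      // Before: a second synchronized section after logSync() rebuilt the
      // LocatedBlock, re-reading state that could have changed in between.
      LocatedBlockStub appendFileOld(String src) {
        synchronized (this) { startFileInternal(src); }   // section 1
        logSync();
        LocatedBlockStub lb;
        synchronized (this) { lb = lastPartialBlock; }    // section 2 (eliminated)
        return lb;
      }

      // After: startFileInternal() returns the block, so one section suffices.
      LocatedBlockStub appendFileNew(String src) {
        LocatedBlockStub lb;
        synchronized (this) { lb = startFileInternalReturning(src); }
        logSync();
        return lb;
      }

      private void startFileInternal(String src) {
        lastPartialBlock = new LocatedBlockStub();
      }
      private LocatedBlockStub startFileInternalReturning(String src) {
        startFileInternal(src);
        return lastPartialBlock;
      }
      private void logSync() { /* edit-log sync stays outside the lock */ }
    }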
Modified:
hadoop/hdfs/trunk/CHANGES.txt
hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=997157&r1=997156&r2=997157&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Sep 15 00:29:10 2010
@@ -1184,6 +1184,8 @@ Release 0.21.0 - Unreleased
HDFS-1267. fuse-dfs does not compile. (Devaraj Das via tomwhite)
+ HDFS-1363. Eliminate second synchronized sections in appendFile(). (shv)
+
Release 0.20.3 - Unreleased
IMPROVEMENTS
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=997157&r1=997156&r2=997157&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Wed Sep 15 00:29:10 2010
@@ -349,19 +349,44 @@ public class BlockManager {
}
/**
- * Convert the last block of the file to an under construction block.
+ * Convert the last block of the file to an under construction block.<p>
+ * The block is converted only if the file has blocks and the last one
+ * is a partial block (its size is less than the preferred block size).
+ * The converted block is returned to the client.
+ * The client uses the returned block locations to form the data pipeline
+ * for this block.<br>
+ * The method returns null if there is no partial block at the end.
+ * The client is supposed to allocate a new block with the next call.
+ *
* @param fileINode file
- * @param targets data-nodes that will form the pipeline for this block
+ * @return the last block locations if the block is partial or null otherwise
*/
- void convertLastBlockToUnderConstruction(
- INodeFileUnderConstruction fileINode,
- DatanodeDescriptor[] targets) throws IOException {
+ LocatedBlock convertLastBlockToUnderConstruction(
+ INodeFileUnderConstruction fileINode) throws IOException {
BlockInfo oldBlock = fileINode.getLastBlock();
- if(oldBlock == null)
- return;
+ if(oldBlock == null ||
+ fileINode.getPreferredBlockSize() == oldBlock.getNumBytes())
+ return null;
+ assert oldBlock == getStoredBlock(oldBlock) :
+ "last block of the file is not in blocksMap";
+
+ DatanodeDescriptor[] targets = getNodes(oldBlock);
+
BlockInfoUnderConstruction ucBlock =
fileINode.setLastBlock(oldBlock, targets);
blocksMap.replaceBlock(ucBlock);
+
+ // Remove block from replication queue.
+ updateNeededReplications(oldBlock, 0, 0);
+
+ // remove this block from the list of pending blocks to be deleted.
+ for (DatanodeDescriptor dd : targets) {
+ String datanodeId = dd.getStorageID();
+ removeFromInvalidates(datanodeId, oldBlock);
+ }
+
+ long fileLength = fileINode.computeContentSummary().getLength();
+ return getBlockLocation(ucBlock, fileLength - ucBlock.getNumBytes());
}
/**
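Per the new javadoc, callers of convertLastBlockToUnderConstruction() branch on the null return. A caller-side sketch (identifiers from this diff; the branch bodies are illustrative):

    // Under the namesystem lock, during append:
    LocatedBlock lb = blockManager.convertLastBlockToUnderConstruction(cons);
    if (lb != null) {
      // Partial last block: re-marked under construction, removed from the
      // replication and invalidate queues, and returned with its locations.
    } else {
      // Last block full, or file has no blocks: the client allocates a new
      // block with its next call.
    }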
@@ -383,7 +408,7 @@ public class BlockManager {
}
List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset,
- long length, int nrBlocksToReturn, boolean needBlockToken) throws IOException {
+ long length, int nrBlocksToReturn) throws IOException {
int curBlk = 0;
long curPos = 0, blkSize = 0;
int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
@@ -402,7 +427,7 @@ public class BlockManager {
long endOff = offset + length;
List<LocatedBlock> results = new ArrayList<LocatedBlock>(blocks.length);
do {
- results.add(getBlockLocation(blocks[curBlk], curPos, needBlockToken));
+ results.add(getBlockLocation(blocks[curBlk], curPos));
curPos += blocks[curBlk].getNumBytes();
curBlk++;
} while (curPos < endOff
@@ -413,12 +438,12 @@ public class BlockManager {
/** @param needBlockToken
* @return a LocatedBlock for the given block */
- LocatedBlock getBlockLocation(final BlockInfo blk, final long pos, boolean needBlockToken
+ LocatedBlock getBlockLocation(final BlockInfo blk, final long pos
) throws IOException {
if (!blk.isComplete()) {
final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)blk;
final DatanodeDescriptor[] locations = uc.getExpectedLocations();
- return namesystem.createLocatedBlock(uc, locations, pos, false, needBlockToken);
+ return new LocatedBlock(uc, locations, pos, false);
}
// get block locations
@@ -444,7 +469,7 @@ public class BlockManager {
machines[j++] = d;
}
}
- return namesystem.createLocatedBlock(blk, machines, pos, isCorrupt, needBlockToken);
+ return new LocatedBlock(blk, machines, pos, isCorrupt);
}
/**
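Net effect of the BlockManager changes: it no longer calls back into FSNamesystem to mint token-bearing blocks; it constructs plain LocatedBlocks, and token stamping is hoisted into the namesystem (see setBlockTokens() in the FSNamesystem diff below). A minimal layering sketch with stub types (hypothetical, for illustration only):

    import java.util.List;

    class LayeringSketch {
      static class Block {}
      static class Token {}
      static class Located {
        final Block block;
        Token token;
        Located(Block block) { this.block = block; }
      }

      // BlockManager's role after this commit: locations only, no tokens.
      static Located locate(Block b) { return new Located(b); }

      // FSNamesystem's role: decorate the whole batch in one pass.
      static void setBlockTokens(List<Located> blocks, boolean tokensEnabled) {
        if (!tokensEnabled) return;    // mirrors isBlockTokenEnabled && needBlockToken
        for (Located l : blocks) {
          l.token = new Token();       // stands in for generateToken(..., READ)
        }
      }
    }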
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=997157&r1=997156&r2=997157&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Sep 15 00:29:10 2010
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.conf.*;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -799,34 +800,37 @@ public class FSNamesystem implements FSC
} else {
final long n = inode.computeFileSize(false);
final List<LocatedBlock> locatedblocks = blockManager.getBlockLocations(
- blocks, offset, length, Integer.MAX_VALUE, needBlockToken);
+ blocks, offset, length, Integer.MAX_VALUE);
final BlockInfo last = inode.getLastBlock();
if (LOG.isDebugEnabled()) {
LOG.debug("last = " + last);
}
-
+
+ if(isBlockTokenEnabled && needBlockToken) {
+ setBlockTokens(locatedblocks);
+ }
+
if (last.isComplete()) {
return new LocatedBlocks(n, inode.isUnderConstruction(), locatedblocks,
- blockManager.getBlockLocation(last, n-last.getNumBytes(), needBlockToken), true);
+ blockManager.getBlockLocation(last, n-last.getNumBytes()), true);
} else {
return new LocatedBlocks(n, inode.isUnderConstruction(), locatedblocks,
- blockManager.getBlockLocation(last, n, needBlockToken), false);
+ blockManager.getBlockLocation(last, n), false);
}
}
}
- /** Create a LocatedBlock.
- * @param needBlockToken */
- LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations,
- final long offset, final boolean corrupt, boolean needBlockToken) throws IOException {
- final LocatedBlock lb = new LocatedBlock(b, locations, offset, corrupt);
- if (isBlockTokenEnabled && needBlockToken) {
- lb.setBlockToken(blockTokenSecretManager.generateToken(b,
- EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
+ /** Generate block tokens for the blocks to be returned. */
+ private void setBlockTokens(List<LocatedBlock> locatedBlocks) throws IOException {
+ for(LocatedBlock l : locatedBlocks) {
+ Token<BlockTokenIdentifier> token =
+ blockTokenSecretManager.generateToken(l.getBlock(),
+ EnumSet.of(BlockTokenSecretManager.AccessMode.READ));
+
+ l.setBlockToken(token);
}
- return lb;
}
-
+
/**
* Moves all the blocks from srcs and appends them to trg
* To avoid rollbacks we will verify validity of ALL of the args
@@ -1164,9 +1168,22 @@ public class FSNamesystem implements FSC
}
/**
- * For description of exceptions @see {@link ClientProtocol#create()}
+ * Create new or open an existing file for append.<p>
+ *
+ * In case of opening the file for append, the method returns the last
+ * block of the file if this is a partial block, which can still be used
+ * for writing more data. The client uses the returned block locations
+ * to form the data pipeline for this block.<br>
+ * The method returns null if the last block is full or if this is a
+ * new file. The client then allocates a new block with the next call
+ * using {@link NameNode#addBlock()}.<p>
+ *
+ * For description of parameters and exceptions thrown see
+ * {@link ClientProtocol#create()}
+ *
+ * @return the last block locations if the block is partial or null otherwise
*/
- private synchronized void startFileInternal(String src,
+ private synchronized LocatedBlock startFileInternal(String src,
PermissionStatus permissions, String holder, String clientMachine,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize) throws SafeModeException, FileAlreadyExistsException,
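Seen from the client, the contract this javadoc describes is roughly the following (a hedged sketch; namenode stands for a ClientProtocol proxy and error handling is omitted):

    // Hypothetical client flow for append, per the contract above:
    LocatedBlock last = namenode.append(src, clientName);
    if (last != null) {
      // Partial last block: form the write pipeline from last.getLocations()
      // and keep filling the block up to the preferred block size.
    } else {
      // Last block full or file is new: the next write obtains a fresh
      // block from the NameNode via addBlock().
    }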
@@ -1281,9 +1298,8 @@ public class FSNamesystem implements FSC
+ src + " on client " + clientMachine);
else {
//append & create a nonexistent file amounts to overwrite
- this.startFileInternal(src, permissions, holder, clientMachine,
+ return startFileInternal(src, permissions, holder, clientMachine,
EnumSet.of(CreateFlag.OVERWRITE), createParent, replication, blockSize);
- return;
}
} else if (myFile.isDirectory()) {
throw new IOException("failed to append to directory " + src
@@ -1321,6 +1337,15 @@ public class FSNamesystem implements FSC
dir.replaceNode(src, node, cons);
leaseManager.addLease(cons.getClientName(), src);
+ // convert last block to under-construction
+ LocatedBlock lb =
+ blockManager.convertLastBlockToUnderConstruction(cons);
+
+ if (lb != null && isBlockTokenEnabled) {
+ lb.setBlockToken(blockTokenSecretManager.generateToken(lb.getBlock(),
+ EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE)));
+ }
+ return lb;
} else {
// Now we can add the name to the filesystem. This file has no
// blocks associated with it.
@@ -1346,6 +1371,7 @@ public class FSNamesystem implements FSC
+ie.getMessage());
throw ie;
}
+ return null;
}
/**
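Note the access-mode asymmetry: blocks handed out by getBlockLocations() are stamped READ in setBlockTokens(), while the block reopened for append above gets WRITE, since the client will extend it through the pipeline. A snippet of the two capabilities requested from the same secret manager (names from the diff; wiring hypothetical):

    import java.util.EnumSet;

    // Which capability each path requests from BlockTokenSecretManager:
    EnumSet<BlockTokenSecretManager.AccessMode> readCap =
        EnumSet.of(BlockTokenSecretManager.AccessMode.READ);   // getBlockLocations()
    EnumSet<BlockTokenSecretManager.AccessMode> writeCap =
        EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE);  // append (above)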
@@ -1359,52 +1385,12 @@ public class FSNamesystem implements FSC
throw new UnsupportedOperationException("Append to hdfs not supported." +
" Please refer to dfs.support.append configuration parameter.");
}
- startFileInternal(src, null, holder, clientMachine, EnumSet.of(CreateFlag.APPEND),
- false, (short)blockManager.maxReplication, (long)0);
+ LocatedBlock lb =
+ startFileInternal(src, null, holder, clientMachine,
+ EnumSet.of(CreateFlag.APPEND),
+ false, (short)blockManager.maxReplication, (long)0);
getEditLog().logSync();
- //
- // Create a LocatedBlock object for the last block of the file
- // to be returned to the client. Return null if the file does not
- // have a partial block at the end.
- //
- LocatedBlock lb = null;
- synchronized (this) {
- INodeFileUnderConstruction file = (INodeFileUnderConstruction)dir.getFileINode(src);
- BlockInfo lastBlock = file.getLastBlock();
- if (lastBlock != null) {
- assert lastBlock == blockManager.getStoredBlock(lastBlock) :
- "last block of the file is not in blocksMap";
- if (file.getPreferredBlockSize() > lastBlock.getNumBytes()) {
- long fileLength = file.computeContentSummary().getLength();
- DatanodeDescriptor[] targets = blockManager.getNodes(lastBlock);
- // remove the replica locations of this block from the node
- for (int i = 0; i < targets.length; i++) {
- targets[i].removeBlock(lastBlock);
- }
- // convert last block to under-construction and set its locations
- blockManager.convertLastBlockToUnderConstruction(file, targets);
-
- lb = new LocatedBlock(lastBlock, targets,
- fileLength-lastBlock.getNumBytes());
- if (isBlockTokenEnabled) {
- lb.setBlockToken(blockTokenSecretManager.generateToken(lb.getBlock(),
- EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE)));
- }
-
- // Remove block from replication queue.
- blockManager.updateNeededReplications(lastBlock, 0, 0);
-
- // remove this block from the list of pending blocks to be deleted.
- // This reduces the possibility of triggering HADOOP-1349.
- //
- for (DatanodeDescriptor dd : targets) {
- String datanodeId = dd.getStorageID();
- blockManager.removeFromInvalidates(datanodeId, lastBlock);
- }
- }
- }
- }
if (lb != null) {
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file "