Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2015/02/09 00:34:20 UTC
[8/9] hadoop git commit: HDFS-7743. Code cleanup of BlockInfo and rename BlockInfo to BlockInfoContiguous. Contributed by Jing Zhao.
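
For context before the hunks: this part of HDFS-7743 is a mechanical rename of BlockInfo to BlockInfoContiguous (and BlockInfoUnderConstruction to BlockInfoContiguousUnderConstruction), so most changes below retype fields, locals, casts and instanceof checks. A minimal sketch of the caller-side pattern, using simplified stand-in classes rather than the real HDFS types:

    class RenameSketch {
      static class Block {}

      // Stand-in for the renamed class (formerly BlockInfo); "Contiguous"
      // names the existing contiguous block layout explicitly.
      static class BlockInfoContiguous extends Block {
        String getBlockCollectionName() { return "someFile"; }
      }

      // Same shape as the BlockManager hunk at the top of this patch:
      // the type test and the cast both pick up the new name.
      static String fileNameFor(Block block) {
        if (block instanceof BlockInfoContiguous) {
          return ((BlockInfoContiguous) block).getBlockCollectionName();
        }
        return "[orphaned]";
      }
    }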
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 92031c1..3fe47af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -535,8 +535,8 @@ public class BlockManager {
int usableReplicas = numReplicas.liveReplicas() +
numReplicas.decommissionedReplicas();
- if (block instanceof BlockInfo) {
- BlockCollection bc = ((BlockInfo) block).getBlockCollection();
+ if (block instanceof BlockInfoContiguous) {
+ BlockCollection bc = ((BlockInfoContiguous) block).getBlockCollection();
String fileName = (bc == null) ? "[orphaned]" : bc.getName();
out.print(fileName + ": ");
}
@@ -590,8 +590,9 @@ public class BlockManager {
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- private static boolean commitBlock(final BlockInfoUnderConstruction block,
- final Block commitBlock) throws IOException {
+ private static boolean commitBlock(
+ final BlockInfoContiguousUnderConstruction block, final Block commitBlock)
+ throws IOException {
if (block.getBlockUCState() == BlockUCState.COMMITTED)
return false;
assert block.getNumBytes() <= commitBlock.getNumBytes() :
@@ -615,13 +616,14 @@ public class BlockManager {
Block commitBlock) throws IOException {
if(commitBlock == null)
return false; // not committing, this is a block allocation retry
- BlockInfo lastBlock = bc.getLastBlock();
+ BlockInfoContiguous lastBlock = bc.getLastBlock();
if(lastBlock == null)
return false; // no blocks in file yet
if(lastBlock.isComplete())
return false; // already completed (e.g. by syncBlock)
- final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock);
+ final boolean b = commitBlock(
+ (BlockInfoContiguousUnderConstruction) lastBlock, commitBlock);
if(countNodes(lastBlock).liveReplicas() >= minReplication)
completeBlock(bc, bc.numBlocks()-1, false);
return b;
@@ -634,14 +636,15 @@ public class BlockManager {
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- private BlockInfo completeBlock(final BlockCollection bc,
+ private BlockInfoContiguous completeBlock(final BlockCollection bc,
final int blkIndex, boolean force) throws IOException {
if(blkIndex < 0)
return null;
- BlockInfo curBlock = bc.getBlocks()[blkIndex];
+ BlockInfoContiguous curBlock = bc.getBlocks()[blkIndex];
if(curBlock.isComplete())
return curBlock;
- BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock;
+ BlockInfoContiguousUnderConstruction ucBlock =
+ (BlockInfoContiguousUnderConstruction) curBlock;
int numNodes = ucBlock.numNodes();
if (!force && numNodes < minReplication)
throw new IOException("Cannot complete block: " +
@@ -649,7 +652,7 @@ public class BlockManager {
if(!force && ucBlock.getBlockUCState() != BlockUCState.COMMITTED)
throw new IOException(
"Cannot complete block: block has not been COMMITTED by the client");
- BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
+ BlockInfoContiguous completeBlock = ucBlock.convertToCompleteBlock();
// replace penultimate block in file
bc.setBlock(blkIndex, completeBlock);
@@ -667,9 +670,9 @@ public class BlockManager {
return blocksMap.replaceBlock(completeBlock);
}
- private BlockInfo completeBlock(final BlockCollection bc,
- final BlockInfo block, boolean force) throws IOException {
- BlockInfo[] fileBlocks = bc.getBlocks();
+ private BlockInfoContiguous completeBlock(final BlockCollection bc,
+ final BlockInfoContiguous block, boolean force) throws IOException {
+ BlockInfoContiguous[] fileBlocks = bc.getBlocks();
for(int idx = 0; idx < fileBlocks.length; idx++)
if(fileBlocks[idx] == block) {
return completeBlock(bc, idx, force);
@@ -682,8 +685,8 @@ public class BlockManager {
* regardless of whether enough replicas are present. This is necessary
* when tailing edit logs as a Standby.
*/
- public BlockInfo forceCompleteBlock(final BlockCollection bc,
- final BlockInfoUnderConstruction block) throws IOException {
+ public BlockInfoContiguous forceCompleteBlock(final BlockCollection bc,
+ final BlockInfoContiguousUnderConstruction block) throws IOException {
block.commitBlock(block);
return completeBlock(bc, block, true);
}
@@ -705,7 +708,7 @@ public class BlockManager {
*/
public LocatedBlock convertLastBlockToUnderConstruction(
BlockCollection bc, long bytesToRemove) throws IOException {
- BlockInfo oldBlock = bc.getLastBlock();
+ BlockInfoContiguous oldBlock = bc.getLastBlock();
if(oldBlock == null ||
bc.getPreferredBlockSize() == oldBlock.getNumBytes() - bytesToRemove)
return null;
@@ -714,7 +717,8 @@ public class BlockManager {
DatanodeStorageInfo[] targets = getStorages(oldBlock);
- BlockInfoUnderConstruction ucBlock = bc.setLastBlock(oldBlock, targets);
+ BlockInfoContiguousUnderConstruction ucBlock =
+ bc.setLastBlock(oldBlock, targets);
blocksMap.replaceBlock(ucBlock);
// Remove block from replication queue.
@@ -756,7 +760,8 @@ public class BlockManager {
return locations;
}
- private List<LocatedBlock> createLocatedBlockList(final BlockInfo[] blocks,
+ private List<LocatedBlock> createLocatedBlockList(
+ final BlockInfoContiguous[] blocks,
final long offset, final long length, final int nrBlocksToReturn,
final AccessMode mode) throws IOException {
int curBlk = 0;
@@ -786,7 +791,7 @@ public class BlockManager {
return results;
}
- private LocatedBlock createLocatedBlock(final BlockInfo[] blocks,
+ private LocatedBlock createLocatedBlock(final BlockInfoContiguous[] blocks,
final long endPos, final AccessMode mode) throws IOException {
int curBlk = 0;
long curPos = 0;
@@ -802,7 +807,7 @@ public class BlockManager {
return createLocatedBlock(blocks[curBlk], curPos, mode);
}
- private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos,
+ private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final long pos,
final BlockTokenSecretManager.AccessMode mode) throws IOException {
final LocatedBlock lb = createLocatedBlock(blk, pos);
if (mode != null) {
@@ -812,15 +817,16 @@ public class BlockManager {
}
/** @return a LocatedBlock for the given block */
- private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos
+ private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final long pos
) throws IOException {
- if (blk instanceof BlockInfoUnderConstruction) {
+ if (blk instanceof BlockInfoContiguousUnderConstruction) {
if (blk.isComplete()) {
throw new IOException(
"blk instanceof BlockInfoUnderConstruction && blk.isComplete()"
+ ", blk=" + blk);
}
- final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)blk;
+ final BlockInfoContiguousUnderConstruction uc =
+ (BlockInfoContiguousUnderConstruction) blk;
final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
return new LocatedBlock(eb, storages, pos, false);
@@ -859,7 +865,7 @@ public class BlockManager {
}
/** Create a LocatedBlocks. */
- public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
+ public LocatedBlocks createLocatedBlocks(final BlockInfoContiguous[] blocks,
final long fileSizeExcludeBlocksUnderConstruction,
final boolean isFileUnderConstruction, final long offset,
final long length, final boolean needBlockToken,
@@ -882,7 +888,7 @@ public class BlockManager {
final LocatedBlock lastlb;
final boolean isComplete;
if (!inSnapshot) {
- final BlockInfo last = blocks[blocks.length - 1];
+ final BlockInfoContiguous last = blocks[blocks.length - 1];
final long lastPos = last.isComplete()?
fileSizeExcludeBlocksUnderConstruction - last.getNumBytes()
: fileSizeExcludeBlocksUnderConstruction;
@@ -971,7 +977,7 @@ public class BlockManager {
/**
* Check if a block is replicated to at least the minimum replication.
*/
- public boolean isSufficientlyReplicated(BlockInfo b) {
+ public boolean isSufficientlyReplicated(BlockInfoContiguous b) {
// Compare against the lesser of the minReplication and number of live DNs.
final int replication =
Math.min(minReplication, getDatanodeManager().getNumLiveDataNodes());
@@ -1012,7 +1018,7 @@ public class BlockManager {
if(numBlocks == 0) {
return new BlocksWithLocations(new BlockWithLocations[0]);
}
- Iterator<BlockInfo> iter = node.getBlockIterator();
+ Iterator<BlockInfoContiguous> iter = node.getBlockIterator();
int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
// skip blocks
for(int i=0; i<startBlock; i++) {
@@ -1020,7 +1026,7 @@ public class BlockManager {
}
List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
long totalSize = 0;
- BlockInfo curBlock;
+ BlockInfoContiguous curBlock;
while(totalSize<size && iter.hasNext()) {
curBlock = iter.next();
if(!curBlock.isComplete()) continue;
@@ -1119,7 +1125,7 @@ public class BlockManager {
public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
final DatanodeInfo dn, String storageID, String reason) throws IOException {
assert namesystem.hasWriteLock();
- final BlockInfo storedBlock = getStoredBlock(blk.getLocalBlock());
+ final BlockInfoContiguous storedBlock = getStoredBlock(blk.getLocalBlock());
if (storedBlock == null) {
// Check if the replica is in the blockMap, if not
// ignore the request for now. This could happen when BlockScanner
@@ -1696,11 +1702,11 @@ public class BlockManager {
* reported by the datanode in the block report.
*/
static class StatefulBlockInfo {
- final BlockInfoUnderConstruction storedBlock;
+ final BlockInfoContiguousUnderConstruction storedBlock;
final Block reportedBlock;
final ReplicaState reportedState;
- StatefulBlockInfo(BlockInfoUnderConstruction storedBlock,
+ StatefulBlockInfo(BlockInfoContiguousUnderConstruction storedBlock,
Block reportedBlock, ReplicaState reportedState) {
this.storedBlock = storedBlock;
this.reportedBlock = reportedBlock;
@@ -1714,15 +1720,16 @@ public class BlockManager {
*/
private static class BlockToMarkCorrupt {
/** The corrupted block in a datanode. */
- final BlockInfo corrupted;
+ final BlockInfoContiguous corrupted;
/** The corresponding block stored in the BlockManager. */
- final BlockInfo stored;
+ final BlockInfoContiguous stored;
/** The reason to mark corrupt. */
final String reason;
/** The reason code to be stored */
final Reason reasonCode;
- BlockToMarkCorrupt(BlockInfo corrupted, BlockInfo stored, String reason,
+ BlockToMarkCorrupt(BlockInfoContiguous corrupted,
+ BlockInfoContiguous stored, String reason,
Reason reasonCode) {
Preconditions.checkNotNull(corrupted, "corrupted is null");
Preconditions.checkNotNull(stored, "stored is null");
@@ -1733,13 +1740,14 @@ public class BlockManager {
this.reasonCode = reasonCode;
}
- BlockToMarkCorrupt(BlockInfo stored, String reason, Reason reasonCode) {
+ BlockToMarkCorrupt(BlockInfoContiguous stored, String reason,
+ Reason reasonCode) {
this(stored, stored, reason, reasonCode);
}
- BlockToMarkCorrupt(BlockInfo stored, long gs, String reason,
+ BlockToMarkCorrupt(BlockInfoContiguous stored, long gs, String reason,
Reason reasonCode) {
- this(new BlockInfo(stored), stored, reason, reasonCode);
+ this(new BlockInfoContiguous(stored), stored, reason, reasonCode);
//the corrupted block in datanode has a different generation stamp
corrupted.setGenerationStamp(gs);
}
@@ -1864,7 +1872,7 @@ public class BlockManager {
break;
}
- BlockInfo bi = blocksMap.getStoredBlock(b);
+ BlockInfoContiguous bi = blocksMap.getStoredBlock(b);
if (bi == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
@@ -1904,7 +1912,7 @@ public class BlockManager {
// Modify the (block-->datanode) map, according to the difference
// between the old and new block report.
//
- Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>();
+ Collection<BlockInfoContiguous> toAdd = new LinkedList<BlockInfoContiguous>();
Collection<Block> toRemove = new TreeSet<Block>();
Collection<Block> toInvalidate = new LinkedList<Block>();
Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
@@ -1921,7 +1929,7 @@ public class BlockManager {
removeStoredBlock(b, node);
}
int numBlocksLogged = 0;
- for (BlockInfo b : toAdd) {
+ for (BlockInfoContiguous b : toAdd) {
addStoredBlock(b, storageInfo, null, numBlocksLogged < maxNumBlocksToLog);
numBlocksLogged++;
}
@@ -1969,7 +1977,7 @@ public class BlockManager {
continue;
}
- BlockInfo storedBlock = blocksMap.getStoredBlock(iblk);
+ BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(iblk);
// If block does not belong to any file, we are done.
if (storedBlock == null) continue;
@@ -1992,12 +2000,13 @@ public class BlockManager {
// If block is under construction, add this replica to its list
if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
- ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent(
- storageInfo, iblk, reportedState);
+ ((BlockInfoContiguousUnderConstruction)storedBlock)
+ .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
// OpenFileBlocks only inside snapshots also will be added to safemode
// threshold. So we need to update such blocks to safemode
// refer HDFS-5283
- BlockInfoUnderConstruction blockUC = (BlockInfoUnderConstruction) storedBlock;
+ BlockInfoContiguousUnderConstruction blockUC =
+ (BlockInfoContiguousUnderConstruction) storedBlock;
if (namesystem.isInSnapshot(blockUC)) {
int numOfReplicas = blockUC.getNumExpectedLocations();
namesystem.incrementSafeBlockCount(numOfReplicas);
@@ -2013,7 +2022,7 @@ public class BlockManager {
private void reportDiff(DatanodeStorageInfo storageInfo,
BlockListAsLongs newReport,
- Collection<BlockInfo> toAdd, // add to DatanodeDescriptor
+ Collection<BlockInfoContiguous> toAdd, // add to DatanodeDescriptor
Collection<Block> toRemove, // remove from DatanodeDescriptor
Collection<Block> toInvalidate, // should be removed from DN
Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list
@@ -2021,7 +2030,7 @@ public class BlockManager {
// place a delimiter in the list which separates blocks
// that have been reported from those that have not
- BlockInfo delimiter = new BlockInfo(new Block(), (short) 1);
+ BlockInfoContiguous delimiter = new BlockInfoContiguous(new Block(), (short) 1);
AddBlockResult result = storageInfo.addBlock(delimiter);
assert result == AddBlockResult.ADDED
: "Delimiting block cannot be present in the node";
@@ -2036,7 +2045,7 @@ public class BlockManager {
while(itBR.hasNext()) {
Block iblk = itBR.next();
ReplicaState iState = itBR.getCurrentReplicaState();
- BlockInfo storedBlock = processReportedBlock(storageInfo,
+ BlockInfoContiguous storedBlock = processReportedBlock(storageInfo,
iblk, iState, toAdd, toInvalidate, toCorrupt, toUC);
// move block to the head of the list
@@ -2048,7 +2057,8 @@ public class BlockManager {
// collect blocks that have not been reported
// all of them are next to the delimiter
- Iterator<BlockInfo> it = storageInfo.new BlockIterator(delimiter.getNext(0));
+ Iterator<BlockInfoContiguous> it =
+ storageInfo.new BlockIterator(delimiter.getNext(0));
while(it.hasNext())
toRemove.add(it.next());
storageInfo.removeBlock(delimiter);
@@ -2085,10 +2095,10 @@ public class BlockManager {
* @return the up-to-date stored block, if it should be kept.
* Otherwise, null.
*/
- private BlockInfo processReportedBlock(
+ private BlockInfoContiguous processReportedBlock(
final DatanodeStorageInfo storageInfo,
final Block block, final ReplicaState reportedState,
- final Collection<BlockInfo> toAdd,
+ final Collection<BlockInfoContiguous> toAdd,
final Collection<Block> toInvalidate,
final Collection<BlockToMarkCorrupt> toCorrupt,
final Collection<StatefulBlockInfo> toUC) {
@@ -2109,7 +2119,7 @@ public class BlockManager {
}
// find block by blockId
- BlockInfo storedBlock = blocksMap.getStoredBlock(block);
+ BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(block);
if(storedBlock == null) {
// If blocksMap does not contain reported block id,
// the replica should be removed from the data-node.
@@ -2152,7 +2162,8 @@ public class BlockManager {
}
if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
- toUC.add(new StatefulBlockInfo((BlockInfoUnderConstruction) storedBlock,
+ toUC.add(new StatefulBlockInfo(
+ (BlockInfoContiguousUnderConstruction) storedBlock,
new Block(block), reportedState));
return storedBlock;
}
@@ -2240,7 +2251,7 @@ public class BlockManager {
*/
private BlockToMarkCorrupt checkReplicaCorrupt(
Block reported, ReplicaState reportedState,
- BlockInfo storedBlock, BlockUCState ucState,
+ BlockInfoContiguous storedBlock, BlockUCState ucState,
DatanodeDescriptor dn) {
switch(reportedState) {
case FINALIZED:
@@ -2313,7 +2324,7 @@ public class BlockManager {
}
}
- private boolean isBlockUnderConstruction(BlockInfo storedBlock,
+ private boolean isBlockUnderConstruction(BlockInfoContiguous storedBlock,
BlockUCState ucState, ReplicaState reportedState) {
switch(reportedState) {
case FINALIZED:
@@ -2336,7 +2347,7 @@ public class BlockManager {
void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
DatanodeStorageInfo storageInfo) throws IOException {
- BlockInfoUnderConstruction block = ucBlock.storedBlock;
+ BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock;
block.addReplicaIfNotPresent(
storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
@@ -2347,9 +2358,8 @@ public class BlockManager {
}
/**
- * Faster version of
- * {@link #addStoredBlock(BlockInfo, DatanodeStorageInfo, DatanodeDescriptor, boolean)}
- * , intended for use with initial block report at startup. If not in startup
+ * Faster version of {@link #addStoredBlock},
+ * intended for use with initial block report at startup. If not in startup
* safe mode, will call standard addStoredBlock(). Assumes this method is
* called "immediately" so there is no need to refresh the storedBlock from
* blocksMap. Doesn't handle underReplication/overReplication, or worry about
@@ -2358,7 +2368,7 @@ public class BlockManager {
*
* @throws IOException
*/
- private void addStoredBlockImmediate(BlockInfo storedBlock,
+ private void addStoredBlockImmediate(BlockInfoContiguous storedBlock,
DatanodeStorageInfo storageInfo)
throws IOException {
assert (storedBlock != null && namesystem.hasWriteLock());
@@ -2390,15 +2400,15 @@ public class BlockManager {
* needed replications if this takes care of the problem.
* @return the block that is stored in blockMap.
*/
- private Block addStoredBlock(final BlockInfo block,
+ private Block addStoredBlock(final BlockInfoContiguous block,
DatanodeStorageInfo storageInfo,
DatanodeDescriptor delNodeHint,
boolean logEveryBlock)
throws IOException {
assert block != null && namesystem.hasWriteLock();
- BlockInfo storedBlock;
+ BlockInfoContiguous storedBlock;
DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
- if (block instanceof BlockInfoUnderConstruction) {
+ if (block instanceof BlockInfoContiguousUnderConstruction) {
//refresh our copy in case the block got completed in another thread
storedBlock = blocksMap.getStoredBlock(block);
} else {
@@ -2497,7 +2507,8 @@ public class BlockManager {
return storedBlock;
}
- private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
+ private void logAddStoredBlock(BlockInfoContiguous storedBlock,
+ DatanodeDescriptor node) {
if (!blockLog.isInfoEnabled()) {
return;
}
@@ -2524,7 +2535,7 @@ public class BlockManager {
*
* @param blk Block whose corrupt replicas need to be invalidated
*/
- private void invalidateCorruptReplicas(BlockInfo blk) {
+ private void invalidateCorruptReplicas(BlockInfoContiguous blk) {
Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk);
boolean removedFromBlocksMap = true;
if (nodes == null)
@@ -2603,7 +2614,7 @@ public class BlockManager {
long nrInvalid = 0, nrOverReplicated = 0;
long nrUnderReplicated = 0, nrPostponed = 0, nrUnderConstruction = 0;
long startTimeMisReplicatedScan = Time.now();
- Iterator<BlockInfo> blocksItr = blocksMap.getBlocks().iterator();
+ Iterator<BlockInfoContiguous> blocksItr = blocksMap.getBlocks().iterator();
long totalBlocks = blocksMap.size();
replicationQueuesInitProgress = 0;
long totalProcessed = 0;
@@ -2615,7 +2626,7 @@ public class BlockManager {
namesystem.writeLockInterruptibly();
try {
while (processed < numBlocksPerIteration && blocksItr.hasNext()) {
- BlockInfo block = blocksItr.next();
+ BlockInfoContiguous block = blocksItr.next();
MisReplicationResult res = processMisReplicatedBlock(block);
if (LOG.isTraceEnabled()) {
LOG.trace("block " + block + ": " + res);
@@ -2689,7 +2700,7 @@ public class BlockManager {
* appropriate queues if necessary, and returns a result code indicating
* what happened with it.
*/
- private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
+ private MisReplicationResult processMisReplicatedBlock(BlockInfoContiguous block) {
BlockCollection bc = block.getBlockCollection();
if (bc == null) {
// block does not belong to any file
@@ -3018,7 +3029,7 @@ public class BlockManager {
ReplicaState reportedState, DatanodeDescriptor delHintNode)
throws IOException {
// blockReceived reports a finalized block
- Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>();
+ Collection<BlockInfoContiguous> toAdd = new LinkedList<BlockInfoContiguous>();
Collection<Block> toInvalidate = new LinkedList<Block>();
Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
@@ -3035,7 +3046,7 @@ public class BlockManager {
addStoredBlockUnderConstruction(b, storageInfo);
}
long numBlocksLogged = 0;
- for (BlockInfo b : toAdd) {
+ for (BlockInfoContiguous b : toAdd) {
addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog);
numBlocksLogged++;
}
@@ -3159,7 +3170,7 @@ public class BlockManager {
* @param b - the block being tested
* @return count of live nodes for this block
*/
- int countLiveNodes(BlockInfo b) {
+ int countLiveNodes(BlockInfoContiguous b) {
if (!namesystem.isInStartupSafeMode()) {
return countNodes(b).liveReplicas();
}
@@ -3314,7 +3325,7 @@ public class BlockManager {
return blocksMap.size();
}
- public DatanodeStorageInfo[] getStorages(BlockInfo block) {
+ public DatanodeStorageInfo[] getStorages(BlockInfoContiguous block) {
final DatanodeStorageInfo[] storages = new DatanodeStorageInfo[block.numNodes()];
int i = 0;
for(DatanodeStorageInfo s : blocksMap.getStorages(block)) {
@@ -3344,7 +3355,7 @@ public class BlockManager {
}
}
- public BlockInfo getStoredBlock(Block block) {
+ public BlockInfoContiguous getStoredBlock(Block block) {
return blocksMap.getStoredBlock(block);
}
@@ -3492,7 +3503,8 @@ public class BlockManager {
return this.neededReplications.getCorruptReplOneBlockSize();
}
- public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) {
+ public BlockInfoContiguous addBlockCollection(BlockInfoContiguous block,
+ BlockCollection bc) {
return blocksMap.addBlockCollection(block, bc);
}
@@ -3701,7 +3713,7 @@ public class BlockManager {
/**
* A simple result enum for the result of
- * {@link BlockManager#processMisReplicatedBlock(BlockInfo)}.
+ * {@link BlockManager#processMisReplicatedBlock(BlockInfoContiguous)}.
*/
enum MisReplicationResult {
/** The block should be invalidated since it belongs to a deleted file. */
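
For context on the commitBlock()/completeBlock() hunks above: together they drive a small state machine from UNDER_CONSTRUCTION through COMMITTED to COMPLETE, and completion normally requires minReplication live replicas unless forced. A self-contained sketch of that logic, with condensed names and messages rather than the exact HDFS fields:

    import java.io.IOException;

    class BlockStateSketch {
      enum BlockUCState { UNDER_CONSTRUCTION, COMMITTED, COMPLETE }

      // commitBlock() is a no-op once the block is already COMMITTED.
      static BlockUCState commit(BlockUCState s) {
        return s == BlockUCState.UNDER_CONSTRUCTION ? BlockUCState.COMMITTED : s;
      }

      // completeBlock() refuses to finish a block that lacks replicas or was
      // never committed, unless forced (used when tailing edits as a Standby).
      static BlockUCState complete(BlockUCState s, int liveReplicas,
          int minReplication, boolean force) throws IOException {
        if (!force && liveReplicas < minReplication)
          throw new IOException("Cannot complete block: not enough replicas");
        if (!force && s != BlockUCState.COMMITTED)
          throw new IOException("Cannot complete block: not COMMITTED");
        return BlockUCState.COMPLETE;
      }
    }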
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index d532e74..806a4cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -36,10 +36,10 @@ import com.google.common.collect.Iterables;
*/
class BlocksMap {
private static class StorageIterator implements Iterator<DatanodeStorageInfo> {
- private final BlockInfo blockInfo;
+ private final BlockInfoContiguous blockInfo;
private int nextIdx = 0;
- StorageIterator(BlockInfo blkInfo) {
+ StorageIterator(BlockInfoContiguous blkInfo) {
this.blockInfo = blkInfo;
}
@@ -63,14 +63,14 @@ class BlocksMap {
/** Constant {@link LightWeightGSet} capacity. */
private final int capacity;
- private GSet<Block, BlockInfo> blocks;
+ private GSet<Block, BlockInfoContiguous> blocks;
BlocksMap(int capacity) {
// Use 2% of total memory to size the GSet capacity
this.capacity = capacity;
- this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity) {
+ this.blocks = new LightWeightGSet<Block, BlockInfoContiguous>(capacity) {
@Override
- public Iterator<BlockInfo> iterator() {
+ public Iterator<BlockInfoContiguous> iterator() {
SetIterator iterator = new SetIterator();
/*
* Not tracking any modifications to set. As this set will be used
@@ -97,15 +97,15 @@ class BlocksMap {
}
BlockCollection getBlockCollection(Block b) {
- BlockInfo info = blocks.get(b);
+ BlockInfoContiguous info = blocks.get(b);
return (info != null) ? info.getBlockCollection() : null;
}
/**
* Add block b belonging to the specified block collection to the map.
*/
- BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) {
- BlockInfo info = blocks.get(b);
+ BlockInfoContiguous addBlockCollection(BlockInfoContiguous b, BlockCollection bc) {
+ BlockInfoContiguous info = blocks.get(b);
if (info != b) {
info = b;
blocks.put(info);
@@ -120,7 +120,7 @@ class BlocksMap {
* and remove all data-node locations associated with the block.
*/
void removeBlock(Block block) {
- BlockInfo blockInfo = blocks.remove(block);
+ BlockInfoContiguous blockInfo = blocks.remove(block);
if (blockInfo == null)
return;
@@ -132,7 +132,7 @@ class BlocksMap {
}
/** Returns the block object if it exists in the map. */
- BlockInfo getStoredBlock(Block b) {
+ BlockInfoContiguous getStoredBlock(Block b) {
return blocks.get(b);
}
@@ -164,7 +164,7 @@ class BlocksMap {
* For a block that has already been retrieved from the BlocksMap
* returns {@link Iterable} of the storages the block belongs to.
*/
- Iterable<DatanodeStorageInfo> getStorages(final BlockInfo storedBlock) {
+ Iterable<DatanodeStorageInfo> getStorages(final BlockInfoContiguous storedBlock) {
return new Iterable<DatanodeStorageInfo>() {
@Override
public Iterator<DatanodeStorageInfo> iterator() {
@@ -175,7 +175,7 @@ class BlocksMap {
/** counts number of containing nodes. Better than using iterator. */
int numNodes(Block b) {
- BlockInfo info = blocks.get(b);
+ BlockInfoContiguous info = blocks.get(b);
return info == null ? 0 : info.numNodes();
}
@@ -185,7 +185,7 @@ class BlocksMap {
* only if it does not belong to any file and data-nodes.
*/
boolean removeNode(Block b, DatanodeDescriptor node) {
- BlockInfo info = blocks.get(b);
+ BlockInfoContiguous info = blocks.get(b);
if (info == null)
return false;
@@ -203,7 +203,7 @@ class BlocksMap {
return blocks.size();
}
- Iterable<BlockInfo> getBlocks() {
+ Iterable<BlockInfoContiguous> getBlocks() {
return blocks;
}
@@ -218,8 +218,8 @@ class BlocksMap {
* @param newBlock - block for replacement
* @return new block
*/
- BlockInfo replaceBlock(BlockInfo newBlock) {
- BlockInfo currentBlock = blocks.get(newBlock);
+ BlockInfoContiguous replaceBlock(BlockInfoContiguous newBlock) {
+ BlockInfoContiguous currentBlock = blocks.get(newBlock);
assert currentBlock != null : "the block is not in blocksMap";
// replace block in data-node lists
for (int i = currentBlock.numNodes() - 1; i >= 0; i--) {
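
For context: BlocksMap compares blocks by block ID, so getStoredBlock() resolves any Block carrying the right ID to the stored BlockInfoContiguous instance. A rough sketch of those lookup semantics, with a HashMap standing in for the LightWeightGSet (names are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    class BlocksMapSketch {
      static class BlockInfoContiguous {
        final long blockId;
        BlockInfoContiguous(long blockId) { this.blockId = blockId; }
      }

      private final Map<Long, BlockInfoContiguous> blocks = new HashMap<>();

      BlockInfoContiguous getStoredBlock(long blockId) {
        return blocks.get(blockId);
      }

      // Mirrors addBlockCollection(): keep an already-stored equal block,
      // otherwise store and return the argument.
      BlockInfoContiguous add(BlockInfoContiguous b) {
        BlockInfoContiguous info = blocks.get(b.blockId);
        if (info != b) {
          info = b;
          blocks.put(info.blockId, info);
        }
        return info;
      }
    }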
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
index a0f3503..bf5ece9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
@@ -369,7 +369,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
* @param file The file.
*/
private void rescanFile(CacheDirective directive, INodeFile file) {
- BlockInfo[] blockInfos = file.getBlocks();
+ BlockInfoContiguous[] blockInfos = file.getBlocks();
// Increment the "needed" statistics
directive.addFilesNeeded(1);
@@ -394,7 +394,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
}
long cachedTotal = 0;
- for (BlockInfo blockInfo : blockInfos) {
+ for (BlockInfoContiguous blockInfo : blockInfos) {
if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
// We don't try to cache blocks that are under construction.
LOG.trace("Directive {}: can't cache block {} because it is in state "
@@ -453,7 +453,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
}
private String findReasonForNotCaching(CachedBlock cblock,
- BlockInfo blockInfo) {
+ BlockInfoContiguous blockInfo) {
if (blockInfo == null) {
// Somehow, a cache report with the block arrived, but the block
// reports from the DataNode haven't (yet?) described such a block.
@@ -513,7 +513,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
iter.remove();
}
}
- BlockInfo blockInfo = blockManager.
+ BlockInfoContiguous blockInfo = blockManager.
getStoredBlock(new Block(cblock.getBlockId()));
String reason = findReasonForNotCaching(cblock, blockInfo);
int neededCached = 0;
@@ -628,7 +628,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
List<DatanodeDescriptor> pendingCached) {
// To figure out which replicas can be cached, we consult the
// blocksMap. We don't want to try to cache a corrupt replica, though.
- BlockInfo blockInfo = blockManager.
+ BlockInfoContiguous blockInfo = blockManager.
getStoredBlock(new Block(cachedBlock.getBlockId()));
if (blockInfo == null) {
LOG.debug("Block {}: can't add new cached replicas," +
@@ -667,7 +667,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
Iterator<CachedBlock> it = datanode.getPendingCached().iterator();
while (it.hasNext()) {
CachedBlock cBlock = it.next();
- BlockInfo info =
+ BlockInfoContiguous info =
blockManager.getStoredBlock(new Block(cBlock.getBlockId()));
if (info != null) {
pendingBytes -= info.getNumBytes();
@@ -677,7 +677,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
// Add pending uncached blocks from effective capacity
while (it.hasNext()) {
CachedBlock cBlock = it.next();
- BlockInfo info =
+ BlockInfoContiguous info =
blockManager.getStoredBlock(new Block(cBlock.getBlockId()));
if (info != null) {
pendingBytes += info.getNumBytes();
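
For context on the last two hunks: when sizing cache work for a datanode, bytes for blocks still queued to cache are charged against the node's remaining capacity, and bytes queued to uncache are credited back. A condensed sketch of that accounting (parameter names are stand-ins):

    import java.util.List;

    class CacheCapacitySketch {
      static long effectiveRemaining(long cacheRemaining,
          List<Long> pendingCachedBytes, List<Long> pendingUncachedBytes) {
        long pendingBytes = 0;
        for (long b : pendingCachedBytes)   pendingBytes -= b;  // will consume capacity
        for (long b : pendingUncachedBytes) pendingBytes += b;  // will free capacity
        return cacheRemaining + pendingBytes;
      }
    }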
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 644404b..833ab56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -197,8 +197,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
/** A queue of blocks to be replicated by this datanode */
private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
/** A queue of blocks to be recovered by this datanode */
- private final BlockQueue<BlockInfoUnderConstruction> recoverBlocks =
- new BlockQueue<BlockInfoUnderConstruction>();
+ private final BlockQueue<BlockInfoContiguousUnderConstruction> recoverBlocks =
+ new BlockQueue<BlockInfoContiguousUnderConstruction>();
/** A set of blocks to be invalidated by this datanode */
private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
@@ -284,7 +284,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
* Remove block from the list of blocks belonging to the data-node. Remove
* data-node from the block.
*/
- boolean removeBlock(BlockInfo b) {
+ boolean removeBlock(BlockInfoContiguous b) {
final DatanodeStorageInfo s = b.findStorageInfo(this);
// if block exists on this datanode
if (s != null) {
@@ -297,7 +297,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
* Remove block from the list of blocks belonging to the data-node. Remove
* data-node from the block.
*/
- boolean removeBlock(String storageID, BlockInfo b) {
+ boolean removeBlock(String storageID, BlockInfoContiguous b) {
DatanodeStorageInfo s = getStorageInfo(storageID);
if (s != null) {
return s.removeBlock(b);
@@ -468,12 +468,12 @@ public class DatanodeDescriptor extends DatanodeInfo {
}
}
- private static class BlockIterator implements Iterator<BlockInfo> {
+ private static class BlockIterator implements Iterator<BlockInfoContiguous> {
private int index = 0;
- private final List<Iterator<BlockInfo>> iterators;
+ private final List<Iterator<BlockInfoContiguous>> iterators;
private BlockIterator(final DatanodeStorageInfo... storages) {
- List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
+ List<Iterator<BlockInfoContiguous>> iterators = new ArrayList<Iterator<BlockInfoContiguous>>();
for (DatanodeStorageInfo e : storages) {
iterators.add(e.getBlockIterator());
}
@@ -487,7 +487,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
}
@Override
- public BlockInfo next() {
+ public BlockInfoContiguous next() {
update();
return iterators.get(index).next();
}
@@ -504,10 +504,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
}
}
- Iterator<BlockInfo> getBlockIterator() {
+ Iterator<BlockInfoContiguous> getBlockIterator() {
return new BlockIterator(getStorageInfos());
}
- Iterator<BlockInfo> getBlockIterator(final String storageID) {
+ Iterator<BlockInfoContiguous> getBlockIterator(final String storageID) {
return new BlockIterator(getStorageInfo(storageID));
}
@@ -530,7 +530,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
/**
* Store block recovery work.
*/
- void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
+ void addBlockToBeRecovered(BlockInfoContiguousUnderConstruction block) {
if(recoverBlocks.contains(block)) {
// this prevents adding the same block twice to the recovery queue
BlockManager.LOG.info(block + " is already in the recovery queue");
@@ -572,11 +572,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
return replicateBlocks.poll(maxTransfers);
}
- public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
- List<BlockInfoUnderConstruction> blocks = recoverBlocks.poll(maxTransfers);
+ public BlockInfoContiguousUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
+ List<BlockInfoContiguousUnderConstruction> blocks = recoverBlocks.poll(maxTransfers);
if(blocks == null)
return null;
- return blocks.toArray(new BlockInfoUnderConstruction[blocks.size()]);
+ return blocks.toArray(new BlockInfoContiguousUnderConstruction[blocks.size()]);
}
/**
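
For context: addBlockToBeRecovered() refuses to queue a block twice, and getLeaseRecoveryCommand() drains up to maxTransfers blocks, returning null when nothing is queued. A generic sketch with an ArrayDeque standing in for the datanode's BlockQueue:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    class RecoveryQueueSketch<B> {
      private final Queue<B> recoverBlocks = new ArrayDeque<>();

      // Mirrors addBlockToBeRecovered(): skip blocks already queued.
      void add(B block) {
        if (!recoverBlocks.contains(block)) {
          recoverBlocks.add(block);
        }
      }

      // Mirrors the poll-then-toArray shape of getLeaseRecoveryCommand().
      List<B> poll(int maxTransfers) {
        List<B> out = new ArrayList<>();
        B b;
        while (out.size() < maxTransfers && (b = recoverBlocks.poll()) != null) {
          out.add(b);
        }
        return out.isEmpty() ? null : out;
      }
    }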
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a3ebb68..a33d990 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1417,12 +1417,12 @@ public class DatanodeManager {
}
//check lease recovery
- BlockInfoUnderConstruction[] blocks = nodeinfo
+ BlockInfoContiguousUnderConstruction[] blocks = nodeinfo
.getLeaseRecoveryCommand(Integer.MAX_VALUE);
if (blocks != null) {
BlockRecoveryCommand brCommand = new BlockRecoveryCommand(
blocks.length);
- for (BlockInfoUnderConstruction b : blocks) {
+ for (BlockInfoContiguousUnderConstruction b : blocks) {
final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations();
// Skip stale nodes during recovery - no heartbeat received for some time (30s by default).
final List<DatanodeStorageInfo> recoveryLocations =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 3ab10b4..d5ad5fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -80,10 +80,10 @@ public class DatanodeStorageInfo {
/**
* Iterates over the list of blocks belonging to the data-node.
*/
- class BlockIterator implements Iterator<BlockInfo> {
- private BlockInfo current;
+ class BlockIterator implements Iterator<BlockInfoContiguous> {
+ private BlockInfoContiguous current;
- BlockIterator(BlockInfo head) {
+ BlockIterator(BlockInfoContiguous head) {
this.current = head;
}
@@ -91,8 +91,8 @@ public class DatanodeStorageInfo {
return current != null;
}
- public BlockInfo next() {
- BlockInfo res = current;
+ public BlockInfoContiguous next() {
+ BlockInfoContiguous res = current;
current = current.getNext(current.findStorageInfo(DatanodeStorageInfo.this));
return res;
}
@@ -112,7 +112,7 @@ public class DatanodeStorageInfo {
private volatile long remaining;
private long blockPoolUsed;
- private volatile BlockInfo blockList = null;
+ private volatile BlockInfoContiguous blockList = null;
private int numBlocks = 0;
/** The number of block reports received */
@@ -215,7 +215,7 @@ public class DatanodeStorageInfo {
return blockPoolUsed;
}
- public AddBlockResult addBlock(BlockInfo b) {
+ public AddBlockResult addBlock(BlockInfoContiguous b) {
// First check whether the block belongs to a different storage
// on the same DN.
AddBlockResult result = AddBlockResult.ADDED;
@@ -240,7 +240,7 @@ public class DatanodeStorageInfo {
return result;
}
- public boolean removeBlock(BlockInfo b) {
+ public boolean removeBlock(BlockInfoContiguous b) {
blockList = b.listRemove(blockList, this);
if (b.removeStorage(this)) {
numBlocks--;
@@ -254,7 +254,7 @@ public class DatanodeStorageInfo {
return numBlocks;
}
- Iterator<BlockInfo> getBlockIterator() {
+ Iterator<BlockInfoContiguous> getBlockIterator() {
return new BlockIterator(blockList);
}
@@ -263,7 +263,7 @@ public class DatanodeStorageInfo {
* Move block to the head of the list of blocks belonging to the data-node.
* @return the index of the head of the blockList
*/
- int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) {
+ int moveBlockToHead(BlockInfoContiguous b, int curIndex, int headIndex) {
blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex);
return curIndex;
}
@@ -273,7 +273,7 @@ public class DatanodeStorageInfo {
* @return the head of the blockList
*/
@VisibleForTesting
- BlockInfo getBlockListHeadForTesting(){
+ BlockInfoContiguous getBlockListHeadForTesting(){
return blockList;
}
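
For context: the BlockIterator retyped above walks an intrusive list — each block object stores the next-link for the storage it resides on, so DatanodeStorageInfo keeps only the head. A single-storage sketch (the real code threads the links through BlockInfoContiguous's triplets array, one slot per storage):

    import java.util.Iterator;

    class StorageBlockListSketch {
      static class BlockNode {
        final long id;
        BlockNode next;               // next block on this same storage
        BlockNode(long id) { this.id = id; }
      }

      private BlockNode blockList;    // list head, as in DatanodeStorageInfo

      void addBlock(BlockNode b) {    // new blocks are linked in at the head
        b.next = blockList;
        blockList = b;
      }

      Iterator<BlockNode> getBlockIterator() {
        return new Iterator<BlockNode>() {
          private BlockNode current = blockList;
          public boolean hasNext() { return current != null; }
          public BlockNode next() {
            BlockNode res = current;
            current = current.next;
            return res;
          }
        };
      }
    }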
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 1432af7..2a2c881 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -52,8 +52,8 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -364,7 +364,7 @@ public class FSDirectory implements Closeable {
long mtime, long atime, short replication, long preferredBlockSize,
byte storagePolicyId) {
return new INodeFile(id, null, permissions, mtime, atime,
- BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize,
+ BlockInfoContiguous.EMPTY_ARRAY, replication, preferredBlockSize,
storagePolicyId);
}
@@ -444,7 +444,7 @@ public class FSDirectory implements Closeable {
/**
* Add a block to the file. Returns a reference to the added block.
*/
- BlockInfo addBlock(String path, INodesInPath inodesInPath, Block block,
+ BlockInfoContiguous addBlock(String path, INodesInPath inodesInPath, Block block,
DatanodeStorageInfo[] targets,
boolean isStriped) throws IOException {
writeLock();
@@ -459,8 +459,8 @@ public class FSDirectory implements Closeable {
updateCount(inodesInPath, 0, fileINode.getPreferredBlockDiskspace(), true);
// associate new last block for the file
- BlockInfoUnderConstruction blockInfo =
- new BlockInfoUnderConstruction(
+ BlockInfoContiguousUnderConstruction blockInfo =
+ new BlockInfoContiguousUnderConstruction(
block,
numLocations,
BlockUCState.UNDER_CONSTRUCTION,
@@ -978,7 +978,7 @@ public class FSDirectory implements Closeable {
unprotectedTruncate(iip, newLength, collectedBlocks, mtime);
if(! onBlockBoundary) {
- BlockInfo oldBlock = file.getLastBlock();
+ BlockInfoContiguous oldBlock = file.getLastBlock();
Block tBlk =
getFSNamesystem().prepareFileForTruncate(iip,
clientName, clientMachine, file.computeFileSize() - newLength,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 3c7eae4..9ce3fa9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
@@ -772,10 +771,10 @@ public class FSEditLog implements LogsPurgeable {
public void logAddBlock(String path, INodeFile file) {
Preconditions.checkArgument(file.isUnderConstruction());
- BlockInfo[] blocks = file.getBlocks();
+ BlockInfoContiguous[] blocks = file.getBlocks();
Preconditions.checkState(blocks != null && blocks.length > 0);
- BlockInfo pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
- BlockInfo lastBlock = blocks[blocks.length - 1];
+ BlockInfoContiguous pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
+ BlockInfoContiguous lastBlock = blocks[blocks.length - 1];
AddBlockOp op = AddBlockOp.getInstance(cache.get()).setPath(path)
.setPenultimateBlock(pBlock).setLastBlock(lastBlock);
logEdit(op);
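
For context: logAddBlock() persists only the tail of the file's block list — the last block plus, when the file has more than one block, the penultimate one. The selection condensed into a generic stand-in:

    class AddBlockOpSketch {
      // Caller guarantees blocks is non-null and non-empty, matching the
      // Preconditions checks in logAddBlock() above.
      static <B> B penultimate(B[] blocks) {
        return blocks.length > 1 ? blocks[blocks.length - 2] : null;
      }

      static <B> B last(B[] blocks) {
        return blocks[blocks.length - 1];
      }
    }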
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 7cb6486..5fcad74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
@@ -933,7 +933,7 @@ public class FSEditLogLoader {
*/
private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
throws IOException {
- BlockInfo[] oldBlocks = file.getBlocks();
+ BlockInfoContiguous[] oldBlocks = file.getBlocks();
Block pBlock = op.getPenultimateBlock();
Block newBlock= op.getLastBlock();
@@ -950,16 +950,16 @@ public class FSEditLogLoader {
}
oldLastBlock.setNumBytes(pBlock.getNumBytes());
- if (oldLastBlock instanceof BlockInfoUnderConstruction) {
+ if (oldLastBlock instanceof BlockInfoContiguousUnderConstruction) {
fsNamesys.getBlockManager().forceCompleteBlock(file,
- (BlockInfoUnderConstruction) oldLastBlock);
+ (BlockInfoContiguousUnderConstruction) oldLastBlock);
fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
}
} else { // the penultimate block is null
Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
}
// add the new block
- BlockInfo newBI = new BlockInfoUnderConstruction(
+ BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
newBlock, file.getBlockReplication());
fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);
@@ -973,7 +973,7 @@ public class FSEditLogLoader {
private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
INodesInPath iip, INodeFile file) throws IOException {
// Update its block list
- BlockInfo[] oldBlocks = file.getBlocks();
+ BlockInfoContiguous[] oldBlocks = file.getBlocks();
Block[] newBlocks = op.getBlocks();
String path = op.getPath();
@@ -982,7 +982,7 @@ public class FSEditLogLoader {
// First, update blocks in common
for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) {
- BlockInfo oldBlock = oldBlocks[i];
+ BlockInfoContiguous oldBlock = oldBlocks[i];
Block newBlock = newBlocks[i];
boolean isLastBlock = i == newBlocks.length - 1;
@@ -1000,11 +1000,11 @@ public class FSEditLogLoader {
oldBlock.getGenerationStamp() != newBlock.getGenerationStamp();
oldBlock.setGenerationStamp(newBlock.getGenerationStamp());
- if (oldBlock instanceof BlockInfoUnderConstruction &&
+ if (oldBlock instanceof BlockInfoContiguousUnderConstruction &&
(!isLastBlock || op.shouldCompleteLastBlock())) {
changeMade = true;
fsNamesys.getBlockManager().forceCompleteBlock(file,
- (BlockInfoUnderConstruction) oldBlock);
+ (BlockInfoContiguousUnderConstruction) oldBlock);
}
if (changeMade) {
// The state or gen-stamp of the block has changed. So, we may be
@@ -1033,19 +1033,19 @@ public class FSEditLogLoader {
// We're adding blocks
for (int i = oldBlocks.length; i < newBlocks.length; i++) {
Block newBlock = newBlocks[i];
- BlockInfo newBI;
+ BlockInfoContiguous newBI;
if (!op.shouldCompleteLastBlock()) {
// TODO: shouldn't this only be true for the last block?
// what about an old-version fsync() where fsync isn't called
// until several blocks in?
- newBI = new BlockInfoUnderConstruction(
+ newBI = new BlockInfoContiguousUnderConstruction(
newBlock, file.getBlockReplication());
} else {
// OP_CLOSE should add finalized blocks. This code path
// is only executed when loading edits written by prior
// versions of Hadoop. Current versions always log
// OP_ADD operations as each block is allocated.
- newBI = new BlockInfo(newBlock, file.getBlockReplication());
+ newBI = new BlockInfoContiguous(newBlock, file.getBlockReplication());
}
fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 0a92054..ed05d40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -51,8 +51,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutFlags;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -683,7 +683,7 @@ public class FSImageFormat {
public void updateBlocksMap(INodeFile file) {
// Add file->block mapping
- final BlockInfo[] blocks = file.getBlocks();
+ final BlockInfoContiguous[] blocks = file.getBlocks();
if (blocks != null) {
final BlockManager bm = namesystem.getBlockManager();
for (int i = 0; i < blocks.length; i++) {
@@ -749,9 +749,9 @@ public class FSImageFormat {
// file
// read blocks
- BlockInfo[] blocks = new BlockInfo[numBlocks];
+ BlockInfoContiguous[] blocks = new BlockInfoContiguous[numBlocks];
for (int j = 0; j < numBlocks; j++) {
- blocks[j] = new BlockInfo(replication);
+ blocks[j] = new BlockInfoContiguous(replication);
blocks[j].readFields(in);
}
@@ -771,8 +771,8 @@ public class FSImageFormat {
clientMachine = FSImageSerialization.readString(in);
// convert the last block to BlockUC
if (blocks.length > 0) {
- BlockInfo lastBlk = blocks[blocks.length - 1];
- blocks[blocks.length - 1] = new BlockInfoUnderConstruction(
+ BlockInfoContiguous lastBlk = blocks[blocks.length - 1];
+ blocks[blocks.length - 1] = new BlockInfoContiguousUnderConstruction(
lastBlk, replication);
}
}
@@ -947,9 +947,9 @@ public class FSImageFormat {
FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
if (oldnode.numBlocks() > 0) {
- BlockInfo ucBlock = cons.getLastBlock();
+ BlockInfoContiguous ucBlock = cons.getLastBlock();
// we do not replace the inode, just replace the last block of oldnode
- BlockInfo info = namesystem.getBlockManager().addBlockCollection(
+ BlockInfoContiguous info = namesystem.getBlockManager().addBlockCollection(
ucBlock, oldnode);
oldnode.setBlock(oldnode.numBlocks() - 1, info);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 26ca16a..f20add1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -40,8 +40,8 @@ import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
@@ -170,7 +170,7 @@ public final class FSImageFormatPBINode {
public static void updateBlocksMap(INodeFile file, BlockManager bm) {
// Add file->block mapping
- final BlockInfo[] blocks = file.getBlocks();
+ final BlockInfoContiguous[] blocks = file.getBlocks();
if (blocks != null) {
for (int i = 0; i < blocks.length; i++) {
file.setBlock(i, bm.addBlockCollection(blocks[i], file));
@@ -282,9 +282,9 @@ public final class FSImageFormatPBINode {
short replication = (short) f.getReplication();
LoaderContext state = parent.getLoaderContext();
- BlockInfo[] blocks = new BlockInfo[bp.size()];
+ BlockInfoContiguous[] blocks = new BlockInfoContiguous[bp.size()];
for (int i = 0, e = bp.size(); i < e; ++i) {
- blocks[i] = new BlockInfo(PBHelper.convert(bp.get(i)), replication);
+ blocks[i] = new BlockInfoContiguous(PBHelper.convert(bp.get(i)), replication);
}
final PermissionStatus permissions = loadPermission(f.getPermission(),
parent.getLoaderContext().getStringTable());
@@ -310,9 +310,9 @@ public final class FSImageFormatPBINode {
INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
if (blocks.length > 0) {
- BlockInfo lastBlk = file.getLastBlock();
+ BlockInfoContiguous lastBlk = file.getLastBlock();
// replace the last block of file
- file.setBlock(file.numBlocks() - 1, new BlockInfoUnderConstruction(
+ file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction(
lastBlk, replication));
}
}
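updateBlocksMap writes the return value of addBlockCollection back into the file because the blocks map may already hold a canonical instance for that block ID. A hedged sketch of that add-then-write-back contract, reusing the SimpleBlock stand-in from the sketch above (the map API here is assumed, not BlockManager's):

    import java.util.HashMap;
    import java.util.Map;

    class BlocksMapSketch {
      private final Map<Long, SimpleBlock> map = new HashMap<>();

      // Returns the canonical instance: an existing entry wins over the
      // freshly loaded object, so callers must store the return value.
      SimpleBlock addBlockCollection(SimpleBlock b) {
        SimpleBlock existing = map.putIfAbsent(b.id, b);
        return existing != null ? existing : b;
      }

      void updateBlocksMap(SimpleBlock[] fileBlocks) {
        for (int i = 0; i < fileBlocks.length; i++) {
          fileBlocks[i] = addBlockCollection(fileBlocks[i]); // write back
        }
      }
    }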
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 1c22ee9..33f644d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -33,8 +33,8 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -126,17 +126,17 @@ public class FSImageSerialization {
long preferredBlockSize = in.readLong();
int numBlocks = in.readInt();
- BlockInfo[] blocks = new BlockInfo[numBlocks];
+ BlockInfoContiguous[] blocks = new BlockInfoContiguous[numBlocks];
Block blk = new Block();
int i = 0;
for (; i < numBlocks-1; i++) {
blk.readFields(in);
- blocks[i] = new BlockInfo(blk, blockReplication);
+ blocks[i] = new BlockInfoContiguous(blk, blockReplication);
}
// last block is UNDER_CONSTRUCTION
if(numBlocks > 0) {
blk.readFields(in);
- blocks[i] = new BlockInfoUnderConstruction(
+ blocks[i] = new BlockInfoContiguousUnderConstruction(
blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null);
}
PermissionStatus perm = PermissionStatus.read(in);
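The FSImageSerialization loop above loads numBlocks - 1 complete blocks and then constructs the final one directly in the UNDER_CONSTRUCTION state. A compact sketch of the same shape, again with the hypothetical stand-ins (readLong standing in for Block.readFields):

    import java.io.DataInput;
    import java.io.IOException;

    class UCDeserializerSketch {
      static SimpleBlock[] readBlocks(DataInput in, int numBlocks,
          short repl) throws IOException {
        SimpleBlock[] blocks = new SimpleBlock[numBlocks];
        int i = 0;
        for (; i < numBlocks - 1; i++) {
          blocks[i] = new SimpleBlock(in.readLong(), repl);
        }
        if (numBlocks > 0) { // last block is UNDER_CONSTRUCTION
          blocks[i] = new SimpleBlockUC(
              new SimpleBlock(in.readLong(), repl), repl);
        }
        return blocks;
      }
    }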
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 84ef360..a60f1eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -201,8 +201,8 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -2017,7 +2017,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
leaseManager.addLease(
file.getFileUnderConstructionFeature().getClientName(), src);
boolean shouldRecoverNow = (newBlock == null);
- BlockInfo oldBlock = file.getLastBlock();
+ BlockInfoContiguous oldBlock = file.getLastBlock();
boolean shouldCopyOnTruncate = shouldCopyOnTruncate(file, oldBlock);
if(newBlock == null) {
newBlock = (shouldCopyOnTruncate) ? createNewBlock(file.isStriped()) :
@@ -2025,11 +2025,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
nextGenerationStamp(blockIdManager.isLegacyBlock(oldBlock)));
}
- BlockInfoUnderConstruction truncatedBlockUC;
+ BlockInfoContiguousUnderConstruction truncatedBlockUC;
if(shouldCopyOnTruncate) {
// Add new truncateBlock into blocksMap and
// use oldBlock as a source for copy-on-truncate recovery
- truncatedBlockUC = new BlockInfoUnderConstruction(newBlock,
+ truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock,
file.getBlockReplication());
truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
truncatedBlockUC.setTruncateBlock(oldBlock);
@@ -2045,7 +2045,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
oldBlock = file.getLastBlock();
assert !oldBlock.isComplete() : "oldBlock should be under construction";
- truncatedBlockUC = (BlockInfoUnderConstruction) oldBlock;
+ truncatedBlockUC = (BlockInfoContiguousUnderConstruction) oldBlock;
truncatedBlockUC.setTruncateBlock(new Block(oldBlock));
truncatedBlockUC.getTruncateBlock().setNumBytes(
oldBlock.getNumBytes() - lastBlockDelta);
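The two branches above implement the two truncate-recovery strategies: copy-on-truncate allocates a fresh block and keeps the old one purely as a copy source, while in-place truncation reuses the old block's identity and records the shorter target length. A simplified sketch under the same stand-in types (the field layout here is an assumption, not the BlockInfoContiguousUnderConstruction API):

    class TruncateSketch {
      static SimpleBlockUC prepareRecovery(SimpleBlock oldBlock,
          long lastBlockDelta, boolean copyOnTruncate, long newBlockId,
          short repl) {
        SimpleBlockUC uc;
        if (copyOnTruncate) {
          // brand-new block; oldBlock survives as the copy source
          uc = new SimpleBlockUC(new SimpleBlock(newBlockId, repl), repl);
        } else {
          // truncate in place: keep the old block's identity
          uc = new SimpleBlockUC(oldBlock, repl);
        }
        uc.numBytes = oldBlock.numBytes - lastBlockDelta; // target length
        uc.truncateSource = oldBlock;
        return uc;
      }
    }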
@@ -2071,7 +2071,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* Defines if a replica needs to be copied on truncate or
* can be truncated in place.
*/
- boolean shouldCopyOnTruncate(INodeFile file, BlockInfo blk) {
+ boolean shouldCopyOnTruncate(INodeFile file, BlockInfoContiguous blk) {
if(!isUpgradeFinalized()) {
return true;
}
@@ -2625,7 +2625,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE,
iip, src, holder, clientMachine, false);
- final BlockInfo lastBlock = myFile.getLastBlock();
+ final BlockInfoContiguous lastBlock = myFile.getLastBlock();
// Check that the block has at least minimum replication.
if(lastBlock != null && lastBlock.isComplete() &&
!getBlockManager().isSufficientlyReplicated(lastBlock)) {
@@ -2674,7 +2674,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
dir.updateSpaceConsumed(iip, 0, diff * file.getBlockReplication());
}
} else {
- BlockInfo lastBlock = file.getLastBlock();
+ BlockInfoContiguous lastBlock = file.getLastBlock();
if (lastBlock != null) {
ExtendedBlock blk = new ExtendedBlock(this.getBlockPoolId(), lastBlock);
ret = new LocatedBlock(blk, new DatanodeInfo[0]);
@@ -2807,7 +2807,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
op.getExceptionMessage(src, holder, clientMachine,
"lease recovery is in progress. Try again later."));
} else {
- final BlockInfo lastBlock = file.getLastBlock();
+ final BlockInfoContiguous lastBlock = file.getLastBlock();
if (lastBlock != null
&& lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
throw new RecoveryInProgressException(
@@ -2994,8 +2994,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return onRetryBlock[0];
} else {
// add new chosen targets to already allocated block and return
- BlockInfo lastBlockInFile = pendingFile.getLastBlock();
- ((BlockInfoUnderConstruction) lastBlockInFile)
+ BlockInfoContiguous lastBlockInFile = pendingFile.getLastBlock();
+ ((BlockInfoContiguousUnderConstruction) lastBlockInFile)
.setExpectedLocations(targets);
offset = pendingFile.computeFileSize();
return makeLocatedBlock(lastBlockInFile, targets, offset);
@@ -3086,7 +3086,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
}
final INodeFile pendingFile = checkLease(src, clientName, inode, fileId);
- BlockInfo lastBlockInFile = pendingFile.getLastBlock();
+ BlockInfoContiguous lastBlockInFile = pendingFile.getLastBlock();
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
// The block that the client claims is the current last block
// doesn't match up with what we think is the last block. There are
@@ -3114,7 +3114,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
// changed the namesystem state yet.
// We run this analysis again in Part II where case 4 is impossible.
- BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
+ BlockInfoContiguous penultimateBlock = pendingFile.getPenultimateBlock();
if (previous == null &&
lastBlockInFile != null &&
lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
@@ -3141,7 +3141,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
src + ". Returning previously allocated block " + lastBlockInFile);
long offset = pendingFile.computeFileSize();
onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
- ((BlockInfoUnderConstruction)lastBlockInFile).getExpectedStorageLocations(),
+ ((BlockInfoContiguousUnderConstruction)lastBlockInFile).getExpectedStorageLocations(),
offset);
return new FileState(pendingFile, src, iip);
} else {
@@ -3433,12 +3433,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* @param isStriped whether the file uses a striped or contiguous layout
* @throws QuotaExceededException If addition of block exceeds space quota
*/
- BlockInfo saveAllocatedBlock(String src, INodesInPath inodesInPath,
+ BlockInfoContiguous saveAllocatedBlock(String src, INodesInPath inodesInPath,
Block newBlock, DatanodeStorageInfo[] targets,
boolean isStriped)
throws IOException {
assert hasWriteLock();
- BlockInfo b = dir.addBlock(src, inodesInPath,
+ BlockInfoContiguous b = dir.addBlock(src, inodesInPath,
newBlock, targets, isStriped);
NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
DatanodeStorageInfo.incrementBlocksScheduled(targets);
@@ -3467,14 +3467,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
try {
if (checkall) {
// check all blocks of the file.
- for (BlockInfo block: v.getBlocks()) {
+ for (BlockInfoContiguous block: v.getBlocks()) {
if (!isCompleteBlock(src, block, blockManager.minReplication)) {
return false;
}
}
} else {
// check the penultimate block of this file
- BlockInfo b = v.getPenultimateBlock();
+ BlockInfoContiguous b = v.getPenultimateBlock();
if (b != null
&& !isCompleteBlock(src, b, blockManager.minReplication)) {
return false;
@@ -3486,9 +3486,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
}
- private static boolean isCompleteBlock(String src, BlockInfo b, int minRepl) {
+ private static boolean isCompleteBlock(String src, BlockInfoContiguous b, int minRepl) {
if (!b.isComplete()) {
- final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)b;
+ final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)b;
final int numNodes = b.numNodes();
LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = "
+ uc.getBlockUCState() + ", replication# = " + numNodes
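checkFileProgress, shown above, gates block allocation in one of two modes: scan every block of the file, or only the penultimate one; isCompleteBlock then logs and rejects any block still under construction. The mode split can be sketched like this with the stand-ins:

    class FileProgressSketch {
      static boolean checkFileProgress(SimpleBlock[] blocks,
          boolean checkall) {
        if (checkall) {
          for (SimpleBlock b : blocks) { // check all blocks of the file
            if (!b.isComplete()) {
              return false;
            }
          }
          return true;
        }
        // otherwise only the penultimate block must be complete
        SimpleBlock p = blocks.length >= 2
            ? blocks[blocks.length - 2] : null;
        return p == null || p.isComplete();
      }
    }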
@@ -3678,7 +3678,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
for (Block b : blocks.getToDeleteList()) {
if (trackBlockCounts) {
- BlockInfo bi = getStoredBlock(b);
+ BlockInfoContiguous bi = getStoredBlock(b);
if (bi.isComplete()) {
numRemovedComplete++;
if (bi.numNodes() >= blockManager.minReplication) {
@@ -3897,10 +3897,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
final INodeFile pendingFile = iip.getLastINode().asFile();
int nrBlocks = pendingFile.numBlocks();
- BlockInfo[] blocks = pendingFile.getBlocks();
+ BlockInfoContiguous[] blocks = pendingFile.getBlocks();
int nrCompleteBlocks;
- BlockInfo curBlock = null;
+ BlockInfoContiguous curBlock = null;
for(nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) {
curBlock = blocks[nrCompleteBlocks];
if(!curBlock.isComplete())
@@ -3935,9 +3935,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
// The last block is not COMPLETE, and
// the penultimate block, if it exists, is either COMPLETE or COMMITTED
- final BlockInfo lastBlock = pendingFile.getLastBlock();
+ final BlockInfoContiguous lastBlock = pendingFile.getLastBlock();
BlockUCState lastBlockState = lastBlock.getBlockUCState();
- BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
+ BlockInfoContiguous penultimateBlock = pendingFile.getPenultimateBlock();
// If the penultimate block doesn't exist then its minReplication is met
boolean penultimateBlockMinReplication = penultimateBlock == null ? true :
@@ -3970,7 +3970,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
throw new AlreadyBeingCreatedException(message);
case UNDER_CONSTRUCTION:
case UNDER_RECOVERY:
- final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)lastBlock;
+ final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)lastBlock;
// determine if last block was intended to be truncated
Block recoveryBlock = uc.getTruncateBlock();
boolean truncateRecovery = recoveryBlock != null;
@@ -4081,12 +4081,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
@VisibleForTesting
- BlockInfo getStoredBlock(Block block) {
+ BlockInfoContiguous getStoredBlock(Block block) {
return blockManager.getStoredBlock(block);
}
@Override
- public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) {
+ public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) {
assert hasReadLock();
final BlockCollection bc = blockUC.getBlockCollection();
if (bc == null || !(bc instanceof INodeFile)
@@ -4139,7 +4139,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
checkNameNodeSafeMode(
"Cannot commitBlockSynchronization while in safe mode");
- final BlockInfo storedBlock = getStoredBlock(
+ final BlockInfoContiguous storedBlock = getStoredBlock(
ExtendedBlock.getLocalBlock(oldBlock));
if (storedBlock == null) {
if (deleteblock) {
@@ -4187,8 +4187,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return;
}
- BlockInfoUnderConstruction truncatedBlock =
- (BlockInfoUnderConstruction) iFile.getLastBlock();
+ BlockInfoContiguousUnderConstruction truncatedBlock =
+ (BlockInfoContiguousUnderConstruction) iFile.getLastBlock();
long recoveryId = truncatedBlock.getBlockRecoveryId();
boolean copyTruncate =
truncatedBlock.getBlockId() != storedBlock.getBlockId();
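commitBlockSynchronization tells the two truncate flavors apart by block ID alone: if the file's last (under-construction) block no longer carries the same ID as the block being recovered, recovery must have been copy-on-truncate. Reduced to the stand-ins:

    class CommitSyncSketch {
      // copy-on-truncate allocated a new block, so the IDs diverge;
      // in-place truncation kept the original ID.
      static boolean isCopyTruncate(SimpleBlockUC truncatedBlock,
          SimpleBlock storedBlock) {
        return truncatedBlock.id != storedBlock.id;
      }
    }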
@@ -4297,7 +4297,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* @throws IOException on error
*/
@VisibleForTesting
- String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
+ String closeFileCommitBlocks(INodeFile pendingFile, BlockInfoContiguous storedBlock)
throws IOException {
final INodesInPath iip = INodesInPath.fromINode(pendingFile);
final String src = iip.getPath();
@@ -4605,7 +4605,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
while (it.hasNext()) {
Block b = it.next();
- BlockInfo blockInfo = blockManager.getStoredBlock(b);
+ BlockInfoContiguous blockInfo = blockManager.getStoredBlock(b);
if (blockInfo.getBlockCollection().getStoragePolicyID() == lpPolicy.getId()) {
filesToDelete.add(blockInfo.getBlockCollection());
}
@@ -5552,7 +5552,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) // mostly true
return;
- BlockInfo storedBlock = getStoredBlock(b);
+ BlockInfoContiguous storedBlock = getStoredBlock(b);
if (storedBlock.isComplete()) {
safeMode.decrementSafeBlockCount((short)blockManager.countNodes(b).liveReplicas());
}
@@ -6071,7 +6071,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
+ "access token for block " + block);
// check stored block state
- BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
+ BlockInfoContiguous storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
if (storedBlock == null ||
storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
throw new IOException(block +
@@ -6200,8 +6200,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
assert hasWriteLock();
// check the validity of the block and lease holder name
final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
- final BlockInfoUnderConstruction blockinfo
- = (BlockInfoUnderConstruction)pendingFile.getLastBlock();
+ final BlockInfoContiguousUnderConstruction blockinfo
+ = (BlockInfoContiguousUnderConstruction)pendingFile.getLastBlock();
// check new GS & length: this is not expected
if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() ||
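The updatePipeline hunk ends mid-condition here, but its first clause already shows the guard: a replacement block whose generation stamp is not strictly newer is rejected. A hedged sketch of such a guard (the length clause below is an assumption for illustration; the hunk is cut off before it):

    class PipelineUpdateSketch {
      static void checkNewBlock(long newGS, long oldGS,
          long newLen, long oldLen) {
        // reject stale generation stamps; the shrinking-length check is
        // assumed here, not quoted from the diff
        if (newGS <= oldGS || newLen < oldLen) {
          throw new IllegalStateException(
              "pipeline update with stale GS or shorter block");
        }
      }
    }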
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fd1acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
index 896bedb..1ebdde6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
@@ -20,9 +20,8 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
/**
@@ -59,10 +58,10 @@ public class FileUnderConstructionFeature implements INode.Feature {
*/
void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
throws IOException {
- BlockInfo lastBlock = f.getLastBlock();
+ BlockInfoContiguous lastBlock = f.getLastBlock();
assert (lastBlock != null) : "The last block for path "
+ f.getFullPathName() + " is null when updating its length";
- assert (lastBlock instanceof BlockInfoUnderConstruction)
+ assert (lastBlock instanceof BlockInfoContiguousUnderConstruction)
: "The last block for path " + f.getFullPathName()
+ " is not a BlockInfoUnderConstruction when updating its length";
lastBlock.setNumBytes(lastBlockLength);
@@ -75,11 +74,11 @@ public class FileUnderConstructionFeature implements INode.Feature {
*/
void cleanZeroSizeBlock(final INodeFile f,
final BlocksMapUpdateInfo collectedBlocks) {
- final BlockInfo[] blocks = f.getBlocks();
+ final BlockInfoContiguous[] blocks = f.getBlocks();
if (blocks != null && blocks.length > 0
- && blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) {
- BlockInfoUnderConstruction lastUC =
- (BlockInfoUnderConstruction) blocks[blocks.length - 1];
+ && blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) {
+ BlockInfoContiguousUnderConstruction lastUC =
+ (BlockInfoContiguousUnderConstruction) blocks[blocks.length - 1];
if (lastUC.getNumBytes() == 0) {
// this is a 0-sized block; no need to check its UC state here
collectedBlocks.addDeleteBlock(lastUC);
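cleanZeroSizeBlock, the final hunk, drops a trailing under-construction block that never received any bytes, adding it to the delete list instead of keeping an empty block around. In stand-in form:

    import java.util.List;

    class CleanupSketch {
      static void cleanZeroSizeBlock(SimpleBlock[] blocks,
          List<SimpleBlock> collectedBlocks) {
        if (blocks != null && blocks.length > 0
            && blocks[blocks.length - 1] instanceof SimpleBlockUC) {
          SimpleBlockUC lastUC = (SimpleBlockUC) blocks[blocks.length - 1];
          if (lastUC.numBytes == 0) {
            // 0-sized trailing block: schedule it for removal
            collectedBlocks.add(lastUC);
          }
        }
      }
    }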