Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2013/12/12 08:17:58 UTC
svn commit: r1550363 [5/8] - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/
src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/protocolPB/ src/main/java/...
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java Thu Dec 12 07:17:51 2013
@@ -18,10 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
+import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -56,6 +53,7 @@ class FsVolumeList {
* @param blockSize free space needed on the volume
* @return next volume to store the block in.
*/
+ // TODO should choose volume with storage type
synchronized FsVolumeImpl getNextVolume(long blockSize) throws IOException {
return blockChooser.chooseVolume(volumes, blockSize);
}
@@ -92,27 +90,32 @@ class FsVolumeList {
return remaining;
}
- void getVolumeMap(ReplicaMap volumeMap) throws IOException {
+ void initializeReplicaMaps(ReplicaMap globalReplicaMap) throws IOException {
for (FsVolumeImpl v : volumes) {
- v.getVolumeMap(volumeMap);
+ v.getVolumeMap(globalReplicaMap);
}
}
- void getVolumeMap(String bpid, ReplicaMap volumeMap) throws IOException {
+ void getAllVolumesMap(String bpid, ReplicaMap volumeMap) throws IOException {
long totalStartTime = System.currentTimeMillis();
for (FsVolumeImpl v : volumes) {
- FsDatasetImpl.LOG.info("Adding replicas to map for block pool " + bpid +
- " on volume " + v + "...");
- long startTime = System.currentTimeMillis();
- v.getVolumeMap(bpid, volumeMap);
- long timeTaken = System.currentTimeMillis() - startTime;
- FsDatasetImpl.LOG.info("Time to add replicas to map for block pool " + bpid +
- " on volume " + v + ": " + timeTaken + "ms");
+ getVolumeMap(bpid, v, volumeMap);
}
long totalTimeTaken = System.currentTimeMillis() - totalStartTime;
FsDatasetImpl.LOG.info("Total time to add all replicas to map: "
+ totalTimeTaken + "ms");
}
+
+ void getVolumeMap(String bpid, FsVolumeImpl volume, ReplicaMap volumeMap)
+ throws IOException {
+ FsDatasetImpl.LOG.info("Adding replicas to map for block pool " + bpid +
+ " on volume " + volume + "...");
+ long startTime = System.currentTimeMillis();
+ volume.getVolumeMap(bpid, volumeMap);
+ long timeTaken = System.currentTimeMillis() - startTime;
+ FsDatasetImpl.LOG.info("Time to add replicas to map for block pool " + bpid +
+ " on volume " + volume + ": " + timeTaken + "ms");
+ }
/**
* Calls {@link FsVolumeImpl#checkDirs()} on each volume, removing any
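For context, a minimal sketch of how the refactored methods compose (hypothetical caller; assumes the FsVolumeList and ReplicaMap classes as modified in this commit, and that ReplicaMap takes the dataset mutex in its constructor):

    // Load the replicas of one block pool across every volume. getAllVolumesMap()
    // now delegates to the per-volume getVolumeMap() helper, which logs the load
    // time for each volume individually before the total is logged.
    ReplicaMap volumeMap = new ReplicaMap(datasetLock); // datasetLock: placeholder mutex
    volumes.getAllVolumesMap(bpid, volumeMap);          // bpid: block pool being loaded
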
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java Thu Dec 12 07:17:51 2013
@@ -117,6 +117,13 @@ class ReplicaMap {
return m.put(replicaInfo.getBlockId(), replicaInfo);
}
}
+
+ /**
+ * Add all entries from the given replica map into the local replica map.
+ */
+ void addAll(ReplicaMap other) {
+ map.putAll(other.map);
+ }
/**
* Remove the replica's meta information from the map that matches
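A short sketch of the merge pattern the new method enables (hypothetical usage; note that putAll() on the outer map replaces the inner per-block-pool map for any block pool id present in both, so callers should fold in scratch maps whose pools are disjoint or freshly built):

    ReplicaMap global = new ReplicaMap(datasetLock);    // datasetLock: shared mutex
    ReplicaMap perVolume = new ReplicaMap(datasetLock);
    // ... scan a single volume, populating perVolume ...
    global.addAll(perVolume);  // bulk-copy every block pool from the scratch map
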
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu Dec 12 07:17:51 2013
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.blo
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
@@ -364,7 +365,7 @@ public class FSDirectory implements Clos
* Add a block to the file. Returns a reference to the added block.
*/
BlockInfo addBlock(String path, INodesInPath inodesInPath, Block block,
- DatanodeDescriptor targets[]) throws IOException {
+ DatanodeStorageInfo[] targets) throws IOException {
waitForReady();
writeLock();
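The new parameter type lines up with BlockManager#chooseTarget, which (per the FSNamesystem changes below) now returns storages rather than datanode descriptors. A condensed sketch of the call chain, using names from this commit:

    // Allocation path, simplified: choose target storages, then record the block.
    final DatanodeStorageInfo[] targets = getBlockManager().chooseTarget(
        src, replication, clientNode, excludedNodes, blockSize, favoredNodes);
    BlockInfo b = dir.addBlock(src, inodesInPath, newBlock, targets);
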
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Dec 12 07:17:51 2013
@@ -147,6 +147,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -174,14 +175,7 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
-import org.apache.hadoop.hdfs.server.blockmanagement.OutOfV1GenerationStampsException;
+import org.apache.hadoop.hdfs.server.blockmanagement.*;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -218,7 +212,8 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.util.ChunkedArrayList;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
@@ -2582,7 +2577,7 @@ public class FSNamesystem implements Nam
}
// choose targets for the new block to be allocated.
- final DatanodeDescriptor targets[] = getBlockManager().chooseTarget(
+ final DatanodeStorageInfo targets[] = getBlockManager().chooseTarget(
src, replication, clientNode, excludedNodes, blockSize, favoredNodes);
// Part II.
@@ -2709,7 +2704,7 @@ public class FSNamesystem implements Nam
src + ". Returning previously allocated block " + lastBlockInFile);
long offset = pendingFile.computeFileSize();
onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
- ((BlockInfoUnderConstruction)lastBlockInFile).getExpectedLocations(),
+ ((BlockInfoUnderConstruction)lastBlockInFile).getExpectedStorageLocations(),
offset);
return iip;
} else {
@@ -2727,11 +2722,10 @@ public class FSNamesystem implements Nam
return iip;
}
- LocatedBlock makeLocatedBlock(Block blk,
- DatanodeInfo[] locs,
+ LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
long offset) throws IOException {
LocatedBlock lBlk = new LocatedBlock(
- getExtendedBlock(blk), locs, offset);
+ getExtendedBlock(blk), locs, offset, false);
getBlockManager().setBlockToken(
lBlk, BlockTokenSecretManager.AccessMode.WRITE);
return lBlk;
@@ -2739,7 +2733,8 @@ public class FSNamesystem implements Nam
/** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
LocatedBlock getAdditionalDatanode(String src, final ExtendedBlock blk,
- final DatanodeInfo[] existings, final Set<Node> excludes,
+ final DatanodeInfo[] existings, final String[] storageIDs,
+ final Set<Node> excludes,
final int numAdditionalNodes, final String clientName
) throws IOException {
//check if the feature is enabled
@@ -2747,7 +2742,7 @@ public class FSNamesystem implements Nam
final DatanodeDescriptor clientnode;
final long preferredblocksize;
- final List<DatanodeDescriptor> chosen;
+ final List<DatanodeStorageInfo> chosen;
checkOperation(OperationCategory.READ);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
readLock();
@@ -2762,23 +2757,18 @@ public class FSNamesystem implements Nam
clientnode = file.getFileUnderConstructionFeature().getClientNode();
preferredblocksize = file.getPreferredBlockSize();
- //find datanode descriptors
- chosen = new ArrayList<DatanodeDescriptor>();
- for(DatanodeInfo d : existings) {
- final DatanodeDescriptor descriptor = blockManager.getDatanodeManager(
- ).getDatanode(d);
- if (descriptor != null) {
- chosen.add(descriptor);
- }
- }
+ //find datanode storages
+ final DatanodeManager dm = blockManager.getDatanodeManager();
+ chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs));
} finally {
readUnlock();
}
// choose new datanodes.
- final DatanodeInfo[] targets = blockManager.getBlockPlacementPolicy(
+ final DatanodeStorageInfo[] targets = blockManager.getBlockPlacementPolicy(
).chooseTarget(src, numAdditionalNodes, clientnode, chosen, true,
- excludes, preferredblocksize);
+ // TODO: get storage type from the file
+ excludes, preferredblocksize, StorageType.DEFAULT);
final LocatedBlock lb = new LocatedBlock(blk, targets);
blockManager.setBlockToken(lb, AccessMode.COPY);
return lb;
@@ -2949,14 +2939,13 @@ public class FSNamesystem implements Nam
* @throws QuotaExceededException If addition of block exceeds space quota
*/
BlockInfo saveAllocatedBlock(String src, INodesInPath inodesInPath,
- Block newBlock, DatanodeDescriptor targets[]) throws IOException {
+ Block newBlock, DatanodeStorageInfo[] targets)
+ throws IOException {
assert hasWriteLock();
BlockInfo b = dir.addBlock(src, inodesInPath, newBlock, targets);
NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
+ getBlockPoolId() + " " + b);
- for (DatanodeDescriptor dn : targets) {
- dn.incBlocksScheduled();
- }
+ DatanodeStorageInfo.incrementBlocksScheduled(targets);
return b;
}
@@ -3419,7 +3408,7 @@ public class FSNamesystem implements Nam
boolean isFileClosed(String src)
throws AccessControlException, UnresolvedLinkException,
StandbyException, IOException {
- FSPermissionChecker pc = getPermissionChecker();
+ FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.READ);
readLock();
try {
@@ -3716,7 +3705,7 @@ public class FSNamesystem implements Nam
final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)lastBlock;
// setup the last block locations from the blockManager if not known
if (uc.getNumExpectedLocations() == 0) {
- uc.setExpectedLocations(blockManager.getNodes(lastBlock));
+ uc.setExpectedLocations(blockManager.getStorages(lastBlock));
}
if (uc.getNumExpectedLocations() == 0 && uc.getNumBytes() == 0) {
@@ -3915,32 +3904,39 @@ public class FSNamesystem implements Nam
// find the DatanodeDescriptor objects
// There should be no locations in the blockManager till now because the
// file is underConstruction
- List<DatanodeDescriptor> targetList =
+ ArrayList<DatanodeDescriptor> trimmedTargets =
new ArrayList<DatanodeDescriptor>(newtargets.length);
+ ArrayList<String> trimmedStorages =
+ new ArrayList<String>(newtargets.length);
if (newtargets.length > 0) {
- for (DatanodeID newtarget : newtargets) {
+ for (int i = 0; i < newtargets.length; ++i) {
// try to get targetNode
DatanodeDescriptor targetNode =
- blockManager.getDatanodeManager().getDatanode(newtarget);
- if (targetNode != null)
- targetList.add(targetNode);
- else if (LOG.isDebugEnabled()) {
- LOG.debug("DatanodeDescriptor (=" + newtarget + ") not found");
+ blockManager.getDatanodeManager().getDatanode(newtargets[i]);
+ if (targetNode != null) {
+ trimmedTargets.add(targetNode);
+ trimmedStorages.add(newtargetstorages[i]);
+ } else if (LOG.isDebugEnabled()) {
+ LOG.debug("DatanodeDescriptor (=" + newtargets[i] + ") not found");
}
}
}
- if ((closeFile) && !targetList.isEmpty()) {
+ if ((closeFile) && !trimmedTargets.isEmpty()) {
// the file is getting closed. Insert block locations into blockManager.
// Otherwise fsck will report these blocks as MISSING, especially if the
// blocksReceived from Datanodes take a long time to arrive.
- for (DatanodeDescriptor targetNode : targetList) {
- targetNode.addBlock(storedBlock);
+ for (int i = 0; i < trimmedTargets.size(); i++) {
+ trimmedTargets.get(i).addBlock(
+ trimmedStorages.get(i), storedBlock);
}
}
+
// add pipeline locations into the INodeUnderConstruction
- DatanodeDescriptor[] targetArray =
- new DatanodeDescriptor[targetList.size()];
- iFile.setLastBlock(storedBlock, targetList.toArray(targetArray));
+ DatanodeStorageInfo[] trimmedStorageInfos =
+ blockManager.getDatanodeManager().getDatanodeStorageInfos(
+ trimmedTargets.toArray(new DatanodeID[trimmedTargets.size()]),
+ trimmedStorages.toArray(new String[trimmedStorages.size()]));
+ iFile.setLastBlock(storedBlock, trimmedStorageInfos);
}
if (closeFile) {
@@ -4142,16 +4138,16 @@ public class FSNamesystem implements Nam
* @throws IOException
*/
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
- long capacity, long dfsUsed, long remaining, long blockPoolUsed,
- long cacheCapacity, long cacheUsed, int xceiverCount, int xmitsInProgress,
- int failedVolumes) throws IOException {
+ StorageReport[] reports, long cacheCapacity, long cacheUsed,
+ int xceiverCount, int xmitsInProgress, int failedVolumes)
+ throws IOException {
readLock();
try {
final int maxTransfer = blockManager.getMaxReplicationStreams()
- xmitsInProgress;
DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
- nodeReg, blockPoolId, capacity, dfsUsed, remaining, blockPoolUsed,
- cacheCapacity, cacheUsed, xceiverCount, maxTransfer, failedVolumes);
+ nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
+ xceiverCount, maxTransfer, failedVolumes);
return new HeartbeatResponse(cmds, createHaStatusHeartbeat());
} finally {
readUnlock();
@@ -5312,11 +5308,11 @@ public class FSNamesystem implements Nam
}
public void processIncrementalBlockReport(final DatanodeID nodeID,
- final String poolId, final ReceivedDeletedBlockInfo blockInfos[])
+ final String poolId, final StorageReceivedDeletedBlocks srdb)
throws IOException {
writeLock();
try {
- blockManager.processIncrementalBlockReport(nodeID, poolId, blockInfos);
+ blockManager.processIncrementalBlockReport(nodeID, poolId, srdb);
} finally {
writeUnlock();
}
@@ -5805,9 +5801,10 @@ public class FSNamesystem implements Nam
for (int i = 0; i < blocks.length; i++) {
ExtendedBlock blk = blocks[i].getBlock();
DatanodeInfo[] nodes = blocks[i].getLocations();
+ String[] storageIDs = blocks[i].getStorageIDs();
for (int j = 0; j < nodes.length; j++) {
- DatanodeInfo dn = nodes[j];
- blockManager.findAndMarkBlockAsCorrupt(blk, dn,
+ blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j],
+ storageIDs == null ? null: storageIDs[j],
"client machine reported it");
}
}
@@ -5862,7 +5859,7 @@ public class FSNamesystem implements Nam
* @throws IOException if any error occurs
*/
void updatePipeline(String clientName, ExtendedBlock oldBlock,
- ExtendedBlock newBlock, DatanodeID[] newNodes)
+ ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
throws IOException {
checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@@ -5883,7 +5880,7 @@ public class FSNamesystem implements Nam
assert newBlock.getBlockId()==oldBlock.getBlockId() : newBlock + " and "
+ oldBlock + " has different block identifier";
updatePipelineInternal(clientName, oldBlock, newBlock, newNodes,
- cacheEntry != null);
+ newStorageIDs, cacheEntry != null);
success = true;
} finally {
writeUnlock();
@@ -5895,7 +5892,8 @@ public class FSNamesystem implements Nam
/** @see #updatePipeline(String, ExtendedBlock, ExtendedBlock, DatanodeID[], String[]) */
private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,
- ExtendedBlock newBlock, DatanodeID[] newNodes, boolean logRetryCache)
+ ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs,
+ boolean logRetryCache)
throws IOException {
assert hasWriteLock();
// check the validity of the block and lease holder name
@@ -5918,15 +5916,9 @@ public class FSNamesystem implements Nam
blockinfo.setGenerationStampAndVerifyReplicas(newBlock.getGenerationStamp());
// find the DatanodeDescriptor objects
- final DatanodeManager dm = getBlockManager().getDatanodeManager();
- DatanodeDescriptor[] descriptors = null;
- if (newNodes.length > 0) {
- descriptors = new DatanodeDescriptor[newNodes.length];
- for(int i = 0; i < newNodes.length; i++) {
- descriptors[i] = dm.getDatanode(newNodes[i]);
- }
- }
- blockinfo.setExpectedLocations(descriptors);
+ final DatanodeStorageInfo[] storages = blockManager.getDatanodeManager()
+ .getDatanodeStorageInfos(newNodes, newStorageIDs);
+ blockinfo.setExpectedLocations(storages);
String src = leaseManager.findPath(pendingFile);
dir.persistBlocks(src, pendingFile, logRetryCache);
@@ -6053,7 +6045,7 @@ public class FSNamesystem implements Nam
* @throws IOException
*/
Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
- String[] cookieTab) throws IOException {
+ String[] cookieTab) throws IOException {
checkSuperuserPrivilege();
checkOperation(OperationCategory.READ);
readLock();
@@ -7374,3 +7366,4 @@ public class FSNamesystem implements Nam
}
}
}
+
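A recurring pattern in this file is that per-datanode loops become storage-aware helper calls. saveAllocatedBlock() above is the simplest instance; the equivalent before/after, assuming incrementBlocksScheduled() just iterates the storages and bumps the counter on each storage's datanode:

    // Before: bump the scheduled-block counter on each chosen datanode.
    for (DatanodeDescriptor dn : targets) {
      dn.incBlocksScheduled();
    }
    // After: one static helper call over the chosen storages.
    DatanodeStorageInfo.incrementBlocksScheduled(targets);
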
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Thu Dec 12 07:17:51 2013
@@ -27,10 +27,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.*;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
@@ -234,7 +231,7 @@ public class INodeFile extends INodeWith
@Override // BlockCollection, the file should be under construction
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
- DatanodeDescriptor[] locations) throws IOException {
+ DatanodeStorageInfo[] locations) throws IOException {
Preconditions.checkState(isUnderConstruction(),
"file is no longer under construction");
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Thu Dec 12 07:17:51 2013
@@ -43,8 +43,8 @@ import org.apache.hadoop.fs.FileAlreadyE
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -65,21 +65,21 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
@@ -95,9 +95,9 @@ import org.apache.hadoop.hdfs.protocolPB
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -198,9 +198,9 @@ class NameNodeRpcServer implements Namen
NamenodeProtocolServerSideTranslatorPB namenodeProtocolXlator =
new NamenodeProtocolServerSideTranslatorPB(this);
- BlockingService NNPbService = NamenodeProtocolService
+ BlockingService NNPbService = NamenodeProtocolService
.newReflectiveBlockingService(namenodeProtocolXlator);
-
+
RefreshAuthorizationPolicyProtocolServerSideTranslatorPB refreshAuthPolicyXlator =
new RefreshAuthorizationPolicyProtocolServerSideTranslatorPB(this);
BlockingService refreshAuthService = RefreshAuthorizationPolicyProtocolService
@@ -220,7 +220,7 @@ class NameNodeRpcServer implements Namen
new HAServiceProtocolServerSideTranslatorPB(this);
BlockingService haPbService = HAServiceProtocolService
.newReflectiveBlockingService(haServiceProtocolXlator);
-
+
WritableRpcEngine.ensureInitialized();
InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
@@ -571,7 +571,8 @@ class NameNodeRpcServer implements Namen
@Override // ClientProtocol
public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
- final DatanodeInfo[] existings, final DatanodeInfo[] excludes,
+ final DatanodeInfo[] existings, final String[] existingStorageIDs,
+ final DatanodeInfo[] excludes,
final int numAdditionalNodes, final String clientName
) throws IOException {
if (LOG.isDebugEnabled()) {
@@ -592,8 +593,8 @@ class NameNodeRpcServer implements Namen
excludeSet.add(node);
}
}
- return namesystem.getAdditionalDatanode(src, blk,
- existings, excludeSet, numAdditionalNodes, clientName);
+ return namesystem.getAdditionalDatanode(src, blk, existings,
+ existingStorageIDs, excludeSet, numAdditionalNodes, clientName);
}
/**
* The client needs to give up on the block.
@@ -641,9 +642,9 @@ class NameNodeRpcServer implements Namen
@Override // ClientProtocol
public void updatePipeline(String clientName, ExtendedBlock oldBlock,
- ExtendedBlock newBlock, DatanodeID[] newNodes)
+ ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
throws IOException {
- namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes);
+ namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes, newStorageIDs);
}
@Override // DatanodeProtocol
@@ -965,24 +966,25 @@ class NameNodeRpcServer implements Namen
int xmitsInProgress, int xceiverCount,
int failedVolumes) throws IOException {
verifyRequest(nodeReg);
- return namesystem.handleHeartbeat(nodeReg, report[0].getCapacity(),
- report[0].getDfsUsed(), report[0].getRemaining(),
- report[0].getBlockPoolUsed(), dnCacheCapacity, dnCacheUsed,
- xceiverCount, xmitsInProgress, failedVolumes);
+ return namesystem.handleHeartbeat(nodeReg, report,
+ dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
+ failedVolumes);
}
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
String poolId, StorageBlockReport[] reports) throws IOException {
verifyRequest(nodeReg);
- BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
if(blockStateChangeLog.isDebugEnabled()) {
blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
- + "from " + nodeReg + " " + blist.getNumberOfBlocks()
- + " blocks");
+ + "from " + nodeReg + ", reports.length=" + reports.length);
+ }
+ final BlockManager bm = namesystem.getBlockManager();
+ for(StorageBlockReport r : reports) {
+ final BlockListAsLongs blocks = new BlockListAsLongs(r.getBlocks());
+ bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
}
- namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState())
return new FinalizeCommand(poolId);
return null;
@@ -1009,8 +1011,9 @@ class NameNodeRpcServer implements Namen
+"from "+nodeReg+" "+receivedAndDeletedBlocks.length
+" blocks.");
}
- namesystem.processIncrementalBlockReport(
- nodeReg, poolId, receivedAndDeletedBlocks[0].getBlocks());
+ for(StorageReceivedDeletedBlocks r : receivedAndDeletedBlocks) {
+ namesystem.processIncrementalBlockReport(nodeReg, poolId, r);
+ }
}
@Override // DatanodeProtocol
@@ -1280,3 +1283,4 @@ class NameNodeRpcServer implements Namen
return namesystem.listCachePools(prevKey != null ? prevKey : "");
}
}
+
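The block report path is the clearest example of the per-storage model: rather than flattening reports[0], the server now processes one report per storage. The committed loop, shown in isolation (debug-logging guard omitted):

    final BlockManager bm = namesystem.getBlockManager();
    for (StorageBlockReport r : reports) {
      // Each storage on the datanode reports its own block list.
      final BlockListAsLongs blocks = new BlockListAsLongs(r.getBlocks());
      bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
    }
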
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Thu Dec 12 07:17:51 2013
@@ -27,13 +27,7 @@ import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
@@ -50,6 +44,7 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -1113,13 +1108,12 @@ class NamenodeJspHelper {
}
doc.startTag("replicas");
- for (final Iterator<DatanodeDescriptor> it = blockManager != null ?
- blockManager.datanodeIterator(block) :
- Collections.<DatanodeDescriptor>emptyList().iterator();
- it.hasNext();) {
+ for(DatanodeStorageInfo storage : (blockManager != null ?
+ blockManager.getStorages(block) :
+ Collections.<DatanodeStorageInfo>emptyList())) {
doc.startTag("replica");
- DatanodeDescriptor dd = it.next();
+ DatanodeDescriptor dd = storage.getDatanodeDescriptor();
doc.startTag("host_name");
doc.pcdata(dd.getHostName());
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Thu Dec 12 07:17:51 2013
@@ -53,6 +53,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -61,6 +62,7 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -160,7 +162,7 @@ public class NamenodeWebHdfsMethods {
static DatanodeInfo chooseDatanode(final NameNode namenode,
final String path, final HttpOpParam.Op op, final long openOffset,
- final long blocksize, Configuration conf) throws IOException {
+ final long blocksize, final Configuration conf) throws IOException {
final BlockManager bm = namenode.getNamesystem().getBlockManager();
if (op == PutOpParam.Op.CREATE) {
@@ -168,11 +170,13 @@ public class NamenodeWebHdfsMethods {
final DatanodeDescriptor clientNode = bm.getDatanodeManager(
).getDatanodeByHost(getRemoteAddress());
if (clientNode != null) {
- final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy()
+ final DatanodeStorageInfo[] storages = bm.getBlockPlacementPolicy()
.chooseTarget(path, 1, clientNode,
- new ArrayList<DatanodeDescriptor>(), false, null, blocksize);
- if (datanodes.length > 0) {
- return datanodes[0];
+ new ArrayList<DatanodeStorageInfo>(), false, null, blocksize,
+ // TODO: get storage type from the file
+ StorageType.DEFAULT);
+ if (storages.length > 0) {
+ return storages[0].getDatanodeDescriptor();
}
}
} else if (op == GetOpParam.Op.OPEN
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java Thu Dec 12 07:17:51 2013
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
/****************************************************
* A BlockCommand is an instruction to a datanode
@@ -46,9 +47,10 @@ public class BlockCommand extends Datano
*/
public static final long NO_ACK = Long.MAX_VALUE;
- String poolId;
- Block blocks[];
- DatanodeInfo targets[][];
+ final String poolId;
+ final Block[] blocks;
+ final DatanodeInfo[][] targets;
+ final String[][] targetStorageIDs;
/**
* Create BlockCommand for transferring blocks to another datanode
@@ -60,21 +62,26 @@ public class BlockCommand extends Datano
this.poolId = poolId;
blocks = new Block[blocktargetlist.size()];
targets = new DatanodeInfo[blocks.length][];
+ targetStorageIDs = new String[blocks.length][];
+
for(int i = 0; i < blocks.length; i++) {
BlockTargetPair p = blocktargetlist.get(i);
blocks[i] = p.block;
- targets[i] = p.targets;
+ targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
+ targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
}
}
- private static final DatanodeInfo[][] EMPTY_TARGET = {};
+ private static final DatanodeInfo[][] EMPTY_TARGET_DATANODES = {};
+ private static final String[][] EMPTY_TARGET_STORAGEIDS = {};
/**
* Create BlockCommand for the given action
* @param blocks blocks related to the action
*/
public BlockCommand(int action, String poolId, Block blocks[]) {
- this(action, poolId, blocks, EMPTY_TARGET);
+ this(action, poolId, blocks, EMPTY_TARGET_DATANODES,
+ EMPTY_TARGET_STORAGEIDS);
}
/**
@@ -82,11 +89,12 @@ public class BlockCommand extends Datano
* @param blocks blocks related to the action
*/
public BlockCommand(int action, String poolId, Block[] blocks,
- DatanodeInfo[][] targets) {
+ DatanodeInfo[][] targets, String[][] targetStorageIDs) {
super(action);
this.poolId = poolId;
this.blocks = blocks;
this.targets = targets;
+ this.targetStorageIDs = targetStorageIDs;
}
public String getBlockPoolId() {
@@ -100,4 +108,8 @@ public class BlockCommand extends Datano
public DatanodeInfo[][] getTargets() {
return targets;
}
+
+ public String[][] getTargetStorageIDs() {
+ return targetStorageIDs;
+ }
}
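targetStorageIDs is parallel to targets in both dimensions: targetStorageIDs[i][j] names the storage to use on datanode targets[i][j] for blocks[i]. A hedged construction sketch (DatanodeProtocol.DNA_TRANSFER is the existing transfer action; the arrays are placeholders):

    BlockCommand cmd = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, poolId,
        blocks,             // Block[]
        targets,            // DatanodeInfo[][], one row per block
        targetStorageIDs);  // String[][], same shape as targets
    assert cmd.getTargets().length == cmd.getTargetStorageIDs().length;
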
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java Thu Dec 12 07:17:51 2013
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.protocol;
+import java.util.Arrays;
+
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -34,12 +36,14 @@ public class BlocksWithLocations {
@InterfaceAudience.Private
@InterfaceStability.Evolving
public static class BlockWithLocations {
- Block block;
- String storageIDs[];
+ final Block block;
+ final String[] datanodeUuids;
+ final String[] storageIDs;
/** constructor */
- public BlockWithLocations(Block block, String[] storageIDs) {
+ public BlockWithLocations(Block block, String[] datanodeUuids, String[] storageIDs) {
this.block = block;
+ this.datanodeUuids = datanodeUuids;
this.storageIDs = storageIDs;
}
@@ -48,10 +52,30 @@ public class BlocksWithLocations {
return block;
}
- /** get the block's locations */
+ /** get the block's datanode locations */
+ public String[] getDatanodeUuids() {
+ return datanodeUuids;
+ }
+
+ /** get the block's storage locations */
public String[] getStorageIDs() {
return storageIDs;
}
+
+ @Override
+ public String toString() {
+ final StringBuilder b = new StringBuilder();
+ b.append(block);
+ if (datanodeUuids.length == 0) {
+ return b.append("[]").toString();
+ }
+
+ b.append("[").append(storageIDs[0]).append('@').append(datanodeUuids[0]);
+ for(int i = 1; i < datanodeUuids.length; i++) {
+ b.append(", ").append(storageIDs[i]).append('@').append(datanodeUuids[i]);
+ }
+ return b.append("]").toString();
+ }
}
private BlockWithLocations[] blocks;
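For reference, toString() renders each replica as storageID@datanodeUuid; for a block with two replicas the output looks roughly like (IDs hypothetical):

    blk_1073741825_1001[s1@dn-uuid-a, s2@dn-uuid-b]
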
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Thu Dec 12 07:17:51 2013
@@ -82,7 +82,7 @@ public class DatanodeRegistration extend
public String toString() {
return getClass().getSimpleName()
+ "(" + getIpAddr()
- + ", storageID=" + getStorageID()
+ + ", datanodeUuid=" + getDatanodeUuid()
+ ", infoPort=" + getInfoPort()
+ ", ipcPort=" + getIpcPort()
+ ", storageInfo=" + storageInfo
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java Thu Dec 12 07:17:51 2013
@@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hdfs.server.protocol;
+import org.apache.hadoop.hdfs.StorageType;
+
+import java.util.UUID;
+
/**
* Class captures information of a storage in Datanode.
*/
@@ -29,18 +33,21 @@ public class DatanodeStorage {
private final String storageID;
private final State state;
+ private final StorageType storageType;
/**
- * Create a storage with {@link State#NORMAL}.
+ * Create a storage with {@link State#NORMAL} and {@link StorageType#DEFAULT}.
+ *
* @param storageID
*/
public DatanodeStorage(String storageID) {
- this(storageID, State.NORMAL);
+ this(storageID, State.NORMAL, StorageType.DEFAULT);
}
- public DatanodeStorage(String sid, State s) {
- storageID = sid;
- state = s;
+ public DatanodeStorage(String sid, State s, StorageType sm) {
+ this.storageID = sid;
+ this.state = s;
+ this.storageType = sm;
}
public String getStorageID() {
@@ -50,4 +57,37 @@ public class DatanodeStorage {
public State getState() {
return state;
}
+
+ public StorageType getStorageType() {
+ return storageType;
+ }
+
+ /**
+ * Generate new storage ID. The format of this string can be changed
+ * in the future without requiring that old storage IDs be updated.
+ *
+ * @return unique storage ID
+ */
+ public static String generateUuid() {
+ return "DS-" + UUID.randomUUID();
+ }
+
+ @Override
+ public boolean equals(Object other){
+ if (other == this) {
+ return true;
+ }
+
+ if ((other == null) ||
+ !(other instanceof DatanodeStorage)) {
+ return false;
+ }
+ DatanodeStorage otherStorage = (DatanodeStorage) other;
+ return otherStorage.getStorageID().compareTo(getStorageID()) == 0;
+ }
+
+ @Override
+ public int hashCode() {
+ return getStorageID().hashCode();
+ }
}
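A small usage sketch of the new factory method and the equals() contract (assumes the State and StorageType enums referenced above; the non-ID values are incidental):

    DatanodeStorage s1 = new DatanodeStorage(DatanodeStorage.generateUuid());
    DatanodeStorage s2 = new DatanodeStorage(s1.getStorageID(),
        DatanodeStorage.State.READ_ONLY, StorageType.DEFAULT);
    // equals()/hashCode() key on the storage ID alone, so two reports for the
    // same storage compare equal even if state or type differ.
    assert s1.equals(s2) && s1.hashCode() == s2.hashCode();
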
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java Thu Dec 12 07:17:51 2013
@@ -27,6 +27,8 @@ public class StorageReport {
private final long dfsUsed;
private final long remaining;
private final long blockPoolUsed;
+
+ public static final StorageReport[] EMPTY_ARRAY = {};
public StorageReport(String sid, boolean failed, long capacity, long dfsUsed,
long remaining, long bpUsed) {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Thu Dec 12 07:17:51 2013
@@ -126,7 +126,7 @@ class ImageLoaderCurrent implements Imag
new SimpleDateFormat("yyyy-MM-dd HH:mm");
private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
-24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
- -40, -41, -42, -43, -44, -45, -46, -47, -48 };
+ -40, -41, -42, -43, -44, -45, -46, -47, -48, -49 };
private int imageVersion = 0;
private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Thu Dec 12 07:17:51 2013
@@ -276,10 +276,11 @@ public class JsonUtil {
return null;
}
+ // TODO: Fix storageID
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("ipAddr", datanodeinfo.getIpAddr());
m.put("hostName", datanodeinfo.getHostName());
- m.put("storageID", datanodeinfo.getStorageID());
+ m.put("storageID", datanodeinfo.getDatanodeUuid());
m.put("xferPort", datanodeinfo.getXferPort());
m.put("infoPort", datanodeinfo.getInfoPort());
m.put("infoSecurePort", datanodeinfo.getInfoSecurePort());
@@ -329,6 +330,7 @@ public class JsonUtil {
return null;
}
+ // TODO: Fix storageID
return new DatanodeInfo(
(String)m.get("ipAddr"),
(String)m.get("hostName"),
@@ -412,7 +414,7 @@ public class JsonUtil {
(Object[])m.get("cachedLocations"));
final LocatedBlock locatedblock = new LocatedBlock(b, locations,
- startOffset, isCorrupt, cachedLocations);
+ null, null, startOffset, isCorrupt, cachedLocations);
locatedblock.setBlockToken(toBlockToken((Map<?, ?>)m.get("blockToken")));
return locatedblock;
}
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1513717-1550362
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Thu Dec 12 07:17:51 2013
@@ -141,6 +141,7 @@ message GetAdditionalDatanodeRequestProt
repeated DatanodeInfoProto excludes = 4;
required uint32 numAdditionalNodes = 5;
required string clientName = 6;
+ repeated string existingStorageUuids = 7;
}
message GetAdditionalDatanodeResponseProto {
@@ -545,6 +546,7 @@ message UpdatePipelineRequestProto {
required ExtendedBlockProto oldBlock = 2;
required ExtendedBlockProto newBlock = 3;
repeated DatanodeIDProto newNodes = 4;
+ repeated string storageIDs = 5;
}
message UpdatePipelineResponseProto { // void response
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Thu Dec 12 07:17:51 2013
@@ -53,8 +53,9 @@ message DatanodeStorageProto {
READ_ONLY = 1;
}
- required string storageID = 1; // Unique identifier for the storage
+ required string storageUuid = 1;
optional StorageState state = 2 [default = NORMAL];
+ optional StorageTypeProto storageType = 3 [default = DISK];
}
/**
@@ -106,10 +107,12 @@ message BlockCommandProto {
INVALIDATE = 2; // Invalidate blocks
SHUTDOWN = 3; // Shutdown the datanode
}
+
required Action action = 1;
required string blockPoolId = 2;
repeated BlockProto blocks = 3;
repeated DatanodeInfosProto targets = 4;
+ repeated StorageUuidsProto targetStorageUuids = 5;
}
/**
@@ -193,7 +196,7 @@ message HeartbeatRequestProto {
}
message StorageReportProto {
- required string storageID = 1;
+ required string storageUuid = 1;
optional bool failed = 2 [ default = false ];
optional uint64 capacity = 3 [ default = 0 ];
optional uint64 dfsUsed = 4 [ default = 0 ];
@@ -284,7 +287,7 @@ message ReceivedDeletedBlockInfoProto {
* List of blocks received and deleted for a storage.
*/
message StorageReceivedDeletedBlocksProto {
- required string storageID = 1;
+ required string storageUuid = 1;
repeated ReceivedDeletedBlockInfoProto blocks = 2;
}
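For illustration, constructing one of the renamed messages from Java (assumes the protoc-generated DatanodeProtocolProtos class in this module; accessor names follow standard protobuf-java conventions for the fields above):

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;

    StorageReportProto report = StorageReportProto.newBuilder()
        .setStorageUuid(storage.getStorageID()) // field was storageID before this rename
        .setCapacity(capacity)
        .setDfsUsed(dfsUsed)
        .setRemaining(remaining)
        .build();
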
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto Thu Dec 12 07:17:51 2013
@@ -65,7 +65,7 @@ message UpdateReplicaUnderRecoveryReques
* Response returns updated block information
*/
message UpdateReplicaUnderRecoveryResponseProto {
- required string storageID = 1; // ID of the storage that stores replica
+ optional string storageUuid = 1; // ID of the storage that stores replica
}
/**
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Thu Dec 12 07:17:51 2013
@@ -50,7 +50,10 @@ message ExtendedBlockProto {
message DatanodeIDProto {
required string ipAddr = 1; // IP address
required string hostName = 2; // hostname
- required string storageID = 3; // unique storage id
+ required string datanodeUuid = 3; // UUID assigned to the Datanode. For
+ // upgraded clusters this is the same
+ // as the original StorageID of the
+ // Datanode.
required uint32 xferPort = 4; // data streaming port
required uint32 infoPort = 5; // datanode http port
required uint32 ipcPort = 6; // ipc server port
@@ -116,6 +119,20 @@ message FsPermissionProto {
required uint32 perm = 1; // Actually a short - only 16bits used
}
+/**
+ * Types of recognized storage media.
+ */
+enum StorageTypeProto {
+ DISK = 1;
+ SSD = 2;
+}
+
+/**
+ * A list of storage IDs.
+ */
+message StorageUuidsProto {
+ repeated string storageUuids = 1;
+}
/**
* A LocatedBlock gives information about a block and its location.
@@ -130,6 +147,8 @@ message LocatedBlockProto {
required hadoop.common.TokenProto blockToken = 5;
repeated bool isCached = 6 [packed=true]; // if a location in locs is cached
+ repeated StorageTypeProto storageTypes = 7;
+ repeated string storageIDs = 8;
}
message DataEncryptionKeyProto {
@@ -336,7 +355,8 @@ message BlockProto {
*/
message BlockWithLocationsProto {
required BlockProto block = 1; // Block
- repeated string storageIDs = 2; // Datanodes with replicas of the block
+ repeated string datanodeUuids = 2; // Datanodes with replicas of the block
+ repeated string storageUuids = 3; // Storages with replicas of the block
}
/**
@@ -439,3 +459,4 @@ message SnapshotInfoProto {
// TODO: do we need access time?
}
+
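A hedged sketch of populating the new LocatedBlockProto fields from Java (assumes the protoc-generated HdfsProtos class; the required fields of the message are elided here):

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

    HdfsProtos.LocatedBlockProto.Builder lb = HdfsProtos.LocatedBlockProto.newBuilder();
    // The new repeated fields carry one entry per location in locs, same order.
    lb.addStorageTypes(HdfsProtos.StorageTypeProto.DISK)
      .addStorageIDs("DS-example-uuid");  // hypothetical storage ID
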
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1513717-1550362
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1513717-1550362
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1513717-1550362
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1513717-1550362
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Thu Dec 12 07:17:51 2013
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -46,6 +48,7 @@ import org.apache.hadoop.hdfs.server.dat
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
@@ -770,7 +773,8 @@ public class DFSTestUtil {
}
private static DatanodeID getDatanodeID(String ipAddr) {
- return new DatanodeID(ipAddr, "localhost", "",
+ return new DatanodeID(ipAddr, "localhost",
+ UUID.randomUUID().toString(),
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
@@ -782,7 +786,8 @@ public class DFSTestUtil {
}
public static DatanodeID getLocalDatanodeID(int port) {
- return new DatanodeID("127.0.0.1", "localhost", "",
+ return new DatanodeID("127.0.0.1", "localhost",
+ UUID.randomUUID().toString(),
port, port, port, port);
}
@@ -804,8 +809,9 @@ public class DFSTestUtil {
public static DatanodeInfo getDatanodeInfo(String ipAddr,
String host, int port) {
- return new DatanodeInfo(new DatanodeID(ipAddr, host, "",
- port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+ return new DatanodeInfo(new DatanodeID(ipAddr, host,
+ UUID.randomUUID().toString(), port,
+ DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
}
@@ -826,9 +832,43 @@ public class DFSTestUtil {
rackLocation);
}
+ public static DatanodeStorageInfo createDatanodeStorageInfo(
+ String storageID, String ip) {
+ return createDatanodeStorageInfo(storageID, ip, "defaultRack");
+ }
+ public static DatanodeStorageInfo[] createDatanodeStorageInfos(String[] racks) {
+ return createDatanodeStorageInfos(racks.length, racks);
+ }
+ public static DatanodeStorageInfo[] createDatanodeStorageInfos(int n, String... racks) {
+ DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
+ for(int i = storages.length; i > 0; ) {
+ final String storageID = "s" + i;
+ final String ip = i + "." + i + "." + i + "." + i;
+ i--;
+ final String rack = i < racks.length? racks[i]: "defaultRack";
+ storages[i] = createDatanodeStorageInfo(storageID, ip, rack);
+ }
+ return storages;
+ }
+ public static DatanodeStorageInfo createDatanodeStorageInfo(
+ String storageID, String ip, String rack) {
+ final DatanodeStorage storage = new DatanodeStorage(storageID);
+ final DatanodeDescriptor dn = BlockManagerTestUtil.getDatanodeDescriptor(ip, rack, storage);
+ return BlockManagerTestUtil.newDatanodeStorageInfo(dn, storage);
+ }
+ public static DatanodeDescriptor[] toDatanodeDescriptor(
+ DatanodeStorageInfo[] storages) {
+ DatanodeDescriptor[] datanodes = new DatanodeDescriptor[storages.length];
+ for(int i = 0; i < datanodes.length; i++) {
+ datanodes[i] = storages[i].getDatanodeDescriptor();
+ }
+ return datanodes;
+ }
+
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
int port, String rackLocation) {
- DatanodeID dnId = new DatanodeID(ipAddr, "host", "", port,
+ DatanodeID dnId = new DatanodeID(ipAddr, "host",
+ UUID.randomUUID().toString(), port,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
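[The new helpers above create one simulated storage per rack entry and expose both the storage view and the datanode view of the same nodes. A hypothetical test usage; the helper names are from this patch, the rack strings are made up:

    DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(
        new String[] {"/rackA", "/rackB", "/rackC"});
    DatanodeDescriptor[] datanodes = DFSTestUtil.toDatanodeDescriptor(storages);
    // storages[i].getDatanodeDescriptor() and datanodes[i] refer to the
    // same simulated node for every i
]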
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Dec 12 07:17:51 2013
@@ -56,6 +56,7 @@ import java.security.PrivilegedException
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.Random;
import org.apache.commons.logging.Log;
@@ -88,6 +89,7 @@ import org.apache.hadoop.hdfs.server.dat
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
@@ -125,6 +127,9 @@ public class MiniDFSCluster {
public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
= DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
+ // Changing this value may break some tests that assume it is 2.
+ public static final int DIRS_PER_DATANODE = 2;
+
static { DefaultMetricsSystem.setMiniClusterMode(true); }
/**
@@ -329,9 +334,10 @@ public class MiniDFSCluster {
builder.nameNodePort, builder.nameNodeHttpPort);
}
- LOG.info("starting cluster with " +
- builder.nnTopology.countNameNodes() + " namenodes.");
- nameNodes = new NameNodeInfo[builder.nnTopology.countNameNodes()];
+ final int numNameNodes = builder.nnTopology.countNameNodes();
+ LOG.info("starting cluster: numNameNodes=" + numNameNodes
+ + ", numDataNodes=" + builder.numDataNodes);
+ nameNodes = new NameNodeInfo[numNameNodes];
initMiniDFSCluster(builder.conf,
builder.numDataNodes,
@@ -1148,15 +1154,16 @@ public class MiniDFSCluster {
// Set up datanode address
setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) {
- File dir1 = getInstanceStorageDir(i, 0);
- File dir2 = getInstanceStorageDir(i, 1);
- dir1.mkdirs();
- dir2.mkdirs();
- if (!dir1.isDirectory() || !dir2.isDirectory()) {
- throw new IOException("Mkdirs failed to create directory for DataNode "
- + i + ": " + dir1 + " or " + dir2);
+ StringBuilder sb = new StringBuilder();
+ for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
+ File dir = getInstanceStorageDir(i, j);
+ dir.mkdirs();
+ if (!dir.isDirectory()) {
+ throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
+ }
+ sb.append((j > 0 ? "," : "") + fileAsURI(dir));
}
- String dirs = fileAsURI(dir1) + "," + fileAsURI(dir2);
+ String dirs = sb.toString();
dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
}
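[The hardcoded two-directory setup becomes a loop over DIRS_PER_DATANODE, joined into a single comma-separated URI list for DFS_DATANODE_DATA_DIR_KEY. A standalone sketch of the same join pattern; the paths are hypothetical:

    import java.io.File;

    public class JoinDataDirs {
      public static void main(String[] args) {
        final StringBuilder sb = new StringBuilder();
        for (int j = 0; j < 2; j++) {
          File dir = new File("/tmp/dn0/data" + (j + 1));  // made-up layout
          sb.append(j > 0 ? "," : "").append(dir.toURI());
        }
        System.out.println(sb);  // e.g. file:/tmp/dn0/data1,file:/tmp/dn0/data2
      }
    }
]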
@@ -1926,12 +1933,14 @@ public class MiniDFSCluster {
// Wait for expected number of datanodes to start
if (dnInfo.length != numDataNodes) {
+ LOG.info("dnInfo.length != numDataNodes");
return true;
}
// if one of the data nodes is not fully started, continue to wait
for (DataNodeProperties dn : dataNodes) {
if (!dn.datanode.isDatanodeFullyStarted()) {
+ LOG.info("!dn.datanode.isDatanodeFullyStarted()");
return true;
}
}
@@ -1940,6 +1949,7 @@ public class MiniDFSCluster {
// using (capacity == 0) as proxy.
for (DatanodeInfo dn : dnInfo) {
if (dn.getCapacity() == 0) {
+ LOG.info("dn.getCapacity() == 0");
return true;
}
}
@@ -1947,6 +1957,7 @@ public class MiniDFSCluster {
// If datanode dataset is not initialized then wait
for (DataNodeProperties dn : dataNodes) {
if (DataNodeTestUtils.getFSDataset(dn.datanode) == null) {
+ LOG.info("DataNodeTestUtils.getFSDataset(dn.datanode) == null");
return true;
}
}
@@ -1966,12 +1977,12 @@ public class MiniDFSCluster {
* @param dataNodeIndex - data node whose block report is desired - the index is the same as for getDataNodes()
* @return the block report for the specified data node
*/
- public Iterable<Block> getBlockReport(String bpid, int dataNodeIndex) {
+ public Map<DatanodeStorage, BlockListAsLongs> getBlockReport(String bpid, int dataNodeIndex) {
if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
throw new IndexOutOfBoundsException();
}
final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
- return DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid);
+ return DataNodeTestUtils.getFSDataset(dn).getBlockReports(bpid);
}
@@ -1980,11 +1991,12 @@ public class MiniDFSCluster {
* @return block reports from all data nodes
* BlockListAsLongs is indexed in the same order as the list of datanodes returned by getDataNodes()
*/
- public Iterable<Block>[] getAllBlockReports(String bpid) {
+ public List<Map<DatanodeStorage, BlockListAsLongs>> getAllBlockReports(String bpid) {
int numDataNodes = dataNodes.size();
- Iterable<Block>[] result = new BlockListAsLongs[numDataNodes];
+ final List<Map<DatanodeStorage, BlockListAsLongs>> result
+ = new ArrayList<Map<DatanodeStorage, BlockListAsLongs>>(numDataNodes);
for (int i = 0; i < numDataNodes; ++i) {
- result[i] = getBlockReport(bpid, i);
+ result.add(getBlockReport(bpid, i));
}
return result;
}
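[Block reports are now grouped per storage: each datanode contributes a Map from DatanodeStorage to its BlockListAsLongs, rather than one flat Iterable<Block>. A sketch of walking the new shape, mirroring the updated callers below:

    for (Map<DatanodeStorage, BlockListAsLongs> reports
        : cluster.getAllBlockReports(bpid)) {
      for (Map.Entry<DatanodeStorage, BlockListAsLongs> e
          : reports.entrySet()) {
        for (Block b : e.getValue()) {
          // b is a replica held by storage e.getKey() on this datanode
        }
      }
    }
]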
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Thu Dec 12 07:17:51 2013
@@ -23,6 +23,7 @@ import java.security.PrivilegedException
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import java.util.Map;
import java.util.Random;
import java.util.Scanner;
import java.util.concurrent.atomic.AtomicInteger;
@@ -35,8 +36,10 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
@@ -1392,11 +1395,14 @@ public class TestDFSShell {
List<File> files = new ArrayList<File>();
List<DataNode> datanodes = cluster.getDataNodes();
String poolId = cluster.getNamesystem().getBlockPoolId();
- Iterable<Block>[] blocks = cluster.getAllBlockReports(poolId);
- for(int i = 0; i < blocks.length; i++) {
+ List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
+ for(int i = 0; i < blocks.size(); i++) {
DataNode dn = datanodes.get(i);
- for(Block b : blocks[i]) {
- files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
+ Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
+ for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
+ for(Block b : e.getValue()) {
+ files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
+ }
}
}
return files;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java Thu Dec 12 07:17:51 2013
@@ -237,7 +237,7 @@ public class TestDFSStartupVersions {
* this iteration's version 3-tuple
* </pre>
*/
- @Test
+ @Test (timeout=300000)
public void testVersions() throws Exception {
UpgradeUtilities.initialize();
Configuration conf = UpgradeUtilities.initializeStorageStateConf(1,
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java Thu Dec 12 07:17:51 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import java.io.File;
@@ -82,7 +83,8 @@ public class TestDatanodeConfig {
DataNode dn = null;
try {
dn = DataNode.createDataNode(new String[]{}, conf);
- } catch(IOException e) {
+ fail();
+ } catch(Exception e) {
// expecting exception here
}
if(dn != null)
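[The test previously passed silently if createDataNode returned without throwing; the added fail() makes that an explicit failure, and catching Exception widens the net beyond IOException. The idiom spelled out; the failure message is illustrative and the shutdown mirrors the test's cleanup:

    DataNode dn = null;
    try {
      dn = DataNode.createDataNode(new String[]{}, conf);
      fail("DataNode creation was expected to throw");
    } catch (Exception e) {
      // expected: a misconfigured data dir must prevent startup
    }
    if (dn != null) {
      dn.shutdown();
    }
]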
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java Thu Dec 12 07:17:51 2013
@@ -173,7 +173,7 @@ public class TestDatanodeRegistration {
// register a datanode
DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
- "fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT,
+ "fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT,
DN_IPC_PORT);
long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
.getCTime();
@@ -190,7 +190,7 @@ public class TestDatanodeRegistration {
// register the same datanode again with a different storage ID
dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
- "changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT,
+ "changed-fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT,
DN_INFO_SECURE_PORT, DN_IPC_PORT);
dnReg = new DatanodeRegistration(dnId,
mockStorageInfo, null, VersionInfo.getVersion());
@@ -226,7 +226,7 @@ public class TestDatanodeRegistration {
DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
doReturn(123).when(mockDnReg).getXferPort();
- doReturn("fake-storage-id").when(mockDnReg).getStorageID();
+ doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
// Should succeed when software versions are the same.
@@ -273,7 +273,7 @@ public class TestDatanodeRegistration {
DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
- doReturn("fake-storage-id").when(mockDnReg).getStorageID();
+ doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
// Should succeed when software versions are the same and CTimes are the
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java Thu Dec 12 07:17:51 2013
@@ -158,7 +158,7 @@ public class TestFileCorruption {
ns.writeLock();
try {
cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
- blk, new DatanodeInfo(dnR), "TEST");
+ blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
} finally {
ns.writeUnlock();
}
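[findAndMarkBlockAsCorrupt now also takes the ID of the storage that held the corrupt replica, so the namenode can invalidate the replica on the right storage rather than just the right datanode; "STORAGE_ID" above is merely the test's placeholder string. The call shape, hedged:

    // Sketch: the reason and storage ID strings are placeholders.
    cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
        blk, new DatanodeInfo(dnR), "corruption reason", "storage-uuid");
]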
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java Thu Dec 12 07:17:51 2013
@@ -22,20 +22,21 @@ import static org.junit.Assert.assertEqu
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.Time;
import org.junit.Test;
@@ -136,7 +137,7 @@ public class TestInjectionForSimulatedSt
DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize,
filesize, blockSize, (short) numDataNodes, 0L);
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
- Iterable<Block>[] blocksList = cluster.getAllBlockReports(bpid);
+ List<Map<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
cluster.shutdown();
cluster = null;
@@ -157,9 +158,11 @@ public class TestInjectionForSimulatedSt
.build();
cluster.waitActive();
Set<Block> uniqueBlocks = new HashSet<Block>();
- for (int i=0; i<blocksList.length; ++i) {
- for (Block b : blocksList[i]) {
- uniqueBlocks.add(new Block(b));
+ for(Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
+ for(BlockListAsLongs blockList : map.values()) {
+ for(Block b : blockList) {
+ uniqueBlocks.add(new Block(b));
+ }
}
}
// Insert all the blocks in the first data node
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java Thu Dec 12 07:17:51 2013
@@ -151,7 +151,7 @@ public class TestPeerCache {
public void testAddAndRetrieve() throws Exception {
PeerCache cache = new PeerCache(3, 100000);
DatanodeID dnId = new DatanodeID("192.168.0.1",
- "fakehostname", "fake_storage_id",
+ "fakehostname", "fake_datanode_id",
100, 101, 102, 103);
FakePeer peer = new FakePeer(dnId, false);
cache.put(dnId, peer);
@@ -171,7 +171,7 @@ public class TestPeerCache {
FakePeer peers[] = new FakePeer[CAPACITY];
for (int i = 0; i < CAPACITY; ++i) {
dnIds[i] = new DatanodeID("192.168.0.1",
- "fakehostname_" + i, "fake_storage_id",
+ "fakehostname_" + i, "fake_datanode_id",
100, 101, 102, 103);
peers[i] = new FakePeer(dnIds[i], false);
}
@@ -202,7 +202,7 @@ public class TestPeerCache {
FakePeer peers[] = new FakePeer[CAPACITY + 1];
for (int i = 0; i < dnIds.length; ++i) {
dnIds[i] = new DatanodeID("192.168.0.1",
- "fakehostname_" + i, "fake_storage_id_" + i,
+ "fakehostname_" + i, "fake_datanode_id_" + i,
100, 101, 102, 103);
peers[i] = new FakePeer(dnIds[i], false);
}
@@ -233,7 +233,7 @@ public class TestPeerCache {
final int CAPACITY = 3;
PeerCache cache = new PeerCache(CAPACITY, 100000);
DatanodeID dnId = new DatanodeID("192.168.0.1",
- "fakehostname", "fake_storage_id",
+ "fakehostname", "fake_datanode_id",
100, 101, 102, 103);
HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
for (int i = 0; i < CAPACITY; ++i) {
@@ -258,7 +258,7 @@ public class TestPeerCache {
final int CAPACITY = 3;
PeerCache cache = new PeerCache(CAPACITY, 100000);
DatanodeID dnId = new DatanodeID("192.168.0.1",
- "fakehostname", "fake_storage_id",
+ "fakehostname", "fake_datanode_id",
100, 101, 102, 103);
HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
for (int i = 0; i < CAPACITY; ++i) {
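[The renamed literals in this file track a semantic change rather than a behavioral one: the third DatanodeID constructor argument is now the datanode UUID, not a storage ID. A minimal sketch using the test's fake values:

    // Third argument is the datanode UUID (formerly the storage ID).
    DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
        "fake_datanode_id", 100, 101, 102, 103);
]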
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1550363&r1=1550362&r2=1550363&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java Thu Dec 12 07:17:51 2013
@@ -453,12 +453,14 @@ public class UpgradeUtilities {
*/
public static void createDataNodeVersionFile(File[] parent,
StorageInfo version, String bpid, String bpidToWrite) throws IOException {
- DataStorage storage = new DataStorage(version, "doNotCare");
+ DataStorage storage = new DataStorage(version);
+ storage.setDatanodeUuid("FixedDatanodeUuid");
File[] versionFiles = new File[parent.length];
for (int i = 0; i < parent.length; i++) {
File versionFile = new File(parent[i], "VERSION");
StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
+ storage.createStorageID(sd);
storage.writeProperties(versionFile, sd);
versionFiles[i] = versionFile;
File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);