Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2011/02/25 23:42:40 UTC
svn commit: r1074727 [2/2] - in /hadoop/hdfs/branches/HDFS-1052: ./
src/java/org/apache/hadoop/hdfs/protocol/
src/java/org/apache/hadoop/hdfs/security/token/block/
src/java/org/apache/hadoop/hdfs/server/balancer/
src/java/org/apache/hadoop/hdfs/server/...
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java Fri Feb 25 22:42:39 2011
@@ -140,24 +140,18 @@ public class TestInjectionForSimulatedSt
//first time format
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
+ String bpid = cluster.getNamesystem().getPoolId();
DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()),
conf);
writeFile(cluster.getFileSystem(), testPath, numDataNodes);
-
-
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
-
-
- Iterable<Block>[] blocksList = cluster.getAllBlockReports();
-
+ Iterable<Block>[] blocksList = cluster.getAllBlockReports(bpid);
cluster.shutdown();
cluster = null;
-
-
/* Start the MiniDFSCluster with more datanodes since once a writeBlock
* to a datanode node fails, same block can not be written to it
* immediately. In our case some replication attempts will fail.
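For readers following the federation change: the test now asks the cluster for block reports scoped to a single block pool. A minimal sketch of the updated flow, assuming the getPoolId() and getAllBlockReports(String) methods used above and the imports already present in the test (variable names are illustrative):

    // Each datanode reports the blocks it holds for one specific block pool.
    String bpid = cluster.getNamesystem().getPoolId();
    Iterable<Block>[] reports = cluster.getAllBlockReports(bpid);
    for (Iterable<Block> perDataNode : reports) {
      for (Block b : perDataNode) {
        // b belongs to the pool identified by bpid
      }
    }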
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java Fri Feb 25 22:42:39 2011
@@ -103,8 +103,9 @@ public class TestPipelines {
List<LocatedBlock> lb = cluster.getNameNode().getBlockLocations(
filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
+ String bpid = cluster.getNamesystem().getPoolId();
Replica r = DataNodeAdapter.fetchReplicaInfo(cluster.getDataNodes().get(0),
- lb.get(0).getBlock().getBlockId());
+ bpid, lb.get(0).getBlock().getBlockId());
assertTrue("Replica shouldn'e be null", r != null);
assertEquals(
"Should be RBW replica after sequence of calls append()/write()/hflush()",
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java Fri Feb 25 22:42:39 2011
@@ -28,11 +28,13 @@ public class DataNodeAdapter {
/**
* Fetch a copy of ReplicaInfo from a datanode by block id
* @param dn datanode to retrieve a replicainfo object from
+ * @param bpid Block pool Id
* @param blkId id of the replica's block
* @return copy of ReplicaInfo object @link{FSDataset#fetchReplicaInfo}
*/
public static ReplicaInfo fetchReplicaInfo (final DataNode dn,
+ final String bpid,
final long blkId) {
- return ((FSDataset)dn.data).fetchReplicaInfo(blkId);
+ return ((FSDataset)dn.data).fetchReplicaInfo(bpid, blkId);
}
}
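The adapter now requires callers to say which block pool a block id belongs to, since under federation a block id is only unique within its pool. A sketch of a caller, assuming a running MiniDFSCluster and a LocatedBlock named lb as in the test above (the variable names are illustrative, not part of this patch):

    String bpid = cluster.getNamesystem().getPoolId();
    DataNode dn = cluster.getDataNodes().get(0);
    Replica r = DataNodeAdapter.fetchReplicaInfo(dn, bpid, lb.getBlock().getBlockId());
    if (r != null) {
      // e.g. assert the replica is in the expected state
      System.out.println("replica state: " + r.getState());
    }

This is the same reason every lookup touched by this commit gains a bpid argument: a bare block id no longer identifies a replica once a datanode serves several namespaces.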
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Fri Feb 25 22:42:39 2011
@@ -23,6 +23,7 @@ import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
+import java.util.Map;
import java.util.Random;
import javax.management.NotCompliantMBeanException;
@@ -295,7 +296,7 @@ public class SimulatedFSDataset impleme
}
}
- private HashMap<Block, BInfo> blockMap = null;
+ private Map<String, Map<Block, BInfo>> blockMap = null;
private SimulatedStorage storage = null;
private String storageId;
@@ -320,10 +321,10 @@ public class SimulatedFSDataset impleme
//DataNode.LOG.info("Starting Simulated storage; Capacity = " + getCapacity() +
// "Used = " + getDfsUsed() + "Free =" + getRemaining());
- blockMap = new HashMap<Block,BInfo>();
+ blockMap = new HashMap<String, Map<Block,BInfo>>();
}
- public synchronized void injectBlocks(String poolId,
+ public synchronized void injectBlocks(String bpid,
Iterable<Block> injectBlocks) throws IOException {
ExtendedBlock blk = new ExtendedBlock();
if (injectBlocks != null) {
@@ -333,31 +334,41 @@ public class SimulatedFSDataset impleme
if (b == null) {
throw new NullPointerException("Null blocks in block list");
}
- blk.set(poolId, b);
+ blk.set(bpid, b);
if (isValidBlock(blk)) {
throw new IOException("Block already exists in block list");
}
}
- HashMap<Block, BInfo> oldBlockMap = blockMap;
- blockMap = new HashMap<Block,BInfo>(
- numInjectedBlocks + oldBlockMap.size());
- blockMap.putAll(oldBlockMap);
+ Map<Block, BInfo> map = blockMap.get(bpid);
+ if (map == null) {
+ map = new HashMap<Block, BInfo>();
+ blockMap.put(bpid, map);
+ }
+
for (Block b: injectBlocks) {
- BInfo binfo = new BInfo(b, false);
- blockMap.put(binfo.theBlock, binfo);
+ BInfo binfo = new BInfo(b, false);
+ map.put(binfo.theBlock, binfo);
}
}
}
+
+ /** Get a map for a given block pool Id */
+ private Map<Block, BInfo> getMap(String bpid) throws IOException {
+ final Map<Block, BInfo> map = blockMap.get(bpid);
+ if (map == null) {
+ throw new IOException("Non existent blockpool " + bpid);
+ }
+ return map;
+ }
@Override // FSDatasetInterface
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
- // TODO:FEDERATION use ExtendedBlock
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("Finalizing a non existing block " + b);
}
binfo.finalizeBlock(b.getNumBytes());
-
}
@Override // FSDatasetInterface
@@ -368,16 +379,21 @@ public class SimulatedFSDataset impleme
}
@Override
- public synchronized BlockListAsLongs getBlockReport() {
- Block[] blockTable = new Block[blockMap.size()];
- int count = 0;
- for (BInfo b : blockMap.values()) {
- if (b.isFinalized()) {
- blockTable[count++] = b.theBlock;
+ public synchronized BlockListAsLongs getBlockReport(String bpid) {
+ final Map<Block, BInfo> map = blockMap.get(bpid);
+ Block[] blockTable = new Block[map.size()];
+ if (map != null) {
+ int count = 0;
+ for (BInfo b : map.values()) {
+ if (b.isFinalized()) {
+ blockTable[count++] = b.theBlock;
+ }
}
- }
- if (count != blockTable.length) {
- blockTable = Arrays.copyOf(blockTable, count);
+ if (count != blockTable.length) {
+ blockTable = Arrays.copyOf(blockTable, count);
+ }
+ } else {
+ blockTable = new Block[0];
}
return new BlockListAsLongs(
new ArrayList<Block>(Arrays.asList(blockTable)), null);
@@ -397,7 +413,8 @@ public class SimulatedFSDataset impleme
@Override // FSDatasetInterface
public synchronized long getLength(ExtendedBlock b) throws IOException {
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("Finalizing a non existing block " + b);
}
@@ -406,35 +423,40 @@ public class SimulatedFSDataset impleme
@Override
@Deprecated
- public Replica getReplica(long blockId) {
- return blockMap.get(new Block(blockId));
+ public Replica getReplica(String bpid, long blockId) {
+ final Map<Block, BInfo> map = blockMap.get(bpid);
+ if (map != null) {
+ return map.get(new Block(blockId));
+ }
+ return null;
}
@Override // FSDatasetInterface
- public Block getStoredBlock(String poolId, long blkid) throws IOException {
- ExtendedBlock b = new ExtendedBlock(poolId, blkid);
- // TODO:FEDERATION use ExtendedBlock
- BInfo binfo = blockMap.get(b.getLocalBlock());
- if (binfo == null) {
- return null;
+ public Block getStoredBlock(String bpid, long blkid) throws IOException {
+ final Map<Block, BInfo> map = blockMap.get(bpid);
+ if (map != null) {
+ BInfo binfo = map.get(new Block(blkid));
+ if (binfo == null) {
+ return null;
+ }
+ return new Block(blkid, binfo.getGenerationStamp(), binfo.getNumBytes());
}
- b.setGenerationStamp(binfo.getGenerationStamp());
- b.setNumBytes(binfo.getNumBytes());
- return b.getLocalBlock();
+ return null;
}
@Override // FSDatasetInterface
- public synchronized void invalidate(String poolId, Block[] invalidBlks)
+ public synchronized void invalidate(String bpid, Block[] invalidBlks)
throws IOException {
boolean error = false;
if (invalidBlks == null) {
return;
}
+ final Map<Block, BInfo> map = getMap(bpid);
for (Block b: invalidBlks) {
if (b == null) {
continue;
}
- BInfo binfo = blockMap.get(b);
+ BInfo binfo = map.get(b);
if (binfo == null) {
error = true;
DataNode.LOG.warn("Invalidate: Missing block");
@@ -443,16 +465,18 @@ public class SimulatedFSDataset impleme
storage.free(binfo.getNumBytes());
blockMap.remove(b);
}
- if (error) {
- throw new IOException("Invalidate: Missing blocks.");
- }
+ if (error) {
+ throw new IOException("Invalidate: Missing blocks.");
+ }
}
@Override // FSDatasetInterface
public synchronized boolean isValidBlock(ExtendedBlock b) {
- // return (blockMap.containsKey(b));
- // TODO:FEDERATION use ExtendedBlock
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = blockMap.get(b.getPoolId());
+ if (map == null) {
+ return false;
+ }
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
return false;
}
@@ -461,8 +485,11 @@ public class SimulatedFSDataset impleme
/* check if a block is created but not finalized */
private synchronized boolean isBeingWritten(ExtendedBlock b) {
- // TODO:FEDERATION use ExtendedBlock
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = blockMap.get(b.getPoolId());
+ if (map == null) {
+ return false;
+ }
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
return false;
}
@@ -476,7 +503,8 @@ public class SimulatedFSDataset impleme
@Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface append(ExtendedBlock b,
long newGS, long expectedBlockLen) throws IOException {
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null || !binfo.isFinalized()) {
throw new ReplicaNotFoundException("Block " + b
+ " is not valid, and cannot be appended to.");
@@ -488,7 +516,8 @@ public class SimulatedFSDataset impleme
@Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface recoverAppend(ExtendedBlock b,
long newGS, long expectedBlockLen) throws IOException {
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new ReplicaNotFoundException("Block " + b
+ " is not valid, and cannot be appended to.");
@@ -496,16 +525,17 @@ public class SimulatedFSDataset impleme
if (binfo.isFinalized()) {
binfo.unfinalizeBlock();
}
- blockMap.remove(b);
+ map.remove(b);
binfo.theBlock.setGenerationStamp(newGS);
- blockMap.put(binfo.theBlock, binfo);
+ map.put(binfo.theBlock, binfo);
return binfo;
}
@Override // FSDatasetInterface
public void recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen)
throws IOException {
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new ReplicaNotFoundException("Block " + b
+ " is not valid, and cannot be appended to.");
@@ -513,15 +543,16 @@ public class SimulatedFSDataset impleme
if (!binfo.isFinalized()) {
binfo.finalizeBlock(binfo.getNumBytes());
}
- blockMap.remove(b.getLocalBlock());
+ map.remove(b.getLocalBlock());
binfo.theBlock.setGenerationStamp(newGS);
- blockMap.put(binfo.theBlock, binfo);
+ map.put(binfo.theBlock, binfo);
}
@Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface recoverRbw(ExtendedBlock b,
long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if ( binfo == null) {
throw new ReplicaNotFoundException("Block " + b
+ " does not exist, and cannot be appended to.");
@@ -530,9 +561,9 @@ public class SimulatedFSDataset impleme
throw new ReplicaAlreadyExistsException("Block " + b
+ " is valid, and cannot be written to.");
}
- blockMap.remove(b);
+ map.remove(b);
binfo.theBlock.setGenerationStamp(newGS);
- blockMap.put(binfo.theBlock, binfo);
+ map.put(binfo.theBlock, binfo);
return binfo;
}
@@ -553,22 +584,21 @@ public class SimulatedFSDataset impleme
throw new ReplicaAlreadyExistsException("Block " + b +
" is being written, and cannot be written to.");
}
- // TODO:FEDERATION use ExtendedBlock
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
BInfo binfo = new BInfo(b.getLocalBlock(), true);
- blockMap.put(binfo.theBlock, binfo);
+ map.put(binfo.theBlock, binfo);
return binfo;
}
@Override // FSDatasetInterface
public synchronized InputStream getBlockInputStream(ExtendedBlock b)
throws IOException {
- // TODO:FEDERATION use ExtendedBlock
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
}
- //DataNode.LOG.info("Opening block(" + b.blkid + ") of length " + b.len);
return binfo.getIStream();
}
@@ -596,8 +626,8 @@ public class SimulatedFSDataset impleme
*/
private synchronized InputStream getMetaDataInStream(ExtendedBlock b)
throws IOException {
- // TODO:FEDERATION use ExtendedBlock
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
}
@@ -611,8 +641,8 @@ public class SimulatedFSDataset impleme
@Override // FSDatasetInterface
public synchronized long getMetaDataLength(ExtendedBlock b)
throws IOException {
- // TODO:FEDERATION use ExtendedBlock
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
}
@@ -818,7 +848,8 @@ public class SimulatedFSDataset impleme
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
ExtendedBlock b = rBlock.getBlock();
- BInfo binfo = blockMap.get(b.getLocalBlock());
+ final Map<Block, BInfo> map = getMap(b.getPoolId());
+ BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
}
@@ -840,4 +871,10 @@ public class SimulatedFSDataset impleme
public long getReplicaVisibleLength(ExtendedBlock block) throws IOException {
return block.getNumBytes();
}
+
+ @Override // FSDatasetInterface
+ public void addBlockPool(String bpid, Configuration conf) {
+ Map<Block, BInfo> map = new HashMap<Block, BInfo>();
+ blockMap.put(bpid, map);
+ }
}
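The structural change in this file is the switch from a single HashMap&lt;Block, BInfo&gt; to a map keyed first by block pool id, with addBlockPool() creating the inner map and the private getMap() helper rejecting unknown pools. A self-contained sketch of that bookkeeping pattern in plain Java (PoolMapSketch and its type parameters are illustrative stand-ins, not HDFS classes):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    class PoolMapSketch<K, V> {
      private final Map<String, Map<K, V>> byPool = new HashMap<String, Map<K, V>>();

      // analogous to addBlockPool(bpid, conf): create the per-pool map up front
      void addPool(String bpid) {
        byPool.put(bpid, new HashMap<K, V>());
      }

      // analogous to the private getMap(bpid) helper: fail loudly on an unknown pool
      Map<K, V> getPool(String bpid) throws IOException {
        Map<K, V> m = byPool.get(bpid);
        if (m == null) {
          throw new IOException("Non existent blockpool " + bpid);
        }
        return m;
      }
    }

Methods such as finalizeBlock(), append() and recoverRbw() then resolve the inner map once per call and operate on it, instead of touching the former flat blockMap directly.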
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Fri Feb 25 22:42:39 2011
@@ -490,15 +490,15 @@ public class TestBlockReport {
LOG.debug("Total number of DNs " + cluster.getDataNodes().size());
}
// Look about specified DN for the replica of the block from 1st DN
- Replica r;
- r = ((FSDataset) cluster.getDataNodes().get(DN_N1).getFSDataset()).
- fetchReplicaInfo(bl.getBlockId());
+ String bpid = cluster.getNamesystem().getPoolId();
+ Replica r = ((FSDataset) cluster.getDataNodes().get(DN_N1).getFSDataset()).
+ fetchReplicaInfo(bpid, bl.getBlockId());
long start = System.currentTimeMillis();
int count = 0;
while (r == null) {
waitTil(50);
r = ((FSDataset) cluster.getDataNodes().get(DN_N1).getFSDataset()).
- fetchReplicaInfo(bl.getBlockId());
+ fetchReplicaInfo(bpid, bl.getBlockId());
long waiting_period = System.currentTimeMillis() - start;
if (count++ % 10 == 0)
if(LOG.isDebugEnabled()) {
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Fri Feb 25 22:42:39 2011
@@ -118,9 +118,9 @@ public class TestDataNodeVolumeFailure e
// make sure a block report is sent
DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
- long[] bReport = dn.getFSDataset().getBlockReport().getBlockListAsLongs();
- String poolId = cluster.getNamesystem().getPoolId();
- cluster.getNameNode().blockReport(dn.dnRegistration, poolId, bReport);
+ String bpid = cluster.getNamesystem().getPoolId();
+ long[] bReport = dn.getFSDataset().getBlockReport(bpid).getBlockListAsLongs();
+ cluster.getNameNode().blockReport(dn.dnRegistration, bpid, bReport);
// verify number of blocks and files...
verify(filename, filesize);
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java Fri Feb 25 22:42:39 2011
@@ -112,16 +112,16 @@ public class TestDatanodeRestart {
dn = cluster.getDataNodes().get(0);
// check volumeMap: one rwr replica
+ String bpid = cluster.getNamesystem().getPoolId();
ReplicasMap replicas = ((FSDataset)(dn.data)).volumeMap;
- Assert.assertEquals(1, replicas.size());
- ReplicaInfo replica = replicas.replicas().iterator().next();
+ Assert.assertEquals(1, replicas.size(bpid));
+ ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
Assert.assertEquals(ReplicaState.RWR, replica.getState());
if (isCorrupt) {
Assert.assertEquals((fileLen-1)/512*512, replica.getNumBytes());
} else {
Assert.assertEquals(fileLen, replica.getNumBytes());
}
- String bpid = cluster.getNamesystem().getPoolId();
dn.data.invalidate(bpid, new Block[]{replica});
} finally {
IOUtils.closeStream(out);
@@ -147,9 +147,10 @@ public class TestDatanodeRestart {
DFSTestUtil.createFile(fs, fileName, 1, (short)1, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short)1);
}
+ String bpid = cluster.getNamesystem().getPoolId();
DataNode dn = cluster.getDataNodes().get(0);
Iterator<ReplicaInfo> replicasItor =
- ((FSDataset)dn.data).volumeMap.replicas().iterator();
+ ((FSDataset)dn.data).volumeMap.replicas(bpid).iterator();
ReplicaInfo replica = replicasItor.next();
createUnlinkTmpFile(replica, true, true); // rename block file
createUnlinkTmpFile(replica, false, true); // rename meta file
@@ -166,7 +167,7 @@ public class TestDatanodeRestart {
// check volumeMap: 4 finalized replica
Collection<ReplicaInfo> replicas =
- ((FSDataset)(dn.data)).volumeMap.replicas();
+ ((FSDataset)(dn.data)).volumeMap.replicas(bpid);
Assert.assertEquals(4, replicas.size());
replicasItor = replicas.iterator();
while (replicasItor.hasNext()) {
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Fri Feb 25 22:42:39 2011
@@ -48,6 +48,7 @@ public class TestDirectoryScanner extend
private static final int DEFAULT_GEN_STAMP = 9999;
private MiniDFSCluster cluster;
+ private String bpid;
private FSDataset fds = null;
private DirectoryScanner scanner = null;
private Random rand = new Random();
@@ -69,7 +70,7 @@ public class TestDirectoryScanner extend
/** Truncate a block file */
private long truncateBlockFile() throws IOException {
synchronized (fds) {
- for (ReplicaInfo b : fds.volumeMap.replicas()) {
+ for (ReplicaInfo b : fds.volumeMap.replicas(bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
// Truncate a block file that has a corresponding metadata file
@@ -88,7 +89,7 @@ public class TestDirectoryScanner extend
/** Delete a block file */
private long deleteBlockFile() {
synchronized(fds) {
- for (ReplicaInfo b : fds.volumeMap.replicas()) {
+ for (ReplicaInfo b : fds.volumeMap.replicas(bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
// Delete a block file that has corresponding metadata file
@@ -104,7 +105,7 @@ public class TestDirectoryScanner extend
/** Delete block meta file */
private long deleteMetaFile() {
synchronized(fds) {
- for (ReplicaInfo b : fds.volumeMap.replicas()) {
+ for (ReplicaInfo b : fds.volumeMap.replicas(bpid)) {
File file = b.getMetaFile();
// Delete a metadata file
if (file.exists() && file.delete()) {
@@ -121,7 +122,7 @@ public class TestDirectoryScanner extend
long id = rand.nextLong();
while (true) {
id = rand.nextLong();
- if (fds.fetchReplicaInfo(id) == null) {
+ if (fds.fetchReplicaInfo(bpid, id) == null) {
break;
}
}
@@ -215,6 +216,7 @@ public class TestDirectoryScanner extend
cluster = new MiniDFSCluster.Builder(CONF).build();
try {
cluster.waitActive();
+ bpid = cluster.getNamesystem().getPoolId();
fds = (FSDataset) cluster.getDataNodes().get(0).getFSDataset();
CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
parallelism);
@@ -325,12 +327,12 @@ public class TestDirectoryScanner extend
private void verifyAddition(long blockId, long genStamp, long size) {
final ReplicaInfo replicainfo;
- replicainfo = fds.fetchReplicaInfo(blockId);
+ replicainfo = fds.fetchReplicaInfo(bpid, blockId);
assertNotNull(replicainfo);
// Added block has the same file as the one created by the test
File file = new File(getBlockFile(blockId));
- assertEquals(file.getName(), fds.findBlockFile(blockId).getName());
+ assertEquals(file.getName(), fds.findBlockFile(bpid, blockId).getName());
// Generation stamp is same as that of created file
assertEquals(genStamp, replicainfo.getGenerationStamp());
@@ -341,12 +343,12 @@ public class TestDirectoryScanner extend
private void verifyDeletion(long blockId) {
// Ensure block does not exist in memory
- assertNull(fds.fetchReplicaInfo(blockId));
+ assertNull(fds.fetchReplicaInfo(bpid, blockId));
}
private void verifyGenStamp(long blockId, long genStamp) {
final ReplicaInfo memBlock;
- memBlock = fds.fetchReplicaInfo(blockId);
+ memBlock = fds.fetchReplicaInfo(bpid, blockId);
assertNotNull(memBlock);
assertEquals(genStamp, memBlock.getGenerationStamp());
}
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Fri Feb 25 22:42:39 2011
@@ -130,44 +130,47 @@ public class TestInterDatanodeProtocol {
Assert.assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState());
}
- /** Test {@link FSDataset#initReplicaRecovery(ReplicasMap, Block, long)} */
+ /** Test
+ * {@link FSDataset#initReplicaRecovery(String, ReplicasMap, Block, long)}
+ */
@Test
public void testInitReplicaRecovery() throws IOException {
final long firstblockid = 10000L;
final long gs = 7777L;
final long length = 22L;
final ReplicasMap map = new ReplicasMap();
+ String bpid = "BP-TEST";
final Block[] blocks = new Block[5];
for(int i = 0; i < blocks.length; i++) {
blocks[i] = new Block(firstblockid + i, length, gs);
- map.add(createReplicaInfo(blocks[i]));
+ map.add(bpid, createReplicaInfo(blocks[i]));
}
{
//normal case
final Block b = blocks[0];
- final ReplicaInfo originalInfo = map.get(b);
+ final ReplicaInfo originalInfo = map.get(bpid, b);
final long recoveryid = gs + 1;
- final ReplicaRecoveryInfo recoveryInfo = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid);
+ final ReplicaRecoveryInfo recoveryInfo = FSDataset.initReplicaRecovery(bpid, map, blocks[0], recoveryid);
assertEquals(originalInfo, recoveryInfo);
- final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(b);
+ final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(bpid, b);
Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
//recover one more time
final long recoveryid2 = gs + 2;
- final ReplicaRecoveryInfo recoveryInfo2 = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid2);
+ final ReplicaRecoveryInfo recoveryInfo2 = FSDataset.initReplicaRecovery(bpid, map, blocks[0], recoveryid2);
assertEquals(originalInfo, recoveryInfo2);
- final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(b);
+ final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(bpid, b);
Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
//case RecoveryInProgressException
try {
- FSDataset.initReplicaRecovery(map, b, recoveryid);
+ FSDataset.initReplicaRecovery(bpid, map, b, recoveryid);
Assert.fail();
}
catch(RecoveryInProgressException ripe) {
@@ -178,7 +181,7 @@ public class TestInterDatanodeProtocol {
{ // BlockRecoveryFI_01: replica not found
final long recoveryid = gs + 1;
final Block b = new Block(firstblockid - 1, length, gs);
- ReplicaRecoveryInfo r = FSDataset.initReplicaRecovery(map, b, recoveryid);
+ ReplicaRecoveryInfo r = FSDataset.initReplicaRecovery(bpid, map, b, recoveryid);
Assert.assertNull("Data-node should not have this replica.", r);
}
@@ -186,7 +189,7 @@ public class TestInterDatanodeProtocol {
final long recoveryid = gs - 1;
final Block b = new Block(firstblockid + 1, length, gs);
try {
- FSDataset.initReplicaRecovery(map, b, recoveryid);
+ FSDataset.initReplicaRecovery(bpid, map, b, recoveryid);
Assert.fail();
}
catch(IOException ioe) {
@@ -199,7 +202,7 @@ public class TestInterDatanodeProtocol {
final long recoveryid = gs + 1;
final Block b = new Block(firstblockid, length, gs+1);
try {
- FSDataset.initReplicaRecovery(map, b, recoveryid);
+ FSDataset.initReplicaRecovery(bpid, map, b, recoveryid);
fail("InitReplicaRecovery should fail because replica's " +
"gs is less than the block's gs");
} catch (IOException e) {
@@ -221,6 +224,7 @@ public class TestInterDatanodeProtocol {
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
+ String bpid = cluster.getNamesystem().getPoolId();
//create a file
DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
@@ -248,7 +252,7 @@ public class TestInterDatanodeProtocol {
new RecoveringBlock(b, null, recoveryid));
//check replica
- final ReplicaInfo replica = fsdataset.fetchReplicaInfo(b.getBlockId());
+ final ReplicaInfo replica = fsdataset.fetchReplicaInfo(bpid, b.getBlockId());
Assert.assertEquals(ReplicaState.RUR, replica.getState());
//check meta data before update
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java Fri Feb 25 22:42:39 2011
@@ -27,11 +27,12 @@ import org.junit.Test;
*/
public class TestReplicasMap {
private static final ReplicasMap map = new ReplicasMap();
+ private static final String bpid = "BP-TEST";
private static final Block block = new Block(1234, 1234, 1234);
@BeforeClass
public static void setup() {
- map.add(new FinalizedReplica(block, null, null));
+ map.add(bpid, new FinalizedReplica(block, null, null));
}
/**
@@ -41,35 +42,35 @@ public class TestReplicasMap {
public void testGet() {
// Test 1: null argument throws invalid argument exception
try {
- map.get(null);
+ map.get(bpid, null);
fail("Expected exception not thrown");
} catch (IllegalArgumentException expected) { }
// Test 2: successful lookup based on block
- assertNotNull(map.get(block));
+ assertNotNull(map.get(bpid, block));
// Test 3: Lookup failure - generation stamp mismatch
Block b = new Block(block);
b.setGenerationStamp(0);
- assertNull(map.get(b));
+ assertNull(map.get(bpid, b));
// Test 4: Lookup failure - blockID mismatch
b.setGenerationStamp(block.getGenerationStamp());
b.setBlockId(0);
- assertNull(map.get(b));
+ assertNull(map.get(bpid, b));
// Test 5: successful lookup based on block ID
- assertNotNull(map.get(block.getBlockId()));
+ assertNotNull(map.get(bpid, block.getBlockId()));
// Test 6: failed lookup for invalid block ID
- assertNull(map.get(0));
+ assertNull(map.get(bpid, 0));
}
@Test
public void testAdd() {
// Test 1: null argument throws invalid argument exception
try {
- map.add(null);
+ map.add(bpid, null);
fail("Expected exception not thrown");
} catch (IllegalArgumentException expected) { }
}
@@ -78,28 +79,28 @@ public class TestReplicasMap {
public void testRemove() {
// Test 1: null argument throws invalid argument exception
try {
- map.remove(null);
+ map.remove(bpid, null);
fail("Expected exception not thrown");
} catch (IllegalArgumentException expected) { }
// Test 2: remove failure - generation stamp mismatch
Block b = new Block(block);
b.setGenerationStamp(0);
- assertNull(map.remove(b));
+ assertNull(map.remove(bpid, b));
// Test 3: remove failure - blockID mismatch
b.setGenerationStamp(block.getGenerationStamp());
b.setBlockId(0);
- assertNull(map.remove(b));
+ assertNull(map.remove(bpid, b));
// Test 4: remove success
- assertNotNull(map.remove(block));
+ assertNotNull(map.remove(bpid, block));
// Test 5: remove failure - invalid blockID
- assertNull(map.remove(0));
+ assertNull(map.remove(bpid, 0));
// Test 6: remove success
- map.add(new FinalizedReplica(block, null, null));
- assertNotNull(map.remove(block.getBlockId()));
+ map.add(bpid, new FinalizedReplica(block, null, null));
+ assertNotNull(map.remove(bpid, block.getBlockId()));
}
}
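The ReplicasMap signatures exercised here recur throughout this commit (TestDatanodeRestart, TestDirectoryScanner, TestWriteToReplica): add, get, remove and the replicas view all take a block pool id first. A minimal sketch using the same synthetic pool id as the test, with illustrative block values:

    ReplicasMap map = new ReplicasMap();
    String bpid = "BP-TEST";
    Block blk = new Block(1234, 1234, 1234);

    map.add(bpid, new FinalizedReplica(blk, null, null));
    ReplicaInfo byBlock = map.get(bpid, blk);               // lookup by (pool, block)
    ReplicaInfo byId    = map.get(bpid, blk.getBlockId());  // lookup by (pool, block id)
    int perPoolCount    = map.replicas(bpid).size();        // per-pool view of the replicas
    map.remove(bpid, blk);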
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Fri Feb 25 22:42:39 2011
@@ -35,14 +35,10 @@ import org.apache.hadoop.util.DataChecks
/**
* this class tests the methods of the SimulatedFSDataset.
- *
*/
-
public class TestSimulatedFSDataset extends TestCase {
Configuration conf = null;
-
- // TODO:FEDERATION initialize this
- static String bpid;
+ static final String bpid = "BP-TEST";
static final int NUMBLOCKS = 20;
static final int BLOCK_LENGTH_MULTIPLIER = 79;
@@ -50,7 +46,6 @@ public class TestSimulatedFSDataset exte
super.setUp();
conf = new HdfsConfiguration();
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
-
}
protected void tearDown() throws Exception {
@@ -92,7 +87,7 @@ public class TestSimulatedFSDataset exte
}
public void testGetMetaData() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
+ FSDatasetInterface fsdataset = getSimulatedFSDataset();
ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
try {
assertFalse(fsdataset.metaFileExists(b));
@@ -113,19 +108,18 @@ public class TestSimulatedFSDataset exte
public void testStorageUsage() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
+ FSDatasetInterface fsdataset = getSimulatedFSDataset();
assertEquals(fsdataset.getDfsUsed(), 0);
assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity());
int bytesAdded = addSomeBlocks(fsdataset);
assertEquals(bytesAdded, fsdataset.getDfsUsed());
assertEquals(fsdataset.getCapacity()-bytesAdded, fsdataset.getRemaining());
-
}
- void checkBlockDataAndSize(FSDatasetInterface fsdataset,
- ExtendedBlock b, long expectedLen) throws IOException {
+ void checkBlockDataAndSize(FSDatasetInterface fsdataset, ExtendedBlock b,
+ long expectedLen) throws IOException {
InputStream input = fsdataset.getBlockInputStream(b);
long lengthRead = 0;
int data;
@@ -137,7 +131,7 @@ public class TestSimulatedFSDataset exte
}
public void testWriteRead() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
+ FSDatasetInterface fsdataset = getSimulatedFSDataset();
addSomeBlocks(fsdataset);
for (int i=1; i <= NUMBLOCKS; ++i) {
ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
@@ -147,26 +141,25 @@ public class TestSimulatedFSDataset exte
}
}
-
-
public void testGetBlockReport() throws IOException {
- SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
- BlockListAsLongs blockReport = fsdataset.getBlockReport();
+ SimulatedFSDataset fsdataset = getSimulatedFSDataset();
+ BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
assertEquals(0, blockReport.getNumberOfBlocks());
- int bytesAdded = addSomeBlocks(fsdataset);
- blockReport = fsdataset.getBlockReport();
+ addSomeBlocks(fsdataset);
+ blockReport = fsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
}
}
+
public void testInjectionEmpty() throws IOException {
- SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
- BlockListAsLongs blockReport = fsdataset.getBlockReport();
+ SimulatedFSDataset fsdataset = getSimulatedFSDataset();
+ BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
- blockReport = fsdataset.getBlockReport();
+ blockReport = fsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
@@ -175,11 +168,9 @@ public class TestSimulatedFSDataset exte
// Inject blocks into an empty fsdataset
// - injecting the blocks we got above.
-
-
- SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf);
+ SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
sfsdataset.injectBlocks(bpid, blockReport);
- blockReport = sfsdataset.getBlockReport();
+ blockReport = sfsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
@@ -192,12 +183,11 @@ public class TestSimulatedFSDataset exte
}
public void testInjectionNonEmpty() throws IOException {
- SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
-
- BlockListAsLongs blockReport = fsdataset.getBlockReport();
+ SimulatedFSDataset fsdataset = getSimulatedFSDataset();
+ BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
- blockReport = fsdataset.getBlockReport();
+ blockReport = fsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
@@ -207,18 +197,16 @@ public class TestSimulatedFSDataset exte
// Inject blocks into an non-empty fsdataset
// - injecting the blocks we got above.
-
-
- SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf);
+ SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
// Add come blocks whose block ids do not conflict with
// the ones we are going to inject.
bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS+1);
- BlockListAsLongs blockReport2 = sfsdataset.getBlockReport();
+ sfsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
- blockReport2 = sfsdataset.getBlockReport();
+ sfsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
sfsdataset.injectBlocks(bpid, blockReport);
- blockReport = sfsdataset.getBlockReport();
+ blockReport = sfsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS*2, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
@@ -229,23 +217,21 @@ public class TestSimulatedFSDataset exte
assertEquals(bytesAdded, sfsdataset.getDfsUsed());
assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining());
-
// Now test that the dataset cannot be created if it does not have sufficient cap
-
conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
try {
- sfsdataset = new SimulatedFSDataset(conf);
+ sfsdataset = getSimulatedFSDataset();
+ sfsdataset.addBlockPool(bpid, conf);
sfsdataset.injectBlocks(bpid, blockReport);
assertTrue("Expected an IO exception", false);
} catch (IOException e) {
// ok - as expected
}
-
}
public void checkInvalidBlock(ExtendedBlock b) throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
+ FSDatasetInterface fsdataset = getSimulatedFSDataset();
assertFalse(fsdataset.isValidBlock(b));
try {
fsdataset.getLength(b);
@@ -267,11 +253,10 @@ public class TestSimulatedFSDataset exte
} catch (IOException e) {
// ok - as expected
}
-
}
public void testInValidBlocks() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
+ FSDatasetInterface fsdataset = getSimulatedFSDataset();
ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
checkInvalidBlock(b);
@@ -279,29 +264,31 @@ public class TestSimulatedFSDataset exte
addSomeBlocks(fsdataset);
b = new ExtendedBlock(bpid, NUMBLOCKS + 99, 5, 0);
checkInvalidBlock(b);
-
}
public void testInvalidate() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
+ FSDatasetInterface fsdataset = getSimulatedFSDataset();
int bytesAdded = addSomeBlocks(fsdataset);
Block[] deleteBlocks = new Block[2];
deleteBlocks[0] = new Block(1, 0, 0);
deleteBlocks[1] = new Block(2, 0, 0);
fsdataset.invalidate(bpid, deleteBlocks);
checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[0]));
- checkInvalidBlock(new ExtendedBlock(deleteBlocks[1]));
+ checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[1]));
long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());
-
-
// Now make sure the rest of the blocks are valid
for (int i=3; i <= NUMBLOCKS; ++i) {
Block b = new Block(i, 0, 0);
assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid, b)));
}
}
-
+
+ private SimulatedFSDataset getSimulatedFSDataset() throws IOException {
+ SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
+ fsdataset.addBlockPool(bpid, conf);
+ return fsdataset;
+ }
}
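The new getSimulatedFSDataset() helper captures the contract this patch introduces: a freshly constructed SimulatedFSDataset knows about no block pools, so a pool must be registered before blocks are injected or reported. A sketch of that sequence, assuming the constructor, configuration key and methods shown above; the pool id and the injected blocks are illustrative, and the import paths are taken from this branch's layout:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
    import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

    // injectBlocks declares IOException, so run this inside a test method that throws it.
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);

    SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
    fsdataset.addBlockPool("BP-TEST", conf);                    // register the pool first
    List<Block> someBlocks = Arrays.asList(new Block(1, 0, 0), new Block(2, 0, 0));
    fsdataset.injectBlocks("BP-TEST", someBlocks);
    BlockListAsLongs report = fsdataset.getBlockReport("BP-TEST"); // per-pool report

Most of the per-block methods (getLength(), finalizeBlock(), append(), and so on) resolve the pool through the new getMap() helper and throw an IOException for a pool that was never added, which is why every test in this file now goes through the helper instead of constructing the dataset directly.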
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1074727&r1=1074726&r2=1074727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Fri Feb 25 22:42:39 2011
@@ -54,7 +54,8 @@ public class TestWriteToReplica {
FSDataset dataSet = (FSDataset)dn.data;
// set up replicasMap
- setup(dataSet);
+ String bpid = cluster.getNamesystem().getPoolId();
+ setup(bpid, dataSet);
// test close
testClose(dataSet);
@@ -73,10 +74,11 @@ public class TestWriteToReplica {
FSDataset dataSet = (FSDataset)dn.data;
// set up replicasMap
- setup(dataSet);
+ String bpid = cluster.getNamesystem().getPoolId();
+ setup(bpid, dataSet);
// test append
- testAppend(dataSet);
+ testAppend(bpid, dataSet);
} finally {
cluster.shutdown();
}
@@ -92,7 +94,8 @@ public class TestWriteToReplica {
FSDataset dataSet = (FSDataset)dn.data;
// set up replicasMap
- setup(dataSet);
+ String bpid = cluster.getNamesystem().getPoolId();
+ setup(bpid, dataSet);
// test writeToRbw
testWriteToRbw(dataSet);
@@ -111,7 +114,8 @@ public class TestWriteToReplica {
FSDataset dataSet = (FSDataset)dn.data;
// set up replicasMap
- setup(dataSet);
+ String bpid = cluster.getNamesystem().getPoolId();
+ setup(bpid, dataSet);
// test writeToTemporary
testWriteToTemporary(dataSet);
@@ -120,42 +124,42 @@ public class TestWriteToReplica {
}
}
- private void setup(FSDataset dataSet) throws IOException {
+ private void setup(String bpid, FSDataset dataSet) throws IOException {
// setup replicas map
ReplicasMap replicasMap = dataSet.volumeMap;
FSVolume vol = dataSet.volumes.getNextVolume(0);
ReplicaInfo replicaInfo = new FinalizedReplica(
blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
- replicasMap.add(replicaInfo);
+ replicasMap.add(bpid, replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
- replicasMap.add(new ReplicaInPipeline(
+ replicasMap.add(bpid, new ReplicaInPipeline(
blocks[TEMPORARY].getBlockId(),
blocks[TEMPORARY].getGenerationStamp(), vol,
- vol.createTmpFile(blocks[TEMPORARY].getLocalBlock()).getParentFile()));
+ vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile()));
replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol,
- vol.createRbwFile(blocks[RBW].getLocalBlock()).getParentFile(), null);
- replicasMap.add(replicaInfo);
+ vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(), null);
+ replicasMap.add(bpid, replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
- replicasMap.add(new ReplicaWaitingToBeRecovered(
- blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(
+ replicasMap.add(bpid, new ReplicaWaitingToBeRecovered(
+ blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
blocks[RWR].getLocalBlock()).getParentFile()));
- replicasMap.add(new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
+ replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
.getLocalBlock(), vol, vol.getDir()), 2007));
}
- private void testAppend(FSDataset dataSet) throws IOException {
+ private void testAppend(String bpid, FSDataset dataSet) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1;
- FSVolume v = dataSet.volumeMap.get(blocks[FINALIZED].getLocalBlock())
+ FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
.getVolume();
long available = v.getCapacity()-v.getDfsUsed();
long expectedLen = blocks[FINALIZED].getNumBytes();
try {
- v.decDfsUsed(-available);
+ v.decDfsUsed(bpid, -available);
blocks[FINALIZED].setNumBytes(expectedLen+100);
dataSet.append(blocks[FINALIZED], newGS, expectedLen);
Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]);
@@ -163,7 +167,7 @@ public class TestWriteToReplica {
Assert.assertTrue(e.getMessage().startsWith(
"Insufficient space for appending to "));
}
- v.decDfsUsed(available);
+ v.decDfsUsed(bpid, available);
blocks[FINALIZED].setNumBytes(expectedLen);
newGS = blocks[RBW].getGenerationStamp()+1;