Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2011/02/22 21:31:15 UTC
svn commit: r1073489 [2/2] - in /hadoop/hdfs/branches/HDFS-1052: ./
src/java/org/apache/hadoop/hdfs/protocol/
src/java/org/apache/hadoop/hdfs/server/datanode/
src/java/org/apache/hadoop/hdfs/server/namenode/
src/java/org/apache/hadoop/hdfs/server/proto...
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1073489&r1=1073488&r2=1073489&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Tue Feb 22 20:31:14 2011
@@ -19,10 +19,9 @@ package org.apache.hadoop.hdfs.server.da
import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
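
The import hunk above tracks two of the branch's themes: the test now takes its defaults from HdfsConfiguration rather than the generic Configuration, and it addresses blocks through the federation-aware ExtendedBlock. A minimal sketch of the configuration half at a call site (the MiniDFSCluster.Builder usage is an assumption based on the test APIs of this era, not part of this diff):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // HdfsConfiguration registers hdfs-default.xml and hdfs-site.xml as
    // default resources, which the plain Configuration does not.
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();  // assumed Builder API
    try {
      cluster.waitActive();
      // ... exercise FSDataset against the running cluster ...
    } finally {
      cluster.shutdown();
    }
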
@@ -31,10 +30,12 @@ import org.junit.Test;
/** Test if FSDataset#append, writeToRbw, and writeToTmp */
public class TestWriteToReplica {
- final private static Block[] blocks = new Block[] {
- new Block(1, 1, 2001), new Block(2, 1, 2002),
- new Block(3, 1, 2003), new Block(4, 1, 2004),
- new Block(5, 1, 2005), new Block(6, 1, 2006)
+ static String bpid;
+ // TODO:FEDERATION make sure bpid is passed right there
+ final private static ExtendedBlock[] blocks = new ExtendedBlock[] {
+ new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002),
+ new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
+ new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
};
final private static int FINALIZED = 0;
final private static int TEMPORARY = 1;
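
The array rewrite is the heart of the change: each test block now carries a block pool id in front of the old (block id, length, generation stamp) triple, and the TODO flags that bpid is still unassigned when this static initializer runs. A sketch of the ExtendedBlock round trip the rest of the file leans on (constructor argument order as in the hunk above; getBlockPoolId() is an assumed accessor from the branch API):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    // (bpid, blockId, numBytes, generationStamp), matching new ExtendedBlock(bpid, 1, 1, 2001)
    ExtendedBlock eb = new ExtendedBlock("BP-example", 1L, 1L, 2001L);

    // Pool-aware callers keep the ExtendedBlock; pre-federation APIs still
    // want the pool-local Block, which the wrapper hands back intact:
    Block local = eb.getLocalBlock();
    assert local.getBlockId() == 1L;
    assert local.getGenerationStamp() == 2001L;
    assert "BP-example".equals(eb.getBlockPoolId());  // assumed accessor
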
@@ -124,7 +125,7 @@ public class TestWriteToReplica {
ReplicasMap replicasMap = dataSet.volumeMap;
FSVolume vol = dataSet.volumes.getNextVolume(0);
ReplicaInfo replicaInfo = new FinalizedReplica(
- blocks[FINALIZED], vol, vol.getDir());
+ blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
replicasMap.add(replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
@@ -132,23 +133,25 @@ public class TestWriteToReplica {
replicasMap.add(new ReplicaInPipeline(
blocks[TEMPORARY].getBlockId(),
blocks[TEMPORARY].getGenerationStamp(), vol,
- vol.createTmpFile(blocks[TEMPORARY]).getParentFile()));
+ vol.createTmpFile(blocks[TEMPORARY].getLocalBlock()).getParentFile()));
- replicaInfo = new ReplicaBeingWritten(blocks[RBW], vol,
- vol.createRbwFile(blocks[RBW]).getParentFile(), null);
+ replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol,
+ vol.createRbwFile(blocks[RBW].getLocalBlock()).getParentFile(), null);
replicasMap.add(replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
- replicasMap.add(new ReplicaWaitingToBeRecovered(blocks[RWR], vol,
- vol.createRbwFile(blocks[RWR]).getParentFile()));
- replicasMap.add(new ReplicaUnderRecovery(
- new FinalizedReplica(blocks[RUR], vol, vol.getDir()), 2007));
+ replicasMap.add(new ReplicaWaitingToBeRecovered(
+ blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(
+ blocks[RWR].getLocalBlock()).getParentFile()));
+ replicasMap.add(new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
+ .getLocalBlock(), vol, vol.getDir()), 2007));
}
private void testAppend(FSDataset dataSet) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1;
- FSVolume v = dataSet.volumeMap.get(blocks[FINALIZED]).getVolume();
+ FSVolume v = dataSet.volumeMap.get(blocks[FINALIZED].getLocalBlock())
+ .getVolume();
long available = v.getCapacity()-v.getDfsUsed();
long expectedLen = blocks[FINALIZED].getNumBytes();
try {
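
Taken together, the setUp and testAppend hunks show the unwrap pattern this branch settles on: FSVolume file creation (createTmpFile, createRbwFile), the ReplicaInfo constructors, and the volumeMap lookup all still key on the pool-local Block, so each call site peels the ExtendedBlock first. The same steps, unrolled for readability (identifiers as in the hunks above; 2007 is the recovery id the test passes):

    // Replica setup: pool-local Block at every layer below the dataset.
    Block rur = blocks[RUR].getLocalBlock();
    FinalizedReplica finalized = new FinalizedReplica(rur, vol, vol.getDir());
    replicasMap.add(new ReplicaUnderRecovery(finalized, 2007));  // 2007 = recovery id

    // Lookup in testAppend: same unwrap before touching the replica map.
    Block fin = blocks[FINALIZED].getLocalBlock();
    FSVolume v = dataSet.volumeMap.get(fin).getVolume();
    long available = v.getCapacity() - v.getDfsUsed();
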
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java?rev=1073489&r1=1073488&r2=1073489&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java Tue Feb 22 20:31:14 2011
@@ -21,7 +21,6 @@ import java.util.ArrayList;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import junit.framework.TestCase;
@@ -43,10 +42,10 @@ public class TestDatanodeDescriptor exte
blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
}
dd.addBlocksToBeInvalidated(blockList);
- BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
- assertEquals(bc.getBlocks().length, MAX_LIMIT);
+ Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
+ assertEquals(bc.length, MAX_LIMIT);
bc = dd.getInvalidateBlocks(MAX_LIMIT);
- assertEquals(bc.getBlocks().length, REMAINING_BLOCKS);
+ assertEquals(bc.length, REMAINING_BLOCKS);
}
public void testBlocksCounter() throws Exception {
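
On the namenode side the change is an interface simplification: DatanodeDescriptor#getInvalidateBlocks(int) now returns the Block[] itself rather than a BlockCommand wrapper, which is why the BlockCommand import disappears and callers drop the getBlocks() hop. The new calling convention, as the test exercises it (dd, MAX_LIMIT, and REMAINING_BLOCKS as defined in the test):

    // First call drains a full batch; the second returns whatever is left.
    Block[] toInvalidate = dd.getInvalidateBlocks(MAX_LIMIT);
    assertEquals(MAX_LIMIT, toInvalidate.length);

    toInvalidate = dd.getInvalidateBlocks(MAX_LIMIT);
    assertEquals(REMAINING_BLOCKS, toInvalidate.length);
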
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1073489&r1=1073488&r2=1073489&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Tue Feb 22 20:31:14 2011
@@ -451,7 +451,7 @@ public class TestBlockRecovery {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
- dn.data.createRbw(block.getLocalBlock());
+ dn.data.createRbw(block);
try {
dn.syncBlock(rBlock, initBlockRecords(dn));
fail("Sync should fail");
@@ -473,7 +473,7 @@ public class TestBlockRecovery {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
- ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block.getLocalBlock());
+ ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
BlockWriteStreams streams = null;
try {
streams = replicaInfo.createStreams(true, 0, 0);
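
TestBlockRecovery moves in the opposite direction from TestWriteToReplica: FSDataset#createRbw on this branch now accepts the ExtendedBlock itself, so the getLocalBlock() unwrap at these two call sites is deleted rather than introduced. A sketch of the pipeline setup as the test now drives it (createStreams arguments as in the hunk; the old form is shown only for contrast):

    // Before: ... = dn.data.createRbw(block.getLocalBlock());
    // After: the dataset API is pool-aware and takes the ExtendedBlock directly.
    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
    BlockWriteStreams streams = replicaInfo.createStreams(true, 0, 0);
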