Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/02/08 22:04:48 UTC
svn commit: r1242093 [2/2] - in
/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/server/datanode/
src/test/java/org/apache/hadoop/hdfs/server/datanode/
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1242093&r1=1242092&r2=1242093&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Wed Feb 8 21:04:47 2012
@@ -25,20 +25,20 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Random;
+import junit.framework.TestCase;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* Tests {@link DirectoryScanner} handling of differences
@@ -142,10 +142,10 @@ public class TestDirectoryScanner extend
/** Create a block file in a random volume */
private long createBlockFile() throws IOException {
- List<FSVolume> volumes = fds.volumes.getVolumes();
+ List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
- File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+ File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());
@@ -155,10 +155,10 @@ public class TestDirectoryScanner extend
/** Create a metafile in a random volume */
private long createMetaFile() throws IOException {
- List<FSVolume> volumes = fds.volumes.getVolumes();
+ List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
- File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+ File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getMetaFile(id));
if (file.createNewFile()) {
LOG.info("Created metafile " + file.getName());
@@ -168,10 +168,10 @@ public class TestDirectoryScanner extend
/** Create block file and corresponding metafile in a random volume */
private long createBlockMetaFile() throws IOException {
- List<FSVolume> volumes = fds.volumes.getVolumes();
+ List<FSVolumeInterface> volumes = fds.getVolumes();
int index = rand.nextInt(volumes.size() - 1);
long id = getFreeBlockId();
- File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+ File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());
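
The recurring change above swaps the concrete FSDataset.FSVolume type for the
FSDatasetInterface.FSVolumeInterface abstraction and collapses the
volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir() chain into a
single volumes.get(index).getFinalizedDir(bpid) call on the volume itself.
A minimal sketch of the volume abstraction as these tests see it; only the two
methods actually called in this commit are shown, and the body is an inference
from the diff, not the actual interface source:

    import java.io.File;
    import java.io.IOException;

    // Sketch only: methods inferred from the calls made in these tests,
    // not the real FSDatasetInterface.FSVolumeInterface declaration.
    interface FSVolumeSketch {
      // Finalized-replica directory for a block pool; replaces the former
      // getBlockPoolSlice(bpid).getFinalizedDir() chain.
      File getFinalizedDir(String bpid) throws IOException;

      // Free space on this volume, as mocked in TestRoundRobinVolumesPolicy.
      long getAvailable() throws IOException;
    }
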
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java?rev=1242093&r1=1242092&r2=1242093&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java Wed Feb 8 21:04:47 2012
@@ -21,10 +21,10 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import junit.framework.Assert;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
@@ -33,14 +33,14 @@ public class TestRoundRobinVolumesPolicy
// Test the Round-Robin block-volume choosing algorithm.
@Test
public void testRR() throws Exception {
- final List<FSVolume> volumes = new ArrayList<FSVolume>();
+ final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
// First volume, with 100 bytes of space.
- volumes.add(Mockito.mock(FSVolume.class));
+ volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
// Second volume, with 200 bytes of space.
- volumes.add(Mockito.mock(FSVolume.class));
+ volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance(
@@ -69,14 +69,14 @@ public class TestRoundRobinVolumesPolicy
@Test
public void testRRPolicyExceptionMessage()
throws Exception {
- final List<FSVolume> volumes = new ArrayList<FSVolume>();
+ final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
// First volume, with 500 bytes of space.
- volumes.add(Mockito.mock(FSVolume.class));
+ volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
// Second volume, with 600 bytes of space.
- volumes.add(Mockito.mock(FSVolume.class));
+ volumes.add(Mockito.mock(FSVolumeInterface.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();
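
Because the policy now operates on an interface, both tests above mock
FSVolumeInterface directly instead of the concrete FSVolume; the same hunk
also moves the tests from junit.framework.Assert to org.junit.Assert. The
mocking pattern, reduced to its core (imports as in the diff above; the 150L
figure is invented for illustration):

    // Sketch of the Mockito pattern used in the tests above; the available
    // space returned here is an illustrative value, not one from the commit.
    List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
    volumes.add(Mockito.mock(FSVolumeInterface.class));
    Mockito.when(volumes.get(0).getAvailable()).thenReturn(150L);
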
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1242093&r1=1242092&r2=1242093&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Wed Feb 8 21:04:47 2012
@@ -140,7 +140,7 @@ public class TestWriteToReplica {
ReplicasMap replicasMap = dataSet.volumeMap;
FSVolume vol = dataSet.volumes.getNextVolume(0);
ReplicaInfo replicaInfo = new FinalizedReplica(
- blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
+ blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
replicasMap.add(bpid, replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
@@ -160,15 +160,15 @@ public class TestWriteToReplica {
blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
blocks[RWR].getLocalBlock()).getParentFile()));
replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
- .getLocalBlock(), vol, vol.getDir()), 2007));
+ .getLocalBlock(), vol, vol.getCurrentDir().getParentFile()), 2007));
return blocks;
}
private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1;
- FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
- .getVolume();
+ final FSVolume v = (FSVolume)dataSet.volumeMap.get(
+ bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
long available = v.getCapacity()-v.getDfsUsed();
long expectedLen = blocks[FINALIZED].getNumBytes();
try {
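
Two idioms recur in this last file: the volume root directory is now derived
as vol.getCurrentDir().getParentFile() in place of the removed vol.getDir(),
and getVolume() returns the interface type, so the test casts back to the
concrete FSVolume where it needs capacity accounting. In sketch form (block
stands in for any of the blocks[] entries; the other names come from the
diff):

    // getCurrentDir() points at <volume>/current, so its parent is the
    // volume root that vol.getDir() used to return.
    File volumeRoot = vol.getCurrentDir().getParentFile();

    // getVolume() now returns FSVolumeInterface; cast back to FSVolume for
    // the concrete accounting methods used in testAppend.
    final FSVolume v =
        (FSVolume) dataSet.volumeMap.get(bpid, block.getLocalBlock()).getVolume();
    long available = v.getCapacity() - v.getDfsUsed();
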