Posted to hdfs-commits@hadoop.apache.org by st...@apache.org on 2009/11/28 21:06:08 UTC
svn commit: r885143 [17/18] - in /hadoop/hdfs/branches/HDFS-326: ./
.eclipse.templates/ .eclipse.templates/.launches/ conf/ ivy/ lib/
src/ant/org/apache/hadoop/ant/ src/ant/org/apache/hadoop/ant/condition/
src/c++/ src/c++/libhdfs/ src/c++/libhdfs/docs...
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java Sat Nov 28 20:05:56 2009
@@ -146,7 +146,7 @@
* Test if Datanode reports bad blocks during replication request
*/
public void testBadBlockReportOnTransfer() throws Exception {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
FileSystem fs = null;
DFSClient dfsClient = null;
LocatedBlocks blocks = null;
@@ -191,8 +191,8 @@
* Tests replication in DFS.
*/
public void runReplication(boolean simulated) throws IOException {
- Configuration conf = new Configuration();
- conf.setBoolean("dfs.replication.considerLoad", false);
+ Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
if (simulated) {
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
}
@@ -291,7 +291,7 @@
* for under replicated blocks.
*
* It creates a file with one block and replication of 4. It corrupts
- * two of the blocks and removes one of the replicas. Expected behaviour is
+ * two of the blocks and removes one of the replicas. Expected behavior is
* that missing replica will be copied from one valid source.
*/
public void testPendingReplicationRetry() throws IOException {
@@ -307,7 +307,7 @@
}
try {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
conf.set("dfs.replication", Integer.toString(numDataNodes));
//first time format
cluster = new MiniDFSCluster(0, conf, numDataNodes, true,
@@ -341,7 +341,8 @@
int fileCount = 0;
for (int i=0; i<6; i++) {
- File blockFile = new File(baseDir, "data" + (i+1) + "/current/" + block);
+ File blockFile = new File(baseDir, "data" + (i+1) +
+ MiniDFSCluster.FINALIZED_DIR_NAME + block);
LOG.info("Checking for file " + blockFile);
if (blockFile.exists()) {
@@ -372,11 +373,11 @@
*/
LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
- conf = new Configuration();
+ conf = new HdfsConfiguration();
conf.set("dfs.replication", Integer.toString(numDataNodes));
- conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
+ conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
- conf.set("dfs.safemode.threshold.pct", "0.75f"); // only 3 copies exist
+ conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.75f"); // only 3 copies exist
cluster = new MiniDFSCluster(0, conf, numDataNodes*2, false,
true, null, null);
@@ -400,7 +401,7 @@
* @throws Exception
*/
public void testReplicateLenMismatchedBlock() throws Exception {
- MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 2, true, null);
+ MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 2, true, null);
try {
cluster.waitActive();
// test truncated block
@@ -417,7 +418,7 @@
final Path fileName = new Path("/file1");
final short REPLICATION_FACTOR = (short)1;
final FileSystem fs = cluster.getFileSystem();
- final int fileLen = fs.getConf().getInt("io.bytes.per.checksum", 512);
+ final int fileLen = fs.getConf().getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
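
Every hunk above follows the same two-part substitution: the tests construct an HdfsConfiguration, which loads the HDFS-specific defaults, instead of a bare Configuration, and they address properties through DFSConfigKeys constants rather than literal strings. A minimal sketch of the resulting pattern, using only the constants that appear in the hunks above; the class and helper names are illustrative, not part of the commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ReplicationTestConf {
      // Illustrative helper: the same settings as the updated test, expressed
      // through DFSConfigKeys constants instead of hard-coded property names.
      static Configuration buildConf(int numDataNodes) {
        Configuration conf = new HdfsConfiguration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
        conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
                 Integer.toString(2));
        conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.75f");
        conf.set("dfs.replication", Integer.toString(numDataNodes));
        return conf;
      }
    }

The test files that follow apply the same substitution to their own keys (safemode extension, heartbeat recheck interval, bytes per checksum, name and data directories, and so on).
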
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java Sat Nov 28 20:05:56 2009
@@ -31,7 +31,7 @@
public class TestRestartDFS extends TestCase {
/** check if DFS remains in proper condition after a restart */
public void testRestartDFS() throws Exception {
- final Configuration conf = new Configuration();
+ final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java Sat Nov 28 20:05:56 2009
@@ -55,9 +55,9 @@
MiniDFSCluster cluster = null;
DistributedFileSystem fs = null;
try {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
// disable safemode extension to make the test run faster.
- conf.set("dfs.safemode.extension", "1");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, "1");
cluster = new MiniDFSCluster(conf, 1, true, null);
cluster.waitActive();
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java Sat Nov 28 20:05:56 2009
@@ -123,7 +123,7 @@
* Test if the seek bug exists in FSDataInputStream in DFS.
*/
public void testSeekBugDFS() throws IOException {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
FileSystem fileSys = cluster.getFileSystem();
try {
@@ -142,7 +142,7 @@
* Tests if the seek bug exists in FSDataInputStream in LocalFS.
*/
public void testSeekBugLocalFS() throws IOException {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
FileSystem fileSys = FileSystem.getLocal(conf);
try {
Path file1 = new Path("build/test/data", "seektest.dat");
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java Sat Nov 28 20:05:56 2009
@@ -77,10 +77,10 @@
* Tests mod & access time in DFS.
*/
public void testTimes() throws IOException {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
- conf.setInt("heartbeat.recheck.interval", 1000);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt("dfs.heartbeat.interval", 1);
@@ -187,13 +187,13 @@
* Tests mod time change at close in DFS.
*/
public void testTimesAtClose() throws IOException {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
int replicas = 1;
// parameter initialization
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
- conf.setInt("heartbeat.recheck.interval", 1000);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt("dfs.heartbeat.interval", 1);
conf.setInt("dfs.datanode.handler.count", 50);
MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java Sat Nov 28 20:05:56 2009
@@ -26,13 +26,13 @@
public class TestSetrepIncreasing extends TestCase {
static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
}
conf.set("dfs.replication", "" + fromREP);
conf.setLong("dfs.blockreport.intervalMsec", 1000L);
- conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
+ conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster(conf, 10, true, null);
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(), fs instanceof DistributedFileSystem);
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java Sat Nov 28 20:05:56 2009
@@ -90,11 +90,11 @@
* Tests small block size in DFS.
*/
public void testSmallBlock() throws IOException {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
conf.setBoolean("dfs.datanode.simulateddatastorage", true);
}
- conf.set("io.bytes.per.checksum", "1");
+ conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
FileSystem fileSys = cluster.getFileSystem();
try {
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Sat Nov 28 20:05:56 2009
@@ -86,9 +86,9 @@
*/
public static void initialize() throws Exception {
createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
- Configuration config = new Configuration();
- config.set("dfs.name.dir", namenodeStorage.toString());
- config.set("dfs.data.dir", datanodeStorage.toString());
+ Configuration config = new HdfsConfiguration();
+ config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
+ config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
MiniDFSCluster cluster = null;
try {
// format data-node
@@ -157,10 +157,10 @@
dataNodeDirs.append("," + new File(TEST_ROOT_DIR, "data"+i));
}
if (conf == null) {
- conf = new Configuration();
+ conf = new HdfsConfiguration();
}
- conf.set("dfs.name.dir", nameNodeDirs.toString());
- conf.set("dfs.data.dir", dataNodeDirs.toString());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameNodeDirs.toString());
+ conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDirs.toString());
conf.setInt("dfs.blockreport.intervalMsec", 10000);
return conf;
}
@@ -263,7 +263,7 @@
for (int i = 0; i < parents.length; i++) {
File newDir = new File(parents[i], dirName);
createEmptyDirs(new String[] {newDir.toString()});
- LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
+ LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
switch (nodeType) {
case NAME_NODE:
localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Sat Nov 28 20:05:56 2009
@@ -19,12 +19,14 @@
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -32,6 +34,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -59,8 +62,8 @@
}
private void initConf(Configuration conf) {
- conf.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
- conf.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE);
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+ conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
conf.setLong("dfs.heartbeat.interval", 1L);
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
conf.setLong("dfs.balancer.movedWinWidth", 2000L);
@@ -163,13 +166,14 @@
blocks, (short)(numDatanodes-1), distribution);
// restart the cluster: do NOT format the cluster
- conf.set("dfs.safemode.threshold.pct", "0.0f");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
cluster = new MiniDFSCluster(0, conf, numDatanodes,
false, true, null, racks, capacities);
cluster.waitActive();
client = DFSClient.createNamenode(conf);
- cluster.injectBlocks(blocksDN);
+ for(int i = 0; i < blocksDN.length; i++)
+ cluster.injectBlocks(i, Arrays.asList(blocksDN[i]));
long totalCapacity = 0L;
for(long capacity:capacities) {
@@ -279,7 +283,7 @@
/** Test a cluster with even distribution,
* then a new empty node is added to the cluster*/
public void testBalancer0() throws Exception {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
initConf(conf);
oneNodeTest(conf);
twoNodeTest(conf);
@@ -287,7 +291,7 @@
/** Test unevenly distributed cluster */
public void testBalancer1() throws Exception {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
initConf(conf);
testUnevenDistribution(conf,
new long[] {50*CAPACITY/100, 10*CAPACITY/100},
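
Beyond the configuration cleanup, the balancer test adapts to the reworked MiniDFSCluster.injectBlocks, which now takes a datanode index and an Iterable<Block> rather than one Block[] for the whole cluster (the matching SimulatedFSDataset change appears further down). A minimal sketch of that loop, assuming blocksDN is a Block[][] indexed by datanode as in the hunk above; the wrapper class and method are illustrative:

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.Block;

    class BlockInjection {
      // Inject each datanode's blocks individually, mirroring the updated test.
      static void injectAll(MiniDFSCluster cluster, Block[][] blocksDN)
          throws IOException {
        for (int i = 0; i < blocksDN.length; i++) {
          cluster.injectBlocks(i, Arrays.asList(blocksDN[i]));
        }
      }
    }
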
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java Sat Nov 28 20:05:56 2009
@@ -25,6 +25,7 @@
import static org.apache.hadoop.hdfs.protocol.FSConstants.LAYOUT_VERSION;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -100,7 +101,7 @@
UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());
- conf = new Configuration();
+ conf = new HdfsConfiguration();
if (System.getProperty("test.build.data") == null) { // to test to be run outside of ant
System.setProperty("test.build.data", "build/test/data");
}
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Sat Nov 28 20:05:56 2009
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Random;
@@ -30,9 +31,14 @@
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -77,11 +83,14 @@
nullCrcFileData[i+2] = nullCrcHeader[i];
}
}
-
- private class BInfo { // information about a single block
+
+ // information about a single block
+ private class BInfo implements ReplicaInPipelineInterface {
Block theBlock;
private boolean finalized = false; // if not finalized => ongoing creation
SimulatedOutputStream oStream = null;
+ private long bytesAcked;
+ private long bytesRcvd;
BInfo(Block b, boolean forWriting) throws IOException {
theBlock = new Block(b);
if (theBlock.getNumBytes() < 0) {
@@ -102,26 +111,21 @@
}
}
- synchronized long getGenerationStamp() {
+ synchronized public long getGenerationStamp() {
return theBlock.getGenerationStamp();
}
- synchronized void updateBlock(Block b) {
- theBlock.setGenerationStamp(b.getGenerationStamp());
- setlength(b.getNumBytes());
- }
-
- synchronized long getlength() {
+ synchronized public long getNumBytes() {
if (!finalized) {
- return oStream.getLength();
+ return bytesRcvd;
} else {
return theBlock.getNumBytes();
}
}
- synchronized void setlength(long length) {
+ synchronized public void setNumBytes(long length) {
if (!finalized) {
- oStream.setLength(length);
+ bytesRcvd = length;
} else {
theBlock.setNumBytes(length);
}
@@ -170,7 +174,20 @@
oStream = null;
return;
}
-
+
+ synchronized void unfinalizeBlock() throws IOException {
+ if (!finalized) {
+ throw new IOException("Unfinalized a block that's not finalized "
+ + theBlock);
+ }
+ finalized = false;
+ oStream = new SimulatedOutputStream();
+ long blockLen = theBlock.getNumBytes();
+ oStream.setLength(blockLen);
+ bytesRcvd = blockLen;
+ bytesAcked = blockLen;
+ }
+
SimulatedInputStream getMetaIStream() {
return new SimulatedInputStream(nullCrcFileData);
}
@@ -178,6 +195,65 @@
synchronized boolean isFinalized() {
return finalized;
}
+
+ @Override
+ synchronized public BlockWriteStreams createStreams(boolean isCreate,
+ int bytesPerChunk, int checksumSize) throws IOException {
+ if (finalized) {
+ throw new IOException("Trying to write to a finalized replica "
+ + theBlock);
+ } else {
+ SimulatedOutputStream crcStream = new SimulatedOutputStream();
+ return new BlockWriteStreams(oStream, crcStream);
+ }
+ }
+
+ @Override
+ synchronized public long getBlockId() {
+ return theBlock.getBlockId();
+ }
+
+ @Override
+ synchronized public long getVisibleLength() {
+ return getBytesAcked();
+ }
+
+ @Override
+ public ReplicaState getState() {
+ return null;
+ }
+
+ @Override
+ synchronized public long getBytesAcked() {
+ if (finalized) {
+ return theBlock.getNumBytes();
+ } else {
+ return bytesAcked;
+ }
+ }
+
+ @Override
+ synchronized public void setBytesAcked(long bytesAcked) {
+ if (!finalized) {
+ this.bytesAcked = bytesAcked;
+ }
+ }
+
+ @Override
+ synchronized public long getBytesOnDisk() {
+ if (finalized) {
+ return theBlock.getNumBytes();
+ } else {
+ return oStream.getLength();
+ }
+ }
+
+ @Override
+ synchronized public void setBytesOnDisk(long bytesOnDisk) {
+ if (!finalized) {
+ oStream.setLength(bytesOnDisk);
+ }
+ }
}
static private class SimulatedStorage {
@@ -232,7 +308,7 @@
public void setConf(Configuration iconf) {
conf = iconf;
- storageId = conf.get("StorageId", "unknownStorageId" +
+ storageId = conf.get(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, "unknownStorageId" +
new Random().nextInt());
registerMBean(storageId);
storage = new SimulatedStorage(
@@ -243,10 +319,12 @@
blockMap = new HashMap<Block,BInfo>();
}
- public synchronized void injectBlocks(Block[] injectBlocks)
+ public synchronized void injectBlocks(Iterable<Block> injectBlocks)
throws IOException {
if (injectBlocks != null) {
+ int numInjectedBlocks = 0;
for (Block b: injectBlocks) { // if any blocks in list is bad, reject list
+ numInjectedBlocks++;
if (b == null) {
throw new NullPointerException("Null blocks in block list");
}
@@ -255,12 +333,12 @@
}
}
HashMap<Block, BInfo> oldBlockMap = blockMap;
- blockMap =
- new HashMap<Block,BInfo>(injectBlocks.length + oldBlockMap.size());
+ blockMap = new HashMap<Block,BInfo>(
+ numInjectedBlocks + oldBlockMap.size());
blockMap.putAll(oldBlockMap);
for (Block b: injectBlocks) {
BInfo binfo = new BInfo(b, false);
- blockMap.put(b, binfo);
+ blockMap.put(binfo.theBlock, binfo);
}
}
}
@@ -280,7 +358,7 @@
}
}
- public synchronized Block[] getBlockReport() {
+ public synchronized BlockListAsLongs getBlockReport() {
Block[] blockTable = new Block[blockMap.size()];
int count = 0;
for (BInfo b : blockMap.values()) {
@@ -291,7 +369,8 @@
if (count != blockTable.length) {
blockTable = Arrays.copyOf(blockTable, count);
}
- return blockTable;
+ return new BlockListAsLongs(
+ new ArrayList<Block>(Arrays.asList(blockTable)), null);
}
public long getCapacity() throws IOException {
@@ -311,7 +390,13 @@
if (binfo == null) {
throw new IOException("Finalizing a non existing block " + b);
}
- return binfo.getlength();
+ return binfo.getNumBytes();
+ }
+
+ @Override
+ @Deprecated
+ public Replica getReplica(long blockId) {
+ return blockMap.get(new Block(blockId));
}
/** {@inheritDoc} */
@@ -322,19 +407,10 @@
return null;
}
b.setGenerationStamp(binfo.getGenerationStamp());
- b.setNumBytes(binfo.getlength());
+ b.setNumBytes(binfo.getNumBytes());
return b;
}
- /** {@inheritDoc} */
- public void updateBlock(Block oldblock, Block newblock) throws IOException {
- BInfo binfo = blockMap.get(newblock);
- if (binfo == null) {
- throw new IOException("BInfo not found, b=" + newblock);
- }
- binfo.updateBlock(newblock);
- }
-
public synchronized void invalidate(Block[] invalidBlks) throws IOException {
boolean error = false;
if (invalidBlks == null) {
@@ -350,7 +426,7 @@
DataNode.LOG.warn("Invalidate: Missing block");
continue;
}
- storage.free(binfo.getlength());
+ storage.free(binfo.getNumBytes());
blockMap.remove(b);
}
if (error) {
@@ -380,21 +456,89 @@
return getStorageInfo();
}
- public synchronized BlockWriteStreams writeToBlock(Block b,
- boolean isRecovery)
- throws IOException {
+ @Override
+ public synchronized ReplicaInPipelineInterface append(Block b,
+ long newGS, long expectedBlockLen) throws IOException {
+ BInfo binfo = blockMap.get(b);
+ if (binfo == null || !binfo.isFinalized()) {
+ throw new ReplicaNotFoundException("Block " + b
+ + " is not valid, and cannot be appended to.");
+ }
+ binfo.unfinalizeBlock();
+ return binfo;
+ }
+
+ @Override
+ public synchronized ReplicaInPipelineInterface recoverAppend(Block b,
+ long newGS, long expectedBlockLen) throws IOException {
+ BInfo binfo = blockMap.get(b);
+ if (binfo == null) {
+ throw new ReplicaNotFoundException("Block " + b
+ + " is not valid, and cannot be appended to.");
+ }
+ if (binfo.isFinalized()) {
+ binfo.unfinalizeBlock();
+ }
+ blockMap.remove(b);
+ binfo.theBlock.setGenerationStamp(newGS);
+ blockMap.put(binfo.theBlock, binfo);
+ return binfo;
+ }
+
+ @Override
+ public void recoverClose(Block b, long newGS,
+ long expectedBlockLen) throws IOException {
+ BInfo binfo = blockMap.get(b);
+ if (binfo == null) {
+ throw new ReplicaNotFoundException("Block " + b
+ + " is not valid, and cannot be appended to.");
+ }
+ if (!binfo.isFinalized()) {
+ binfo.finalizeBlock(binfo.getNumBytes());
+ }
+ blockMap.remove(b);
+ binfo.theBlock.setGenerationStamp(newGS);
+ blockMap.put(binfo.theBlock, binfo);
+ }
+
+ @Override
+ public synchronized ReplicaInPipelineInterface recoverRbw(Block b,
+ long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
+ BInfo binfo = blockMap.get(b);
+ if ( binfo == null) {
+ throw new ReplicaNotFoundException("Block " + b
+ + " does not exist, and cannot be appended to.");
+ }
+ if (binfo.isFinalized()) {
+ throw new ReplicaAlreadyExistsException("Block " + b
+ + " is valid, and cannot be written to.");
+ }
+ blockMap.remove(b);
+ binfo.theBlock.setGenerationStamp(newGS);
+ blockMap.put(binfo.theBlock, binfo);
+ return binfo;
+ }
+
+ @Override
+ public synchronized ReplicaInPipelineInterface createRbw(Block b)
+ throws IOException {
+ return createTemporary(b);
+ }
+
+ @Override
+ public synchronized ReplicaInPipelineInterface createTemporary(Block b)
+ throws IOException {
if (isValidBlock(b)) {
- throw new BlockAlreadyExistsException("Block " + b +
+ throw new ReplicaAlreadyExistsException("Block " + b +
" is valid, and cannot be written to.");
}
if (isBeingWritten(b)) {
- throw new BlockAlreadyExistsException("Block " + b +
+ throw new ReplicaAlreadyExistsException("Block " + b +
" is being written, and cannot be written to.");
}
- BInfo binfo = new BInfo(b, true);
- blockMap.put(b, binfo);
- SimulatedOutputStream crcStream = new SimulatedOutputStream();
- return new BlockWriteStreams(binfo.oStream, crcStream);
+ BInfo binfo = new BInfo(b, true);
+ blockMap.put(binfo.theBlock, binfo);
+ return binfo;
}
public synchronized InputStream getBlockInputStream(Block b)
@@ -421,10 +565,6 @@
throw new IOException("Not supported");
}
- /** No-op */
- public void validateBlockMetadata(Block b) {
- }
-
/**
* Returns metaData of block b as an input stream
* @param b - the block for which the metadata is desired
@@ -476,24 +616,11 @@
// nothing to check for simulated data set
}
- public synchronized long getChannelPosition(Block b,
- BlockWriteStreams stream)
- throws IOException {
- BInfo binfo = blockMap.get(b);
- if (binfo == null) {
- throw new IOException("No such Block " + b );
- }
- return binfo.getlength();
- }
-
- public synchronized void setChannelPosition(Block b, BlockWriteStreams stream,
- long dataOffset, long ckOffset)
+ @Override
+ public synchronized void adjustCrcChannelPosition(Block b,
+ BlockWriteStreams stream,
+ int checksumSize)
throws IOException {
- BInfo binfo = blockMap.get(b);
- if (binfo == null) {
- throw new IOException("No such Block " + b );
- }
- binfo.setlength(dataOffset);
}
/**
@@ -659,4 +786,23 @@
public boolean hasEnoughResource() {
return true;
}
+
+ @Override
+ public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
+ throws IOException {
+ return new ReplicaRecoveryInfo(rBlock.getBlock(), ReplicaState.FINALIZED);
+ }
+
+ @Override // FSDatasetInterface
+ public FinalizedReplica updateReplicaUnderRecovery(Block oldBlock,
+ long recoveryId,
+ long newlength) throws IOException {
+ return new FinalizedReplica(
+ oldBlock.getBlockId(), newlength, recoveryId, null, null);
+ }
+
+ @Override
+ public long getReplicaVisibleLength(Block block) throws IOException {
+ return block.getNumBytes();
+ }
}
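
The simulated dataset now mirrors the pipeline-oriented FSDataset API: writeToBlock, updateBlock, validateBlockMetadata and the channel-position methods are gone, replaced by createRbw/createTemporary (returning a ReplicaInPipelineInterface), append/recoverAppend/recoverClose/recoverRbw for the recovery paths, and a block report expressed as BlockListAsLongs. A minimal sketch of the new write path, as the TestSimulatedFSDataset changes at the end of this commit exercise it; it assumes the sketch lives alongside the tests in org.apache.hadoop.hdfs.server.datanode, since some of these interfaces are not public API, and the chunk/checksum sizes (512, 4) are the ones that test uses:

    import java.io.OutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;

    class SimulatedWriteSketch {
      static void writeOneBlock() throws Exception {
        Configuration conf = new HdfsConfiguration();
        conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
        SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);

        Block b = new Block(1, 0, 0);            // expected length 0: size comes from what is written
        ReplicaInPipelineInterface replica = fsdataset.createRbw(b);
        BlockWriteStreams out = replica.createStreams(true, 512, 4);
        try {
          OutputStream dataOut = out.dataOut;
          for (int j = 1; j <= 10; j++) {
            dataOut.write(j);                    // getBytesOnDisk() tracks each write
          }
        } finally {
          out.close();
        }
        b.setNumBytes(10);
        fsdataset.finalizeBlock(b);              // replica becomes finalized
        // The block report is now a BlockListAsLongs rather than a Block[].
        System.out.println(fsdataset.getBlockReport().getNumberOfBlocks());
      }
    }
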
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Sat Nov 28 20:05:56 2009
@@ -39,17 +39,19 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Util;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AccessToken;
/**
* This class tests if block replacement request to data nodes work correctly.
*/
@@ -59,7 +61,7 @@
MiniDFSCluster cluster;
public void testThrottler() throws IOException {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
long bandwidthPerSec = 1024*1024L;
final long TOTAL_BYTES =6*bandwidthPerSec;
@@ -82,7 +84,7 @@
}
public void testBlockReplacement() throws IOException {
- final Configuration CONF = new Configuration();
+ final Configuration CONF = new HdfsConfiguration();
final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
final String[] NEW_RACKS = {"/RACK2"};
@@ -90,8 +92,8 @@
final int DEFAULT_BLOCK_SIZE = 1024;
final Random r = new Random();
- CONF.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
- CONF.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE/2);
+ CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+ CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE/2);
CONF.setLong("dfs.blockreport.intervalMsec",500);
cluster = new MiniDFSCluster(
CONF, REPLICATION_FACTOR, true, INITIAL_RACKS );
@@ -234,7 +236,7 @@
out.writeLong(block.getGenerationStamp());
Text.writeString(out, source.getStorageID());
sourceProxy.write(out);
- AccessToken.DUMMY_TOKEN.write(out);
+ BlockAccessToken.DUMMY_TOKEN.write(out);
out.flush();
// receiveResponse
DataInputStream reply = new DataInputStream(sock.getInputStream());
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java Sat Nov 28 20:05:56 2009
@@ -22,6 +22,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.conf.Configuration;
@@ -30,7 +31,7 @@
public class TestDataNodeMetrics extends TestCase {
public void testDataNodeMetrics() throws Exception {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
try {
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Sat Nov 28 20:05:56 2009
@@ -31,18 +31,18 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.net.NetUtils;
import org.junit.After;
import org.junit.Before;
@@ -69,8 +69,8 @@
public void setUp() throws Exception {
// bring up a cluster of 2
- Configuration conf = new Configuration();
- conf.setLong("dfs.block.size", block_size);
+ Configuration conf = new HdfsConfiguration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
cluster = new MiniDFSCluster(conf, dn_num, true, null);
cluster.waitActive();
}
@@ -98,7 +98,7 @@
// fail the volume
// delete/make non-writable one of the directories (failed volume)
data_fail = new File(dataDir, "data3");
- failedDir = new File(data_fail, "current");
+ failedDir = new File(data_fail, MiniDFSCluster.FINALIZED_DIR_NAME);
if (failedDir.exists() &&
//!FileUtil.fullyDelete(failedDir)
!deteteBlocks(failedDir)
@@ -116,8 +116,8 @@
// make sure a block report is sent
DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
- cluster.getNameNode().blockReport(dn.dnRegistration,
- BlockListAsLongs.convertToArrayLongs(cluster.getBlockReport(1)));
+ long[] bReport = dn.getFSDataset().getBlockReport().getBlockListAsLongs();
+ cluster.getNameNode().blockReport(dn.dnRegistration, bReport);
// verify number of blocks and files...
verify(filename, filesize);
@@ -302,7 +302,7 @@
int total = 0;
for(int i=0; i<dn_num; i++) {
for(int j=1; j<=2; j++) {
- File dir = new File(new File(dataDir, "data"+(2*i+j)), "current");
+ File dir = new File(dataDir, "data"+(2*i+j)+MiniDFSCluster.FINALIZED_DIR_NAME);
if(dir == null) {
System.out.println("dir is null for dn=" + i + " and data_dir=" + j);
continue;
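
Two adjustments stand out in this test: block directories are resolved through MiniDFSCluster.FINALIZED_DIR_NAME, matching the new on-disk layout that separates finalized and rbw replicas, and the manual block report is built from the datanode's own dataset instead of going through BlockListAsLongs.convertToArrayLongs. A minimal sketch of forcing such a report, using only the calls from the hunk above and assuming it sits in the same package as the test (dnRegistration is only package-visible); the wrapper names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class ManualBlockReport {
      // Force a block report for one datanode: the dataset hands back a
      // BlockListAsLongs, which is flattened to long[] before being sent
      // to the namenode.
      static void reportBlocks(MiniDFSCluster cluster, int dnIndex)
          throws IOException {
        DataNode dn = cluster.getDataNodes().get(dnIndex);
        long[] report = dn.getFSDataset().getBlockReport().getBlockListAsLongs();
        cluster.getNameNode().blockReport(dn.dnRegistration, report);
      }
    }
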
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Sat Nov 28 20:05:56 2009
@@ -22,7 +22,6 @@
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.Random;
-import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -30,8 +29,11 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import junit.framework.TestCase;
@@ -42,7 +44,7 @@
*/
public class TestDirectoryScanner extends TestCase {
private static final Log LOG = LogFactory.getLog(TestDirectoryScanner.class);
- private static final Configuration CONF = new Configuration();
+ private static final Configuration CONF = new HdfsConfiguration();
private static final int DEFAULT_GEN_STAMP = 9999;
private MiniDFSCluster cluster;
@@ -52,8 +54,8 @@
private Random r = new Random();
static {
- CONF.setLong("dfs.block.size", 100);
- CONF.setInt("io.bytes.per.checksum", 1);
+ CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
+ CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
CONF.setLong("dfs.heartbeat.interval", 1L);
}
@@ -67,17 +69,16 @@
/** Truncate a block file */
private long truncateBlockFile() throws IOException {
synchronized (fds) {
- for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
- Block b = entry.getKey();
- File f = entry.getValue().getFile();
- File mf = FSDataset.getMetaFile(f, b);
+ for (ReplicaInfo b : fds.volumeMap.replicas()) {
+ File f = b.getBlockFile();
+ File mf = b.getMetaFile();
// Truncate a block file that has a corresponding metadata file
if (f.exists() && f.length() != 0 && mf.exists()) {
FileOutputStream s = new FileOutputStream(f);
FileChannel channel = s.getChannel();
channel.truncate(0);
LOG.info("Truncated block file " + f.getAbsolutePath());
- return entry.getKey().getBlockId();
+ return b.getBlockId();
}
}
}
@@ -87,14 +88,13 @@
/** Delete a block file */
private long deleteBlockFile() {
synchronized(fds) {
- for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
- Block b = entry.getKey();
- File f = entry.getValue().getFile();
- File mf = FSDataset.getMetaFile(f, b);
+ for (ReplicaInfo b : fds.volumeMap.replicas()) {
+ File f = b.getBlockFile();
+ File mf = b.getMetaFile();
// Delete a block file that has corresponding metadata file
if (f.exists() && mf.exists() && f.delete()) {
LOG.info("Deleting block file " + f.getAbsolutePath());
- return entry.getKey().getBlockId();
+ return b.getBlockId();
}
}
}
@@ -104,16 +104,12 @@
/** Delete block meta file */
private long deleteMetaFile() {
synchronized(fds) {
- for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
- Block b = entry.getKey();
- String blkfile = entry.getValue().getFile().getAbsolutePath();
- long genStamp = b.getGenerationStamp();
- String metafile = FSDataset.getMetaFileName(blkfile, genStamp);
- File file = new File(metafile);
+ for (ReplicaInfo b : fds.volumeMap.replicas()) {
+ File file = b.getMetaFile();
// Delete a metadata file
if (file.exists() && file.delete()) {
LOG.info("Deleting metadata file " + file.getAbsolutePath());
- return entry.getKey().getBlockId();
+ return b.getBlockId();
}
}
}
@@ -125,12 +121,7 @@
long id = rand.nextLong();
while (true) {
id = rand.nextLong();
- Block b = new Block(id);
- ReplicaInfo info = null;
- synchronized(fds) {
- info = fds.volumeMap.get(b);
- }
- if (info == null) {
+ if (fds.fetchReplicaInfo(id) == null) {
break;
}
}
@@ -230,7 +221,7 @@
// Test2: block metafile is missing
long blockId = deleteMetaFile();
scan(totalBlocks, 1, 1, 0, 0, 1);
- verifyGenStamp(blockId, Block.GRANDFATHER_GENERATION_STAMP);
+ verifyGenStamp(blockId, GenerationStamp.GRANDFATHER_GENERATION_STAMP);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test3: block file is missing
@@ -245,7 +236,7 @@
blockId = createBlockFile();
totalBlocks++;
scan(totalBlocks, 1, 1, 0, 1, 0);
- verifyAddition(blockId, Block.GRANDFATHER_GENERATION_STAMP, 0);
+ verifyAddition(blockId, GenerationStamp.GRANDFATHER_GENERATION_STAMP, 0);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test5: A metafile exists for which there is no block file and
@@ -324,37 +315,29 @@
}
private void verifyAddition(long blockId, long genStamp, long size) {
- Block memBlock = fds.getBlockKey(blockId);
- assertNotNull(memBlock);
- ReplicaInfo blockInfo;
- synchronized(fds) {
- blockInfo = fds.volumeMap.get(memBlock);
- }
- assertNotNull(blockInfo);
+ final ReplicaInfo replicainfo;
+ replicainfo = fds.fetchReplicaInfo(blockId);
+ assertNotNull(replicainfo);
// Added block has the same file as the one created by the test
File file = new File(getBlockFile(blockId));
- assertEquals(file.getName(), blockInfo.getFile().getName());
+ assertEquals(file.getName(), fds.findBlockFile(blockId).getName());
// Generation stamp is same as that of created file
- assertEquals(genStamp, memBlock.getGenerationStamp());
+ assertEquals(genStamp, replicainfo.getGenerationStamp());
// File size matches
- assertEquals(size, memBlock.getNumBytes());
+ assertEquals(size, replicainfo.getNumBytes());
}
private void verifyDeletion(long blockId) {
// Ensure block does not exist in memory
- synchronized(fds) {
- assertEquals(null, fds.volumeMap.get(new Block(blockId)));
- }
+ assertNull(fds.fetchReplicaInfo(blockId));
}
private void verifyGenStamp(long blockId, long genStamp) {
- Block memBlock;
- synchronized(fds) {
- memBlock = fds.getBlockKey(blockId);
- }
+ final ReplicaInfo memBlock;
+ memBlock = fds.fetchReplicaInfo(blockId);
assertNotNull(memBlock);
assertEquals(genStamp, memBlock.getGenerationStamp());
}
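
The scanner test no longer treats volumeMap as a Block-to-ReplicaInfo map: it iterates volumeMap.replicas(), asks each ReplicaInfo for its own block and meta files, and looks replicas up by id through fetchReplicaInfo. A minimal sketch of that lookup pattern, using the accessors shown above; it assumes the code lives in the datanode package next to the test, since volumeMap and fetchReplicaInfo are not public API, and the helper itself is illustrative:

    import java.io.File;

    class ReplicaScanSketch {
      // Return the block file of the first replica whose meta file is missing,
      // or null if every replica still has one.
      static File firstReplicaMissingMeta(FSDataset fds) {
        synchronized (fds) {
          for (ReplicaInfo r : fds.volumeMap.replicas()) {
            if (r.getBlockFile().exists() && !r.getMetaFile().exists()) {
              return r.getBlockFile();
            }
          }
        }
        return null;
      }
    }
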
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Sat Nov 28 20:05:56 2009
@@ -32,10 +32,16 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.AccessToken;
/** Test if a datanode can correctly handle errors during block read/write*/
public class TestDiskError extends TestCase {
@@ -50,15 +56,15 @@
return;
}
// bring up a cluster of 3
- Configuration conf = new Configuration();
- conf.setLong("dfs.block.size", 512L);
+ Configuration conf = new HdfsConfiguration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
final int dnIndex = 0;
String dataDir = cluster.getDataDirectory();
- File dir1 = new File(new File(dataDir, "data"+(2*dnIndex+1)), "tmp");
- File dir2 = new File(new File(dataDir, "data"+(2*dnIndex+2)), "tmp");
+ File dir1 = new File(new File(dataDir, "data"+(2*dnIndex+1)), "current/rbw");
+ File dir2 = new File(new File(dataDir, "data"+(2*dnIndex+2)), "current/rbw");
try {
// make the data directory of the first datanode to be readonly
assertTrue(dir1.setReadOnly());
@@ -82,7 +88,7 @@
public void testReplicationError() throws Exception {
// bring up a cluster of 1
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
@@ -95,8 +101,8 @@
DFSTestUtil.waitReplication(fs, fileName, (short)1);
// get the block belonged to the created file
- LocatedBlocks blocks = cluster.getNamesystem().getBlockLocations(
- fileName.toString(), 0, (long)fileLen);
+ LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(
+ cluster.getNameNode(), fileName.toString(), 0, (long)fileLen);
assertEquals(blocks.locatedBlockCount(), 1);
LocatedBlock block = blocks.get(0);
@@ -113,17 +119,12 @@
DataOutputStream out = new DataOutputStream(
s.getOutputStream());
- out.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION );
- WRITE_BLOCK.write(out);
- out.writeLong( block.getBlock().getBlockId());
- out.writeLong( block.getBlock().getGenerationStamp() );
- out.writeInt(1);
- out.writeBoolean( false ); // recovery flag
- Text.writeString( out, "" );
- out.writeBoolean(false); // Not sending src node information
- out.writeInt(0);
- AccessToken.DUMMY_TOKEN.write(out);
-
+ Sender.opWriteBlock(out, block.getBlock().getBlockId(),
+ block.getBlock().getGenerationStamp(), 1,
+ BlockConstructionStage.PIPELINE_SETUP_CREATE,
+ 0L, 0L, 0L, "", null, new DatanodeInfo[0],
+ BlockAccessToken.DUMMY_TOKEN);
+
// write check header
out.writeByte( 1 );
out.writeInt( 512 );
@@ -135,8 +136,8 @@
// the temporary block & meta files should be deleted
String dataDir = cluster.getDataDirectory();
- File dir1 = new File(new File(dataDir, "data"+(2*sndNode+1)), "tmp");
- File dir2 = new File(new File(dataDir, "data"+(2*sndNode+2)), "tmp");
+ File dir1 = new File(new File(dataDir, "data"+(2*sndNode+1)), "current/rbw");
+ File dir2 = new File(new File(dataDir, "data"+(2*sndNode+2)), "current/rbw");
while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
Thread.sleep(100);
}
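
The hand-assembled write-block header in this test is replaced by a single DataTransferProtocol.Sender.opWriteBlock call, and the dummy token moves from AccessToken to BlockAccessToken. A minimal sketch of that call, copied from the hunk above and wrapped in a helper purely for illustration:

    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender;
    import org.apache.hadoop.hdfs.security.BlockAccessToken;

    class WriteBlockHeader {
      // Send the op-write-block header for a fresh single-replica pipeline,
      // exactly as the updated test does.
      static void sendHeader(DataOutputStream out, LocatedBlock block)
          throws IOException {
        Sender.opWriteBlock(out, block.getBlock().getBlockId(),
            block.getBlock().getGenerationStamp(), 1,
            BlockConstructionStage.PIPELINE_SETUP_CREATE,
            0L, 0L, 0L, "", null, new DatanodeInfo[0],
            BlockAccessToken.DUMMY_TOKEN);
      }
    }
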
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Sat Nov 28 20:05:56 2009
@@ -17,37 +17,38 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import static org.junit.Assert.assertTrue;
+
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
+import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.junit.Assert;
+import org.junit.Test;
/**
* This tests InterDataNodeProtocol for block handling.
*/
-public class TestInterDatanodeProtocol extends junit.framework.TestCase {
- public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
- DataBlockScanner scanner) throws IOException {
- BlockMetaDataInfo metainfo = idp.getBlockMetaDataInfo(b);
- assertEquals(b.getBlockId(), metainfo.getBlockId());
- assertEquals(b.getNumBytes(), metainfo.getNumBytes());
- if (scanner != null) {
- assertEquals(scanner.getLastScanTime(b),
- metainfo.getLastScanTime());
- }
+public class TestInterDatanodeProtocol {
+ public static void checkMetaInfo(Block b, DataNode dn) throws IOException {
+ Block metainfo = dn.data.getStoredBlock(b.getBlockId());
+ Assert.assertEquals(b.getBlockId(), metainfo.getBlockId());
+ Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
}
public static LocatedBlock getLastLocatedBlock(
@@ -67,8 +68,9 @@
* It verifies the block information from a datanode.
* Then, it updates the block with new information and verifies again.
*/
+ @Test
public void testBlockMetaDataInfo() throws Exception {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
@@ -99,16 +101,168 @@
//verify BlockMetaDataInfo
Block b = locatedblock.getBlock();
InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
- checkMetaInfo(b, idp, datanode.blockScanner);
+ checkMetaInfo(b, datanode);
+ long recoveryId = b.getGenerationStamp() + 1;
+ idp.initReplicaRecovery(
+ new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));
//verify updateBlock
Block newblock = new Block(
b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
- idp.updateBlock(b, newblock, false);
- checkMetaInfo(newblock, idp, datanode.blockScanner);
+ idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
+ checkMetaInfo(newblock, datanode);
}
finally {
if (cluster != null) {cluster.shutdown();}
}
}
+
+ private static ReplicaInfo createReplicaInfo(Block b) {
+ return new FinalizedReplica(b, null, null);
+ }
+
+ private static void assertEquals(ReplicaInfo originalInfo, ReplicaRecoveryInfo recoveryInfo) {
+ Assert.assertEquals(originalInfo.getBlockId(), recoveryInfo.getBlockId());
+ Assert.assertEquals(originalInfo.getGenerationStamp(), recoveryInfo.getGenerationStamp());
+ Assert.assertEquals(originalInfo.getBytesOnDisk(), recoveryInfo.getNumBytes());
+ Assert.assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState());
+ }
+
+ /** Test {@link FSDataset#initReplicaRecovery(ReplicasMap, Block, long)} */
+ @Test
+ public void testInitReplicaRecovery() throws IOException {
+ final long firstblockid = 10000L;
+ final long gs = 7777L;
+ final long length = 22L;
+ final ReplicasMap map = new ReplicasMap();
+ final Block[] blocks = new Block[5];
+ for(int i = 0; i < blocks.length; i++) {
+ blocks[i] = new Block(firstblockid + i, length, gs);
+ map.add(createReplicaInfo(blocks[i]));
+ }
+
+ {
+ //normal case
+ final Block b = blocks[0];
+ final ReplicaInfo originalInfo = map.get(b);
+
+ final long recoveryid = gs + 1;
+ final ReplicaRecoveryInfo recoveryInfo = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid);
+ assertEquals(originalInfo, recoveryInfo);
+
+ final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(b);
+ Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
+ Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
+
+ //recover one more time
+ final long recoveryid2 = gs + 2;
+ final ReplicaRecoveryInfo recoveryInfo2 = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid2);
+ assertEquals(originalInfo, recoveryInfo2);
+
+ final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(b);
+ Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
+ Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
+
+ //case RecoveryInProgressException
+ try {
+ FSDataset.initReplicaRecovery(map, b, recoveryid);
+ Assert.fail();
+ }
+ catch(RecoveryInProgressException ripe) {
+ System.out.println("GOOD: getting " + ripe);
+ }
+ }
+
+ { //replica not found
+ final long recoveryid = gs + 1;
+ final Block b = new Block(firstblockid - 1, length, gs);
+ ReplicaRecoveryInfo r = FSDataset.initReplicaRecovery(map, b, recoveryid);
+ Assert.assertNull("Data-node should not have this replica.", r);
+ }
+
+ { //case "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs
+ final long recoveryid = gs - 1;
+ final Block b = new Block(firstblockid + 1, length, gs);
+ try {
+ FSDataset.initReplicaRecovery(map, b, recoveryid);
+ Assert.fail();
+ }
+ catch(IOException ioe) {
+ System.out.println("GOOD: getting " + ioe);
+ }
+ }
+
+ }
+
+ /** Test {@link FSDataset#updateReplicaUnderRecovery(ReplicaUnderRecovery, long, long)} */
+ @Test
+ public void testUpdateReplicaUnderRecovery() throws IOException {
+ final Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster = null;
+
+ try {
+ cluster = new MiniDFSCluster(conf, 3, true, null);
+ cluster.waitActive();
+
+ //create a file
+ DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+ String filestr = "/foo";
+ Path filepath = new Path(filestr);
+ DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
+
+ //get block info
+ final LocatedBlock locatedblock = getLastLocatedBlock(
+ dfs.getClient().getNamenode(), filestr);
+ final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
+ Assert.assertTrue(datanodeinfo.length > 0);
+
+ //get DataNode and FSDataset objects
+ final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
+ Assert.assertTrue(datanode != null);
+ Assert.assertTrue(datanode.data instanceof FSDataset);
+ final FSDataset fsdataset = (FSDataset)datanode.data;
+
+ //initReplicaRecovery
+ final Block b = locatedblock.getBlock();
+ final long recoveryid = b.getGenerationStamp() + 1;
+ final long newlength = b.getNumBytes() - 1;
+ final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
+ new RecoveringBlock(b, null, recoveryid));
+
+ //check replica
+ final ReplicaInfo replica = fsdataset.fetchReplicaInfo(b.getBlockId());
+ Assert.assertEquals(ReplicaState.RUR, replica.getState());
+
+ //check meta data before update
+ FSDataset.checkReplicaFiles(replica);
+
+ //case "THIS IS NOT SUPPOSED TO HAPPEN"
+ //with (block length) != (stored replica's on disk length).
+ {
+ //create a block with same id and gs but different length.
+ final Block tmp = new Block(rri.getBlockId(), rri.getNumBytes() - 1,
+ rri.getGenerationStamp());
+ try {
+ //update should fail
+ fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
+ Assert.fail();
+ } catch(IOException ioe) {
+ System.out.println("GOOD: getting " + ioe);
+ }
+ }
+
+ //update
+ final ReplicaInfo finalized = fsdataset.updateReplicaUnderRecovery(
+ rri, recoveryid, newlength);
+
+ //check meta data after update
+ FSDataset.checkReplicaFiles(finalized);
+ Assert.assertEquals(b.getBlockId(), finalized.getBlockId());
+ Assert.assertEquals(recoveryid, finalized.getGenerationStamp());
+ Assert.assertEquals(newlength, finalized.getNumBytes());
+
+ } finally {
+ if (cluster != null) cluster.shutdown();
+ }
+ }
}
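
The hunks above exercise the datanode's two-step block recovery path: initReplicaRecovery() marks a replica as under recovery (RUR) and records the recovery id, and updateReplicaUnderRecovery() then finalizes it with the recovery id as the new generation stamp and the agreed-upon length. A minimal sketch of that call sequence, written as it would appear inside a test in the same package; the variable names and the length/generation-stamp arithmetic are illustrative, not part of this commit:

    // Sketch only: assumes fsdataset and b obtained as in testUpdateReplicaUnderRecovery above.
    final long recoveryId = b.getGenerationStamp() + 1;   // must be newer than the current gs
    final long newLength  = b.getNumBytes() - 1;          // length agreed upon during recovery

    // Step 1: move the replica to the RUR state and capture its pre-recovery info.
    ReplicaRecoveryInfo rri =
        fsdataset.initReplicaRecovery(new RecoveringBlock(b, null, recoveryId));

    // Step 2: finalize the replica; the recovery id becomes the new generation stamp.
    ReplicaInfo finalized =
        fsdataset.updateReplicaUnderRecovery(rri, recoveryId, newLength);
    assert finalized.getGenerationStamp() == recoveryId;
    assert finalized.getNumBytes() == newLength;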
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Sat Nov 28 20:05:56 2009
@@ -24,9 +24,12 @@
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
import org.apache.hadoop.util.DataChecksum;
/**
@@ -45,7 +48,7 @@
protected void setUp() throws Exception {
super.setUp();
- conf = new Configuration();
+ conf = new HdfsConfiguration();
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
}
@@ -62,14 +65,19 @@
int bytesAdded = 0;
for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
Block b = new Block(i, 0, 0); // we pass expected len as zero; fsdataset should use the size of the actual data written
- OutputStream dataOut = fsdataset.writeToBlock(b, false).dataOut;
- assertEquals(0, fsdataset.getLength(b));
- for (int j=1; j <= blockIdToLen(i); ++j) {
- dataOut.write(j);
- assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
- bytesAdded++;
+ ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
+ BlockWriteStreams out = bInfo.createStreams(true, 512, 4);
+ try {
+ OutputStream dataOut = out.dataOut;
+ assertEquals(0, fsdataset.getLength(b));
+ for (int j=1; j <= blockIdToLen(i); ++j) {
+ dataOut.write(j);
+ assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
+ bytesAdded++;
+ }
+ } finally {
+ out.close();
}
- dataOut.close();
b.setNumBytes(blockIdToLen(i));
fsdataset.finalizeBlock(b);
assertEquals(blockIdToLen(i), fsdataset.getLength(b));
@@ -139,24 +147,24 @@
public void testGetBlockReport() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
- Block[] blockReport = fsdataset.getBlockReport();
- assertEquals(0, blockReport.length);
+ SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
+ BlockListAsLongs blockReport = fsdataset.getBlockReport();
+ assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
blockReport = fsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
}
}
public void testInjectionEmpty() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
- Block[] blockReport = fsdataset.getBlockReport();
- assertEquals(0, blockReport.length);
+ SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
+ BlockListAsLongs blockReport = fsdataset.getBlockReport();
+ assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
blockReport = fsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
@@ -169,7 +177,7 @@
SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf);
sfsdataset.injectBlocks(blockReport);
blockReport = sfsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
@@ -180,13 +188,13 @@
}
public void testInjectionNonEmpty() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
+ SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
- Block[] blockReport = fsdataset.getBlockReport();
- assertEquals(0, blockReport.length);
+ BlockListAsLongs blockReport = fsdataset.getBlockReport();
+ assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
blockReport = fsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
@@ -201,13 +209,13 @@
// Add some blocks whose block ids do not conflict with
// the ones we are going to inject.
bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS+1);
- Block[] blockReport2 = sfsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ BlockListAsLongs blockReport2 = sfsdataset.getBlockReport();
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
blockReport2 = sfsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
sfsdataset.injectBlocks(blockReport);
blockReport = sfsdataset.getBlockReport();
- assertEquals(NUMBLOCKS*2, blockReport.length);
+ assertEquals(NUMBLOCKS*2, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
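
These TestSimulatedFSDataset hunks make two switches: block reports go from Block[] to BlockListAsLongs (iterable over Block, counted with getNumberOfBlocks()), and block writes go through createRbw() plus createStreams() instead of writeToBlock(). A short sketch of the new write-and-report path, assuming it runs inside a test method that throws IOException; the block id, byte value and checksum parameters are illustrative:

    SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);

    // Write one byte through the replica-in-pipeline streams and finalize the block.
    Block b = new Block(1, 0, 0);                         // expected length 0; dataset tracks bytes written
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
    BlockWriteStreams out = bInfo.createStreams(true, 512, 4);
    try {
      out.dataOut.write(42);                              // bytesOnDisk is tracked by the replica
    } finally {
      out.close();
    }
    b.setNumBytes(1);
    fsdataset.finalizeBlock(b);

    // Block reports are now BlockListAsLongs rather than Block[].
    BlockListAsLongs report = fsdataset.getBlockReport();
    assertEquals(1, report.getNumberOfBlocks());
    for (Block blk : report) {
      assertEquals(1, blk.getNumBytes());
    }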
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Sat Nov 28 20:05:56 2009
@@ -45,6 +45,7 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetworkTopology;
@@ -516,10 +517,11 @@
// dummyActionNoSynch(fileIdx);
nameNode.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
clientName, new EnumSetWritable<CreateFlag>(EnumSet
- .of(CreateFlag.OVERWRITE)), replication, BLOCK_SIZE);
+ .of(CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE);
long end = System.currentTimeMillis();
for(boolean written = !closeUponCreate; !written;
- written = nameNode.complete(fileNames[daemonId][inputIdx], clientName));
+ written = nameNode.complete(fileNames[daemonId][inputIdx],
+ clientName, null));
return end-start;
}
@@ -565,6 +567,7 @@
super.parseArguments(args);
}
+ @SuppressWarnings("deprecation")
void generateInputs(int[] opsPerThread) throws IOException {
// create files using opsPerThread
String[] createArgs = new String[] {
@@ -666,6 +669,7 @@
}
}
+ @SuppressWarnings("deprecation")
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
long start = System.currentTimeMillis();
@@ -685,8 +689,9 @@
NamespaceInfo nsInfo;
DatanodeRegistration dnRegistration;
- Block[] blocks;
+ ArrayList<Block> blocks;
int nrBlocks; // actual number of blocks
+ long[] blockReportList;
/**
* Get data-node in the form
@@ -705,7 +710,7 @@
TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
dnRegistration = new DatanodeRegistration(getNodeName(dnIdx));
- this.blocks = new Block[blockCapacity];
+ this.blocks = new ArrayList<Block>(blockCapacity);
this.nrBlocks = 0;
}
@@ -738,19 +743,24 @@
}
boolean addBlock(Block blk) {
- if(nrBlocks == blocks.length) {
- LOG.debug("Cannot add block: datanode capacity = " + blocks.length);
+ if(nrBlocks == blocks.size()) {
+ LOG.debug("Cannot add block: datanode capacity = " + blocks.size());
return false;
}
- blocks[nrBlocks] = blk;
+ blocks.set(nrBlocks, blk);
nrBlocks++;
return true;
}
void formBlockReport() {
// fill remaining slots with blocks that do not exist
- for(int idx = blocks.length-1; idx >= nrBlocks; idx--)
- blocks[idx] = new Block(blocks.length - idx, 0, 0);
+ for(int idx = blocks.size()-1; idx >= nrBlocks; idx--)
+ blocks.set(idx, new Block(blocks.size() - idx, 0, 0));
+ blockReportList = new BlockListAsLongs(blocks,null).getBlockListAsLongs();
+ }
+
+ long[] getBlockReportList() {
+ return blockReportList;
}
public int compareTo(String name) {
@@ -760,6 +770,7 @@
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
+ @SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
// register datanode
DatanodeCommand[] cmds = nameNode.sendHeartbeat(
@@ -887,10 +898,10 @@
for(int idx=0; idx < nrFiles; idx++) {
String fileName = nameGenerator.getNextFileName("ThroughputBench");
nameNode.create(fileName, FsPermission.getDefault(), clientName,
- new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.OVERWRITE)), replication,
+ new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.OVERWRITE)), true, replication,
BLOCK_SIZE);
- addBlocks(fileName, clientName);
- nameNode.complete(fileName, clientName);
+ Block lastBlock = addBlocks(fileName, clientName);
+ nameNode.complete(fileName, clientName, lastBlock);
}
// prepare block reports
for(int idx=0; idx < nrDatanodes; idx++) {
@@ -898,9 +909,12 @@
}
}
- private void addBlocks(String fileName, String clientName) throws IOException {
+ private Block addBlocks(String fileName, String clientName)
+ throws IOException {
+ Block prevBlock = null;
for(int jdx = 0; jdx < blocksPerFile; jdx++) {
- LocatedBlock loc = nameNode.addBlock(fileName, clientName);
+ LocatedBlock loc = nameNode.addBlock(fileName, clientName, prevBlock);
+ prevBlock = loc.getBlock();
for(DatanodeInfo dnInfo : loc.getLocations()) {
int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
datanodes[dnIdx].addBlock(loc.getBlock());
@@ -910,6 +924,7 @@
new String[] {""});
}
}
+ return prevBlock;
}
/**
@@ -923,8 +938,7 @@
assert daemonId < numThreads : "Wrong daemonId.";
TinyDatanode dn = datanodes[daemonId];
long start = System.currentTimeMillis();
- nameNode.blockReport(dn.dnRegistration,
- BlockListAsLongs.convertToArrayLongs(dn.blocks));
+ nameNode.blockReport(dn.dnRegistration, dn.getBlockReportList());
long end = System.currentTimeMillis();
return end-start;
}
@@ -1184,7 +1198,7 @@
}
public static void main(String[] args) throws Exception {
- runBenchmark(new Configuration(),
+ runBenchmark(new HdfsConfiguration(),
new ArrayList<String>(Arrays.asList(args)));
}
}
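
In the benchmark, each TinyDatanode now keeps its blocks in an ArrayList<Block> and pre-encodes the whole report once, in formBlockReport(), as the long[] produced by BlockListAsLongs, so that the timed blockReport() call pays only the RPC cost. A sketch of that encoding step, with an illustrative capacity and block id; note that ArrayList.set() can only target positions that already exist, so the list has to be populated up to its intended size before individual slots are overwritten:

    int blockCapacity = 10;
    ArrayList<Block> blocks = new ArrayList<Block>(blockCapacity);
    for (int i = 0; i < blockCapacity; i++) {
      blocks.add(new Block(blockCapacity - i, 0, 0));     // placeholder blocks that do not exist
    }
    blocks.set(0, new Block(12345L, 1024L, 1L));          // overwrite a slot with a real block

    // Flatten once; NameNode.blockReport(...) accepts the resulting long[] directly.
    long[] reportAsLongs = new BlockListAsLongs(blocks, null).getBlockListAsLongs();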
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java Sat Nov 28 20:05:56 2009
@@ -28,10 +28,15 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.BlockAccessToken;
+import org.apache.hadoop.hdfs.security.AccessTokenHandler;
+import org.apache.hadoop.hdfs.security.InvalidAccessTokenException;
+import org.apache.hadoop.hdfs.security.SecurityTestUtil;
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.net.NetUtils;
@@ -39,11 +44,8 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.AccessToken;
-import org.apache.hadoop.security.AccessTokenHandler;
-import org.apache.hadoop.security.InvalidAccessTokenException;
-import org.apache.hadoop.security.SecurityTestUtil;
import org.apache.log4j.Level;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import junit.framework.TestCase;
@@ -162,10 +164,10 @@
// get a conf for testing
private static Configuration getConf(int numDataNodes) throws IOException {
- Configuration conf = new Configuration();
- conf.setBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, true);
- conf.setLong("dfs.block.size", BLOCK_SIZE);
- conf.setInt("io.bytes.per.checksum", BLOCK_SIZE);
+ Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+ conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
conf.setInt("dfs.heartbeat.interval", 1);
conf.setInt("dfs.replication", numDataNodes);
conf.setInt("ipc.client.connect.max.retries", 0);
@@ -201,12 +203,12 @@
stm = fs.append(fileToAppend);
int mid = rawData.length - 1;
stm.write(rawData, 1, mid - 1);
- stm.sync();
+ stm.hflush();
/*
* wait till token used in stm expires
*/
- AccessToken token = DFSTestUtil.getAccessToken(stm);
+ BlockAccessToken token = DFSTestUtil.getAccessToken(stm);
while (!SecurityTestUtil.isAccessTokenExpired(token)) {
try {
Thread.sleep(10);
@@ -253,12 +255,12 @@
// write a partial block
int mid = rawData.length - 1;
stm.write(rawData, 0, mid);
- stm.sync();
+ stm.hflush();
/*
* wait till token used in stm expires
*/
- AccessToken token = DFSTestUtil.getAccessToken(stm);
+ BlockAccessToken token = DFSTestUtil.getAccessToken(stm);
while (!SecurityTestUtil.isAccessTokenExpired(token)) {
try {
Thread.sleep(10);
@@ -320,7 +322,7 @@
List<LocatedBlock> locatedBlocks = dfsclient.getNamenode().getBlockLocations(
FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
LocatedBlock lblock = locatedBlocks.get(0); // first block
- AccessToken myToken = lblock.getAccessToken();
+ BlockAccessToken myToken = lblock.getAccessToken();
// verify token is not expired
assertFalse(SecurityTestUtil.isAccessTokenExpired(myToken));
// read with valid token, should succeed
@@ -524,8 +526,8 @@
* Integration testing of access token, involving NN, DN, and Balancer
*/
public void testEnd2End() throws Exception {
- Configuration conf = new Configuration();
- conf.setBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, true);
+ Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
new TestBalancer().integrationTest(conf);
}
}
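
The TestAccessTokenWithDFS changes relocate the token classes to org.apache.hadoop.hdfs.security (AccessToken becomes BlockAccessToken), replace the deprecated sync() with hflush(), and move configuration onto DFSConfigKeys constants. A sketch of the wait-for-token-expiry pattern used in the append and partial-block cases, assuming fs, path, rawData and mid are set up as in the test; only those names are assumed, the calls are the ones visible in the hunks:

    FSDataOutputStream stm = fs.create(path);
    stm.write(rawData, 0, mid);                 // write a partial block
    stm.hflush();                               // replaces the deprecated stm.sync()

    // Wait until the access token held by the open stream has expired.
    BlockAccessToken token = DFSTestUtil.getAccessToken(stm);
    while (!SecurityTestUtil.isAccessTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
        // keep waiting, as the test does
      }
    }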
Propchange: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java
------------------------------------------------------------------------------
svn:mime-type = text/plain
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Sat Nov 28 20:05:56 2009
@@ -26,9 +26,11 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import junit.framework.TestCase;
@@ -80,10 +82,10 @@
BackupNode startBackupNode(Configuration conf,
StartupOption t, int i) throws IOException {
- Configuration c = new Configuration(conf);
+ Configuration c = new HdfsConfiguration(conf);
String dirs = getBackupNodeDir(t, i);
- c.set("dfs.name.dir", dirs);
- c.set("dfs.name.edits.dir", "${dfs.name.dir}");
+ c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
+ c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
return (BackupNode)NameNode.createNameNode(new String[]{t.getName()}, c);
}
@@ -105,7 +107,7 @@
Path file1 = new Path("checkpoint.dat");
Path file2 = new Path("checkpoint2.dat");
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
short replication = (short)conf.getInt("dfs.replication", 3);
conf.set("dfs.blockreport.initialDelay", "0");
conf.setInt("dfs.datanode.scan.period.hours", -1); // disable block scanner
@@ -205,7 +207,7 @@
* @throws IOException
*/
public void testBackupRegistration() throws IOException {
- Configuration conf1 = new Configuration();
+ Configuration conf1 = new HdfsConfiguration();
Configuration conf2 = null;
MiniDFSCluster cluster = null;
BackupNode backup1 = null;
@@ -213,13 +215,13 @@
try {
// start name-node and backup node 1
cluster = new MiniDFSCluster(conf1, 0, true, null);
- conf1.set("dfs.backup.address", "0.0.0.0:7770");
- conf1.set("dfs.backup.http.address", "0.0.0.0:7775");
+ conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7770");
+ conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7775");
backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1);
// try to start backup node 2
- conf2 = new Configuration(conf1);
- conf2.set("dfs.backup.address", "0.0.0.0:7771");
- conf2.set("dfs.backup.http.address", "0.0.0.0:7776");
+ conf2 = new HdfsConfiguration(conf1);
+ conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7771");
+ conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7776");
try {
backup2 = startBackupNode(conf2, StartupOption.BACKUP, 2);
backup2.stop();
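
TestBackupNode now clones per-node settings with HdfsConfiguration(conf) and addresses the backup node through DFSConfigKeys constants. A sketch of starting a backup node that way, assuming a mini-cluster (and thus an active name-node) is already running as in testBackupRegistration; the ports and the name directory are illustrative:

    Configuration c = new HdfsConfiguration(conf);
    c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "/tmp/backup-name-1");       // illustrative path
    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7770");
    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7775");
    BackupNode backup = (BackupNode) NameNode.createNameNode(
        new String[] { StartupOption.BACKUP.getName() }, c);
    // exercise the backup node, then shut it down
    backup.stop();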
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Sat Nov 28 20:05:56 2009
@@ -27,6 +27,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.common.Storage;
@@ -40,6 +41,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
/**
* This class tests the creation and validation of a checkpoint.
@@ -407,10 +409,10 @@
@SuppressWarnings("deprecation")
void testStartup(Configuration conf) throws IOException {
System.out.println("Startup of the name-node in the checkpoint directory.");
- String primaryDirs = conf.get("dfs.name.dir");
- String primaryEditsDirs = conf.get("dfs.name.edits.dir");
- String checkpointDirs = conf.get("fs.checkpoint.dir");
- String checkpointEditsDirs = conf.get("fs.checkpoint.edits.dir");
+ String primaryDirs = conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+ String primaryEditsDirs = conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
+ String checkpointDirs = conf.get(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
+ String checkpointEditsDirs = conf.get(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
NameNode nn = startNameNode(conf, checkpointDirs, checkpointEditsDirs,
StartupOption.REGULAR);
@@ -555,10 +557,10 @@
String imageDirs,
String editsDirs,
StartupOption start) throws IOException {
- conf.set("fs.default.name", "hdfs://localhost:0");
- conf.set("dfs.http.address", "0.0.0.0:0");
- conf.set("dfs.name.dir", imageDirs);
- conf.set("dfs.name.edits.dir", editsDirs);
+ conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:0");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, imageDirs);
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDirs);
String[] args = new String[]{start.getName()};
NameNode nn = NameNode.createNameNode(args, conf);
assertTrue(nn.isInSafeMode());
@@ -570,7 +572,7 @@
@SuppressWarnings("deprecation")
SecondaryNameNode startSecondaryNameNode(Configuration conf
) throws IOException {
- conf.set("dfs.secondary.http.address", "0.0.0.0:0");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
return new SecondaryNameNode(conf);
}
@@ -583,8 +585,8 @@
Path file2 = new Path("checkpoint2.dat");
Collection<URI> namedirs = null;
- Configuration conf = new Configuration();
- conf.set("dfs.secondary.http.address", "0.0.0.0:0");
+ Configuration conf = new HdfsConfiguration();
+ conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
replication = (short)conf.getInt("dfs.replication", 3);
MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
cluster.waitActive();
@@ -677,7 +679,7 @@
MiniDFSCluster cluster = null;
DistributedFileSystem fs = null;
try {
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
cluster.waitActive();
fs = (DistributedFileSystem)(cluster.getFileSystem());
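
TestCheckpoint now reads and writes all of its directory and address settings through DFSConfigKeys, both when restarting the primary from the checkpoint directories and when launching the secondary name-node on an ephemeral HTTP port. A sketch of the new-style secondary start-up, assuming conf is the HdfsConfiguration the running MiniDFSCluster was started with (so it already points at the primary); the checkpoint directories are illustrative, and the doCheckpoint()/shutdown() calls are assumed from the secondary name-node API rather than taken from these hunks:

    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, "/tmp/chkpt");             // illustrative path
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, "/tmp/chkpt-edits"); // illustrative path
    SecondaryNameNode secondary = new SecondaryNameNode(conf);
    secondary.doCheckpoint();      // pull image+edits from the primary and push a new image back
    secondary.shutdown();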
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java?rev=885143&r1=885142&r2=885143&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java Sat Nov 28 20:05:56 2009
@@ -1,6 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -16,7 +34,7 @@
* can schedule invalidate work correctly
*/
public void testCompInvalidate() throws Exception {
- final Configuration conf = new Configuration();
+ final Configuration conf = new HdfsConfiguration();
final int NUM_OF_DATANODES = 3;
final MiniDFSCluster cluster = new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);
try {