Posted to hdfs-commits@hadoop.apache.org by st...@apache.org on 2010/01/08 15:52:48 UTC
svn commit: r897222 [3/3] - in /hadoop/hdfs/branches/HDFS-326: ./
.eclipse.templates/ ivy/ src/c++/libhdfs/ src/contrib/
src/contrib/fuse-dfs/ src/contrib/fuse-dfs/src/ src/contrib/hdfsproxy/
src/docs/src/documentation/content/xdocs/ src/java/ src/java...
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Jan 8 14:52:46 2010
@@ -37,6 +37,7 @@
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
@@ -258,10 +259,12 @@
FileSystem.setDefaultUri(conf, "hdfs://localhost:"+ Integer.toString(nameNodePort));
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
if (manageNameDfsDirs) {
- conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(base_dir, "name1").getPath()+","+
- new File(base_dir, "name2").getPath());
- conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, new File(base_dir, "namesecondary1").
- getPath()+"," + new File(base_dir, "namesecondary2").getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(base_dir, "name1"))+","+
+ fileAsURI(new File(base_dir, "name2")));
+ conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+ fileAsURI(new File(base_dir, "namesecondary1"))+","+
+ fileAsURI(new File(base_dir, "namesecondary2")));
}
int replication = conf.getInt("dfs.replication", 3);
@@ -716,8 +719,8 @@
/**
* Restart a datanode, on the same port if requested
- * @param dnprop, the datanode to restart
- * @param keepPort, whether to use the same port
+ * @param dnprop the datanode to restart
+ * @param keepPort whether to use the same port
* @return true if restarting is successful
* @throws IOException
*/
@@ -808,6 +811,28 @@
public FileSystem getFileSystem() throws IOException {
return FileSystem.get(conf);
}
+
+
+ /**
+ * Get another FileSystem instance that is different from FileSystem.get(conf).
+ * This simulates different threads working on different FileSystem instances.
+ */
+ public FileSystem getNewFileSystemInstance() throws IOException {
+ return FileSystem.newInstance(conf);
+ }
+
+ /**
+ * @return a {@link HftpFileSystem} object.
+ */
+ public HftpFileSystem getHftpFileSystem() throws IOException {
+ final String str = "hftp://"
+ + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+ try {
+ return (HftpFileSystem)FileSystem.get(new URI(str), conf);
+ } catch (URISyntaxException e) {
+ throw new IOException(e);
+ }
+ }
/**
* Get the directories where the namenode stores its image.
@@ -957,7 +982,6 @@
/**
* Access to the data directory used for Datanodes
- * @throws IOException
*/
public String getDataDirectory() {
return data_dir.getAbsolutePath();
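
Note on the two accessors added above: FileSystem.get(conf) returns the process-wide cached instance for a given URI and configuration, while FileSystem.newInstance(conf) bypasses the cache, which is what lets the busy-block test further down give each reader thread its own client. A minimal sketch of the distinction (the class name is hypothetical; assumes the default file system in conf is reachable):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FsInstanceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem cached1 = FileSystem.get(conf);       // cached, shared instance
        FileSystem cached2 = FileSystem.get(conf);
        System.out.println(cached1 == cached2);          // true: same object
        FileSystem fresh = FileSystem.newInstance(conf); // independent instance
        System.out.println(fresh == cached1);            // false
        fresh.close(); // closes only this instance, not the shared cached one
      }
    }
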
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Fri Jan 8 14:52:46 2010
@@ -20,6 +20,8 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.Arrays;
+import java.security.MessageDigest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -137,17 +139,8 @@
return versionID;
}
- public LocatedBlock addBlock(String src, String clientName,
- Block previous) throws IOException {
-
- return addBlock(src, clientName, previous, null);
- }
-
- public LocatedBlock addBlock(String src,
- String clientName,
- Block previous,
- DatanodeInfo[] excludedNode
- ) throws IOException
+ public LocatedBlock addBlock(String src, String clientName, Block previous)
+ throws IOException
{
num_calls++;
if (num_calls > num_calls_allowed) {
@@ -258,4 +251,242 @@
}
}
+
+ /**
+ * Test that a DFSClient waits for a random time before retrying on busy blocks.
+ */
+ public void testDFSClientRetriesOnBusyBlocks() throws IOException {
+
+ System.out.println("Testing DFSClient random waiting on busy blocks.");
+
+ //
+ // Test settings:
+ //
+ // xcievers fileLen #clients timeWindow #retries
+ // ======== ======= ======== ========== ========
+ // Test 1: 2 6 MB 50 300 ms 3
+ // Test 2: 2 6 MB 50 300 ms 50
+ // Test 3: 2 6 MB 50 1000 ms 3
+ // Test 4: 2 6 MB 50 1000 ms 50
+ //
+ // Minimum xcievers is 2 since 1 thread is reserved for registry.
+ // Test 1 & 3 may fail since # retries is low.
+ // Test 2 & 4 should never fail since (#threads)/(xcievers-1) retries is an
+ // upper bound that guarantees no BlockMissingException is thrown; here
+ // 50/(2-1) = 50, exactly the retry count those two tests use.
+ //
+ int xcievers = 2;
+ int fileLen = 6*1024*1024;
+ int threads = 50;
+ int retries = 3;
+ int timeWin = 300;
+
+ //
+ // Test 1: might fail
+ //
+ long timestamp = System.currentTimeMillis();
+ boolean pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
+ long timestamp2 = System.currentTimeMillis();
+ if ( pass ) {
+ LOG.info("Test 1 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
+ } else {
+ LOG.warn("Test 1 failed, but relax. Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
+ }
+
+ //
+ // Test 2: should never fail
+ //
+ retries = 50;
+ timestamp = System.currentTimeMillis();
+ pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
+ timestamp2 = System.currentTimeMillis();
+ assertTrue("Something wrong! Test 2 got Exception with maxmum retries!", pass);
+ LOG.info("Test 2 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
+
+ //
+ // Test 3: might fail
+ //
+ retries = 3;
+ timeWin = 1000;
+ timestamp = System.currentTimeMillis();
+ pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
+ timestamp2 = System.currentTimeMillis();
+ if ( pass ) {
+ LOG.info("Test 3 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
+ } else {
+ LOG.warn("Test 3 failed, but relax. Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
+ }
+
+ //
+ // Test 4: should never fail
+ //
+ retries = 50;
+ timeWin = 1000;
+ timestamp = System.currentTimeMillis();
+ pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
+ timestamp2 = System.currentTimeMillis();
+ assertTrue("Something wrong! Test 4 got Exception with maxmum retries!", pass);
+ LOG.info("Test 4 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
+ }
+
+ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, int retries)
+ throws IOException {
+
+ boolean ret = true;
+ short replicationFactor = 1;
+ long blockSize = 128*1024*1024; // DFS block size
+ int bufferSize = 4096;
+
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt("dfs.datanode.max.xcievers",xcievers);
+ conf.setInt("dfs.client.max.block.acquire.failures", retries);
+ conf.setInt("dfs.client.retry.window.base", timeWin);
+
+ MiniDFSCluster cluster = new MiniDFSCluster(conf, replicationFactor, true, null);
+ cluster.waitActive();
+
+ FileSystem fs = cluster.getFileSystem();
+ Path file1 = new Path("test_data.dat");
+ file1 = file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()); // make URI hdfs://
+
+ try {
+
+ FSDataOutputStream stm = fs.create(file1, true,
+ bufferSize,
+ replicationFactor,
+ blockSize);
+
+ // verify that file exists in FS namespace
+ assertTrue(file1 + " should be a file",
+ fs.getFileStatus(file1).isDir() == false);
+ System.out.println("Path : \"" + file1 + "\"");
+ LOG.info("Path : \"" + file1 + "\"");
+
+ // write 1 block to file
+ byte[] buffer = AppendTestUtil.randomBytes(System.currentTimeMillis(), fileLen);
+ stm.write(buffer, 0, fileLen);
+ stm.close();
+
+ // verify that file size has changed to the full size
+ long len = fs.getFileStatus(file1).getLen();
+
+ assertTrue(file1 + " should be of size " + fileLen +
+ " but found to be of size " + len,
+ len == fileLen);
+
+ // read back and check data integrity
+ byte[] read_buf = new byte[fileLen];
+ InputStream in = fs.open(file1, fileLen);
+ IOUtils.readFully(in, read_buf, 0, fileLen);
+ assert(Arrays.equals(buffer, read_buf));
+ in.close();
+ read_buf = null; // GC it if needed
+
+ // compute digest of the content to reduce memory usage
+ MessageDigest m = MessageDigest.getInstance("SHA");
+ m.update(buffer, 0, fileLen);
+ byte[] hash_sha = m.digest();
+
+ // spawn multiple threads, all trying to access the same block
+ Thread[] readers = new Thread[threads];
+ Counter counter = new Counter(0);
+ for (int i = 0; i < threads; ++i ) {
+ DFSClientReader reader = new DFSClientReader(file1, cluster, hash_sha, fileLen, counter);
+ readers[i] = new Thread(reader);
+ readers[i].start();
+ }
+
+ // wait for them to exit
+ for (int i = 0; i < threads; ++i ) {
+ readers[i].join();
+ }
+ if ( counter.get() == threads )
+ ret = true;
+ else
+ ret = false;
+
+ } catch (InterruptedException e) {
+ System.out.println("Thread got InterruptedException.");
+ e.printStackTrace();
+ ret = false;
+ } catch (Exception e) {
+ e.printStackTrace();
+ ret = false;
+ } finally {
+ fs.delete(file1, false);
+ cluster.shutdown();
+ }
+ return ret;
+ }
+
+ class DFSClientReader implements Runnable {
+
+ DFSClient client;
+ Configuration conf;
+ byte[] expected_sha;
+ FileSystem fs;
+ Path filePath;
+ MiniDFSCluster cluster;
+ int len;
+ Counter counter;
+
+ DFSClientReader(Path file, MiniDFSCluster cluster, byte[] hash_sha, int fileLen, Counter cnt) {
+ filePath = file;
+ this.cluster = cluster;
+ counter = cnt;
+ len = fileLen;
+ conf = new HdfsConfiguration();
+ expected_sha = hash_sha;
+ try {
+ cluster.waitActive();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public void run() {
+ try {
+ fs = cluster.getNewFileSystemInstance();
+
+ int bufferSize = len;
+ byte[] buf = new byte[bufferSize];
+
+ InputStream in = fs.open(filePath, bufferSize);
+
+ // read the whole file
+ IOUtils.readFully(in, buf, 0, bufferSize);
+
+ // compare with the expected input
+ MessageDigest m = MessageDigest.getInstance("SHA");
+ m.update(buf, 0, bufferSize);
+ byte[] hash_sha = m.digest();
+
+ buf = null; // GC if needed since there may be too many threads
+ in.close();
+ fs.close();
+
+ assertTrue("hashed keys are not the same size",
+ hash_sha.length == expected_sha.length);
+
+ assertTrue("hashed keys are not equal",
+ Arrays.equals(hash_sha, expected_sha));
+
+ counter.inc(); // count this thread as successful
+
+ LOG.info("Thread correctly read the block.");
+
+ } catch (BlockMissingException e) {
+ LOG.info("Bad - BlockMissingException is caught.");
+ e.printStackTrace();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ class Counter {
+ int counter;
+ Counter(int n) { counter = n; }
+ public synchronized void inc() { ++counter; }
+ public int get() { return counter; }
+ }
}
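
Background for the two client knobs this test sets: dfs.client.max.block.acquire.failures caps the number of retry rounds, and dfs.client.retry.window.base sizes the randomized wait between rounds. A sketch of the backoff shape, assuming the client waits roughly base*failures plus a uniform draw from a window that widens each round (the exact formula is internal to the DFS client):

    import java.util.Random;

    public class RetryWindowSketch {
      public static void main(String[] args) {
        int timeWindow = 300; // ms; dfs.client.retry.window.base in Tests 1 & 2
        Random r = new Random();
        for (int failures = 0; failures < 3; failures++) {
          // Assumed policy: a grace period for past rounds plus a random
          // wait from a window that grows with the failure count.
          double waitMs = timeWindow * failures
              + timeWindow * (failures + 1) * r.nextDouble();
          System.out.printf("failures=%d -> wait %.0f ms%n", failures, waitMs);
        }
      }
    }

Spreading the retries this way is what keeps 50 readers from hammering the two xceiver threads in lock-step.
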
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java Fri Jan 8 14:52:46 2010
@@ -19,22 +19,29 @@
import java.io.IOException;
import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Random;
import javax.security.auth.login.LoginException;
-import org.apache.commons.logging.*;
+import junit.framework.AssertionFailedError;
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UnixUserGroupInformation;
-import junit.framework.AssertionFailedError;
-import junit.framework.TestCase;
-
/** Unit tests for permission */
public class TestDFSPermission extends TestCase {
public static final Log LOG = LogFactory.getLog(TestDFSPermission.class);
@@ -81,6 +88,13 @@
// explicitly turn on permission checking
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+ // create fake mapping for the groups
+ Map<String, String[]> u2g_map = new HashMap<String, String[]> (3);
+ u2g_map.put(USER1_NAME, new String[] {GROUP1_NAME, GROUP2_NAME });
+ u2g_map.put(USER2_NAME, new String[] {GROUP2_NAME, GROUP3_NAME });
+ u2g_map.put(USER3_NAME, new String[] {GROUP3_NAME, GROUP4_NAME });
+ DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
+
// Initiate all four users
SUPERUSER = UnixUserGroupInformation.login(conf);
USER1 = new UnixUserGroupInformation(USER1_NAME, new String[] {
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Fri Jan 8 14:52:46 2010
@@ -19,6 +19,8 @@
import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.READ_BLOCK;
import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.WRITE_BLOCK;
+import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
+import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.ERROR;
import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.SUCCESS;
@@ -157,9 +159,8 @@
//ok finally write a block with 0 len
SUCCESS.write(recvOut);
- Text.writeString(recvOut, ""); // first bad node
- recvOut.writeLong(100); // sequencenumber
- SUCCESS.write(recvOut);
+ Text.writeString(recvOut, "");
+ new PipelineAck(100, new Status[]{SUCCESS}).write(recvOut);
sendRecvData(description, false);
}
@@ -381,9 +382,8 @@
// bad data chunk length
sendOut.writeInt(-1-random.nextInt(oneMil));
SUCCESS.write(recvOut);
- Text.writeString(recvOut, ""); // first bad node
- recvOut.writeLong(100); // sequencenumber
- ERROR.write(recvOut);
+ Text.writeString(recvOut, "");
+ new PipelineAck(100, new Status[]{ERROR}).write(recvOut);
sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId,
true);
@@ -406,9 +406,8 @@
sendOut.flush();
//ok finally write a block with 0 len
SUCCESS.write(recvOut);
- Text.writeString(recvOut, ""); // first bad node
- recvOut.writeLong(100); // sequencenumber
- SUCCESS.write(recvOut);
+ Text.writeString(recvOut, "");
+ new PipelineAck(100, new Status[]{SUCCESS}).write(recvOut);
sendRecvData("Writing a zero len block blockid " + newBlockId, false);
/* Test OP_READ_BLOCK */
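
All three hunks above make the same substitution: instead of hand-writing the ack fields (a long sequence number followed by a per-datanode status), the mock datanode reply is built through the new PipelineAck helper, so the test no longer encodes the wire layout itself. A sketch of the pattern, reusing the static imports this test already declares:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.io.Text;
    import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
    import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
    import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.SUCCESS;

    public class AckSketch {
      public static void main(String[] args) throws Exception {
        DataOutputStream recvOut =
            new DataOutputStream(new ByteArrayOutputStream());
        Text.writeString(recvOut, "");  // no first bad node
        // seqno 100 plus one reply per datanode in the pipeline:
        new PipelineAck(100, new Status[]{SUCCESS}).write(recvOut);
      }
    }
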
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java Fri Jan 8 14:52:46 2010
@@ -212,19 +212,26 @@
private void testChecker(FileSystem fileSys, boolean readCS)
throws Exception {
Path file = new Path("try.dat");
- if( readCS ) {
- writeFile(fileSys, file);
- } else {
- writeFile(fileSys, file);
+ writeFile(fileSys, file);
+
+ try {
+ if (!readCS) {
+ fileSys.setVerifyChecksum(false);
+ }
+
+ stm = fileSys.open(file);
+ checkReadAndGetPos();
+ checkSeek();
+ checkSkip();
+ //checkMark
+ assertFalse(stm.markSupported());
+ stm.close();
+ } finally {
+ if (!readCS) {
+ fileSys.setVerifyChecksum(true);
+ }
+ cleanupFile(fileSys, file);
}
- stm = fileSys.open(file);
- checkReadAndGetPos();
- checkSeek();
- checkSkip();
- //checkMark
- assertFalse(stm.markSupported());
- stm.close();
- cleanupFile(fileSys, file);
}
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Fri Jan 8 14:52:46 2010
@@ -17,22 +17,31 @@
*/
package org.apache.hadoop.hdfs;
-import junit.framework.TestCase;
-import java.io.*;
+import java.io.IOException;
import java.util.Random;
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.log4j.Level;
/**
* This class tests the FileStatus API.
*/
public class TestFileStatus extends TestCase {
+ {
+ ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
+ }
+
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
static final int fileSize = 16384;
@@ -64,6 +73,7 @@
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
FileSystem fs = cluster.getFileSystem();
+ final HftpFileSystem hftpfs = cluster.getHftpFileSystem();
final DFSClient dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
try {
@@ -111,8 +121,10 @@
assertTrue(fs.exists(dir));
assertTrue(dir + " should be a directory",
fs.getFileStatus(path).isDir() == true);
- assertTrue(dir + " should be zero size ",
- fs.getContentSummary(dir).getLength() == 0);
+ assertEquals(dir + " should be zero size ",
+ 0, fs.getContentSummary(dir).getLength());
+ assertEquals(dir + " should be zero size using hftp",
+ 0, hftpfs.getContentSummary(dir).getLength());
assertTrue(dir + " should be zero size ",
fs.getFileStatus(dir).getLen() == 0);
System.out.println("Dir : \"" + dir + "\"");
@@ -139,8 +151,11 @@
// verify that the size of the directory increased by the size
// of the two files
- assertTrue(dir + " size should be " + (blockSize/2),
- blockSize/2 == fs.getContentSummary(dir).getLength());
+ final int expected = blockSize/2;
+ assertEquals(dir + " size should be " + expected,
+ expected, fs.getContentSummary(dir).getLength());
+ assertEquals(dir + " size should be " + expected + " using hftp",
+ expected, hftpfs.getContentSummary(dir).getLength());
} finally {
fs.close();
cluster.shutdown();
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Fri Jan 8 14:52:46 2010
@@ -28,6 +28,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.BackupNode;
@@ -92,7 +93,8 @@
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
config = new HdfsConfiguration();
- config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name1").getPath());
+ config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "name1")).toString());
FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
NameNode.format(config);
@@ -120,7 +122,8 @@
assertTrue(currDir2.mkdirs());
assertTrue(currDir3.mkdirs());
- conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name2").getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "name2")).toString());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
// Start BackupNode
@@ -246,7 +249,8 @@
// start another namenode on the same port
Configuration conf2 = new HdfsConfiguration(config);
- conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name2").getPath());
+ conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "name2")).toString());
NameNode.format(conf2);
boolean started = canStartNameNode(conf2);
assertFalse(started); // should fail
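
Like the MiniDFSCluster change earlier in this commit, the name-dir settings here move from bare filesystem paths to file: URIs, since the name and edits directory keys are now parsed as URIs. A sketch of the difference, assuming fileAsURI simply canonicalizes the file and converts it to a file-scheme URI (the directory below is hypothetical):

    import java.io.File;
    import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;

    public class NameDirSketch {
      public static void main(String[] args) throws Exception {
        File dir = new File("/tmp/hdfs", "name1");
        System.out.println(dir.getPath());   // /tmp/hdfs/name1       (old value)
        System.out.println(fileAsURI(dir));  // file:/tmp/hdfs/name1  (new value)
      }
    }
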
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java Fri Jan 8 14:52:46 2010
@@ -17,10 +17,14 @@
*/
package org.apache.hadoop.hdfs;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.log4j.Level;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
@@ -30,6 +34,11 @@
/** Class contains a set of tests to verify the correctness of
* newly introduced {@link FSDataOutputStream#hflush()} method */
public class TestHFlush {
+ {
+ ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+ }
+
private final String fName = "hflushtest.dat";
/** The test uses {@link #doTheJob(Configuration, String, long, short)
@@ -143,4 +152,55 @@
actual[idx] = 0;
}
}
+
+ /** This creates a slow writer and checks to see
+ * if pipeline heartbeats work fine
+ */
+ @Test
+ public void testPipelineHeartbeat() throws Exception {
+ final int DATANODE_NUM = 2;
+ final int fileLen = 6;
+ Configuration conf = new HdfsConfiguration();
+ final int timeout = 2000;
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+ timeout);
+
+ final Path p = new Path("/pipelineHeartbeat/foo");
+ System.out.println("p=" + p);
+
+ MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+ DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();
+
+ byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
+
+ // create a new file.
+ FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);
+
+ stm.write(fileContents, 0, 1);
+ Thread.sleep(timeout);
+ stm.hflush();
+ System.out.println("Wrote 1 byte and hflush " + p);
+
+ // write another byte
+ Thread.sleep(timeout);
+ stm.write(fileContents, 1, 1);
+ stm.hflush();
+
+ stm.write(fileContents, 2, 1);
+ Thread.sleep(timeout);
+ stm.hflush();
+
+ stm.write(fileContents, 3, 1);
+ Thread.sleep(timeout);
+ stm.write(fileContents, 4, 1);
+ stm.hflush();
+
+ stm.write(fileContents, 5, 1);
+ Thread.sleep(timeout);
+ stm.close();
+
+ // verify that the entire file is good
+ AppendTestUtil.checkFullFile(fs, p, fileLen,
+ fileContents, "Failed to slowly write to a file");
+ }
}
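
For context on the contract behind both tests in this file: once hflush() returns, everything written so far has been pushed to every datanode in the pipeline and is visible to new readers, even though the file is still open; the new test only adds long idle gaps to verify the pipeline stays alive across the socket timeout. A minimal sketch of the visibility guarantee (class, method and path are hypothetical; fs would come from a running cluster):

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class HflushSketch {
      static void demo(FileSystem fs) throws Exception {
        Path p = new Path("/hflushDemo");
        FSDataOutputStream out = fs.create(p);
        out.write(new byte[]{1, 2, 3});
        out.hflush();                      // bytes now visible to new readers
        FSDataInputStream in = fs.open(p); // open while the writer is still open
        byte[] got = new byte[3];
        in.readFully(got);                 // reads the hflushed bytes
        in.close();
        out.close();
      }
    }
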
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Fri Jan 8 14:52:46 2010
@@ -18,6 +18,8 @@
package org.apache.hadoop.hdfs;
import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
@@ -29,7 +31,6 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
@@ -45,6 +46,9 @@
static final int FILE_SIZE = 1024*16;
static final short REPLICATION_NUM = (short)3;
static byte[] buffer = new byte[FILE_SIZE];
+
+ static private String fakeUsername = "fakeUser1";
+ static private String fakeGroup = "supergroup";
public void testBlockSynchronization() throws Exception {
final long softLease = 1000;
@@ -56,6 +60,13 @@
conf.setInt("dfs.heartbeat.interval", 1);
// conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 16);
+ // create fake mapping user to group and set it to the conf
+ // NOTE. this must be done at the beginning, before first call to mapping
+ // functions
+ Map<String, String []> u2g_map = new HashMap<String, String []>(1);
+ u2g_map.put(fakeUsername, new String[] {fakeGroup});
+ DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
+
MiniDFSCluster cluster = null;
DistributedFileSystem dfs = null;
byte[] actual = new byte[FILE_SIZE];
@@ -93,10 +104,9 @@
// should fail but will trigger lease recovery.
{
Configuration conf2 = new HdfsConfiguration(conf);
- String username = UserGroupInformation.getCurrentUGI().getUserName()+"_1";
UnixUserGroupInformation.saveToConf(conf2,
UnixUserGroupInformation.UGI_PROPERTY_NAME,
- new UnixUserGroupInformation(username, new String[]{"supergroup"}));
+ new UnixUserGroupInformation(fakeUsername, new String[]{fakeGroup}));
FileSystem dfs2 = FileSystem.get(conf2);
boolean done = false;
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java Fri Jan 8 14:52:46 2010
@@ -60,7 +60,7 @@
final String str = "hftp://"
+ CONF.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
hftpURI = new URI(str);
- hftpFs = (HftpFileSystem) FileSystem.newInstance(hftpURI, CONF);
+ hftpFs = cluster.getHftpFileSystem();
}
@AfterClass
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java Fri Jan 8 14:52:46 2010
@@ -17,23 +17,24 @@
*/
package org.apache.hadoop.hdfs;
+import java.io.IOException;
+import java.io.OutputStream;
+
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
/** Test reading from hdfs while a file is being written. */
public class TestReadWhileWriting {
{
@@ -44,6 +45,7 @@
private static final String DIR = "/"
+ TestReadWhileWriting.class.getSimpleName() + "/";
private static final int BLOCK_SIZE = 8192;
+ private static final long LEASE_LIMIT = 500;
/** Test reading while writing. */
@Test
@@ -51,13 +53,13 @@
final Configuration conf = new HdfsConfiguration();
//enable append
conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
+ conf.setLong("dfs.heartbeat.interval", 1);
// create cluster
final MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
try {
- //change the lease soft limit to 1 second.
- final long leaseSoftLimit = 1000;
- cluster.setLeasePeriod(leaseSoftLimit, FSConstants.LEASE_HARDLIMIT_PERIOD);
+ //change the lease limits.
+ cluster.setLeasePeriod(LEASE_LIMIT, LEASE_LIMIT);
//wait for the cluster
cluster.waitActive();
@@ -81,26 +83,44 @@
//b. On another machine M2, open file and verify that the half-block
// of data can be read successfully.
checkFile(p, half, conf);
+ AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
+ ((DistributedFileSystem)fs).dfs.leasechecker.interruptAndJoin();
- /* TODO: enable the following when append is done.
//c. On M1, append another half block of data. Close file on M1.
{
- //sleep to make sure the lease is expired the soft limit.
- Thread.sleep(2*leaseSoftLimit);
-
- FSDataOutputStream out = fs.append(p);
+ //sleep to let the lease expire.
+ Thread.sleep(2*LEASE_LIMIT);
+
+ final DistributedFileSystem dfs = (DistributedFileSystem)FileSystem.newInstance(conf);
+ final FSDataOutputStream out = append(dfs, p);
write(out, 0, half);
out.close();
}
//d. On M2, open file and read 1 block of data from it. Close file.
checkFile(p, 2*half, conf);
- */
} finally {
cluster.shutdown();
}
}
+ /** Try opening a file for append. */
+ private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
+ for(int i = 0; i < 10; i++) {
+ try {
+ return fs.append(p);
+ } catch(RemoteException re) {
+ if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
+ AppendTestUtil.LOG.info("Will sleep and retry, i=" + i +", p="+p, re);
+ Thread.sleep(1000);
+ }
+ else
+ throw re;
+ }
+ }
+ throw new IOException("Cannot append to " + p);
+ }
+
static private int userCount = 0;
//check the file
static void checkFile(Path p, int expectedsize, Configuration conf
@@ -113,10 +133,10 @@
UnixUserGroupInformation.UGI_PROPERTY_NAME,
new UnixUserGroupInformation(username, new String[]{"supergroup"}));
final FileSystem fs = FileSystem.get(conf2);
- final InputStream in = fs.open(p);
+ final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);
- //Is the data available?
- Assert.assertTrue(available(in, expectedsize));
+ //Check visible length
+ Assert.assertTrue(in.getVisibleLength() >= expectedsize);
//Able to read?
for(int i = 0; i < expectedsize; i++) {
@@ -135,15 +155,5 @@
}
out.write(bytes);
}
-
- /** Is the data available? */
- private static boolean available(InputStream in, int expectedsize
- ) throws IOException {
- final int available = in.available();
- System.out.println(" in.available()=" + available);
- Assert.assertTrue(available >= 0);
- Assert.assertTrue(available <= expectedsize);
- return available == expectedsize;
- }
}
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Fri Jan 8 14:52:46 2010
@@ -180,6 +180,7 @@
totalCapacity += capacity;
}
runBalancer(conf, totalUsedSpace, totalCapacity);
+ cluster.shutdown();
}
/* wait for one heartbeat */
@@ -261,6 +262,38 @@
} while(!balanced);
}
+
+ private void runBalancerDefaultConstructor(Configuration conf,
+ long totalUsedSpace, long totalCapacity) throws Exception {
+ waitForHeartBeat(totalUsedSpace, totalCapacity);
+
+ // start rebalancing
+ balancer = new Balancer();
+ balancer.setConf(conf);
+ balancer.run(new String[0]);
+
+ waitForHeartBeat(totalUsedSpace, totalCapacity);
+ boolean balanced;
+ do {
+ DatanodeInfo[] datanodeReport = client
+ .getDatanodeReport(DatanodeReportType.ALL);
+ assertEquals(datanodeReport.length, cluster.getDataNodes().size());
+ balanced = true;
+ double avgUtilization = ((double) totalUsedSpace) / totalCapacity * 100;
+ for (DatanodeInfo datanode : datanodeReport) {
+ if (Math.abs(avgUtilization - ((double) datanode.getDfsUsed())
+ / datanode.getCapacity() * 100) > 10) {
+ balanced = false;
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException ignored) {
+ }
+ break;
+ }
+ }
+ } while (!balanced);
+
+ }
/** one-node cluster test*/
private void oneNodeTest(Configuration conf) throws Exception {
@@ -298,6 +331,44 @@
new long[]{CAPACITY, CAPACITY},
new String[] {RACK0, RACK1});
}
+
+ public void testBalancer2() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ initConf(conf);
+ testBalancerDefaultConstructor(conf, new long[] { CAPACITY, CAPACITY },
+ new String[] { RACK0, RACK1 }, CAPACITY, RACK2);
+ }
+
+ private void testBalancerDefaultConstructor(Configuration conf,
+ long[] capacities, String[] racks, long newCapacity, String newRack)
+ throws Exception {
+ int numOfDatanodes = capacities.length;
+ assertEquals(numOfDatanodes, racks.length);
+ cluster = new MiniDFSCluster(0, conf, capacities.length, true, true, null,
+ racks, capacities);
+ try {
+ cluster.waitActive();
+ client = DFSClient.createNamenode(conf);
+
+ long totalCapacity = 0L;
+ for (long capacity : capacities) {
+ totalCapacity += capacity;
+ }
+ // fill up the cluster to be 30% full
+ long totalUsedSpace = totalCapacity * 3 / 10;
+ createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);
+ // start up an empty node with the same capacity and on the same rack
+ cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
+ new long[] { newCapacity });
+
+ totalCapacity += newCapacity;
+
+ // run balancer and validate results
+ runBalancerDefaultConstructor(conf, totalUsedSpace, totalCapacity);
+ } finally {
+ cluster.shutdown();
+ }
+ }
/**
* @param args
@@ -306,5 +377,6 @@
TestBalancer balancerTest = new TestBalancer();
balancerTest.testBalancer0();
balancerTest.testBalancer1();
+ balancerTest.testBalancer2();
}
}
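
The termination condition in runBalancerDefaultConstructor above treats the cluster as balanced once every datanode's utilization is within 10 percentage points of the cluster average. A worked check of that predicate, with figures from the test setup (the cluster is filled to 30% of capacity):

    public class BalanceCheckSketch {
      static boolean nodeBalanced(double avgUtilPct, long dfsUsed, long capacity) {
        double nodeUtilPct = ((double) dfsUsed) / capacity * 100;
        return Math.abs(avgUtilPct - nodeUtilPct) <= 10; // 10-point threshold
      }
      public static void main(String[] args) {
        // Cluster at 30% average utilization, nodes of capacity 1000:
        System.out.println(nodeBalanced(30, 450, 1000)); // false: |30-45| = 15
        System.out.println(nodeBalanced(30, 350, 1000)); // true:  |30-35| = 5
      }
    }
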
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Fri Jan 8 14:52:46 2010
@@ -215,12 +215,12 @@
try {
// start name-node and backup node 1
cluster = new MiniDFSCluster(conf1, 0, true, null);
- conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7770");
+ conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7771");
conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7775");
backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1);
// try to start backup node 2
conf2 = new HdfsConfiguration(conf1);
- conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7771");
+ conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7772");
conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7776");
try {
backup2 = startBackupNode(conf2, StartupOption.BACKUP, 2);
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Fri Jan 8 14:52:46 2010
@@ -36,6 +36,7 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
@@ -48,7 +49,7 @@
*/
public class TestStartup extends TestCase {
public static final String NAME_NODE_HOST = "localhost:";
- public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
+ public static final String WILDCARD_HTTP_HOST = "0.0.0.0:";
private static final Log LOG =
LogFactory.getLog(TestStartup.class.getName());
private Configuration config;
@@ -74,18 +75,20 @@
protected void setUp() throws Exception {
config = new HdfsConfiguration();
- String baseDir = System.getProperty("test.build.data", "/tmp");
+ hdfsDir = new File(MiniDFSCluster.getBaseDirectory());
- hdfsDir = new File(baseDir, "dfs");
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
- config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
- config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
-
- config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
- //config.set("fs.default.name", "hdfs://"+ NAME_NODE_HOST + "0");
+ config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "name")).toString());
+ config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+ new File(hdfsDir, "data").getPath());
+ config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "secondary")).toString());
+ config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+ WILDCARD_HTTP_HOST + "0");
FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
@@ -231,11 +234,15 @@
public void testChkpointStartup2() throws IOException{
LOG.info("--starting checkpointStartup2 - same directory for checkpoint");
// different name dirs
- config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
- config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, new File(hdfsDir, "edits").getPath());
+ config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "name")).toString());
+ config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "edits")).toString());
// same checkpoint dirs
- config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, new File(hdfsDir, "chkpt").getPath());
- config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, new File(hdfsDir, "chkpt").getPath());
+ config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "chkpt")).toString());
+ config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "chkpt")).toString());
createCheckPoint();
@@ -253,11 +260,15 @@
//setUpConfig();
LOG.info("--starting testStartup Recovery");
// different name dirs
- config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
- config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, new File(hdfsDir, "edits").getPath());
+ config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "name")).toString());
+ config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "edits")).toString());
// same checkpoint dirs
- config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, new File(hdfsDir, "chkpt_edits").getPath());
- config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, new File(hdfsDir, "chkpt").getPath());
+ config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "chkpt_edits")).toString());
+ config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "chkpt")).toString());
createCheckPoint();
corruptNameNodeFiles();
@@ -274,11 +285,15 @@
//setUpConfig();
LOG.info("--starting SecondNN startup test");
// different name dirs
- config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
- config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, new File(hdfsDir, "name").getPath());
+ config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "name")).toString());
+ config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "name")).toString());
// same checkpoint dirs
- config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, new File(hdfsDir, "chkpt_edits").getPath());
- config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, new File(hdfsDir, "chkpt").getPath());
+ config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "chkpt_edits")).toString());
+ config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+ fileAsURI(new File(hdfsDir, "chkpt")).toString());
LOG.info("--starting NN ");
MiniDFSCluster cluster = null;
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Fri Jan 8 14:52:46 2010
@@ -33,7 +33,8 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.cli.TestHDFSCLI;
+import org.apache.hadoop.cli.CmdFactoryDFS;
+import org.apache.hadoop.cli.util.CLITestData;
import org.apache.hadoop.cli.util.CommandExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -164,7 +165,7 @@
/**
* This function returns a md5 hash of a file.
*
- * @param FileToMd5
+ * @param file input file
* @return The md5 string
*/
public String getFileMD5(File file) throws Exception {
@@ -189,7 +190,7 @@
/**
* read currentCheckpointTime directly from the file
* @param currDir
- * @return
+ * @return the checkpoint time
* @throws IOException
*/
long readCheckpointTime(File currDir) throws IOException {
@@ -351,25 +352,25 @@
String cmd = "-fs NAMENODE -restoreFailedStorage false";
String namenode = config.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
- CommandExecutor executor = new TestHDFSCLI.DFSAdminCmdExecutor(namenode);
+ CommandExecutor executor =
+ CmdFactoryDFS.getCommandExecutor(
+ new CLITestData.TestCmd(cmd, CLITestData.TestCmd.CommandType.DFSADMIN),
+ namenode);
executor.executeCommand(cmd);
restore = fsi.getRestoreFailedStorage();
- LOG.info("After set true call restore is " + restore);
- assertEquals(restore, false);
+ assertFalse("After set true call restore is " + restore, restore);
// run one more time - to set it to true again
cmd = "-fs NAMENODE -restoreFailedStorage true";
executor.executeCommand(cmd);
restore = fsi.getRestoreFailedStorage();
- LOG.info("After set false call restore is " + restore);
- assertEquals(restore, true);
+ assertTrue("After set false call restore is " + restore, restore);
// run one more time - no change in value
cmd = "-fs NAMENODE -restoreFailedStorage check";
CommandExecutor.Result cmdResult = executor.executeCommand(cmd);
restore = fsi.getRestoreFailedStorage();
- LOG.info("After check call restore is " + restore);
- assertEquals(restore, true);
+ assertTrue("After check call restore is " + restore, restore);
String commandOutput = cmdResult.getCommandOutput();
commandOutput.trim();
assertTrue(commandOutput.contains("restoreFailedStorage is set to true"));
Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Jan 8 14:52:46 2010
@@ -39,11 +39,19 @@
*/
public class TestNameNodeMetrics extends TestCase {
private static final Configuration CONF = new HdfsConfiguration();
+ private static final int DFS_REPLICATION_INTERVAL = 1;
+ private static final Path TEST_ROOT_DIR_PATH =
+ new Path(System.getProperty("test.build.data", "build/test/data"));
+
+ // Number of datanodes in the cluster
+ private static final int DATANODE_COUNT = 3;
static {
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
- CONF.setLong("dfs.heartbeat.interval", 1L);
- CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+ CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+ DFS_REPLICATION_INTERVAL);
+ CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+ DFS_REPLICATION_INTERVAL);
}
private MiniDFSCluster cluster;
@@ -52,9 +60,13 @@
private Random rand = new Random();
private FSNamesystem namesystem;
+ private static Path getTestPath(String fileName) {
+ return new Path(TEST_ROOT_DIR_PATH, fileName);
+ }
+
@Override
protected void setUp() throws Exception {
- cluster = new MiniDFSCluster(CONF, 3, true, null);
+ cluster = new MiniDFSCluster(CONF, DATANODE_COUNT, true, null);
cluster.waitActive();
namesystem = cluster.getNamesystem();
fs = (DistributedFileSystem) cluster.getFileSystem();
@@ -67,9 +79,8 @@
}
/** create a file with a length of <code>fileLen</code> */
- private void createFile(String fileName, long fileLen, short replicas) throws IOException {
- Path filePath = new Path(fileName);
- DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong());
+ private void createFile(Path file, long fileLen, short replicas) throws IOException {
+ DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong());
}
private void updateMetrics() throws Exception {
@@ -82,7 +93,7 @@
/** Test metrics associated with addition of a file */
public void testFileAdd() throws Exception {
// Add files with 100 blocks
- final String file = "/tmp/t";
+ final Path file = getTestPath("testFileAdd");
createFile(file, 3200, (short)3);
final int blockCount = 32;
int blockCapacity = namesystem.getBlockCapacity();
@@ -96,27 +107,37 @@
blockCapacity <<= 1;
}
updateMetrics();
- assertEquals(3, metrics.filesTotal.get());
+ int filesTotal = file.depth() + 1; // Add 1 for root
+ assertEquals(filesTotal, metrics.filesTotal.get());
assertEquals(blockCount, metrics.blocksTotal.get());
assertEquals(blockCapacity, metrics.blockCapacity.get());
- fs.delete(new Path(file), true);
+ fs.delete(file, true);
+ filesTotal--; // reduce the file count for the deleted file
+
+ // Wait for more than DATANODE_COUNT replication intervals to ensure all
+ // the blocks pending deletion are sent for deletion to the datanodes.
+ Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
+ updateMetrics();
+ assertEquals(filesTotal, metrics.filesTotal.get());
+ assertEquals(0, metrics.blocksTotal.get());
+ assertEquals(0, metrics.pendingDeletionBlocks.get());
}
/** Corrupt a block and ensure metrics reflects it */
public void testCorruptBlock() throws Exception {
// Create a file with single block with two replicas
- String file = "/tmp/t";
+ final Path file = getTestPath("testCorruptBlock");
createFile(file, 100, (short)2);
// Corrupt first replica of the block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
- cluster.getNameNode(), file, 0, 1).get(0);
+ cluster.getNameNode(), file.toString(), 0, 1).get(0);
namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
updateMetrics();
assertEquals(1, metrics.corruptBlocks.get());
assertEquals(1, metrics.pendingReplicationBlocks.get());
assertEquals(1, metrics.scheduledReplicationBlocks.get());
- fs.delete(new Path(file), true);
+ fs.delete(file, true);
updateMetrics();
assertEquals(0, metrics.corruptBlocks.get());
assertEquals(0, metrics.pendingReplicationBlocks.get());
@@ -127,30 +148,29 @@
* for a file and ensure metrics reflects it
*/
public void testExcessBlocks() throws Exception {
- String file = "/tmp/t";
+ Path file = getTestPath("testExcessBlocks");
createFile(file, 100, (short)2);
int totalBlocks = 1;
- namesystem.setReplication(file, (short)1);
+ namesystem.setReplication(file.toString(), (short)1);
updateMetrics();
assertEquals(totalBlocks, metrics.excessBlocks.get());
- assertEquals(totalBlocks, metrics.pendingDeletionBlocks.get());
- fs.delete(new Path(file), true);
+ fs.delete(file, true);
}
/** Test to ensure metrics reflects missing blocks */
public void testMissingBlock() throws Exception {
// Create a file with single block with two replicas
- String file = "/tmp/t";
+ Path file = getTestPath("testMissingBlocks");
createFile(file, 100, (short)1);
// Corrupt the only replica of the block to result in a missing block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
- cluster.getNameNode(), file, 0, 1).get(0);
+ cluster.getNameNode(), file.toString(), 0, 1).get(0);
namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
updateMetrics();
assertEquals(1, metrics.underReplicatedBlocks.get());
assertEquals(1, metrics.missingBlocks.get());
- fs.delete(new Path(file), true);
+ fs.delete(file, true);
updateMetrics();
assertEquals(0, metrics.underReplicatedBlocks.get());
}
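
A quick check of the new filesTotal arithmetic in testFileAdd above: for an absolute path such as /d1/d2/f, Path.depth() returns 3, and creating the file materializes four INodes in the namespace (root, d1, d2 and f), i.e. depth + 1. Deleting the file removes only the file INode while its ancestor directories remain, hence the single decrement before the post-delete assertion.
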
Modified: hadoop/hdfs/branches/HDFS-326/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java Fri Jan 8 14:52:46 2010
@@ -19,15 +19,6 @@
package org.apache.hadoop.hdfs.server.namenode;
import static junit.framework.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.when;
-
-import java.io.File;
-import java.io.IOException;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@@ -39,13 +30,20 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
+import static org.junit.Assert.assertFalse;
import org.junit.Before;
import org.junit.Test;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+import java.io.File;
+import java.io.IOException;
public class TestNNLeaseRecovery {
public static final Log LOG = LogFactory.getLog(TestNNLeaseRecovery.class);
@@ -128,8 +126,8 @@
PermissionStatus ps =
new PermissionStatus("test", "test", new FsPermission((short)0777));
- mockFileBlocks(null,
- HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps);
+ mockFileBlocks(2, null,
+ HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
fsn.internalReleaseLease(lm, file.toString(), null);
assertTrue("FSNamesystem.internalReleaseLease suppose to throw " +
@@ -152,8 +150,52 @@
PermissionStatus ps =
new PermissionStatus("test", "test", new FsPermission((short)0777));
- mockFileBlocks(HdfsConstants.BlockUCState.COMMITTED,
- HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps);
+ mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
+ HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
+
+ fsn.internalReleaseLease(lm, file.toString(), null);
+ assertTrue("FSNamesystem.internalReleaseLease suppose to throw " +
+ "AlreadyBeingCreatedException here", false);
+ }
+
+ /**
+ * Mocks FSNamesystem instance, adds an empty file with 0 blocks
+ * and invokes lease recovery method.
+ *
+ */
+ @Test
+ public void testInternalReleaseLease_0blocks () throws IOException {
+ LOG.debug("Running " + GenericTestUtils.getMethodName());
+ LeaseManager.Lease lm = mock(LeaseManager.Lease.class);
+ Path file =
+ spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+ DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+ PermissionStatus ps =
+ new PermissionStatus("test", "test", new FsPermission((short)0777));
+
+ mockFileBlocks(0, null, null, file, dnd, ps, false);
+
+ assertTrue("True has to be returned in this case",
+ fsn.internalReleaseLease(lm, file.toString(), null));
+ }
+
+ /**
+ * Mocks an FSNamesystem instance, adds a file with 1 block,
+ * and invokes the lease recovery method.
+ * AlreadyBeingCreatedException is expected.
+ * @throws AlreadyBeingCreatedException as a result
+ */
+ @Test(expected=AlreadyBeingCreatedException.class)
+ public void testInternalReleaseLease_1blocks () throws IOException {
+ LOG.debug("Running " + GenericTestUtils.getMethodName());
+ LeaseManager.Lease lm = mock(LeaseManager.Lease.class);
+ Path file =
+ spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+ DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+ PermissionStatus ps =
+ new PermissionStatus("test", "test", new FsPermission((short)0777));
+
+ mockFileBlocks(1, null, HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
fsn.internalReleaseLease(lm, file.toString(), null);
assertTrue("FSNamesystem.internalReleaseLease suppose to throw " +
@@ -176,25 +218,159 @@
PermissionStatus ps =
new PermissionStatus("test", "test", new FsPermission((short)0777));
- mockFileBlocks(HdfsConstants.BlockUCState.COMMITTED,
- HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps);
+ mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
+ HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
assertFalse("False is expected in return in this case",
fsn.internalReleaseLease(lm, file.toString(), null));
}
- private void mockFileBlocks(HdfsConstants.BlockUCState penUltState,
- HdfsConstants.BlockUCState lastState,
- Path file, DatanodeDescriptor dnd,
- PermissionStatus ps) throws IOException {
+ @Test
+ public void testCommitBlockSynchronization_BlockNotFound ()
+ throws IOException {
+ LOG.debug("Running " + GenericTestUtils.getMethodName());
+ long recoveryId = 2002;
+ long newSize = 273487234;
+ Path file =
+ spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+ DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+ PermissionStatus ps =
+ new PermissionStatus("test", "test", new FsPermission((short)0777));
+
+ mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
+ HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
+
+ BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
+ try {
+ fsn.commitBlockSynchronization(lastBlock,
+ recoveryId, newSize, true, false, new DatanodeID[1]);
+ } catch (IOException ioe) {
+ assertTrue(ioe.getMessage().startsWith("Block (="));
+ }
+ }
+
+ @Test
+ public void testCommitBlockSynchronization_notUR ()
+ throws IOException {
+ LOG.debug("Running " + GenericTestUtils.getMethodName());
+ long recoveryId = 2002;
+ long newSize = 273487234;
+ Path file =
+ spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+ DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+ PermissionStatus ps =
+ new PermissionStatus("test", "test", new FsPermission((short)0777));
+
+ mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
+ HdfsConstants.BlockUCState.COMPLETE, file, dnd, ps, true);
+
+ BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
+ when(lastBlock.isComplete()).thenReturn(true);
+
+ try {
+ fsn.commitBlockSynchronization(lastBlock,
+ recoveryId, newSize, true, false, new DatanodeID[1]);
+ } catch (IOException ioe) {
+ assertTrue(ioe.getMessage().startsWith("Unexpected block (="));
+ }
+ }
+
+ @Test
+ public void testCommitBlockSynchronization_WrongGreaterRecoveryID()
+ throws IOException {
+ LOG.debug("Running " + GenericTestUtils.getMethodName());
+ long recoveryId = 2002;
+ long newSize = 273487234;
+ Path file =
+ spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+ DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+ PermissionStatus ps =
+ new PermissionStatus("test", "test", new FsPermission((short)0777));
+
+ mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
+ HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
+
+ BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
+ when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId-100);
+
+ try {
+ fsn.commitBlockSynchronization(lastBlock,
+ recoveryId, newSize, true, false, new DatanodeID[1]);
+ } catch (IOException ioe) {
+ assertTrue(ioe.getMessage().startsWith("The recovery id " + recoveryId + " does not match current recovery id " + (recoveryId-100)));
+ }
+ }
+
+ @Test
+ public void testCommitBlockSynchronization_WrongLesserRecoveryID()
+ throws IOException {
+ LOG.debug("Running " + GenericTestUtils.getMethodName());
+ long recoveryId = 2002;
+ long newSize = 273487234;
+ Path file =
+ spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+ DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+ PermissionStatus ps =
+ new PermissionStatus("test", "test", new FsPermission((short)0777));
+
+ mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
+ HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
+
+ BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
+ when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId+100);
+
+ try {
+ fsn.commitBlockSynchronization(lastBlock,
+ recoveryId, newSize, true, false, new DatanodeID[1]);
+ } catch (IOException ioe) {
+ assertTrue(ioe.getMessage().startsWith("The recovery id " + recoveryId + " does not match current recovery id " + (recoveryId+100)));
+ }
+ }
+
+ @Test
+ public void testCommitBlockSynchronization_EqualRecoveryID()
+ throws IOException {
+ LOG.debug("Running " + GenericTestUtils.getMethodName());
+ long recoveryId = 2002;
+ long newSize = 273487234;
+ Path file =
+ spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+ DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+ PermissionStatus ps =
+ new PermissionStatus("test", "test", new FsPermission((short)0777));
+
+ mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
+ HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
+
+ BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
+ when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId);
+
+ boolean recoveryChecked = false;
+ try {
+ fsn.commitBlockSynchronization(lastBlock,
+ recoveryId, newSize, true, false, new DatanodeID[1]);
+ } catch (NullPointerException npe) {
+ // An NPE is expected here because the datanodes array holds only a null element
+ recoveryChecked = true;
+ }
+ assertTrue("commitBlockSynchronization should have thrown an NPE here", recoveryChecked);
+ }
+
+ private void mockFileBlocks(int fileBlocksNumber,
+ HdfsConstants.BlockUCState penUltState,
+ HdfsConstants.BlockUCState lastState,
+ Path file, DatanodeDescriptor dnd,
+ PermissionStatus ps,
+ boolean setStoredBlock) throws IOException {
BlockInfo b = mock(BlockInfo.class);
BlockInfoUnderConstruction b1 = mock(BlockInfoUnderConstruction.class);
when(b.getBlockUCState()).thenReturn(penUltState);
when(b1.getBlockUCState()).thenReturn(lastState);
- BlockInfo[] blocks = new BlockInfo[]{b, b1};
+ BlockInfo[] blocks;
FSDirectory fsDir = mock(FSDirectory.class);
INodeFileUnderConstruction iNFmock = mock(INodeFileUnderConstruction.class);
+
fsn.dir = fsDir;
FSImage fsImage = mock(FSImage.class);
FSEditLog editLog = mock(FSEditLog.class);
@@ -203,14 +379,34 @@
when(fsn.getFSImage().getEditLog()).thenReturn(editLog);
fsn.getFSImage().setFSNamesystem(fsn);
+ switch (fileBlocksNumber) {
+ case 0:
+ blocks = new BlockInfo[0];
+ break;
+ case 1:
+ blocks = new BlockInfo[]{b1};
+ when(iNFmock.getLastBlock()).thenReturn(b1);
+ break;
+ default:
+ when(iNFmock.getPenultimateBlock()).thenReturn(b);
+ when(iNFmock.getLastBlock()).thenReturn(b1);
+ blocks = new BlockInfo[]{b, b1};
+ }
+
when(iNFmock.getBlocks()).thenReturn(blocks);
- when(iNFmock.numBlocks()).thenReturn(2);
- when(iNFmock.getPenultimateBlock()).thenReturn(b);
- when(iNFmock.getLastBlock()).thenReturn(b1);
+ when(iNFmock.numBlocks()).thenReturn(blocks.length);
when(iNFmock.isUnderConstruction()).thenReturn(true);
+ when(iNFmock.convertToInodeFile()).thenReturn(iNFmock);
fsDir.addFile(file.toString(), ps, (short)3, 1l, "test",
"test-machine", dnd, 1001l);
+ fsn.leaseManager = mock(LeaseManager.class);
+ fsn.leaseManager.addLease("mock-lease", file.toString());
+ if (setStoredBlock) {
+ when(b1.getINode()).thenReturn(iNFmock);
+ fsn.blockManager.blocksMap.addINode(b1, iNFmock);
+ }
+
when(fsDir.getFileINode(anyString())).thenReturn(iNFmock);
}
Propchange: hadoop/hdfs/branches/HDFS-326/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jan 8 14:52:46 2010
@@ -2,4 +2,4 @@
/hadoop/core/trunk/src/webapps/datanode:776175-784663
/hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
/hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:804973-885783
+/hadoop/hdfs/trunk/src/webapps/datanode:804973-897215
Propchange: hadoop/hdfs/branches/HDFS-326/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jan 8 14:52:46 2010
@@ -2,4 +2,4 @@
/hadoop/core/trunk/src/webapps/hdfs:776175-784663
/hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
/hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:804973-885783
+/hadoop/hdfs/trunk/src/webapps/hdfs:804973-897215
Propchange: hadoop/hdfs/branches/HDFS-326/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jan 8 14:52:46 2010
@@ -2,4 +2,4 @@
/hadoop/core/trunk/src/webapps/secondary:776175-784663
/hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
/hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:804973-885783
+/hadoop/hdfs/trunk/src/webapps/secondary:804973-897215