Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2014/08/20 01:50:25 UTC
svn commit: r1619012 [32/35] - in
/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop...
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java Tue Aug 19 23:49:39 2014
@@ -18,32 +18,41 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
-import java.util.ArrayList;
-import java.util.Random;
-import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Random;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.Test;
+import org.apache.log4j.Level;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.BeforeClass;
+import org.junit.Test;
public class TestFavoredNodesEndToEnd {
+ {
+ ((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class)).getLogger().setLevel(Level.ALL);
+ }
+
private static MiniDFSCluster cluster;
private static Configuration conf;
private final static int NUM_DATA_NODES = 10;
@@ -79,7 +88,7 @@ public class TestFavoredNodesEndToEnd {
InetSocketAddress datanode[] = getDatanodes(rand);
Path p = new Path("/filename"+i);
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
- 4096, (short)3, (long)4096, null, datanode);
+ 4096, (short)3, 4096L, null, datanode);
out.write(SOME_BYTES);
out.close();
BlockLocation[] locations = getBlockLocations(p);
@@ -98,14 +107,13 @@ public class TestFavoredNodesEndToEnd {
//get some other nodes. In other words, the write to hdfs should not fail
//and if we do getBlockLocations on the file, we should see one blklocation
//and three hosts for that
- Random rand = new Random(System.currentTimeMillis());
InetSocketAddress arbitraryAddrs[] = new InetSocketAddress[3];
for (int i = 0; i < 3; i++) {
arbitraryAddrs[i] = getArbitraryLocalHostAddr();
}
Path p = new Path("/filename-foo-bar");
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
- 4096, (short)3, (long)4096, null, arbitraryAddrs);
+ 4096, (short)3, 4096L, null, arbitraryAddrs);
out.write(SOME_BYTES);
out.close();
getBlockLocations(p);
@@ -113,35 +121,41 @@ public class TestFavoredNodesEndToEnd {
@Test(timeout=180000)
public void testWhenSomeNodesAreNotGood() throws Exception {
+ // 4 favored nodes
+ final InetSocketAddress addrs[] = new InetSocketAddress[4];
+ final String[] hosts = new String[addrs.length];
+ for (int i = 0; i < addrs.length; i++) {
+ addrs[i] = datanodes.get(i).getXferAddress();
+ hosts[i] = addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort();
+ }
+
//make some datanode not "good" so that even if the client prefers it,
//the namenode would not give it as a replica to write to
DatanodeInfo d = cluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getDatanodeByXferAddr(
- datanodes.get(0).getXferAddress().getAddress().getHostAddress(),
- datanodes.get(0).getXferAddress().getPort());
+ addrs[0].getAddress().getHostAddress(), addrs[0].getPort());
//set the decommission status to true so that
//BlockPlacementPolicyDefault.isGoodTarget returns false for this dn
d.setDecommissioned();
- InetSocketAddress addrs[] = new InetSocketAddress[3];
- for (int i = 0; i < 3; i++) {
- addrs[i] = datanodes.get(i).getXferAddress();
- }
Path p = new Path("/filename-foo-bar-baz");
+ final short replication = (short)3;
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
- 4096, (short)3, (long)4096, null, addrs);
+ 4096, replication, 4096L, null, addrs);
out.write(SOME_BYTES);
out.close();
//reset the state
d.stopDecommission();
+
BlockLocation[] locations = getBlockLocations(p);
+    Assert.assertEquals(replication, locations[0].getNames().length);
//also make sure that the datanode[0] is not in the list of hosts
- String datanode0 =
- datanodes.get(0).getXferAddress().getAddress().getHostAddress()
- + ":" + datanodes.get(0).getXferAddress().getPort();
- for (int i = 0; i < 3; i++) {
- if (locations[0].getNames()[i].equals(datanode0)) {
- fail(datanode0 + " not supposed to be a replica for the block");
- }
+ for (int i = 0; i < replication; i++) {
+ final String loc = locations[0].getNames()[i];
+ int j = 0;
+ for(; j < hosts.length && !loc.equals(hosts[j]); j++);
+ Assert.assertTrue("j=" + j, j > 0);
+ Assert.assertTrue("loc=" + loc + " not in host list "
+ + Arrays.asList(hosts) + ", j=" + j, j < hosts.length);
}
}
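
For readers unfamiliar with the favored-nodes API this test exercises, here is a minimal standalone sketch of the create() overload involved, assuming fs.defaultFS points at an HDFS cluster; the hostnames, ports, and path are placeholders, and as the test comments note, favored nodes are only hints to the namenode:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FavoredNodesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(conf);
    // Favored nodes are hints only: the namenode may pick other
    // datanodes if a favored one is decommissioned or otherwise bad.
    InetSocketAddress[] favored = {
        new InetSocketAddress("dn1.example.com", 50010),  // placeholder
        new InetSocketAddress("dn2.example.com", 50010)   // placeholder
    };
    FSDataOutputStream out = dfs.create(new Path("/demo"),
        FsPermission.getDefault(), true, 4096, (short) 2, 4096L,
        null, favored);
    out.write("hello".getBytes());
    out.close();
  }
}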
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Tue Aug 19 23:49:39 2014
@@ -19,11 +19,9 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.apache.hadoop.util.Time.now;
import static org.junit.Assert.assertEquals;
-import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
@@ -45,37 +43,20 @@ import org.junit.Test;
public class TestFsLimits {
static Configuration conf;
- static INode[] inodes;
- static FSDirectory fs;
+ static FSNamesystem fs;
static boolean fsIsReady;
static final PermissionStatus perms
= new PermissionStatus("admin", "admin", FsPermission.getDefault());
- static private FSImage getMockFSImage() {
- FSEditLog editLog = mock(FSEditLog.class);
+ static private FSNamesystem getMockNamesystem() throws IOException {
FSImage fsImage = mock(FSImage.class);
- when(fsImage.getEditLog()).thenReturn(editLog);
- return fsImage;
- }
-
- static private FSNamesystem getMockNamesystem() {
- FSNamesystem fsn = mock(FSNamesystem.class);
- when(
- fsn.createFsOwnerPermissions((FsPermission)anyObject())
- ).thenReturn(
- new PermissionStatus("root", "wheel", FsPermission.getDefault())
- );
+ FSEditLog editLog = mock(FSEditLog.class);
+ doReturn(editLog).when(fsImage).getEditLog();
+ FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+ fsn.setImageLoaded(fsIsReady);
return fsn;
}
-
- private static class MockFSDirectory extends FSDirectory {
- public MockFSDirectory() throws IOException {
- super(getMockFSImage(), getMockNamesystem(), conf);
- setReady(fsIsReady);
- NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
- }
- }
@Before
public void setUp() throws IOException {
@@ -83,7 +64,7 @@ public class TestFsLimits {
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
"namenode")).toString());
-
+ NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
fs = null;
fsIsReady = true;
}
@@ -197,9 +178,10 @@ public class TestFsLimits {
lazyInitFSDirectory();
Class<?> generated = null;
try {
- fs.mkdirs(name, perms, false, now());
+ fs.mkdirs(name, perms, false);
} catch (Throwable e) {
generated = e.getClass();
+ e.printStackTrace();
}
assertEquals(expected, generated);
}
@@ -209,7 +191,7 @@ public class TestFsLimits {
lazyInitFSDirectory();
Class<?> generated = null;
try {
- fs.renameTo(src, dst, false, new Rename[] { });
+ fs.renameTo(src, dst, new Rename[] { });
} catch (Throwable e) {
generated = e.getClass();
}
@@ -222,7 +204,7 @@ public class TestFsLimits {
lazyInitFSDirectory();
Class<?> generated = null;
try {
- fs.renameTo(src, dst, false);
+ fs.renameTo(src, dst);
} catch (Throwable e) {
generated = e.getClass();
}
@@ -232,7 +214,7 @@ public class TestFsLimits {
private static void lazyInitFSDirectory() throws IOException {
// have to create after the caller has had a chance to set conf values
if (fs == null) {
- fs = new MockFSDirectory();
+ fs = getMockNamesystem();
}
}
}
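
The stubbing change above swaps when(...).thenReturn(...) for doReturn(...).when(...). A minimal sketch, with a hypothetical Service class, of why that matters for spies: the when(...) form executes the real method while setting up the stub, the doReturn(...) form does not.

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

public class DoReturnExample {
  static class Service {
    String expensiveCall() { throw new IllegalStateException("real call"); }
  }

  public static void main(String[] args) {
    Service svc = spy(new Service());
    // when(svc.expensiveCall()).thenReturn("stubbed") would invoke the
    // real method on the spy and throw; doReturn(..).when(..) stubs
    // without calling it.
    doReturn("stubbed").when(svc).expensiveCall();
    System.out.println(svc.expensiveCall()); // prints "stubbed"
  }
}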
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Tue Aug 19 23:49:39 2014
@@ -41,6 +41,7 @@ import java.net.InetSocketAddress;
import java.nio.channels.FileChannel;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
@@ -63,11 +64,14 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.tools.DFSck;
@@ -99,13 +103,13 @@ public class TestFsck {
"ugi=.*?\\s" +
"ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
"cmd=fsck\\ssrc=\\/\\sdst=null\\s" +
- "perm=null");
+ "perm=null\\s" + "proto=.*");
static final Pattern getfileinfoPattern = Pattern.compile(
"allowed=.*?\\s" +
"ugi=.*?\\s" +
"ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
"cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" +
- "perm=null");
+ "perm=null\\s" + "proto=.*");
static final Pattern numCorruptBlocksPattern = Pattern.compile(
".*Corrupt blocks:\t\t([0123456789]*).*");
@@ -610,6 +614,8 @@ public class TestFsck {
public void testCorruptBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
+ // Set short retry timeouts so this test runs faster
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
FileSystem fs = null;
DFSClient dfsClient = null;
LocatedBlocks blocks = null;
@@ -699,7 +705,7 @@ public class TestFsck {
DFSTestUtil.waitReplication(fs, filePath, (short)1);
// intentionally corrupt NN data structure
- INodeFile node = (INodeFile)cluster.getNamesystem().dir.rootDir.getNode(
+ INodeFile node = (INodeFile)cluster.getNamesystem().dir.getNode(
fileName, true);
final BlockInfo[] blocks = node.getBlocks();
assertEquals(blocks.length, 1);
@@ -746,15 +752,14 @@ public class TestFsck {
for (int j=0; j<=1; j++) {
File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
- File[] blocks = data_dir.listFiles();
- if (blocks == null)
+ List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+ data_dir);
+ if (metadataFiles == null)
continue;
-
- for (int idx = 0; idx < blocks.length; idx++) {
- if (!blocks[idx].getName().startsWith("blk_")) {
- continue;
- }
- assertTrue("Cannot remove file.", blocks[idx].delete());
+ for (File metadataFile : metadataFiles) {
+ File blockFile = Block.metaToBlockFile(metadataFile);
+ assertTrue("Cannot remove file.", blockFile.delete());
+ assertTrue("Cannot remove file.", metadataFile.delete());
}
}
}
@@ -981,10 +986,15 @@ public class TestFsck {
PrintWriter out = new PrintWriter(result, true);
InetAddress remoteAddress = InetAddress.getLocalHost();
FSNamesystem fsName = mock(FSNamesystem.class);
+ BlockManager blockManager = mock(BlockManager.class);
+ DatanodeManager dnManager = mock(DatanodeManager.class);
+
when(namenode.getNamesystem()).thenReturn(fsName);
when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
anyBoolean(), anyBoolean(), anyBoolean())).
thenThrow(new FileNotFoundException()) ;
+ when(fsName.getBlockManager()).thenReturn(blockManager);
+ when(blockManager.getDatanodeManager()).thenReturn(dnManager);
NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
NUM_REPLICAS, (short)1, remoteAddress);
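
The audit-log patterns above gain a trailing proto= field. A self-contained sketch showing the extended fsck pattern against an illustrative (not real) audit line:

import java.util.regex.Pattern;

public class AuditPatternExample {
  public static void main(String[] args) {
    Pattern fsckPattern = Pattern.compile(
        "allowed=.*?\\s" +
        "ugi=.*?\\s" +
        "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
        "cmd=fsck\\ssrc=\\/\\sdst=null\\s" +
        "perm=null\\s" + "proto=.*");
    // Illustrative line only; real audit entries come from the NN log.
    String line = "allowed=true ugi=hdfs ip=/127.0.0.1 "
        + "cmd=fsck src=/ dst=null perm=null proto=rpc";
    System.out.println(fsckPattern.matcher(line).find()); // true
  }
}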
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java Tue Aug 19 23:49:39 2014
@@ -19,10 +19,8 @@ package org.apache.hadoop.hdfs.server.na
import static org.junit.Assert.assertTrue;
-import java.net.InetSocketAddress;
-import java.net.URL;
+import java.lang.management.ManagementFactory;
-import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,9 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.junit.Test;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
/**
* DFS_HOSTS and DFS_HOSTS_EXCLUDE tests
*
@@ -73,7 +74,7 @@ public class TestHostsFiles {
}
@Test
- public void testHostsExcludeDfshealthJsp() throws Exception {
+ public void testHostsExcludeInUI() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 2;
final Path filePath = new Path("/testFile");
@@ -117,19 +118,55 @@ public class TestHostsFiles {
// Check the block still has sufficient # replicas across racks
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
-
- InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
- LOG.info("nnaddr = '" + nnHttpAddress + "'");
- String nnHostName = nnHttpAddress.getHostName();
- URL nnjsp = new URL("http://" + nnHostName + ":" + nnHttpAddress.getPort() + "/dfshealth.jsp");
- LOG.info("fetching " + nnjsp);
- String dfshealthPage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(nnjsp));
- LOG.info("got " + dfshealthPage);
- assertTrue("dfshealth should contain " + nnHostName + ", got:" + dfshealthPage,
- dfshealthPage.contains(nnHostName));
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName = new ObjectName(
+ "Hadoop:service=NameNode,name=NameNodeInfo");
+ String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
+ assertTrue("Live nodes should contain the decommissioned node",
+ nodes.contains("Decommissioned"));
} finally {
cluster.shutdown();
}
}
+
+ @Test
+ public void testHostsIncludeForDeadCount() throws Exception {
+ Configuration conf = getConf();
+
+ // Configure an excludes file
+ FileSystem localFileSys = FileSystem.getLocal(conf);
+ Path workingDir = localFileSys.getWorkingDirectory();
+ Path dir = new Path(workingDir, "build/test/data/temp/decommission");
+ Path excludeFile = new Path(dir, "exclude");
+ Path includeFile = new Path(dir, "include");
+ assertTrue(localFileSys.mkdirs(dir));
+ StringBuilder includeHosts = new StringBuilder();
+ includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
+ .append("\n");
+ DFSTestUtil.writeFile(localFileSys, excludeFile, "");
+ DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
+ conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+ conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ final FSNamesystem ns = cluster.getNameNode().getNamesystem();
+ assertTrue(ns.getNumDeadDataNodes() == 2);
+ assertTrue(ns.getNumLiveDataNodes() == 0);
+
+ // Testing using MBeans
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName = new ObjectName(
+ "Hadoop:service=NameNode,name=FSNamesystemState");
+ String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
+ assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
+ assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
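
Both new tests read NameNode state through the platform MBean server instead of scraping dfshealth.jsp. A minimal in-process sketch of that lookup, assuming it runs in the same JVM as the NameNode, as MiniDFSCluster tests do:

import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

public class NameNodeJmxExample {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    // Attribute names match the ones asserted in the tests above.
    String liveNodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    String deadNodes = (String) mbs.getAttribute(mxbeanName, "DeadNodes");
    System.out.println("LiveNodes = " + liveNodes);
    System.out.println("DeadNodes = " + deadNodes);
  }
}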
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Tue Aug 19 23:49:39 2014
@@ -27,11 +27,8 @@ import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
-import junit.framework.Assert;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -47,6 +44,8 @@ import org.apache.hadoop.fs.Options.Rena
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSClient;
@@ -68,6 +67,8 @@ import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mockito.Mockito;
+import com.google.common.collect.ImmutableList;
+
public class TestINodeFile {
public static final Log LOG = LogFactory.getLog(TestINodeFile.class);
@@ -77,7 +78,7 @@ public class TestINodeFile {
private final PermissionStatus perm = new PermissionStatus(
"userName", null, FsPermission.getDefault());
private short replication;
- private long preferredBlockSize;
+ private long preferredBlockSize = 1024;
INodeFile createINodeFile(short replication, long preferredBlockSize) {
return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
@@ -316,7 +317,7 @@ public class TestINodeFile {
{//cast from INodeFileUnderConstruction
final INode from = new INodeFile(
INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L);
- from.asFile().toUnderConstruction("client", "machine", null);
+ from.asFile().toUnderConstruction("client", "machine");
//cast to INodeFile, should succeed
final INodeFile f = INodeFile.valueOf(from, path);
@@ -466,8 +467,8 @@ public class TestINodeFile {
}
}
- @Test
- public void testWriteToRenamedFile() throws IOException {
+ @Test(timeout=120000)
+ public void testWriteToDeletedFile() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
@@ -484,18 +485,16 @@ public class TestINodeFile {
Path filePath = new Path("/test1/file");
FSDataOutputStream fos = fs.create(filePath);
- // Rename /test1 to test2, and recreate /test1/file
- Path renamedPath = new Path("/test2");
- fs.rename(path, renamedPath);
- fs.create(filePath, (short) 1);
+ // Delete the file
+ fs.delete(filePath, false);
- // Add new block should fail since /test1/file has a different fileId
+ // Add new block should fail since /test1/file has been deleted.
try {
fos.write(data, 0, data.length);
// make sure addBlock() request gets to NN immediately
fos.hflush();
- fail("Write should fail after rename");
+ fail("Write should fail after delete");
} catch (Exception e) {
/* Ignore */
} finally {
@@ -523,6 +522,7 @@ public class TestINodeFile {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
@@ -570,6 +570,20 @@ public class TestINodeFile {
// ClientProtocol#getPreferredBlockSize
assertEquals(testFileBlockSize,
nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
+
+ /*
+ * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
+ * following four methods. The calls below ensure that
+ * /.reserved/.inodes paths work properly. No need to check return
+ * values as these methods are tested elsewhere.
+ */
+ {
+ fs.isFileClosed(testFileInodePath);
+ fs.getAclStatus(testFileInodePath);
+ fs.getXAttrs(testFileInodePath);
+ fs.listXAttrs(testFileInodePath);
+ fs.access(testFileInodePath, FsAction.READ_WRITE);
+ }
// symbolic link related tests
@@ -789,14 +803,6 @@ public class TestINodeFile {
return dir; // Last Inode in the chain
}
- private static void checkEquals(byte[][] expected, byte[][] actual) {
- assertEquals(expected.length, actual.length);
- int i = 0;
- for (byte[] e : expected) {
- assertTrue(Arrays.equals(e, actual[i++]));
- }
- }
-
/**
* Test for {@link FSDirectory#getPathComponents(INode)}
*/
@@ -806,7 +812,7 @@ public class TestINodeFile {
INode inode = createTreeOfInodes(path);
byte[][] expected = INode.getPathComponents(path);
byte[][] actual = FSDirectory.getPathComponents(inode);
- checkEquals(expected, actual);
+ DFSTestUtil.checkComponentsEquals(expected, actual);
}
/**
@@ -1078,14 +1084,31 @@ public class TestINodeFile {
final String clientName = "client";
final String clientMachine = "machine";
- file.toUnderConstruction(clientName, clientMachine, null);
+ file.toUnderConstruction(clientName, clientMachine);
assertTrue(file.isUnderConstruction());
FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
assertEquals(clientName, uc.getClientName());
assertEquals(clientMachine, uc.getClientMachine());
- Assert.assertNull(uc.getClientNode());
file.toCompleteFile(Time.now());
assertFalse(file.isUnderConstruction());
}
+
+ @Test
+ public void testXAttrFeature() {
+ replication = 3;
+ preferredBlockSize = 128*1024*1024;
+ INodeFile inf = createINodeFile(replication, preferredBlockSize);
+ ImmutableList.Builder<XAttr> builder = new ImmutableList.Builder<XAttr>();
+ XAttr xAttr = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER).
+ setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
+ builder.add(xAttr);
+ XAttrFeature f = new XAttrFeature(builder.build());
+ inf.addXAttrFeature(f);
+ XAttrFeature f1 = inf.getXAttrFeature();
+ assertEquals(xAttr, f1.getXAttrs().get(0));
+ inf.removeXAttrFeature();
+ f1 = inf.getXAttrFeature();
+ assertEquals(f1, null);
+ }
}
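
The new testXAttrFeature works at the INode level; for context, a hedged sketch of the equivalent user-facing xattr calls on FileSystem. The path and value are placeholders, and xattr support must be enabled on the namenode:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/demo-file");  // placeholder path
    fs.create(p).close();
    // the "user." prefix maps to XAttr.NameSpace.USER used in the test
    fs.setXAttr(p, "user.a1", new byte[]{0x31, 0x32, 0x33});
    byte[] value = fs.getXAttr(p, "user.a1");
    System.out.println(value.length); // 3
    fs.removeXAttr(p, "user.a1");
  }
}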
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Tue Aug 19 23:49:39 2014
@@ -25,6 +25,7 @@ import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Collection;
+import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
@@ -39,7 +40,11 @@ import org.apache.hadoop.hdfs.DFSTestUti
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestFileCorruption;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
@@ -64,6 +69,8 @@ public class TestListCorruptFileBlocks {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
+ // Set short retry timeouts so this test runs faster
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
@@ -85,36 +92,29 @@ public class TestListCorruptFileBlocks {
File storageDir = cluster.getInstanceStorageDir(0, 1);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
assertTrue("data directory does not exist", data_dir.exists());
- File[] blocks = data_dir.listFiles();
- assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
- for (int idx = 0; idx < blocks.length; idx++) {
- if (blocks[idx].getName().startsWith("blk_") &&
- blocks[idx].getName().endsWith(".meta")) {
- //
- // shorten .meta file
- //
- RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
- FileChannel channel = file.getChannel();
- long position = channel.size() - 2;
- int length = 2;
- byte[] buffer = new byte[length];
- random.nextBytes(buffer);
- channel.write(ByteBuffer.wrap(buffer), position);
- file.close();
- LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
- " at offset " + position + " length " + length);
-
- // read all files to trigger detection of corrupted replica
- try {
- util.checkFiles(fs, "/srcdat10");
- } catch (BlockMissingException e) {
- System.out.println("Received BlockMissingException as expected.");
- } catch (IOException e) {
- assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
- " but received IOException " + e, false);
- }
- break;
- }
+ List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
+ assertTrue("Data directory does not contain any blocks or there was an "
+ + "IO error", metaFiles != null && !metaFiles.isEmpty());
+ File metaFile = metaFiles.get(0);
+ RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
+ FileChannel channel = file.getChannel();
+ long position = channel.size() - 2;
+ int length = 2;
+ byte[] buffer = new byte[length];
+ random.nextBytes(buffer);
+ channel.write(ByteBuffer.wrap(buffer), position);
+ file.close();
+ LOG.info("Deliberately corrupting file " + metaFile.getName() +
+ " at offset " + position + " length " + length);
+
+ // read all files to trigger detection of corrupted replica
+ try {
+ util.checkFiles(fs, "/srcdat10");
+ } catch (BlockMissingException e) {
+ System.out.println("Received BlockMissingException as expected.");
+ } catch (IOException e) {
+ assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
+ " but received IOException " + e, false);
}
// fetch bad file list from namenode. There should be one file.
@@ -148,6 +148,8 @@ public class TestListCorruptFileBlocks {
// start populating repl queues immediately
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,
0f);
+ // Set short retry timeouts so this test runs faster
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
cluster.getNameNodeRpc().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
@@ -170,38 +172,30 @@ public class TestListCorruptFileBlocks {
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir,
cluster.getNamesystem().getBlockPoolId());
assertTrue("data directory does not exist", data_dir.exists());
- File[] blocks = data_dir.listFiles();
- assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
- (blocks.length > 0));
- for (int idx = 0; idx < blocks.length; idx++) {
- if (blocks[idx].getName().startsWith("blk_") &&
- blocks[idx].getName().endsWith(".meta")) {
- //
- // shorten .meta file
- //
- RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
- FileChannel channel = file.getChannel();
- long position = channel.size() - 2;
- int length = 2;
- byte[] buffer = new byte[length];
- random.nextBytes(buffer);
- channel.write(ByteBuffer.wrap(buffer), position);
- file.close();
- LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
- " at offset " + position + " length " + length);
-
- // read all files to trigger detection of corrupted replica
- try {
- util.checkFiles(fs, "/srcdat10");
- } catch (BlockMissingException e) {
- System.out.println("Received BlockMissingException as expected.");
- } catch (IOException e) {
- assertTrue("Corrupted replicas not handled properly. " +
- "Expecting BlockMissingException " +
- " but received IOException " + e, false);
- }
- break;
- }
+ List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
+ assertTrue("Data directory does not contain any blocks or there was an "
+ + "IO error", metaFiles != null && !metaFiles.isEmpty());
+ File metaFile = metaFiles.get(0);
+ RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
+ FileChannel channel = file.getChannel();
+ long position = channel.size() - 2;
+ int length = 2;
+ byte[] buffer = new byte[length];
+ random.nextBytes(buffer);
+ channel.write(ByteBuffer.wrap(buffer), position);
+ file.close();
+ LOG.info("Deliberately corrupting file " + metaFile.getName() +
+ " at offset " + position + " length " + length);
+
+ // read all files to trigger detection of corrupted replica
+ try {
+ util.checkFiles(fs, "/srcdat10");
+ } catch (BlockMissingException e) {
+ System.out.println("Received BlockMissingException as expected.");
+ } catch (IOException e) {
+ assertTrue("Corrupted replicas not handled properly. " +
+ "Expecting BlockMissingException " +
+ " but received IOException " + e, false);
}
// fetch bad file list from namenode. There should be one file.
@@ -291,17 +285,18 @@ public class TestListCorruptFileBlocks {
for (int j = 0; j <= 1; j++) {
File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
- File[] blocks = data_dir.listFiles();
- if (blocks == null)
+ List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+ data_dir);
+ if (metadataFiles == null)
continue;
// assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
// (blocks.length > 0));
- for (int idx = 0; idx < blocks.length; idx++) {
- if (!blocks[idx].getName().startsWith("blk_")) {
- continue;
- }
- LOG.info("Deliberately removing file " + blocks[idx].getName());
- assertTrue("Cannot remove file.", blocks[idx].delete());
+ for (File metadataFile : metadataFiles) {
+ File blockFile = Block.metaToBlockFile(metadataFile);
+ LOG.info("Deliberately removing file " + blockFile.getName());
+ assertTrue("Cannot remove file.", blockFile.delete());
+ LOG.info("Deliberately removing file " + metadataFile.getName());
+ assertTrue("Cannot remove file.", metadataFile.delete());
// break;
}
}
@@ -401,17 +396,18 @@ public class TestListCorruptFileBlocks {
for (int i = 0; i < 2; i++) {
File storageDir = cluster.getInstanceStorageDir(0, i);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
- File[] blocks = data_dir.listFiles();
- if (blocks == null)
+ List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+ data_dir);
+ if (metadataFiles == null)
continue;
// assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
// (blocks.length > 0));
- for (int idx = 0; idx < blocks.length; idx++) {
- if (!blocks[idx].getName().startsWith("blk_")) {
- continue;
- }
- LOG.info("Deliberately removing file " + blocks[idx].getName());
- assertTrue("Cannot remove file.", blocks[idx].delete());
+ for (File metadataFile : metadataFiles) {
+ File blockFile = Block.metaToBlockFile(metadataFile);
+ LOG.info("Deliberately removing file " + blockFile.getName());
+ assertTrue("Cannot remove file.", blockFile.delete());
+ LOG.info("Deliberately removing file " + metadataFile.getName());
+ assertTrue("Cannot remove file.", metadataFile.delete());
// break;
}
}
@@ -478,15 +474,14 @@ public class TestListCorruptFileBlocks {
File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
LOG.info("Removing files from " + data_dir);
- File[] blocks = data_dir.listFiles();
- if (blocks == null)
+ List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+ data_dir);
+ if (metadataFiles == null)
continue;
-
- for (int idx = 0; idx < blocks.length; idx++) {
- if (!blocks[idx].getName().startsWith("blk_")) {
- continue;
- }
- assertTrue("Cannot remove file.", blocks[idx].delete());
+ for (File metadataFile : metadataFiles) {
+ File blockFile = Block.metaToBlockFile(metadataFile);
+ assertTrue("Cannot remove file.", blockFile.delete());
+ assertTrue("Cannot remove file.", metadataFile.delete());
}
}
}
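
The refactored tests all corrupt a block's .meta file by overwriting its last two bytes in place. A standalone sketch of just that step; "some.meta" is a placeholder and the file is assumed to be at least two bytes long:

import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Random;

public class CorruptTailExample {
  public static void main(String[] args) throws Exception {
    RandomAccessFile file = new RandomAccessFile("some.meta", "rw");
    try {
      FileChannel channel = file.getChannel();
      long position = channel.size() - 2; // last two bytes
      byte[] buffer = new byte[2];
      new Random().nextBytes(buffer);     // random garbage
      channel.write(ByteBuffer.wrap(buffer), position);
    } finally {
      file.close();
    }
  }
}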
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java Tue Aug 19 23:49:39 2014
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -92,8 +93,9 @@ public class TestMetaSave {
try {
reader = new BufferedReader(new InputStreamReader(in));
String line = reader.readLine();
- assertTrue(line.equals(
- "3 files and directories, 2 blocks = 5 total filesystem objects"));
+ Assert.assertEquals(
+ "3 files and directories, 2 blocks = 5 total filesystem objects",
+ line);
line = reader.readLine();
assertTrue(line.equals("Live Datanodes: 1"));
line = reader.readLine();
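
The switch to Assert.assertEquals above is about failure diagnostics: assertEquals reports the expected and actual strings, while assertTrue(line.equals(...)) fails with no context. An illustrative sketch in which the second string is deliberately wrong:

import org.junit.Assert;

public class AssertStyleExample {
  public static void main(String[] args) {
    String expected =
        "3 files and directories, 2 blocks = 5 total filesystem objects";
    String actual =
        "2 files and directories, 2 blocks = 4 total filesystem objects";
    // Fails with "expected:<[3 ...]> but was:<[2 ...]>", naming both values
    Assert.assertEquals(expected, actual);
  }
}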
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java Tue Aug 19 23:49:39 2014
@@ -212,18 +212,25 @@ public class TestNNStorageRetentionManag
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
+    // Segments containing txns up to txId 250 are extra and should be purged.
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(1, 100), true);
- // Without lowering the max segments to retain, we'd retain all segments
- // going back to txid 150 (300 - 150).
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(101, 175), true);
+ tc.addLog("/foo2/current/" + getInProgressEditsFileName(176) + ".empty",
+ true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(176, 200), true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(201, 225), true);
+ tc.addLog("/foo2/current/" + getInProgressEditsFileName(226) + ".corrupt",
+ true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(226, 240), true);
// Only retain 2 extra segments. The 301-350 and 351-400 segments are
// considered required, not extra.
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(241, 275), false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(276, 300), false);
+ tc.addLog("/foo2/current/" + getInProgressEditsFileName(301) + ".empty",
+ false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(301, 350), false);
+ tc.addLog("/foo2/current/" + getInProgressEditsFileName(351) + ".corrupt",
+ false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(351, 400), false);
tc.addLog("/foo2/current/" + getInProgressEditsFileName(401), false);
runTest(tc);
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Tue Aug 19 23:49:39 2014
@@ -30,9 +30,13 @@ import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.util.VersionInfo;
@@ -58,11 +62,14 @@ public class TestNameNodeMXBean {
public void testNameNodeMXBeanInfo() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
- NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+ NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+ conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+
MiniDFSCluster cluster = null;
try {
- cluster = new MiniDFSCluster.Builder(conf).build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNameNode().namesystem;
@@ -70,6 +77,29 @@ public class TestNameNodeMXBean {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeInfo");
+
+ // Define include file to generate deadNodes metrics
+ FileSystem localFileSys = FileSystem.getLocal(conf);
+ Path workingDir = localFileSys.getWorkingDirectory();
+ Path dir = new Path(workingDir,
+ "build/test/data/temp/TestNameNodeMXBean");
+ Path includeFile = new Path(dir, "include");
+ assertTrue(localFileSys.mkdirs(dir));
+ StringBuilder includeHosts = new StringBuilder();
+ for(DataNode dn : cluster.getDataNodes()) {
+ includeHosts.append(dn.getDisplayName()).append("\n");
+ }
+ DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
+ conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+ fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+
+ cluster.stopDataNode(0);
+ while (fsn.getNumDatanodesInService() != 2) {
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {}
+ }
+
// get attribute "ClusterId"
String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
assertEquals(fsn.getClusterId(), clusterId);
@@ -121,6 +151,15 @@ public class TestNameNodeMXBean {
String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
"DeadNodes"));
assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+ Map<String, Map<String, Object>> deadNodes =
+ (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
+ assertTrue(deadNodes.size() > 0);
+ for (Map<String, Object> deadNode : deadNodes.values()) {
+ assertTrue(deadNode.containsKey("lastContact"));
+ assertTrue(deadNode.containsKey("decommissioned"));
+ assertTrue(deadNode.containsKey("xferaddr"));
+ }
+
// get attribute NodeUsage
String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
"NodeUsage"));
@@ -181,7 +220,7 @@ public class TestNameNodeMXBean {
assertEquals(1, statusMap.get("active").size());
assertEquals(1, statusMap.get("failed").size());
assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
- assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+ assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
cluster.getDataNodes().size(),
mbs.getAttribute(mxbeanName, "CacheCapacity"));
} finally {
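
The DeadNodes assertions above parse the JMX attribute as JSON. A hedged sketch of that parsing, assuming the org.mortbay JSON helper the test uses and an illustrative payload carrying the asserted keys:

import java.util.Map;

import org.mortbay.util.ajax.JSON;

public class DeadNodesParseExample {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // Illustrative payload; real values come from the DeadNodes attribute.
    String deadnodeinfo = "{\"dn1:50010\":{\"lastContact\":12,"
        + "\"decommissioned\":false,\"xferaddr\":\"127.0.0.1:50010\"}}";
    Map<String, Map<String, Object>> deadNodes =
        (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
    for (Map<String, Object> deadNode : deadNodes.values()) {
      System.out.println(deadNode.get("lastContact"));     // 12
      System.out.println(deadNode.get("decommissioned"));  // false
      System.out.println(deadNode.get("xferaddr"));        // 127.0.0.1:50010
    }
  }
}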
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java Tue Aug 19 23:49:39 2014
@@ -18,9 +18,11 @@
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
+import static org.junit.Assert.*;
import java.io.File;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -28,12 +30,21 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;
@@ -153,4 +164,177 @@ public class TestNamenodeCapacityReport
if (cluster != null) {cluster.shutdown();}
}
}
+
+ private static final float EPSILON = 0.0001f;
+ @Test
+ public void testXceiverCount() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ // don't waste time retrying if close fails
+ conf.setInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 0);
+ MiniDFSCluster cluster = null;
+
+ final int nodes = 8;
+ final int fileCount = 5;
+ final short fileRepl = 3;
+
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(nodes).build();
+ cluster.waitActive();
+
+ final FSNamesystem namesystem = cluster.getNamesystem();
+ final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
+ List<DataNode> datanodes = cluster.getDataNodes();
+ final DistributedFileSystem fs = cluster.getFileSystem();
+
+ // trigger heartbeats in case not already sent
+ triggerHeartbeats(datanodes);
+
+ // check that all nodes are live and in service
+ int expectedTotalLoad = nodes; // xceiver server adds 1 to load
+ int expectedInServiceNodes = nodes;
+ int expectedInServiceLoad = nodes;
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceLoad,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+
+ // shutdown half the nodes and force a heartbeat check to ensure
+ // counts are accurate
+ for (int i=0; i < nodes/2; i++) {
+ DataNode dn = datanodes.get(i);
+ DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
+ dn.shutdown();
+ dnd.setLastUpdate(0L);
+ BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
+ expectedInServiceNodes--;
+ assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ }
+
+ // restart the nodes to verify that counts are correct after
+ // node re-registration
+ cluster.restartDataNodes();
+ cluster.waitActive();
+ datanodes = cluster.getDataNodes();
+ expectedInServiceNodes = nodes;
+ assertEquals(nodes, datanodes.size());
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceLoad,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+
+ // create streams and hsync to force datastreamers to start
+ DFSOutputStream[] streams = new DFSOutputStream[fileCount];
+ for (int i=0; i < fileCount; i++) {
+ streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i), fileRepl)
+ .getWrappedStream();
+ streams[i].write("1".getBytes());
+ streams[i].hsync();
+ // the load for writers is 2 because both the write xceiver & packet
+ // responder threads are counted in the load
+ expectedTotalLoad += 2*fileRepl;
+ expectedInServiceLoad += 2*fileRepl;
+ }
+ // force nodes to send load update
+ triggerHeartbeats(datanodes);
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes,
+ namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+
+    // decomm a few nodes, subtract their load from the expected load,
+ // trigger heartbeat to force load update
+ for (int i=0; i < fileRepl; i++) {
+ expectedInServiceNodes--;
+ DatanodeDescriptor dnd =
+ dnm.getDatanode(datanodes.get(i).getDatanodeId());
+ expectedInServiceLoad -= dnd.getXceiverCount();
+ dnm.startDecommission(dnd);
+ DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
+ Thread.sleep(100);
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes,
+ namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+ }
+
+ // check expected load while closing each stream. recalc expected
+ // load based on whether the nodes in the pipeline are decomm
+ for (int i=0; i < fileCount; i++) {
+ int decomm = 0;
+ for (DatanodeInfo dni : streams[i].getPipeline()) {
+ DatanodeDescriptor dnd = dnm.getDatanode(dni);
+ expectedTotalLoad -= 2;
+ if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
+ decomm++;
+ } else {
+ expectedInServiceLoad -= 2;
+ }
+ }
+ try {
+ streams[i].close();
+ } catch (IOException ioe) {
+ // nodes will go decommissioned even if there's a UC block whose
+ // other locations are decommissioned too. we'll ignore that
+ // bug for now
+ if (decomm < fileRepl) {
+ throw ioe;
+ }
+ }
+ triggerHeartbeats(datanodes);
+ // verify node count and loads
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes,
+ namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+ }
+
+ // shutdown each node, verify node counts based on decomm state
+ for (int i=0; i < nodes; i++) {
+ DataNode dn = datanodes.get(i);
+ dn.shutdown();
+ // force it to appear dead so live count decreases
+ DatanodeDescriptor dnDesc = dnm.getDatanode(dn.getDatanodeId());
+ dnDesc.setLastUpdate(0L);
+ BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
+ assertEquals(nodes-1-i, namesystem.getNumLiveDataNodes());
+ // first few nodes are already out of service
+ if (i >= fileRepl) {
+ expectedInServiceNodes--;
+ }
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+
+      // live nodes always report load of 1; when no nodes are live the load is 0
+ double expectedXceiverAvg = (i == nodes-1) ? 0.0 : 1.0;
+ assertEquals((double)expectedXceiverAvg,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+ }
+
+ // final sanity check
+ assertEquals(0, namesystem.getNumLiveDataNodes());
+ assertEquals(0, namesystem.getNumDatanodesInService());
+ assertEquals(0.0, namesystem.getTotalLoad(), EPSILON);
+ assertEquals(0.0, namesystem.getInServiceXceiverAverage(), EPSILON);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ private void triggerHeartbeats(List<DataNode> datanodes)
+ throws IOException, InterruptedException {
+ for (DataNode dn : datanodes) {
+ DataNodeTestUtils.triggerHeartbeat(dn);
+ }
+ Thread.sleep(100);
+ }
}
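
The load bookkeeping in testXceiverCount follows from two rules stated in its comments: each live datanode's xceiver server counts as load 1, and each open write pipeline adds 2 (write xceiver plus packet responder) per replica. A quick arithmetic check using the test's own constants:

public class XceiverLoadMath {
  public static void main(String[] args) {
    int nodes = 8, fileCount = 5, fileRepl = 3; // test constants
    // each live datanode's xceiver server contributes load 1
    int totalLoad = nodes;
    // each open stream adds a write xceiver plus a packet responder
    // on every replica in its pipeline
    totalLoad += fileCount * 2 * fileRepl;      // 8 + 30 = 38
    double avg = (double) totalLoad / nodes;    // 4.75
    System.out.println(totalLoad + " " + avg);  // prints "38 4.75"
  }
}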
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Tue Aug 19 23:49:39 2014
@@ -415,7 +415,7 @@ public class TestNamenodeRetryCache {
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
- assertEquals(20, cacheSet.size());
+ assertEquals(23, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -434,7 +434,7 @@ public class TestNamenodeRetryCache {
assertTrue(namesystem.hasRetryCache());
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
.getRetryCache().getCacheSet();
- assertEquals(20, cacheSet.size());
+ assertEquals(23, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Tue Aug 19 23:49:39 2014
@@ -61,6 +61,7 @@ import org.apache.hadoop.test.GenericTes
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -124,14 +125,14 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- FSImage originalImage = fsn.dir.fsImage;
+ FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
NNStorage spyStorage = spy(storage);
originalImage.storage = spyStorage;
FSImage spyImage = spy(originalImage);
- fsn.dir.fsImage = spyImage;
+ Whitebox.setInternalState(fsn, "fsImage", spyImage);
boolean shouldFail = false; // should we expect the save operation to fail
// inject fault
@@ -233,11 +234,11 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- FSImage originalImage = fsn.dir.fsImage;
+ FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
FSImage spyImage = spy(originalImage);
- fsn.dir.fsImage = spyImage;
+ Whitebox.setInternalState(fsn, "fsImage", spyImage);
FileSystem fs = FileSystem.getLocal(conf);
File rootDir = storage.getStorageDir(0).getRoot();
@@ -367,14 +368,15 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- final FSImage originalImage = fsn.dir.fsImage;
+ final FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
NNStorage spyStorage = spy(storage);
originalImage.storage = spyStorage;
FSImage spyImage = spy(originalImage);
- fsn.dir.fsImage = spyImage;
+ Whitebox.setInternalState(fsn, "fsImage", spyImage);
+
spyImage.storage.setStorageDirectories(
FSNamesystem.getNamespaceDirs(conf),
FSNamesystem.getNamespaceEditsDirs(conf));
@@ -504,7 +506,7 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- final FSImage image = fsn.dir.fsImage;
+ final FSImage image = fsn.getFSImage();
NNStorage storage = image.getStorage();
storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
storage.setStorageDirectories(
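Each hunk in this file replaces direct access to fsn.dir.fsImage with fsn.getFSImage() for reads and reflection for writes, consistent with the image handle now living in a private field on FSNamesystem. A minimal sketch of the injection idiom used above:

    // Spy on the real FSImage, then push the spy into FSNamesystem's
    // private "fsImage" field by name via Mockito's Whitebox reflection
    // helper. Faults can then be injected by stubbing spyImage methods.
    FSImage originalImage = fsn.getFSImage();
    FSImage spyImage = spy(originalImage);
    Whitebox.setInternalState(fsn, "fsImage", spyImage);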
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java Tue Aug 19 23:49:39 2014
@@ -17,20 +17,22 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
+import javax.management.*;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.URL;
+
public class TestSecondaryWebUi {
private static MiniDFSCluster cluster;
@@ -41,6 +43,7 @@ public class TestSecondaryWebUi {
public static void setUpCluster() throws IOException {
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
"0.0.0.0:0");
+ conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 500);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.build();
cluster.waitActive();
@@ -59,18 +62,20 @@ public class TestSecondaryWebUi {
}
@Test
- public void testSecondaryWebUi() throws IOException {
- String pageContents = DFSTestUtil.urlGet(new URL("http://localhost:" +
- SecondaryNameNode.getHttpAddress(conf).getPort() + "/status.jsp"));
- assertTrue("Didn't find \"Last Checkpoint\"",
- pageContents.contains("Last Checkpoint"));
- }
-
- @Test
- public void testSecondaryWebJmx() throws MalformedURLException, IOException {
- String pageContents = DFSTestUtil.urlGet(new URL("http://localhost:" +
- SecondaryNameNode.getHttpAddress(conf).getPort() + "/jmx"));
- assertTrue(pageContents.contains(
- "Hadoop:service=SecondaryNameNode,name=JvmMetrics"));
+ public void testSecondaryWebUi()
+ throws IOException, MalformedObjectNameException,
+ AttributeNotFoundException, MBeanException,
+ ReflectionException, InstanceNotFoundException {
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName = new ObjectName(
+ "Hadoop:service=SecondaryNameNode,name=SecondaryNameNodeInfo");
+
+ String[] checkpointDir = (String[]) mbs.getAttribute(mxbeanName,
+ "CheckpointDirectories");
+ Assert.assertArrayEquals(snn.getCheckpointDirectories(), checkpointDir);
+ String[] checkpointEditlogDir = (String[]) mbs.getAttribute(mxbeanName,
+ "CheckpointEditlogDirectories");
+ Assert.assertArrayEquals(snn.getCheckpointEditlogDirectories(),
+ checkpointEditlogDir);
}
}
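The rewritten test queries the SecondaryNameNode's MXBean through the in-process platform MBeanServer instead of scraping status.jsp or /jmx over HTTP. A minimal sketch of that lookup pattern, with the object and attribute names taken from the hunk above:

    // Read an attribute of a registered Hadoop MXBean via platform JMX.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=SecondaryNameNode,name=SecondaryNameNodeInfo");
    String[] dirs = (String[]) mbs.getAttribute(mxbeanName,
        "CheckpointDirectories");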
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java Tue Aug 19 23:49:39 2014
@@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.DFSTestUti
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -90,22 +89,20 @@ public class TestSnapshotPathINodes {
final INode before = fsdir.getINode(pathStr);
// Before a directory is snapshottable
- Assert.assertTrue(before instanceof INodeDirectory);
- Assert.assertFalse(before instanceof INodeDirectorySnapshottable);
+ Assert.assertFalse(before.asDirectory().isSnapshottable());
// After a directory is snapshottable
final Path path = new Path(pathStr);
hdfs.allowSnapshot(path);
{
final INode after = fsdir.getINode(pathStr);
- Assert.assertTrue(after instanceof INodeDirectorySnapshottable);
+ Assert.assertTrue(after.asDirectory().isSnapshottable());
}
hdfs.disallowSnapshot(path);
{
final INode after = fsdir.getINode(pathStr);
- Assert.assertTrue(after instanceof INodeDirectory);
- Assert.assertFalse(after instanceof INodeDirectorySnapshottable);
+ Assert.assertFalse(after.asDirectory().isSnapshottable());
}
}
@@ -115,8 +112,7 @@ public class TestSnapshotPathINodes {
}
final int i = inodesInPath.getSnapshotRootIndex() - 1;
final INode inode = inodesInPath.getINodes()[i];
- return ((INodeDirectorySnapshottable)inode).getSnapshot(
- DFSUtil.string2Bytes(name));
+ return inode.asDirectory().getSnapshot(DFSUtil.string2Bytes(name));
}
static void assertSnapshot(INodesInPath inodesInPath, boolean isSnapshot,
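The assertions here move from subtype checks (instanceof INodeDirectorySnapshottable) to a feature query on the directory itself. A minimal sketch of the new pattern; the path is an illustrative assumption:

    // Snapshottability is now queried on INodeDirectory rather than
    // tested via a dedicated subtype.
    INode inode = fsdir.getINode("/some/dir");  // illustrative path
    boolean canSnapshot = inode.isDirectory()
        && inode.asDirectory().isSnapshottable();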
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Tue Aug 19 23:49:39 2014
@@ -26,6 +26,7 @@ import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
+import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.net.URI;
import java.util.ArrayList;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -64,6 +66,9 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
/**
* Startup and checkpoint tests
*
@@ -620,4 +625,104 @@ public class TestStartup {
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
+
+ @Test(timeout = 120000)
+ public void testXattrConfiguration() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster = null;
+
+ try {
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, -1);
+ cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
+ fail("Expected exception with negative xattr size");
+ } catch (IllegalArgumentException e) {
+ GenericTestUtils.assertExceptionContains(
+ "Cannot set a negative value for the maximum size of an xattr", e);
+ } finally {
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
+ DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ try {
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, -1);
+ cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
+ fail("Expected exception with negative # xattrs per inode");
+ } catch (IllegalArgumentException e) {
+ GenericTestUtils.assertExceptionContains(
+ "Cannot set a negative limit on the number of xattrs per inode", e);
+ } finally {
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
+ DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ try {
+ // Set up a logger to check log message
+ final LogVerificationAppender appender = new LogVerificationAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+ int count = appender.countLinesWithMessage(
+ "Maximum size of an xattr: 0 (unlimited)");
+ assertEquals("Expected no messages about unlimited xattr size", 0, count);
+
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0);
+ cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
+
+ count = appender.countLinesWithMessage(
+ "Maximum size of an xattr: 0 (unlimited)");
+ // happens twice because we format then run
+ assertEquals("Expected unlimited xattr size", 2, count);
+ } finally {
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
+ DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ /**
+ * Verify the following scenario.
+ * 1. NN restarts.
+ * 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
+ * 3. After reregistration completes, DN will send Heartbeat, followed by
+ * Blockreport.
+ * 4. NN will set DatanodeStorageInfo#blockContentsStale to false.
+ * @throws Exception
+ */
+ @Test(timeout = 60000)
+ public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
+ MiniDFSCluster dfsCluster = null;
+ try {
+ Configuration config = new Configuration();
+ dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+ dfsCluster.waitActive();
+ dfsCluster.restartNameNode(true);
+ BlockManagerTestUtil.checkHeartbeat(
+ dfsCluster.getNamesystem().getBlockManager());
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanNameFsns = new ObjectName(
+ "Hadoop:service=NameNode,name=FSNamesystemState");
+ Integer numStaleStorages = (Integer) (mbs.getAttribute(
+ mxbeanNameFsns, "NumStaleStorages"));
+ assertEquals(0, numStaleStorages.intValue());
+ } finally {
+ if (dfsCluster != null) {
+ dfsCluster.shutdown();
+ }
+ }
+ }
+
}
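testXattrConfiguration checks the startup log for the unlimited-xattr message by attaching a verification appender to the root log4j logger. A minimal sketch of that capture idiom, using only calls visible in the hunk; the message string is illustrative:

    // Attach a log-capturing appender, run the operation under test,
    // then count occurrences of an expected message.
    final LogVerificationAppender appender = new LogVerificationAppender();
    Logger.getRootLogger().addAppender(appender);
    // ... start the cluster or other operation under test ...
    int count = appender.countLinesWithMessage("expected log text");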
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java Tue Aug 19 23:49:39 2014
@@ -21,6 +21,8 @@ import static org.junit.Assert.assertEqu
import static org.junit.Assert.assertTrue;
import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
@@ -30,11 +32,15 @@ import org.apache.hadoop.hdfs.server.com
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
/**
* This class tests various upgrade cases from earlier versions to current
* version with and without clusterid.
*/
+@RunWith(value = Parameterized.class)
public class TestStartupOptionUpgrade {
private Configuration conf;
@@ -42,10 +48,21 @@ public class TestStartupOptionUpgrade {
private int layoutVersion;
NNStorage storage;
+ @Parameters
+ public static Collection<Object[]> startOption() {
+ Object[][] params = new Object[][] { { StartupOption.UPGRADE },
+ { StartupOption.UPGRADEONLY } };
+ return Arrays.asList(params);
+ }
+
+ public TestStartupOptionUpgrade(StartupOption startOption) {
+ super();
+ this.startOpt = startOption;
+ }
+
@Before
public void setUp() throws Exception {
conf = new HdfsConfiguration();
- startOpt = StartupOption.UPGRADE;
startOpt.setClusterId(null);
storage = new NNStorage(conf,
Collections.<URI>emptyList(),
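With the Parameterized runner, every @Test in this class now runs once per startup option (UPGRADE, then UPGRADEONLY). For reference, a self-contained sketch of the runner's mechanics; the class name and values are illustrative:

    import java.util.Arrays;
    import java.util.Collection;
    import org.junit.Assert;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    // Each Object[] returned by @Parameters is handed to the constructor,
    // and the full set of @Test methods executes once per parameter set.
    @RunWith(Parameterized.class)
    public class ExampleParamTest {
      private final int value;

      @Parameters
      public static Collection<Object[]> data() {
        return Arrays.asList(new Object[][] { { 1 }, { 2 } });
      }

      public ExampleParamTest(int value) {
        this.value = value;
      }

      @Test
      public void runsOncePerValue() {
        Assert.assertTrue(value > 0);
      }
    }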
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Tue Aug 19 23:49:39 2014
@@ -65,7 +65,7 @@ public class TestTransferFsImage {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).build();
NNStorage mockStorage = Mockito.mock(NNStorage.class);
- List<File> localPath = Collections.<File>singletonList(
+ List<File> localPath = Collections.singletonList(
new File("/xxxxx-does-not-exist/blah"));
try {
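The change drops an explicit type witness that the compiler infers from the argument. Both forms below are equivalent; the path is illustrative:

    List<File> a = Collections.<File>singletonList(new File("/tmp/x")); // explicit witness
    List<File> b = Collections.singletonList(new File("/tmp/x"));       // inferred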
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java Tue Aug 19 23:49:39 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.ha.ZKFailoverCo
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
import org.apache.hadoop.hdfs.tools.DFSZKFailoverController;
@@ -53,6 +54,11 @@ public class TestDFSZKFailoverController
private TestContext ctx;
private ZKFCThread thr1, thr2;
private FileSystem fs;
+
+ static {
+ // Make tests run faster by avoiding fsync()
+ EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+ }
@Before
public void setup() throws Exception {
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java Tue Aug 19 23:49:39 2014
@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.server.blo
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -585,15 +585,14 @@ public class TestDNFencing {
}
@Override
- public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
+ public DatanodeStorageInfo chooseReplicaToDelete(BlockCollection inode,
Block block, short replicationFactor,
- Collection<DatanodeDescriptor> first,
- Collection<DatanodeDescriptor> second) {
+ Collection<DatanodeStorageInfo> first,
+ Collection<DatanodeStorageInfo> second) {
- Collection<DatanodeDescriptor> chooseFrom =
- !first.isEmpty() ? first : second;
+ Collection<DatanodeStorageInfo> chooseFrom =
+ !first.isEmpty() ? first : second;
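+ // pick a victim uniformly at random from the chosen collection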
- List<DatanodeDescriptor> l = Lists.newArrayList(chooseFrom);
+ List<DatanodeStorageInfo> l = Lists.newArrayList(chooseFrom);
return l.get(DFSUtil.getRandom().nextInt(l.size()));
}
}