Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/19 04:28:07 UTC
svn commit: r1399950 [23/27] - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: ./
hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apac...
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Fri Oct 19 02:25:55 2012
@@ -27,6 +27,9 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
+
+import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -55,6 +58,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
+import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
@@ -64,7 +68,7 @@ import org.mockito.invocation.Invocation
/**
* This test simulates a variety of situations when blocks are being
- * intentionally orrupted, unexpectedly modified, and so on before a block
+ * intentionally corrupted, unexpectedly modified, and so on before a block
* report is happening
*/
public class TestBlockReport {
@@ -315,7 +319,7 @@ public class TestBlockReport {
* @throws IOException in case of an error
*/
@Test
- public void blockReport_06() throws IOException {
+ public void blockReport_06() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -352,7 +356,7 @@ public class TestBlockReport {
@Test
// Currently this test is failing as expected 'cause the correct behavior is
// not yet implemented (9/15/09)
- public void blockReport_07() throws IOException {
+ public void blockReport_07() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -615,12 +619,12 @@ public class TestBlockReport {
final DataNode dn1 = cluster.getDataNodes().get(DN_N1);
String bpid = cluster.getNamesystem().getBlockPoolId();
Replica r = DataNodeTestUtils.fetchReplicaInfo(dn1, bpid, bl.getBlockId());
- long start = System.currentTimeMillis();
+ long start = Time.now();
int count = 0;
while (r == null) {
waitTil(5);
r = DataNodeTestUtils.fetchReplicaInfo(dn1, bpid, bl.getBlockId());
- long waiting_period = System.currentTimeMillis() - start;
+ long waiting_period = Time.now() - start;
if (count++ % 100 == 0)
if(LOG.isDebugEnabled()) {
LOG.debug("Has been waiting for " + waiting_period + " ms.");
@@ -634,7 +638,7 @@ public class TestBlockReport {
if(LOG.isDebugEnabled()) {
LOG.debug("Replica state before the loop " + state.getValue());
}
- start = System.currentTimeMillis();
+ start = Time.now();
while (state != HdfsServerConstants.ReplicaState.TEMPORARY) {
waitTil(5);
state = r.getState();
@@ -642,7 +646,7 @@ public class TestBlockReport {
LOG.debug("Keep waiting for " + bl.getBlockName() +
" is in state " + state.getValue());
}
- if (System.currentTimeMillis() - start > TIMEOUT)
+ if (Time.now() - start > TIMEOUT)
assertTrue("Was waiting too long for a replica to become TEMPORARY",
tooLongWait);
}
@@ -669,21 +673,24 @@ public class TestBlockReport {
}
private void startDNandWait(Path filePath, boolean waitReplicas)
- throws IOException {
- if(LOG.isDebugEnabled()) {
+ throws IOException, InterruptedException, TimeoutException {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
}
cluster.startDataNodes(conf, 1, true, null, null);
+ cluster.waitClusterUp();
ArrayList<DataNode> datanodes = cluster.getDataNodes();
assertEquals(datanodes.size(), 2);
- if(LOG.isDebugEnabled()) {
+ if (LOG.isDebugEnabled()) {
int lastDn = datanodes.size() - 1;
LOG.debug("New datanode "
+ cluster.getDataNodes().get(lastDn).getDisplayName()
+ " has been started");
}
- if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+ if (waitReplicas) {
+ DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+ }
}
private ArrayList<Block> prepareForRide(final Path filePath,
@@ -761,6 +768,7 @@ public class TestBlockReport {
this.all = all;
}
+ @Override
public boolean accept(File file, String s) {
if (all)
return s != null && s.startsWith(nameToAccept);
@@ -830,11 +838,13 @@ public class TestBlockReport {
this.filePath = filePath;
}
+ @Override
public void run() {
try {
startDNandWait(filePath, true);
- } catch (IOException e) {
- LOG.warn("Shouldn't happen", e);
+ } catch (Exception e) {
+ e.printStackTrace();
+ Assert.fail("Failed to start BlockChecker: " + e);
}
}
}
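
The wait loops in the hunk above all follow the same bounded-poll shape: record a
start time (now via org.apache.hadoop.util.Time.now(), which simply returns
System.currentTimeMillis()), re-check the condition after a short sleep, and give
up once a deadline passes. A minimal self-contained sketch of that pattern in
plain Java — the class and parameter names are illustrative, not from the patch;
Hadoop's GenericTestUtils offers a similar waitFor helper:

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeoutException;

    public final class WaitFor {
      private WaitFor() {}

      /** Poll check every intervalMs until it returns true or timeoutMs elapses. */
      public static void waitFor(Callable<Boolean> check, long intervalMs, long timeoutMs)
          throws Exception {
        long start = System.currentTimeMillis(); // Time.now() delegates to this
        while (!check.call()) {
          if (System.currentTimeMillis() - start > timeoutMs) {
            throw new TimeoutException("condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(intervalMs);
        }
      }
    }
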
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java Fri Oct 19 02:25:55 2012
@@ -23,10 +23,10 @@ import java.util.List;
import javax.management.MBeanServer;
import javax.management.ObjectName;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
+import org.junit.Test;
/**
* Class for testing {@link DataNodeMXBean} implementation
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java Fri Oct 19 02:25:55 2012
@@ -18,20 +18,26 @@
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import java.util.List;
-import java.util.Random;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Test;
@@ -58,8 +64,10 @@ public class TestDataNodeMetrics {
}
@Test
- public void testSendDataPacket() throws Exception {
+ public void testSendDataPacketMetrics() throws Exception {
Configuration conf = new HdfsConfiguration();
+ final int interval = 1;
+ conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
@@ -72,64 +80,110 @@ public class TestDataNodeMetrics {
assertEquals(datanodes.size(), 1);
DataNode datanode = datanodes.get(0);
MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
-
// Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
// signaling the end of the block
assertCounter("SendDataPacketTransferNanosNumOps", (long)2, rb);
assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long)2, rb);
+ // Wait for at least 1 rollover
+ Thread.sleep((interval + 1) * 1000);
+ // Check that the sendPacket percentiles rolled to non-zero values
+ String sec = interval + "s";
+ assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
+ assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
@Test
- public void testFlushMetric() throws Exception {
+ public void testReceivePacketMetrics() throws Exception {
Configuration conf = new HdfsConfiguration();
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ final int interval = 1;
+ conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
Path testFile = new Path("/testFlushNanosMetric.txt");
- DFSTestUtil.createFile(fs, testFile, 1, (short)1, new Random().nextLong());
-
+ FSDataOutputStream fout = fs.create(testFile);
+ fout.write(new byte[1]);
+ fout.hsync();
+ fout.close();
List<DataNode> datanodes = cluster.getDataNodes();
DataNode datanode = datanodes.get(0);
MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
- // Expect 2 flushes, 1 for the flush that occurs after writing, 1 that occurs
- // on closing the data and metadata files.
+ // Expect two flushes, 1 for the flush that occurs after writing,
+ // 1 that occurs on closing the data and metadata files.
assertCounter("FlushNanosNumOps", 2L, dnMetrics);
+ // Expect two syncs, one from the hsync, one on close.
+ assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
+ // Wait for at least 1 rollover
+ Thread.sleep((interval + 1) * 1000);
+ // Check the receivePacket percentiles that should be non-zero
+ String sec = interval + "s";
+ assertQuantileGauges("FlushNanos" + sec, dnMetrics);
+ assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
+ /**
+ * Tests that round-trip acks in a datanode write pipeline are correctly
+ * measured.
+ */
@Test
public void testRoundTripAckMetric() throws Exception {
- final int DATANODE_COUNT = 2;
-
+ final int datanodeCount = 2;
+ final int interval = 1;
Configuration conf = new HdfsConfiguration();
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
+ conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+ datanodeCount).build();
try {
cluster.waitActive();
- DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
-
+ FileSystem fs = cluster.getFileSystem();
+ // Open a file and get the head of the pipeline
Path testFile = new Path("/testRoundTripAckMetric.txt");
- DFSTestUtil.createFile(fs, testFile, 1, (short)DATANODE_COUNT,
- new Random().nextLong());
-
- boolean foundNonzeroPacketAckNumOps = false;
+ FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
+ DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
+ // Slow down the writes to catch the write pipeline
+ dout.setChunksPerPacket(5);
+ dout.setArtificialSlowdown(3000);
+ fsout.write(new byte[10000]);
+ DatanodeInfo[] pipeline = null;
+ int count = 0;
+ while (pipeline == null && count < 5) {
+ pipeline = dout.getPipeline();
+ System.out.println("Waiting for pipeline to be created.");
+ Thread.sleep(1000);
+ count++;
+ }
+ // Get the head node that should be receiving downstream acks
+ DatanodeInfo headInfo = pipeline[0];
+ DataNode headNode = null;
for (DataNode datanode : cluster.getDataNodes()) {
- MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
- if (getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0) {
- foundNonzeroPacketAckNumOps = true;
+ if (datanode.getDatanodeId().equals(headInfo)) {
+ headNode = datanode;
+ break;
}
}
- assertTrue(
- "Expected at least one datanode to have reported PacketAckRoundTripTimeNanos metric",
- foundNonzeroPacketAckNumOps);
+ assertNotNull("Could not find the head of the datanode write pipeline",
+ headNode);
+ // Close the file and wait for the metrics to rollover
+ Thread.sleep((interval + 1) * 1000);
+ // Check the ack was received
+ MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics()
+ .name());
+ assertTrue("Expected non-zero number of acks",
+ getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
+ assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval
+ + "s", dnMetrics);
} finally {
- if (cluster != null) {cluster.shutdown();}
+ if (cluster != null) {
+ cluster.shutdown();
+ }
}
}
}
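
The new assertions in this file depend on timing: DFS_METRICS_PERCENTILES_INTERVALS_KEY
makes the datanode publish latency quantiles on a fixed interval, and a quantile
gauge only becomes visible after its window rolls over, which is why each test
sleeps (interval + 1) seconds before calling assertQuantileGauges. A toy model of
that rollover timing, in plain Java (all names here are illustrative):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;

    public class RolloverSketch {
      public static void main(String[] args) throws InterruptedException {
        final int intervalSecs = 1;                   // mirrors "interval" in the tests
        final AtomicLong window = new AtomicLong();   // samples in the current window
        final AtomicLong snapshot = new AtomicLong(); // last published snapshot
        ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
        // One rollover, intervalSecs after start; a real collector reschedules each interval.
        ses.schedule(new Runnable() {
          @Override
          public void run() { snapshot.set(window.getAndSet(0)); } // publish and reset
        }, intervalSecs, TimeUnit.SECONDS);

        window.incrementAndGet(); // record one sample into the current window
        // Readers see nothing until the rollover fires, hence the tests'
        // Thread.sleep((interval + 1) * 1000) before asserting the gauges.
        Thread.sleep((intervalSecs + 1) * 1000L);
        System.out.println("published after rollover: " + snapshot.get()); // prints 1
        ses.shutdownNow();
      }
    }
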
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Fri Oct 19 02:25:55 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNT
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -216,48 +217,62 @@ public class TestDataNodeMultipleRegistr
LOG.info("dn bpos len (still should be 3):" + bposs.length);
Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
} finally {
- if(cluster != null)
- cluster.shutdown();
+ cluster.shutdown();
}
}
@Test
public void testMiniDFSClusterWithMultipleNN() throws IOException {
-
Configuration conf = new HdfsConfiguration();
// start Federated cluster and add a node.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.build();
- Assert.assertNotNull(cluster);
- Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
// add a node
- cluster.addNameNode(conf, 0);
- Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
- cluster.shutdown();
+ try {
+ Assert.assertNotNull(cluster);
+ cluster.waitActive();
+ Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+
+ cluster.addNameNode(conf, 0);
+ Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
+ } catch (IOException ioe) {
+ Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+ } finally {
+ cluster.shutdown();
+ }
// 2. start with Federation flag set
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
.build();
- Assert.assertNotNull(cluster);
- Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
- // add a node
- cluster.addNameNode(conf, 0);
- Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
- cluster.shutdown();
+ try {
+ Assert.assertNotNull(cluster);
+ cluster.waitActive();
+ Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+
+ // add a node
+ cluster.addNameNode(conf, 0);
+ Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+ } catch (IOException ioe) {
+ Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+ } finally {
+ cluster.shutdown();
+ }
// 3. start non-federated
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
- Assert.assertNotNull(cluster);
- Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
// add a node
try {
+ cluster.waitActive();
+ Assert.assertNotNull(cluster);
+ Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+
cluster.addNameNode(conf, 9929);
Assert.fail("shouldn't be able to add another NN to non federated cluster");
} catch (IOException e) {
@@ -268,6 +283,4 @@ public class TestDataNodeMultipleRegistr
cluster.shutdown();
}
}
-
-
}
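
The reshaped testMiniDFSClusterWithMultipleNN puts every cluster use inside
try/finally so that an assertion failure can no longer leak a running
MiniDFSCluster into later tests. Reduced to a sketch (ClusterLike is a
hypothetical stand-in for MiniDFSCluster):

    interface ClusterLike {
      void waitActive() throws Exception;
      void shutdown();
    }

    class ClusterUser {
      void withCluster(ClusterLike cluster) throws Exception {
        try {
          cluster.waitActive();
          // ... assertions against the running cluster ...
        } finally {
          cluster.shutdown(); // runs even when waitActive or an assertion throws
        }
      }
    }
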
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Fri Oct 19 02:25:55 2012
@@ -105,7 +105,7 @@ public class TestDataNodeVolumeFailure {
* failure if the configuration parameter allows this.
*/
@Test
- public void testVolumeFailure() throws IOException {
+ public void testVolumeFailure() throws Exception {
FileSystem fs = cluster.getFileSystem();
dataDir = new File(cluster.getDataDirectory());
System.out.println("Data dir: is " + dataDir.getPath());
@@ -281,7 +281,7 @@ public class TestDataNodeVolumeFailure {
"test-blockpoolid",
block.getBlockId());
BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
- .getBlockToken(), 0, -1);
+ .getBlockToken(), 0, -1, null);
// nothing - if it fails - it will throw and exception
}
@@ -375,6 +375,7 @@ public class TestDataNodeVolumeFailure {
private String [] metaFilesInDir(File dir) {
String [] res = dir.list(
new FilenameFilter() {
+ @Override
public boolean accept(File dir, String name) {
return name.startsWith("blk_") &&
name.endsWith(Block.METADATA_EXTENSION);
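
The @Override added to the anonymous FilenameFilter is legal on interface methods
since Java 6 and turns signature drift into a compile error. A self-contained
version of that filter — ".meta" stands in for Block.METADATA_EXTENSION:

    import java.io.File;
    import java.io.FilenameFilter;

    class MetaFiles {
      static String[] metaFilesInDir(File dir) {
        return dir.list(new FilenameFilter() {
          @Override // compile error if the signature drifts from FilenameFilter.accept
          public boolean accept(File d, String name) {
            return name.startsWith("blk_") && name.endsWith(".meta");
          }
        });
      }
    }
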
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Fri Oct 19 02:25:55 2012
@@ -19,12 +19,13 @@
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
-import junit.framework.Assert;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -74,7 +75,7 @@ public class TestDeleteBlockPool {
// Although namenode is shutdown, the bp offerservice is still running
try {
dn1.deleteBlockPool(bpid1, true);
- Assert.fail("Must not delete a running block pool");
+ fail("Must not delete a running block pool");
} catch (IOException expected) {
}
@@ -85,7 +86,7 @@ public class TestDeleteBlockPool {
try {
dn1.deleteBlockPool(bpid1, false);
- Assert.fail("Must not delete if any block files exist unless "
+ fail("Must not delete if any block files exist unless "
+ "force is true");
} catch (IOException expected) {
}
@@ -115,7 +116,7 @@ public class TestDeleteBlockPool {
// on dn2
try {
dn2.deleteBlockPool(bpid1, true);
- Assert.fail("Must not delete a running block pool");
+ fail("Must not delete a running block pool");
} catch (IOException expected) {
}
@@ -180,21 +181,21 @@ public class TestDeleteBlockPool {
Configuration nn1Conf = cluster.getConfiguration(0);
nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
dn1.refreshNamenodes(nn1Conf);
- Assert.assertEquals(1, dn1.getAllBpOs().length);
+ assertEquals(1, dn1.getAllBpOs().length);
DFSAdmin admin = new DFSAdmin(nn1Conf);
String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
int ret = admin.run(args);
- Assert.assertFalse(0 == ret);
+ assertFalse(0 == ret);
verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
ret = admin.run(forceArgs);
- Assert.assertEquals(0, ret);
+ assertEquals(0, ret);
verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
@@ -216,7 +217,7 @@ public class TestDeleteBlockPool {
+ bpid);
if (shouldExist == false) {
- Assert.assertFalse(bpDir.exists());
+ assertFalse(bpDir.exists());
} else {
File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
File finalizedDir = new File(bpCurrentDir,
@@ -224,9 +225,9 @@ public class TestDeleteBlockPool {
File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
File versionFile = new File(bpCurrentDir, "VERSION");
- Assert.assertTrue(finalizedDir.isDirectory());
- Assert.assertTrue(rbwDir.isDirectory());
- Assert.assertTrue(versionFile.exists());
+ assertTrue(finalizedDir.isDirectory());
+ assertTrue(rbwDir.isDirectory());
+ assertTrue(versionFile.exists());
}
}
}
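
Replacing junit.framework.Assert with static imports from org.junit.Assert is the
standard JUnit 3 to JUnit 4 migration for assertions; the call sites shrink from
Assert.fail(...) to fail(...). A compact sketch of the resulting expected-exception
pattern (deleteRunningBlockPool is hypothetical, standing in for
dn1.deleteBlockPool(bpid1, true)):

    import static org.junit.Assert.fail;

    import java.io.IOException;

    import org.junit.Test;

    public class DeletePoolSketch {
      // Hypothetical stand-in for the operation under test.
      private void deleteRunningBlockPool() throws IOException {
        throw new IOException("block pool is still running");
      }

      @Test
      public void deleteOfRunningPoolIsRejected() {
        try {
          deleteRunningBlockPool();
          fail("Must not delete a running block pool");
        } catch (IOException expected) {
          // expected: the delete is rejected while the pool is running
        }
      }
    }
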
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
@@ -25,8 +31,6 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Random;
-import junit.framework.TestCase;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -38,15 +42,16 @@ import org.apache.hadoop.hdfs.HdfsConfig
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
+import org.junit.Test;
/**
* Tests {@link DirectoryScanner} handling of differences
* between blocks on the disk and block in memory.
*/
-public class TestDirectoryScanner extends TestCase {
+public class TestDirectoryScanner {
private static final Log LOG = LogFactory.getLog(TestDirectoryScanner.class);
private static final Configuration CONF = new HdfsConfiguration();
private static final int DEFAULT_GEN_STAMP = 9999;
@@ -218,6 +223,7 @@ public class TestDirectoryScanner extend
assertEquals(mismatchBlocks, stats.mismatchBlocks);
}
+ @Test
public void testDirectoryScanner() throws Exception {
// Run the test with and without parallel scanning
for (int parallelism = 1; parallelism < 3; parallelism++) {
@@ -233,8 +239,7 @@ public class TestDirectoryScanner extend
fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
parallelism);
- DataNode dn = cluster.getDataNodes().get(0);
- scanner = new DirectoryScanner(dn, fds, CONF);
+ scanner = new DirectoryScanner(fds, CONF);
scanner.setRetainDiffs(true);
// Add files with 100 blocks
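
TestDirectoryScanner makes the full JUnit 4 conversion: drop extends TestCase,
annotate the fixture method with @Before, and annotate each test with @Test. The
annotation matters because JUnit 4 discovers tests by annotation rather than by
the test* naming convention, so an unannotated method is silently skipped — which
is why @Test is added to testDirectoryScanner above. Side by side:

    import org.junit.Before;
    import org.junit.Test;

    public class ScannerTestSketch {
      // JUnit 3 (removed):
      //   public class ScannerTestSketch extends junit.framework.TestCase {
      //     protected void setUp() throws Exception { ... } // hook found by name
      //     public void testScan() { ... }                  // run because of the "test" prefix
      //   }

      @Before
      public void setUp() throws Exception {
        // fixture setup; invoked because of @Before, not its name
      }

      @Test
      public void testScan() {
        // without @Test, JUnit 4 would silently skip this method
      }
    }
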
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Fri Oct 19 02:25:55 2012
@@ -142,7 +142,7 @@ public class TestDiskError {
DataOutputStream out = new DataOutputStream(s.getOutputStream());
DataChecksum checksum = DataChecksum.newDataChecksum(
- DataChecksum.CHECKSUM_CRC32, 512);
+ DataChecksum.Type.CRC32, 512);
new Sender(out).writeBlock(block.getBlock(),
BlockTokenSecretManager.DUMMY_TOKEN, "",
new DatanodeInfo[0], null,
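
The DataChecksum change swaps an int constant (CHECKSUM_CRC32) for an enum member
(Type.CRC32), so the compiler rejects call sites that pass an arbitrary int. The
general shape of that refactor (the enum members here are illustrative; see
DataChecksum.Type in hadoop-common for the real set):

    // Before: untyped int constants, so any int compiles at the call site.
    //   public static final int CHECKSUM_NULL  = 0;
    //   public static final int CHECKSUM_CRC32 = 1;
    public class ChecksumSketch {
      public enum ChecksumType { NULL, CRC32, CRC32C } // illustrative members

      static void configureChecksum(ChecksumType type, int bytesPerChecksum) {
        // a bare int like 1 no longer compiles where a checksum type is expected
      }

      public static void main(String[] args) {
        configureChecksum(ChecksumType.CRC32, 512); // 512 matches the tests above
      }
    }
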
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java Fri Oct 19 02:25:55 2012
@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
-import static org.apache.hadoop.test.MetricsAsserts.*;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import java.util.EnumSet;
import java.util.Random;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java Fri Oct 19 02:25:55 2012
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.da
import java.io.IOException;
+import junit.framework.Assert;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -31,7 +34,13 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner;
+import static org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.SLEEP_PERIOD_MS;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
import org.junit.Test;
+import org.junit.Ignore;
+import static org.junit.Assert.fail;
public class TestMultipleNNDataBlockScanner {
@@ -65,7 +74,7 @@ public class TestMultipleNNDataBlockScan
}
}
- @Test
+ @Test(timeout=120000)
public void testDataBlockScanner() throws IOException, InterruptedException {
setUp();
try {
@@ -88,7 +97,7 @@ public class TestMultipleNNDataBlockScan
}
}
- @Test
+ @Test(timeout=120000)
public void testBlockScannerAfterRefresh() throws IOException,
InterruptedException {
setUp();
@@ -140,7 +149,7 @@ public class TestMultipleNNDataBlockScan
}
}
- @Test
+ @Test(timeout=120000)
public void testBlockScannerAfterRestart() throws IOException,
InterruptedException {
setUp();
@@ -166,4 +175,75 @@ public class TestMultipleNNDataBlockScan
cluster.shutdown();
}
}
+
+ @Test(timeout=120000)
+ public void test2NNBlockRescanInterval() throws IOException {
+ ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
+ Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf)
+ .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
+ .build();
+
+ try {
+ FileSystem fs = cluster.getFileSystem(1);
+ Path file2 = new Path("/test/testBlockScanInterval");
+ DFSTestUtil.createFile(fs, file2, 30, (short) 1, 0);
+
+ fs = cluster.getFileSystem(0);
+ Path file1 = new Path("/test/testBlockScanInterval");
+ DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
+ for (int i = 0; i < 8; i++) {
+ LOG.info("Verifying that the blockscanner scans exactly once");
+ waitAndScanBlocks(1, 1);
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * HDFS-3828: DN rescans blocks too frequently
+ *
+ * @throws Exception
+ */
+ @Test(timeout=120000)
+ public void testBlockRescanInterval() throws IOException {
+ ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
+ Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf).build();
+
+ try {
+ FileSystem fs = cluster.getFileSystem();
+ Path file1 = new Path("/test/testBlockScanInterval");
+ DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
+ for (int i = 0; i < 4; i++) {
+ LOG.info("Verifying that the blockscanner scans exactly once");
+ waitAndScanBlocks(1, 1);
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ void waitAndScanBlocks(long scansLastRun, long scansTotal)
+ throws IOException {
+ // DataBlockScanner will run for every 5 seconds so we are checking for
+ // every 5 seconds
+ int n = 5;
+ String bpid = cluster.getNamesystem(0).getBlockPoolId();
+ DataNode dn = cluster.getDataNodes().get(0);
+ long blocksScanned, total;
+ do {
+ try {
+ Thread.sleep(SLEEP_PERIOD_MS);
+ } catch (InterruptedException e) {
+ fail("Interrupted: " + e);
+ }
+ blocksScanned = dn.blockScanner.getBlocksScannedInLastRun(bpid);
+ total = dn.blockScanner.getTotalScans(bpid);
+ LOG.info("bpid = " + bpid + " blocksScanned = " + blocksScanned + " total=" + total);
+ } while (n-- > 0 && (blocksScanned != scansLastRun || scansTotal != total));
+ Assert.assertEquals(scansTotal, total);
+ Assert.assertEquals(scansLastRun, blocksScanned);
+ }
}
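
The (timeout=120000) attribute on these @Test annotations puts a two-minute
ceiling on each test: if a polling loop such as waitAndScanBlocks never settles,
the test is reported as a failure instead of hanging the build. The shape:

    import org.junit.Test;

    public class TimeoutSketch {
      @Test(timeout = 120000) // reported as failed after 2 minutes instead of hanging the build
      public void scansSettle() throws Exception {
        // polling loops like waitAndScanBlocks() now have a hard upper bound
      }
    }
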
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java Fri Oct 19 02:25:55 2012
@@ -18,7 +18,8 @@
package org.apache.hadoop.hdfs.server.datanode;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Fri Oct 19 02:25:55 2012
@@ -17,13 +17,16 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import junit.framework.TestCase;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -33,25 +36,23 @@ import org.apache.hadoop.hdfs.server.dat
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
import org.apache.hadoop.util.DataChecksum;
+import org.junit.Before;
+import org.junit.Test;
/**
* this class tests the methods of the SimulatedFSDataset.
*/
-public class TestSimulatedFSDataset extends TestCase {
+public class TestSimulatedFSDataset {
Configuration conf = null;
static final String bpid = "BP-TEST";
static final int NUMBLOCKS = 20;
static final int BLOCK_LENGTH_MULTIPLIER = 79;
- protected void setUp() throws Exception {
- super.setUp();
+ @Before
+ public void setUp() throws Exception {
conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);
}
-
- protected void tearDown() throws Exception {
- super.tearDown();
- }
long blockIdToLen(long blkid) {
return blkid*BLOCK_LENGTH_MULTIPLIER;
@@ -66,7 +67,7 @@ public class TestSimulatedFSDataset exte
// data written
ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
ReplicaOutputStreams out = bInfo.createStreams(true,
- DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
+ DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
try {
OutputStream dataOut = out.getDataOut();
assertEquals(0, fsdataset.getLength(b));
@@ -88,6 +89,7 @@ public class TestSimulatedFSDataset exte
return addSomeBlocks(fsdataset, 1);
}
+ @Test
public void testFSDatasetFactory() {
final Configuration conf = new Configuration();
FsDatasetSpi.Factory<?> f = FsDatasetSpi.Factory.getFactory(conf);
@@ -100,6 +102,7 @@ public class TestSimulatedFSDataset exte
assertTrue(s.isSimulated());
}
+ @Test
public void testGetMetaData() throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
@@ -116,11 +119,12 @@ public class TestSimulatedFSDataset exte
short version = metaDataInput.readShort();
assertEquals(BlockMetadataHeader.VERSION, version);
DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
- assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
+ assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
assertEquals(0, checksum.getChecksumSize());
}
+ @Test
public void testStorageUsage() throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
assertEquals(fsdataset.getDfsUsed(), 0);
@@ -144,6 +148,7 @@ public class TestSimulatedFSDataset exte
assertEquals(expectedLen, lengthRead);
}
+ @Test
public void testWriteRead() throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
addSomeBlocks(fsdataset);
@@ -155,6 +160,7 @@ public class TestSimulatedFSDataset exte
}
}
+ @Test
public void testGetBlockReport() throws IOException {
SimulatedFSDataset fsdataset = getSimulatedFSDataset();
BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
@@ -168,6 +174,7 @@ public class TestSimulatedFSDataset exte
}
}
+ @Test
public void testInjectionEmpty() throws IOException {
SimulatedFSDataset fsdataset = getSimulatedFSDataset();
BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
@@ -196,6 +203,7 @@ public class TestSimulatedFSDataset exte
assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining());
}
+ @Test
public void testInjectionNonEmpty() throws IOException {
SimulatedFSDataset fsdataset = getSimulatedFSDataset();
BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
@@ -269,6 +277,7 @@ public class TestSimulatedFSDataset exte
}
}
+ @Test
public void testInValidBlocks() throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
@@ -280,6 +289,7 @@ public class TestSimulatedFSDataset exte
checkInvalidBlock(b);
}
+ @Test
public void testInvalidate() throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
int bytesAdded = addSomeBlocks(fsdataset);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java Fri Oct 19 02:25:55 2012
@@ -137,7 +137,7 @@ public class TestDatanodeRestart {
}
// test recovering unlinked tmp replicas
- @Test public void testRecoverReplicas() throws IOException {
+ @Test public void testRecoverReplicas() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
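
Widening the test signature from throws IOException to throws Exception is the
usual JUnit 4 idiom once a test body starts calling helpers that throw other
checked exceptions; JUnit treats any exception escaping a @Test method as a
failure, so the broad clause costs nothing. For example:

    import org.junit.Test;

    public class ThrowsSketch {
      @Test
      public void testRecoverReplicas() throws Exception {
        Thread.sleep(1); // InterruptedException now compiles without a try/catch
        // ... restart-and-verify logic would follow here ...
      }
    }
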
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java Fri Oct 19 02:25:55 2012
@@ -17,7 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -27,6 +29,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClientAdapter;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -57,6 +60,8 @@ import org.apache.hadoop.net.NetUtils;
import org.junit.Assert;
import org.junit.Test;
+import static org.junit.Assume.assumeTrue;
+
/**
* This tests InterDataNodeProtocol for block handling.
*/
@@ -123,17 +128,42 @@ public class TestInterDatanodeProtocol {
return blocks.get(blocks.size() - 1);
}
+ /** Test block MD access via a DN */
+ @Test
+ public void testBlockMetaDataInfo() throws Exception {
+ checkBlockMetaDataInfo(false);
+ }
+
+ /** The same as above, but use hostnames for DN<->DN communication */
+ @Test
+ public void testBlockMetaDataInfoWithHostname() throws Exception {
+ assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+ checkBlockMetaDataInfo(true);
+ }
+
/**
* The following test first creates a file.
* It verifies the block information from a datanode.
- * Then, it updates the block with new information and verifies again.
+ * Then, it updates the block with new information and verifies again.
+ * @param useDnHostname whether DNs should connect to other DNs by hostname
*/
- @Test
- public void testBlockMetaDataInfo() throws Exception {
+ private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
MiniDFSCluster cluster = null;
+ conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
+ if (useDnHostname) {
+ // Since the mini cluster only listens on the loopback we have to
+ // ensure the hostname used to access DNs maps to the loopback. We
+ // do this by telling the DN to advertise localhost as its hostname
+ // instead of the default hostname.
+ conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
+ }
+
try {
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(3)
+ .checkDataNodeHostConfig(true)
+ .build();
cluster.waitActive();
//create a file
@@ -152,7 +182,7 @@ public class TestInterDatanodeProtocol {
//connect to a data node
DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
InterDatanodeProtocol idp = DataNodeTestUtils.createInterDatanodeProtocolProxy(
- datanode, datanodeinfo[0], conf);
+ datanode, datanodeinfo[0], conf, useDnHostname);
//stop block scanner, so we could compare lastScanTime
DataNodeTestUtils.shutdownBlockScanner(datanode);
@@ -362,7 +392,7 @@ public class TestInterDatanodeProtocol {
try {
proxy = DataNode.createInterDataNodeProtocolProxy(
- dInfo, conf, 500);
+ dInfo, conf, 500, false);
proxy.initReplicaRecovery(new RecoveringBlock(
new ExtendedBlock("bpid", 1), null, 100));
fail ("Expected SocketTimeoutException exception, but did not get.");
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java Fri Oct 19 02:25:55 2012
@@ -23,7 +23,6 @@ import static org.junit.Assert.fail;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReplicaMap;
import org.junit.Before;
import org.junit.Test;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java Fri Oct 19 02:25:55 2012
@@ -17,11 +17,11 @@
*/
package org.apache.hadoop.hdfs.server.journalservice;
+import static org.junit.Assert.assertNotNull;
+
import java.io.IOException;
import java.net.InetSocketAddress;
-import junit.framework.Assert;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
@@ -122,6 +122,6 @@ public class TestJournalService {
// New epoch higher than the current epoch is successful
FenceResponse resp = s.fence(info, currentEpoch+1, "fencer");
- Assert.assertNotNull(resp);
+ assertNotNull(resp);
}
}
\ No newline at end of file
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
@@ -37,33 +43,29 @@ import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.mockito.Mockito;
import org.mockito.Matchers;
+import org.mockito.Mockito;
import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
/**
* Utility functions for testing fsimage storage.
*/
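
The FSImageTestUtil hunk is purely an import cleanup: static imports move above
the regular ones and each group is alphabetized. One common layout (an assumption
about the project's convention; checkstyle configurations vary):

    import static org.junit.Assert.assertTrue;        // 1. static imports, alphabetized

    import java.io.File;                              // 2. java.* / javax.*

    import org.apache.commons.logging.Log;            // 3. org.*, alphabetized
    import org.apache.hadoop.conf.Configuration;

    import com.google.common.collect.ImmutableList;   // 4. com.* and the rest
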
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Fri Oct 19 02:25:55 2012
@@ -60,6 +60,7 @@ import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
@@ -253,7 +254,7 @@ public class NNThroughputBenchmark {
setNameNodeLoggingLevel(logLevel);
for(tIdx=0; tIdx < numThreads; tIdx++)
daemons.add(new StatsDaemon(tIdx, opsPerThread[tIdx], this));
- start = System.currentTimeMillis();
+ start = Time.now();
LOG.info("Starting " + numOpsRequired + " " + getOpName() + "(s).");
for(StatsDaemon d : daemons)
d.start();
@@ -261,7 +262,7 @@ public class NNThroughputBenchmark {
while(isInPorgress()) {
// try {Thread.sleep(500);} catch (InterruptedException e) {}
}
- elapsedTime = System.currentTimeMillis() - start;
+ elapsedTime = Time.now() - start;
for(StatsDaemon d : daemons) {
incrementStats(d.localNumOpsExecuted, d.localCumulativeTime);
// System.out.println(d.toString() + ": ops Exec = " + d.localNumOpsExecuted);
@@ -391,6 +392,7 @@ public class NNThroughputBenchmark {
setName(toString());
}
+ @Override
public void run() {
localNumOpsExecuted = 0;
localCumulativeTime = 0;
@@ -403,6 +405,7 @@ public class NNThroughputBenchmark {
}
}
+ @Override
public String toString() {
return "StatsDaemon-" + daemonId;
}
@@ -445,16 +448,19 @@ public class NNThroughputBenchmark {
keepResults = true;
}
+ @Override
String getOpName() {
return OP_CLEAN_NAME;
}
+ @Override
void parseArguments(List<String> args) {
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
if(args.size() > 2 && !ignoreUnrelatedOptions)
printUsage();
}
+ @Override
void generateInputs(int[] opsPerThread) throws IOException {
// do nothing
}
@@ -462,6 +468,7 @@ public class NNThroughputBenchmark {
/**
* Does not require the argument
*/
+ @Override
String getExecutionArgument(int daemonId) {
return null;
}
@@ -469,15 +476,17 @@ public class NNThroughputBenchmark {
/**
* Remove entire benchmark directory.
*/
+ @Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
- long start = System.currentTimeMillis();
+ long start = Time.now();
nameNodeProto.delete(BASE_DIR_NAME, true);
- long end = System.currentTimeMillis();
+ long end = Time.now();
return end-start;
}
+ @Override
void printResults() {
LOG.info("--- " + getOpName() + " inputs ---");
LOG.info("Remove directory " + BASE_DIR_NAME);
@@ -507,10 +516,12 @@ public class NNThroughputBenchmark {
parseArguments(args);
}
+ @Override
String getOpName() {
return OP_CREATE_NAME;
}
+ @Override
void parseArguments(List<String> args) {
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
int nrFilesPerDir = 4;
@@ -533,6 +544,7 @@ public class NNThroughputBenchmark {
nameGenerator = new FileNameGenerator(getBaseDir(), nrFilesPerDir);
}
+ @Override
void generateInputs(int[] opsPerThread) throws IOException {
assert opsPerThread.length == numThreads : "Error opsPerThread.length";
nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
@@ -556,6 +568,7 @@ public class NNThroughputBenchmark {
/**
* returns client name
*/
+ @Override
String getExecutionArgument(int daemonId) {
return getClientName(daemonId);
}
@@ -563,20 +576,22 @@ public class NNThroughputBenchmark {
/**
* Do file create.
*/
+ @Override
long executeOp(int daemonId, int inputIdx, String clientName)
throws IOException {
- long start = System.currentTimeMillis();
+ long start = Time.now();
// dummyActionNoSynch(fileIdx);
nameNodeProto.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
clientName, new EnumSetWritable<CreateFlag>(EnumSet
.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE);
- long end = System.currentTimeMillis();
+ long end = Time.now();
for(boolean written = !closeUponCreate; !written;
written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
clientName, null));
return end-start;
}
+ @Override
void printResults() {
LOG.info("--- " + getOpName() + " inputs ---");
LOG.info("nrFiles = " + numOpsRequired);
@@ -606,10 +621,12 @@ public class NNThroughputBenchmark {
super(args);
}
+ @Override
String getOpName() {
return OP_OPEN_NAME;
}
+ @Override
void parseArguments(List<String> args) {
int ueIndex = args.indexOf("-useExisting");
useExisting = (ueIndex >= 0);
@@ -619,6 +636,7 @@ public class NNThroughputBenchmark {
super.parseArguments(args);
}
+ @Override
void generateInputs(int[] opsPerThread) throws IOException {
// create files using opsPerThread
String[] createArgs = new String[] {
@@ -651,11 +669,12 @@ public class NNThroughputBenchmark {
/**
* Do file open.
*/
+ @Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
- long start = System.currentTimeMillis();
+ long start = Time.now();
nameNodeProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, BLOCK_SIZE);
- long end = System.currentTimeMillis();
+ long end = Time.now();
return end-start;
}
}
@@ -675,15 +694,17 @@ public class NNThroughputBenchmark {
super(args);
}
+ @Override
String getOpName() {
return OP_DELETE_NAME;
}
+ @Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
- long start = System.currentTimeMillis();
+ long start = Time.now();
nameNodeProto.delete(fileNames[daemonId][inputIdx], false);
- long end = System.currentTimeMillis();
+ long end = Time.now();
return end-start;
}
}
@@ -703,15 +724,17 @@ public class NNThroughputBenchmark {
super(args);
}
+ @Override
String getOpName() {
return OP_FILE_STATUS_NAME;
}
+ @Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
- long start = System.currentTimeMillis();
+ long start = Time.now();
nameNodeProto.getFileInfo(fileNames[daemonId][inputIdx]);
- long end = System.currentTimeMillis();
+ long end = Time.now();
return end-start;
}
}
@@ -733,10 +756,12 @@ public class NNThroughputBenchmark {
super(args);
}
+ @Override
String getOpName() {
return OP_RENAME_NAME;
}
+ @Override
void generateInputs(int[] opsPerThread) throws IOException {
super.generateInputs(opsPerThread);
destNames = new String[fileNames.length][];
@@ -748,12 +773,13 @@ public class NNThroughputBenchmark {
}
}
+ @Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
- long start = System.currentTimeMillis();
+ long start = Time.now();
nameNodeProto.rename(fileNames[daemonId][inputIdx],
destNames[daemonId][inputIdx]);
- long end = System.currentTimeMillis();
+ long end = Time.now();
return end-start;
}
}
@@ -792,6 +818,7 @@ public class NNThroughputBenchmark {
this.nrBlocks = 0;
}
+ @Override
public String toString() {
return dnRegistration.toString();
}
@@ -867,6 +894,7 @@ public class NNThroughputBenchmark {
return blockReportList;
}
+ @Override
public int compareTo(String xferAddr) {
return getXferAddr().compareTo(xferAddr);
}
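
The @Override on compareTo(String) highlights a deliberate quirk: TinyDatanode is comparable to a String (its transfer address) rather than to another TinyDatanode, which lets a sorted datanode array be searched by address with no temporary wrapper object. A compact, self-contained illustration of the idiom (names are illustrative):

    import java.util.Arrays;

    public class NodeByAddr implements Comparable<String> {
        final String xferAddr;

        NodeByAddr(String xferAddr) { this.xferAddr = xferAddr; }

        @Override
        public int compareTo(String xferAddr) {
            return this.xferAddr.compareTo(xferAddr);
        }

        public static void main(String[] args) {
            NodeByAddr[] nodes = { new NodeByAddr("dn1:50010"),
                                   new NodeByAddr("dn2:50010") };
            // binarySearch(Object[], Object) calls each element's
            // compareTo(key), so a plain String serves as the search key.
            System.out.println(Arrays.binarySearch(nodes, "dn2:50010")); // 1
        }
    }
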
@@ -958,10 +986,12 @@ public class NNThroughputBenchmark {
return numThreads;
}
+ @Override
String getOpName() {
return OP_BLOCK_REPORT_NAME;
}
+ @Override
void parseArguments(List<String> args) {
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
for (int i = 2; i < args.size(); i++) { // parse command line
@@ -982,6 +1012,7 @@ public class NNThroughputBenchmark {
}
}
+ @Override
void generateInputs(int[] ignore) throws IOException {
int nrDatanodes = getNumDatanodes();
int nrBlocks = (int)Math.ceil((double)blocksPerReport * nrDatanodes
@@ -1043,22 +1074,25 @@ public class NNThroughputBenchmark {
/**
* Does not require the argument
*/
+ @Override
String getExecutionArgument(int daemonId) {
return null;
}
+ @Override
long executeOp(int daemonId, int inputIdx, String ignore) throws IOException {
assert daemonId < numThreads : "Wrong daemonId.";
TinyDatanode dn = datanodes[daemonId];
- long start = System.currentTimeMillis();
+ long start = Time.now();
StorageBlockReport[] report = { new StorageBlockReport(
dn.storage, dn.getBlockReportList()) };
nameNodeProto.blockReport(dn.dnRegistration, nameNode.getNamesystem()
.getBlockPoolId(), report);
- long end = System.currentTimeMillis();
+ long end = Time.now();
return end-start;
}
+ @Override
void printResults() {
String blockDistribution = "";
String delim = "(";
@@ -1119,10 +1153,12 @@ public class NNThroughputBenchmark {
numPendingBlocks = 0;
}
+ @Override
String getOpName() {
return OP_REPLICATION_NAME;
}
+ @Override
void parseArguments(List<String> args) {
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
for (int i = 2; i < args.size(); i++) { // parse command line
@@ -1146,6 +1182,7 @@ public class NNThroughputBenchmark {
}
}
+ @Override
void generateInputs(int[] ignore) throws IOException {
final FSNamesystem namesystem = nameNode.getNamesystem();
@@ -1192,23 +1229,26 @@ public class NNThroughputBenchmark {
/**
* Does not require the argument
*/
+ @Override
String getExecutionArgument(int daemonId) {
return null;
}
+ @Override
long executeOp(int daemonId, int inputIdx, String ignore) throws IOException {
assert daemonId < numThreads : "Wrong daemonId.";
- long start = System.currentTimeMillis();
+ long start = Time.now();
// compute data-node work
int work = BlockManagerTestUtil.getComputedDatanodeWork(
nameNode.getNamesystem().getBlockManager());
- long end = System.currentTimeMillis();
+ long end = Time.now();
numPendingBlocks += work;
if(work == 0)
daemons.get(daemonId).terminate();
return end-start;
}
+ @Override
void printResults() {
String blockDistribution = "";
String delim = "(";
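
The other recurring edit in this file replaces System.currentTimeMillis() with org.apache.hadoop.util.Time.now() on both sides of every measured call. Time.now() is a thin wrapper over the same JDK clock, so the measured values do not change; the gain is that all benchmark time reads go through a single Hadoop utility. A standalone rendering of the executeOp() timing pattern (helper names are illustrative):

    public class TimedOpSketch {
        interface Op {
            void run() throws Exception;
        }

        // One clock read before the operation and one after, returning the
        // delta in ms: the same shape as every executeOp() in the hunks above.
        static long timeOp(Op op) throws Exception {
            long start = System.currentTimeMillis(); // Time.now() in the patch
            op.run();
            long end = System.currentTimeMillis();   // Time.now() in the patch
            return end - start;
        }

        public static void main(String[] args) throws Exception {
            System.out.println("elapsed ms: " + timeOp(() -> Thread.sleep(50)));
        }
    }
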
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Fri Oct 19 02:25:55 2012
@@ -17,11 +17,16 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
+
import java.io.File;
import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -61,7 +66,7 @@ public class NameNodeAdapter {
public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
boolean resolveLink) throws AccessControlException, UnresolvedLinkException,
- StandbyException {
+ StandbyException, IOException {
return namenode.getNamesystem().getFileInfo(src, resolveLink);
}
@@ -81,9 +86,8 @@ public class NameNodeAdapter {
namenode.getNamesystem().enterSafeMode(resourcesLow);
}
- public static void leaveSafeMode(NameNode namenode, boolean checkForUpgrades)
- throws SafeModeException {
- namenode.getNamesystem().leaveSafeMode(checkForUpgrades);
+ public static void leaveSafeMode(NameNode namenode) {
+ namenode.getNamesystem().leaveSafeMode();
}
public static void abortEditLogs(NameNode nn) {
@@ -183,6 +187,15 @@ public class NameNodeAdapter {
}
}
+ public static FSEditLogOp createMkdirOp(String path) {
+ MkdirOp op = MkdirOp.getInstance(new FSEditLogOp.OpInstanceCache())
+ .setPath(path)
+ .setTimestamp(0)
+ .setPermissionStatus(new PermissionStatus(
+ "testuser", "testgroup", FsPermission.getDefault()));
+ return op;
+ }
+
/**
* @return the number of blocks marked safe by safemode, or -1
* if safemode is not running.
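
The new createMkdirOp() helper assembles its edit-log op through chained setters, each returning the op being configured. A self-contained sketch of that fluent pattern, with stand-in fields rather than the real MkdirOp internals:

    public class MkdirOpSketch {
        private String path;
        private long timestamp;
        private String permission;

        // Each setter mutates this op and returns it, so one expression
        // configures path, timestamp, and permissions in order.
        MkdirOpSketch setPath(String path) { this.path = path; return this; }
        MkdirOpSketch setTimestamp(long ts) { this.timestamp = ts; return this; }
        MkdirOpSketch setPermission(String p) { this.permission = p; return this; }

        @Override
        public String toString() {
            return "mkdir " + path + " t=" + timestamp + " perm=" + permission;
        }

        public static void main(String[] args) {
            System.out.println(new MkdirOpSketch()
                .setPath("/test").setTimestamp(0).setPermission("rwxr-xr-x"));
        }
    }
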
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Fri Oct 19 02:25:55 2012
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.Rename;
@@ -40,13 +41,11 @@ import org.apache.hadoop.hdfs.Distribute
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
/**
* OfflineEditsViewerHelper is a helper class for TestOfflineEditsViewer,
@@ -195,19 +194,21 @@ public class OfflineEditsViewerHelper {
Path pathSymlink = new Path("/file_symlink");
fc.createSymlink(pathConcatTarget, pathSymlink, false);
// OP_GET_DELEGATION_TOKEN 18
- final Token<DelegationTokenIdentifier> token =
- dfs.getDelegationToken("JobTracker");
// OP_RENEW_DELEGATION_TOKEN 19
// OP_CANCEL_DELEGATION_TOKEN 20
// see TestDelegationToken.java
// fake the user to renew token for
+ final Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", null);
UserGroupInformation longUgi = UserGroupInformation.createRemoteUser(
"JobTracker/foo.com@FOO.COM");
try {
longUgi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
public Object run() throws IOException, InterruptedException {
- token.renew(config);
- token.cancel(config);
+ for (Token<?> token : tokens) {
+ token.renew(config);
+ token.cancel(config);
+ }
return null;
}
});
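
This hunk makes two coupled changes: the single-token dfs.getDelegationToken(...) call becomes the batch API dfs.addDelegationTokens(...), and renew/cancel now loop over the returned array inside one doAs(), so every token is exercised under the JobTracker principal. The shape of that loop, with a stub standing in for the cluster-bound renew/cancel calls (a real Token.renew() needs a live NameNode):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class BatchRenewSketch {
        public static void main(String[] args) throws Exception {
            final String[] tokens = { "t1", "t2" };  // stand-ins for Token<?>[]
            UserGroupInformation longUgi = UserGroupInformation
                .createRemoteUser("JobTracker/foo.com@FOO.COM");
            longUgi.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    for (String token : tokens) {
                        // real code: token.renew(config); token.cancel(config);
                        System.out.println("renew+cancel " + token);
                    }
                    return null;
                }
            });
        }
    }
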
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java Fri Oct 19 02:25:55 2012
@@ -18,9 +18,10 @@
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
-import org.junit.Before;
-import org.junit.After;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.File;
@@ -31,19 +32,25 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.HftpFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
/**
@@ -80,6 +87,7 @@ public class TestAuditLogs {
final long precision = 1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+ conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
setNumFiles(20).build();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
@@ -109,7 +117,19 @@ public class TestAuditLogs {
int val = istream.read();
istream.close();
verifyAuditLogs(true);
- assertTrue("failed to read from file", val > 0);
+ assertTrue("failed to read from file", val >= 0);
+ }
+
+ /** test that allowed stat puts proper entry in audit log */
+ @Test
+ public void testAuditAllowedStat() throws Exception {
+ final Path file = new Path(fnames[0]);
+ FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
+
+ setupAuditLogs();
+ FileStatus st = userfs.getFileStatus(file);
+ verifyAuditLogs(true);
+ assertTrue("failed to stat file", st != null && st.isFile());
}
/** test that denied operation puts proper entry in audit log */
@@ -132,6 +152,85 @@ public class TestAuditLogs {
verifyAuditLogs(false);
}
+ /** test that access via webhdfs puts proper entry in audit log */
+ @Test
+ public void testAuditWebHdfs() throws Exception {
+ final Path file = new Path(fnames[0]);
+
+ fs.setPermission(file, new FsPermission((short)0644));
+ fs.setOwner(file, "root", null);
+
+ setupAuditLogs();
+
+ WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+ InputStream istream = webfs.open(file);
+ int val = istream.read();
+ istream.close();
+
+ verifyAuditLogsRepeat(true, 3);
+ assertTrue("failed to read from file", val >= 0);
+ }
+
+ /** test that stat via webhdfs puts proper entry in audit log */
+ @Test
+ public void testAuditWebHdfsStat() throws Exception {
+ final Path file = new Path(fnames[0]);
+
+ fs.setPermission(file, new FsPermission((short)0644));
+ fs.setOwner(file, "root", null);
+
+ setupAuditLogs();
+
+ WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+ FileStatus st = webfs.getFileStatus(file);
+
+ verifyAuditLogs(true);
+ assertTrue("failed to stat file", st != null && st.isFile());
+ }
+
+ /** test that access via Hftp puts proper entry in audit log */
+ @Test
+ public void testAuditHftp() throws Exception {
+ final Path file = new Path(fnames[0]);
+
+ final String hftpUri =
+ "hftp://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+
+ HftpFileSystem hftpFs = null;
+
+ setupAuditLogs();
+ try {
+ hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(conf);
+ InputStream istream = hftpFs.open(file);
+ int val = istream.read();
+ istream.close();
+
+ verifyAuditLogs(true);
+ } finally {
+ if (hftpFs != null) hftpFs.close();
+ }
+ }
+
+ /** test that denied access via webhdfs puts proper entry in audit log */
+ @Test
+ public void testAuditWebHdfsDenied() throws Exception {
+ final Path file = new Path(fnames[0]);
+
+ fs.setPermission(file, new FsPermission((short)0600));
+ fs.setOwner(file, "root", null);
+
+ setupAuditLogs();
+ try {
+ WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+ InputStream istream = webfs.open(file);
+ int val = istream.read();
+ fail("open+read must not succeed, got " + val);
+ } catch (AccessControlException e) {
+ System.out.println("got access denied, as expected.");
+ }
+ verifyAuditLogsRepeat(false, 2);
+ }
+
/** Sets up log4j logger for auditlogs */
private void setupAuditLogs() throws IOException {
File file = new File(auditLogFile);
@@ -145,19 +244,34 @@ public class TestAuditLogs {
logger.addAppender(appender);
}
+ // Ensure audit log has only one entry
private void verifyAuditLogs(boolean expectSuccess) throws IOException {
+ verifyAuditLogsRepeat(expectSuccess, 1);
+ }
+
+ // Ensure audit log has exactly N entries
+ private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe)
+ throws IOException {
// Turn off the logs
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
logger.setLevel(Level.OFF);
- // Ensure audit log has only one entry
BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
- String line = reader.readLine();
- assertNotNull(line);
- assertTrue("Expected audit event not found in audit log",
- auditPattern.matcher(line).matches());
- assertTrue("Expected success=" + expectSuccess,
- successPattern.matcher(line).matches() == expectSuccess);
- assertNull("Unexpected event in audit log", reader.readLine());
+ String line = null;
+ boolean ret = true;
+
+ try {
+ for (int i = 0; i < ndupe; i++) {
+ line = reader.readLine();
+ assertNotNull(line);
+ assertTrue("Expected audit event not found in audit log",
+ auditPattern.matcher(line).matches());
+ ret &= successPattern.matcher(line).matches();
+ }
+ assertNull("Unexpected event in audit log", reader.readLine());
+ assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
+ } finally {
+ reader.close();
+ }
}
}
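
The refactored verifyAuditLogsRepeat() enforces a simple contract: the log contains exactly ndupe entries, every entry matches the audit pattern, and the per-line success flags are AND-ed together before comparison, so a single failed entry flips the overall result. A standalone version of that check, using try-with-resources where the patch uses an explicit finally (file name and patterns below are examples):

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;
    import java.util.regex.Pattern;

    public class AuditCheckSketch {
        // Returns true only if all `expected` entries were logged as
        // successes; throws if an entry is missing, malformed, or followed
        // by extra lines.
        static boolean checkExactly(String logFile, Pattern entry,
                Pattern success, int expected) throws IOException {
            boolean allSucceeded = true;
            try (BufferedReader reader =
                     new BufferedReader(new FileReader(logFile))) {
                for (int i = 0; i < expected; i++) {
                    String line = reader.readLine();
                    if (line == null || !entry.matcher(line).matches()) {
                        throw new IOException("audit event " + i + " not found");
                    }
                    allSucceeded &= success.matcher(line).matches();
                }
                if (reader.readLine() != null) {
                    throw new IOException("unexpected extra audit event");
                }
            }
            return allSucceeded;
        }

        public static void main(String[] args) throws IOException {
            System.out.println(checkExactly("audit.log",
                Pattern.compile(".*cmd=.*"),
                Pattern.compile(".*allowed=true.*"), 2));
        }
    }

The repeat counts in the new tests follow from this contract: the webhdfs read expects three entries and the denied webhdfs open expects two, matching the number of audited namenode calls each client operation triggers.
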
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Fri Oct 19 02:25:55 2012
@@ -17,7 +17,10 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
@@ -33,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -61,6 +65,10 @@ public class TestBackupNode {
}
static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
+
+ static final long seed = 0xDEADBEEFL;
+ static final int blockSize = 4096;
+ static final int fileSize = 8192;
@Before
public void setUp() throws Exception {
@@ -347,14 +355,17 @@ public class TestBackupNode {
+ NetUtils.getHostPortString(add)).toUri(), conf);
boolean canWrite = true;
try {
- TestCheckpoint.writeFile(bnFS, file3, replication);
+ DFSTestUtil.createFile(bnFS, file3, fileSize, fileSize, blockSize,
+ replication, seed);
} catch (IOException eio) {
LOG.info("Write to BN failed as expected: ", eio);
canWrite = false;
}
assertFalse("Write to BackupNode must be prohibited.", canWrite);
- TestCheckpoint.writeFile(fileSys, file3, replication);
+ DFSTestUtil.createFile(fileSys, file3, fileSize, fileSize, blockSize,
+ replication, seed);
+
TestCheckpoint.checkFile(fileSys, file3, replication);
// should also be on BN right away
assertTrue("file3 does not exist on BackupNode",
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java Fri Oct 19 02:25:55 2012
@@ -17,9 +17,11 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
-import junit.framework.Assert;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -120,7 +122,7 @@ public class TestCheckPointForSecurityTo
renewToken(token1);
renewToken(token2);
} catch (IOException e) {
- Assert.fail("Could not renew or cancel the token");
+ fail("Could not renew or cancel the token");
}
namesystem = cluster.getNamesystem();
@@ -148,7 +150,7 @@ public class TestCheckPointForSecurityTo
renewToken(token5);
} catch (IOException e) {
- Assert.fail("Could not renew or cancel the token");
+ fail("Could not renew or cancel the token");
}
// restart cluster again
@@ -171,7 +173,7 @@ public class TestCheckPointForSecurityTo
renewToken(token5);
cancelToken(token5);
} catch (IOException e) {
- Assert.fail("Could not renew or cancel the token");
+ fail("Could not renew or cancel the token");
}
} finally {