Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/07/31 01:31:51 UTC

svn commit: r1367365 [4/5] - in /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/ha...

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Mon Jul 30 23:31:42 2012
@@ -31,8 +31,13 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
+import org.apache.hadoop.hdfs.web.resources.DoAsParam;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
@@ -136,4 +141,262 @@ public class TestJspHelper {
     Assert.assertEquals("", delegationTokenParam);
   }
 
+  @Test
+  public void testGetUgiFromToken() throws IOException {
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
+    ServletContext context = mock(ServletContext.class);
+    String realUser = "TheDoctor";
+    String user = "TheNurse";
+    conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation ugi;
+    HttpServletRequest request;
+    
+    Text ownerText = new Text(user);
+    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(
+        ownerText, ownerText, new Text(realUser));
+    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
+        dtId, new DummySecretManager(0, 0, 0, 0));
+    String tokenString = token.encodeToUrlString();
+    
+    // token with no auth-ed user
+    request = getMockRequest(null, null, null);
+    when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
+        tokenString);
+    ugi = JspHelper.getUGI(context, request, conf);
+    Assert.assertNotNull(ugi.getRealUser());
+    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assert.assertEquals(ugi.getShortUserName(), user);
+    checkUgiFromToken(ugi);
+    
+    // token with auth-ed user
+    request = getMockRequest(realUser, null, null);
+    when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
+        tokenString);
+    ugi = JspHelper.getUGI(context, request, conf);
+    Assert.assertNotNull(ugi.getRealUser());
+    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assert.assertEquals(ugi.getShortUserName(), user);    
+    checkUgiFromToken(ugi);
+    
+    // completely different user, token trumps auth
+    request = getMockRequest("rogue", null, null);
+    when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
+        tokenString);
+    ugi = JspHelper.getUGI(context, request, conf);
+    Assert.assertNotNull(ugi.getRealUser());
+    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assert.assertEquals(ugi.getShortUserName(), user);    
+    checkUgiFromToken(ugi);
+    
+    // expected case
+    request = getMockRequest(null, user, null);
+    when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
+        tokenString);
+    ugi = JspHelper.getUGI(context, request, conf);
+    Assert.assertNotNull(ugi.getRealUser());
+    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assert.assertEquals(ugi.getShortUserName(), user);    
+    checkUgiFromToken(ugi);
+    
+    // can't proxy with a token!
+    request = getMockRequest(null, null, "rogue");
+    when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
+        tokenString);
+    try {
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad request allowed");
+    } catch (IOException ioe) {
+      Assert.assertEquals(
+          "Usernames not matched: name=rogue != expected="+user,
+          ioe.getMessage());
+    }
+    
+    // can't proxy with a token!
+    request = getMockRequest(null, user, "rogue");
+    when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
+        tokenString);
+    try {
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad request allowed");
+    } catch (IOException ioe) {
+      Assert.assertEquals(
+          "Usernames not matched: name=rogue != expected="+user,
+          ioe.getMessage());
+    }
+  }
+  
+  @Test
+  public void testGetNonProxyUgi() throws IOException {
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
+    ServletContext context = mock(ServletContext.class);
+    String realUser = "TheDoctor";
+    String user = "TheNurse";
+    conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation ugi;
+    HttpServletRequest request;
+    
+    // have to be auth-ed with remote user
+    request = getMockRequest(null, null, null);
+    try {
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad request allowed");
+    } catch (IOException ioe) {
+      Assert.assertEquals(
+          "Security enabled but user not authenticated by filter",
+          ioe.getMessage());
+    }
+    request = getMockRequest(null, realUser, null);
+    try {
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad request allowed");
+    } catch (IOException ioe) {
+      Assert.assertEquals(
+          "Security enabled but user not authenticated by filter",
+          ioe.getMessage());
+    }
+    
+    // ugi for remote user
+    request = getMockRequest(realUser, null, null);
+    ugi = JspHelper.getUGI(context, request, conf);
+    Assert.assertNull(ugi.getRealUser());
+    Assert.assertEquals(ugi.getShortUserName(), realUser);
+    checkUgiFromAuth(ugi);
+    
+    // ugi for remote user = real user
+    request = getMockRequest(realUser, realUser, null);
+    ugi = JspHelper.getUGI(context, request, conf);
+    Assert.assertNull(ugi.getRealUser());
+    Assert.assertEquals(ugi.getShortUserName(), realUser);
+    checkUgiFromAuth(ugi);
+    
+    // ugi for remote user != real user 
+    request = getMockRequest(realUser, user, null);
+    try {
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad request allowed");
+    } catch (IOException ioe) {
+      Assert.assertEquals(
+          "Usernames not matched: name="+user+" != expected="+realUser,
+          ioe.getMessage());
+    }
+  }
+  
+  @Test
+  public void testGetProxyUgi() throws IOException {
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
+    ServletContext context = mock(ServletContext.class);
+    String realUser = "TheDoctor";
+    String user = "TheNurse";
+    conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    
+    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER+realUser+".groups", "*");
+    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER+realUser+".hosts", "*");
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+    UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation ugi;
+    HttpServletRequest request;
+    
+    // have to be auth-ed with remote user
+    request = getMockRequest(null, null, user);
+    try {
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad request allowed");
+    } catch (IOException ioe) {
+      Assert.assertEquals(
+          "Security enabled but user not authenticated by filter",
+          ioe.getMessage());
+    }
+    request = getMockRequest(null, realUser, user);
+    try {
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad request allowed");
+    } catch (IOException ioe) {
+      Assert.assertEquals(
+          "Security enabled but user not authenticated by filter",
+          ioe.getMessage());
+    }
+    
+    // proxy ugi for user via remote user
+    request = getMockRequest(realUser, null, user);
+    ugi = JspHelper.getUGI(context, request, conf);
+    Assert.assertNotNull(ugi.getRealUser());
+    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assert.assertEquals(ugi.getShortUserName(), user);
+    checkUgiFromAuth(ugi);
+    
+    // proxy ugi for user via a remote user = real user
+    request = getMockRequest(realUser, realUser, user);
+    ugi = JspHelper.getUGI(context, request, conf);
+    Assert.assertNotNull(ugi.getRealUser());
+    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assert.assertEquals(ugi.getShortUserName(), user);
+    checkUgiFromAuth(ugi);
+    
+    // proxy ugi for user via remote user != real user
+    request = getMockRequest(realUser, user, user);
+    try {
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad request allowed");
+    } catch (IOException ioe) {
+      Assert.assertEquals(
+          "Usernames not matched: name="+user+" != expected="+realUser,
+          ioe.getMessage());
+    }
+    
+    // try to get a proxy user with an unauthorized user
+    try {
+      request = getMockRequest(user, null, realUser);
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad proxy request allowed");
+    } catch (AuthorizationException ae) {
+      Assert.assertEquals(
+          "User: " + user + " is not allowed to impersonate " + realUser,
+           ae.getMessage());
+    }
+    try {
+      request = getMockRequest(user, user, realUser);
+      JspHelper.getUGI(context, request, conf);
+      Assert.fail("bad proxy request allowed");
+    } catch (AuthorizationException ae) {
+      Assert.assertEquals(
+          "User: " + user + " is not allowed to impersonate " + realUser,
+           ae.getMessage());
+    }
+  }
+  
+  private HttpServletRequest getMockRequest(String remoteUser, String user, String doAs) {
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    when(request.getParameter(UserParam.NAME)).thenReturn(user);
+    if (doAs != null) {
+      when(request.getParameter(DoAsParam.NAME)).thenReturn(doAs);
+    }
+    when(request.getRemoteUser()).thenReturn(remoteUser);
+    return request;
+  }
+  
+  private void checkUgiFromAuth(UserGroupInformation ugi) {
+    if (ugi.getRealUser() != null) {
+      Assert.assertEquals(AuthenticationMethod.PROXY,
+                          ugi.getAuthenticationMethod());
+      Assert.assertEquals(AuthenticationMethod.KERBEROS_SSL,
+                          ugi.getRealUser().getAuthenticationMethod());
+    } else {
+      Assert.assertEquals(AuthenticationMethod.KERBEROS_SSL,
+                          ugi.getAuthenticationMethod()); 
+    }
+  }
+  
+  private void checkUgiFromToken(UserGroupInformation ugi) {
+    if (ugi.getRealUser() != null) {
+      Assert.assertEquals(AuthenticationMethod.PROXY,
+                          ugi.getAuthenticationMethod());
+      Assert.assertEquals(AuthenticationMethod.TOKEN,
+                          ugi.getRealUser().getAuthenticationMethod());
+    } else {
+      Assert.assertEquals(AuthenticationMethod.TOKEN,
+                          ugi.getAuthenticationMethod());
+    }
+  }
 }
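
The new tests above exercise JspHelper.getUGI(context, request, conf) with three kinds of requests: a delegation token (which, as the "token trumps auth" case shows, wins over the filter-authenticated user), a plain authenticated remote user, and a doas proxy request. As a minimal sketch of the proxy path that testGetProxyUgi() checks, using only the classes, config keys, and static Mockito imports already visible in this diff (not part of the commit itself):

    // Sketch: configure TheDoctor as a superuser, authenticate the request as
    // TheDoctor via the servlet filter, and ask for a proxy UGI for TheNurse.
    private UserGroupInformation resolveProxyUgi(Configuration conf,
        ServletContext context) throws IOException {
      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
      conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
      // Allow TheDoctor to impersonate any user from any host.
      conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "TheDoctor" + ".groups", "*");
      conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "TheDoctor" + ".hosts", "*");
      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
      UserGroupInformation.setConfiguration(conf);

      HttpServletRequest request = mock(HttpServletRequest.class);
      when(request.getRemoteUser()).thenReturn("TheDoctor");             // auth filter
      when(request.getParameter(DoAsParam.NAME)).thenReturn("TheNurse"); // doas=
      // Returns a PROXY ugi for TheNurse whose real user is TheDoctor.
      return JspHelper.getUGI(context, request, conf);
    }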

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java Mon Jul 30 23:31:42 2012
@@ -18,21 +18,26 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.util.List;
-import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.junit.Test;
 
@@ -59,8 +64,10 @@ public class TestDataNodeMetrics {
   }
 
   @Test
-  public void testSendDataPacket() throws Exception {
+  public void testSendDataPacketMetrics() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    final int interval = 1;
+    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       FileSystem fs = cluster.getFileSystem();
@@ -73,64 +80,110 @@ public class TestDataNodeMetrics {
       assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
       MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
-
       // Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
       // signaling the end of the block
       assertCounter("SendDataPacketTransferNanosNumOps", (long)2, rb);
       assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long)2, rb);
+      // Wait for at least 1 rollover
+      Thread.sleep((interval + 1) * 1000);
+      // Check that the sendPacket percentiles rolled to non-zero values
+      String sec = interval + "s";
+      assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
+      assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
   }
 
   @Test
-  public void testFlushMetric() throws Exception {
+  public void testReceivePacketMetrics() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final int interval = 1;
+    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       cluster.waitActive();
       DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
 
       Path testFile = new Path("/testFlushNanosMetric.txt");
-      DFSTestUtil.createFile(fs, testFile, 1, (short)1, new Random().nextLong());
-
+      FSDataOutputStream fout = fs.create(testFile);
+      fout.write(new byte[1]);
+      fout.hsync();
+      fout.close();
       List<DataNode> datanodes = cluster.getDataNodes();
       DataNode datanode = datanodes.get(0);
       MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
-      // Expect 2 flushes, 1 for the flush that occurs after writing, 1 that occurs
-      // on closing the data and metadata files.
+      // Expect two flushes, one for the flush that occurs after writing,
+      // one that occurs on closing the data and metadata files.
       assertCounter("FlushNanosNumOps", 2L, dnMetrics);
+      // Expect two syncs, one from the hsync, one on close.
+      assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
+      // Wait for at least 1 rollover
+      Thread.sleep((interval + 1) * 1000);
+      // Check the receivePacket percentiles that should be non-zero
+      String sec = interval + "s";
+      assertQuantileGauges("FlushNanos" + sec, dnMetrics);
+      assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
   }
 
+  /**
+   * Tests that round-trip acks in a datanode write pipeline are correctly 
+   * measured. 
+   */
   @Test
   public void testRoundTripAckMetric() throws Exception {
-    final int DATANODE_COUNT = 2;
-
+    final int datanodeCount = 2;
+    final int interval = 1;
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
+    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+        datanodeCount).build();
     try {
       cluster.waitActive();
-      DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
-
+      FileSystem fs = cluster.getFileSystem();
+      // Open a file and get the head of the pipeline
       Path testFile = new Path("/testRoundTripAckMetric.txt");
-      DFSTestUtil.createFile(fs, testFile, 1, (short)DATANODE_COUNT,
-          new Random().nextLong());
-
-      boolean foundNonzeroPacketAckNumOps = false;
+      FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
+      DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
+      // Slow down the writes to catch the write pipeline
+      dout.setChunksPerPacket(5);
+      dout.setArtificialSlowdown(3000);
+      fsout.write(new byte[10000]);
+      DatanodeInfo[] pipeline = null;
+      int count = 0;
+      while (pipeline == null && count < 5) {
+        pipeline = dout.getPipeline();
+        System.out.println("Waiting for pipeline to be created.");
+        Thread.sleep(1000);
+        count++;
+      }
+      // Get the head node that should be receiving downstream acks
+      DatanodeInfo headInfo = pipeline[0];
+      DataNode headNode = null;
       for (DataNode datanode : cluster.getDataNodes()) {
-        MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
-        if (getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0) {
-          foundNonzeroPacketAckNumOps = true;
+        if (datanode.getDatanodeId().equals(headInfo)) {
+          headNode = datanode;
+          break;
         }
       }
-      assertTrue(
-          "Expected at least one datanode to have reported PacketAckRoundTripTimeNanos metric",
-          foundNonzeroPacketAckNumOps);
+      assertNotNull("Could not find the head of the datanode write pipeline", 
+          headNode);
+      // Close the file and wait for the metrics to roll over
+      Thread.sleep((interval + 1) * 1000);
+      // Check the ack was received
+      MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics()
+          .name());
+      assertTrue("Expected non-zero number of acks", 
+          getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
+      assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval
+          + "s", dnMetrics);
     } finally {
-      if (cluster != null) {cluster.shutdown();}
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 }
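
The datanode metrics changes above all follow one pattern for the new percentile metrics: enable an N-second rollover window with DFS_METRICS_PERCENTILES_INTERVALS_KEY, generate some I/O, sleep past one window, and assert that the "<MetricName><N>s" quantile gauges were populated. A condensed sketch of that pattern, using only the helpers and keys that appear in this diff (it would live in the same test class and is not part of the commit):

    // Sketch: verify that the FlushNanos percentiles roll over after a write.
    @Test
    public void testFlushPercentilesSketch() throws Exception {
      final int interval = 1;                       // rollover window in seconds
      Configuration conf = new HdfsConfiguration();
      conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
      try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(new Path("/percentilesSketch.txt"));
        out.write(new byte[1]);
        out.hsync();                                // force an fsync on the datanode
        out.close();
        Thread.sleep((interval + 1) * 1000);        // let one window roll over
        MetricsRecordBuilder rb =
            getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
        assertQuantileGauges("FlushNanos" + interval + "s", rb);
      } finally {
        cluster.shutdown();
      }
    }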

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Mon Jul 30 23:31:42 2012
@@ -17,13 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -33,27 +36,23 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
 import org.apache.hadoop.util.DataChecksum;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * this class tests the methods of the  SimulatedFSDataset.
  */
-public class TestSimulatedFSDataset extends TestCase {
+public class TestSimulatedFSDataset {
   Configuration conf = null;
   static final String bpid = "BP-TEST";
   static final int NUMBLOCKS = 20;
   static final int BLOCK_LENGTH_MULTIPLIER = 79;
 
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
+  @Before
+  public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     SimulatedFSDataset.setFactory(conf);
   }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
   
   long blockIdToLen(long blkid) {
     return blkid*BLOCK_LENGTH_MULTIPLIER;
@@ -90,6 +89,7 @@ public class TestSimulatedFSDataset exte
     return addSomeBlocks(fsdataset, 1);
   }
   
+  @Test
   public void testFSDatasetFactory() {
     final Configuration conf = new Configuration();
     FsDatasetSpi.Factory<?> f = FsDatasetSpi.Factory.getFactory(conf);
@@ -102,6 +102,7 @@ public class TestSimulatedFSDataset exte
     assertTrue(s.isSimulated());
   }
 
+  @Test
   public void testGetMetaData() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
@@ -123,6 +124,7 @@ public class TestSimulatedFSDataset exte
   }
 
 
+  @Test
   public void testStorageUsage() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     assertEquals(fsdataset.getDfsUsed(), 0);
@@ -146,6 +148,7 @@ public class TestSimulatedFSDataset exte
     assertEquals(expectedLen, lengthRead);
   }
   
+  @Test
   public void testWriteRead() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     addSomeBlocks(fsdataset);
@@ -157,6 +160,7 @@ public class TestSimulatedFSDataset exte
     }
   }
 
+  @Test
   public void testGetBlockReport() throws IOException {
     SimulatedFSDataset fsdataset = getSimulatedFSDataset(); 
     BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
@@ -170,6 +174,7 @@ public class TestSimulatedFSDataset exte
     }
   }
   
+  @Test
   public void testInjectionEmpty() throws IOException {
     SimulatedFSDataset fsdataset = getSimulatedFSDataset(); 
     BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
@@ -198,6 +203,7 @@ public class TestSimulatedFSDataset exte
     assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining());
   }
 
+  @Test
   public void testInjectionNonEmpty() throws IOException {
     SimulatedFSDataset fsdataset = getSimulatedFSDataset(); 
     BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
@@ -271,6 +277,7 @@ public class TestSimulatedFSDataset exte
     }
   }
   
+  @Test
   public void testInValidBlocks() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
@@ -282,6 +289,7 @@ public class TestSimulatedFSDataset exte
     checkInvalidBlock(b);
   }
 
+  @Test
   public void testInvalidate() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     int bytesAdded = addSomeBlocks(fsdataset);
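
Beyond the added @Test annotations, this file is migrated from JUnit 3 to JUnit 4: the TestCase base class goes away, assertions come from static imports of org.junit.Assert, and the lifecycle is annotation driven. A generic sketch of that migration, with a hypothetical class name (the real conversions are the ones shown in this commit):

    // Before (JUnit 3): tests extended junit.framework.TestCase and were
    // discovered by the "test" method-name prefix.
    //
    //   public class TestSomething extends TestCase {
    //     protected void setUp() throws Exception { super.setUp(); /* init */ }
    //     public void testBasics() { assertTrue(true); }
    //   }
    //
    // After (JUnit 4): no base class, static Assert imports, annotations.
    import static org.junit.Assert.assertTrue;

    import org.junit.Before;
    import org.junit.Test;

    public class TestSomething {            // hypothetical example class
      @Before
      public void setUp() throws Exception {
        // per-test initialization, formerly in TestCase.setUp()
      }

      @Test
      public void testBasics() {
        assertTrue(true);                   // discovered via @Test, not by name
      }
    }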

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Mon Jul 30 23:31:42 2012
@@ -20,6 +20,12 @@ package org.apache.hadoop.hdfs.server.na
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -31,8 +37,6 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -69,6 +73,8 @@ import org.apache.hadoop.test.GenericTes
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
+import org.junit.Before;
+import org.junit.Test;
 import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -84,7 +90,7 @@ import com.google.common.primitives.Ints
 /**
  * This class tests the creation and validation of a checkpoint.
  */
-public class TestCheckpoint extends TestCase {
+public class TestCheckpoint {
 
   static {
     ((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
@@ -100,7 +106,7 @@ public class TestCheckpoint extends Test
 
   private CheckpointFaultInjector faultInjector;
     
-  @Override
+  @Before
   public void setUp() throws IOException {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
     
@@ -139,6 +145,7 @@ public class TestCheckpoint extends Test
   /*
    * Verify that namenode does not startup if one namedir is bad.
    */
+  @Test
   public void testNameDirError() throws IOException {
     LOG.info("Starting testNameDirError");
     Configuration conf = new HdfsConfiguration();
@@ -180,6 +187,7 @@ public class TestCheckpoint extends Test
    * correctly (by removing the storage directory)
    * See https://issues.apache.org/jira/browse/HDFS-2011
    */
+  @Test
   public void testWriteTransactionIdHandlesIOE() throws Exception {
     LOG.info("Check IOException handled correctly by writeTransactionIdFile");
     ArrayList<URI> fsImageDirs = new ArrayList<URI>();
@@ -214,6 +222,7 @@ public class TestCheckpoint extends Test
   /*
    * Simulate namenode crashing after rolling edit log.
    */
+  @Test
   public void testSecondaryNamenodeError1()
     throws IOException {
     LOG.info("Starting testSecondaryNamenodeError1");
@@ -279,6 +288,7 @@ public class TestCheckpoint extends Test
   /*
    * Simulate a namenode crash after uploading new image
    */
+  @Test
   public void testSecondaryNamenodeError2() throws IOException {
     LOG.info("Starting testSecondaryNamenodeError2");
     Configuration conf = new HdfsConfiguration();
@@ -340,6 +350,7 @@ public class TestCheckpoint extends Test
   /*
    * Simulate a secondary namenode crash after rolling the edit log.
    */
+  @Test
   public void testSecondaryNamenodeError3() throws IOException {
     LOG.info("Starting testSecondaryNamenodeError3");
     Configuration conf = new HdfsConfiguration();
@@ -412,6 +423,7 @@ public class TestCheckpoint extends Test
    * back to the name-node.
    * Used to truncate primary fsimage file.
    */
+  @Test
   public void testSecondaryFailsToReturnImage() throws IOException {
     Mockito.doThrow(new IOException("If this exception is not caught by the " +
         "name-node, fs image will be truncated."))
@@ -425,6 +437,7 @@ public class TestCheckpoint extends Test
    * before even setting the length header. This used to cause image
    * truncation. Regression test for HDFS-3330.
    */
+  @Test
   public void testSecondaryFailsWithErrorBeforeSettingHeaders()
       throws IOException {
     Mockito.doThrow(new Error("If this exception is not caught by the " +
@@ -497,6 +510,7 @@ public class TestCheckpoint extends Test
    * The length header in the HTTP transfer should prevent
    * this from corrupting the NN.
    */
+  @Test
   public void testNameNodeImageSendFailWrongSize()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongSize");
@@ -511,6 +525,7 @@ public class TestCheckpoint extends Test
    * The digest header in the HTTP transfer should prevent
    * this from corrupting the NN.
    */
+  @Test
   public void testNameNodeImageSendFailWrongDigest()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongDigest");
@@ -528,7 +543,7 @@ public class TestCheckpoint extends Test
   private void doSendFailTest(String exceptionSubstring)
       throws IOException {
     Configuration conf = new HdfsConfiguration();
-    Path file1 = new Path("checkpoint-doSendFailTest-" + getName() + ".dat");
+    Path file1 = new Path("checkpoint-doSendFailTest-doSendFailTest.dat");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(numDatanodes)
                                                .build();
@@ -574,6 +589,7 @@ public class TestCheckpoint extends Test
    * Test that the NN locks its storage and edits directories, and won't start up
    * if the directories are already locked
    **/
+  @Test
   public void testNameDirLocking() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -603,6 +619,7 @@ public class TestCheckpoint extends Test
    * Test that, if the edits dir is separate from the name dir, it is
    * properly locked.
    **/
+  @Test
   public void testSeparateEditsDirLocking() throws IOException {
     Configuration conf = new HdfsConfiguration();
     File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
@@ -638,6 +655,7 @@ public class TestCheckpoint extends Test
   /**
    * Test that the SecondaryNameNode properly locks its storage directories.
    */
+  @Test
   public void testSecondaryNameNodeLocking() throws Exception {
     // Start a primary NN so that the secondary will start successfully
     Configuration conf = new HdfsConfiguration();
@@ -687,6 +705,7 @@ public class TestCheckpoint extends Test
   * Test that an attempt to lock a storage that is already locked by a nodename
   * logs an error message that includes the JVM name of the namenode that locked it.
    */
+  @Test
   public void testStorageAlreadyLockedErrorMessage() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -763,6 +782,7 @@ public class TestCheckpoint extends Test
    * 2. if the NN does not contain an image, importing a checkpoint
    *    succeeds and re-saves the image
    */
+  @Test
   public void testImportCheckpoint() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Path testPath = new Path("/testfile");
@@ -861,6 +881,7 @@ public class TestCheckpoint extends Test
   /**
    * Tests checkpoint in HDFS.
    */
+  @Test
   public void testCheckpoint() throws IOException {
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");
@@ -951,6 +972,7 @@ public class TestCheckpoint extends Test
   /**
    * Tests save namespace.
    */
+  @Test
   public void testSaveNamespace() throws IOException {
     MiniDFSCluster cluster = null;
     DistributedFileSystem fs = null;
@@ -1057,6 +1079,7 @@ public class TestCheckpoint extends Test
   
   /* Test case to test CheckpointSignature */
   @SuppressWarnings("deprecation")
+  @Test
   public void testCheckpointSignature() throws IOException {
 
     MiniDFSCluster cluster = null;
@@ -1091,6 +1114,7 @@ public class TestCheckpoint extends Test
    * - it then fails again for the same reason
    * - it then tries to checkpoint a third time
    */
+  @Test
   public void testCheckpointAfterTwoFailedUploads() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1147,6 +1171,7 @@ public class TestCheckpoint extends Test
    * 
    * @throws IOException
    */
+  @Test
   public void testMultipleSecondaryNamenodes() throws IOException {
     Configuration conf = new HdfsConfiguration();
     String nameserviceId1 = "ns1";
@@ -1197,6 +1222,7 @@ public class TestCheckpoint extends Test
    * Test that the secondary doesn't have to re-download image
    * if it hasn't changed.
    */
+  @Test
   public void testSecondaryImageDownload() throws IOException {
     LOG.info("Starting testSecondaryImageDownload");
     Configuration conf = new HdfsConfiguration();
@@ -1279,6 +1305,7 @@ public class TestCheckpoint extends Test
    * It verifies that this works even though the earlier-txid checkpoint gets
    * uploaded after the later-txid checkpoint.
    */
+  @Test
   public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
     Configuration conf = new HdfsConfiguration();
 
@@ -1364,6 +1391,7 @@ public class TestCheckpoint extends Test
    * It verifies that one of the two gets an error that it's uploading a
    * duplicate checkpoint, and the other one succeeds.
    */
+  @Test
   public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
     Configuration conf = new HdfsConfiguration();
 
@@ -1457,6 +1485,7 @@ public class TestCheckpoint extends Test
   * is running. The secondary should shut itself down if it talks to a NN
    * with the wrong namespace.
    */
+  @Test
   public void testReformatNNBetweenCheckpoints() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1514,6 +1543,7 @@ public class TestCheckpoint extends Test
    * Test that the primary NN will not serve any files to a 2NN who doesn't
    * share its namespace ID, and also will not accept any files from one.
    */
+  @Test
   public void testNamespaceVerifiedOnFileTransfer() throws IOException {
     MiniDFSCluster cluster = null;
     
@@ -1575,6 +1605,7 @@ public class TestCheckpoint extends Test
    * the non-failed storage directory receives the checkpoint.
    */
   @SuppressWarnings("deprecation")
+  @Test
   public void testCheckpointWithFailedStorageDir() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1639,6 +1670,7 @@ public class TestCheckpoint extends Test
    * @throws Exception
    */
   @SuppressWarnings("deprecation")
+  @Test
   public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1711,6 +1743,7 @@ public class TestCheckpoint extends Test
   /**
    * Test that the 2NN triggers a checkpoint after the configurable interval
    */
+  @Test
   public void testCheckpointTriggerOnTxnCount() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1764,6 +1797,7 @@ public class TestCheckpoint extends Test
    * logs that connect the 2NN's old checkpoint to the current txid
    * get archived. Then, the 2NN tries to checkpoint again.
    */
+  @Test
   public void testSecondaryHasVeryOutOfDateImage() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1801,6 +1835,7 @@ public class TestCheckpoint extends Test
     }
   }
   
+  @Test
   public void testCommandLineParsing() throws ParseException {
     SecondaryNameNode.CommandLineOpts opts =
       new SecondaryNameNode.CommandLineOpts();
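
Dropping TestCase also removes getName(), which is why doSendFailTest() above now hard-codes its file name. For reference, JUnit 4 offers the TestName rule as a replacement; the sketch below is an alternative the commit does not use, with a hypothetical class name:

    // Sketch: recovering the running test method's name under JUnit 4.
    import org.junit.Rule;
    import org.junit.rules.TestName;

    public class CheckpointFileNames {       // hypothetical example class
      @Rule
      public TestName testName = new TestName();

      String sendFailFileName() {
        // Equivalent of the old "checkpoint-doSendFailTest-" + getName() + ".dat"
        return "checkpoint-doSendFailTest-" + testName.getMethodName() + ".dat";
      }
    }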

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Mon Jul 30 23:31:42 2012
@@ -29,8 +29,6 @@ import java.util.Map;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -42,6 +40,12 @@ import org.mortbay.util.ajax.JSON;
  * Class for testing {@link NameNodeMXBean} implementation
  */
 public class TestNameNodeMXBean {
+
+  /**
+   * Used to assert equality between doubles
+   */
+  private static final double DELTA = 0.000001;
+
   @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
@@ -59,36 +63,36 @@ public class TestNameNodeMXBean {
           "Hadoop:service=NameNode,name=NameNodeInfo");
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
-      Assert.assertEquals(fsn.getClusterId(), clusterId);
+      assertEquals(fsn.getClusterId(), clusterId);
       // get attribute "BlockPoolId"
       String blockpoolId = (String) mbs.getAttribute(mxbeanName, 
           "BlockPoolId");
-      Assert.assertEquals(fsn.getBlockPoolId(), blockpoolId);
+      assertEquals(fsn.getBlockPoolId(), blockpoolId);
       // get attribute "Version"
       String version = (String) mbs.getAttribute(mxbeanName, "Version");
-      Assert.assertEquals(fsn.getVersion(), version);
-      Assert.assertTrue(version.equals(VersionInfo.getVersion()
+      assertEquals(fsn.getVersion(), version);
+      assertTrue(version.equals(VersionInfo.getVersion()
           + ", r" + VersionInfo.getRevision()));
       // get attribute "Used"
       Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
-      Assert.assertEquals(fsn.getUsed(), used.longValue());
+      assertEquals(fsn.getUsed(), used.longValue());
       // get attribute "Total"
       Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
-      Assert.assertEquals(fsn.getTotal(), total.longValue());
+      assertEquals(fsn.getTotal(), total.longValue());
       // get attribute "safemode"
       String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
-      Assert.assertEquals(fsn.getSafemode(), safemode);
+      assertEquals(fsn.getSafemode(), safemode);
       // get attribute nondfs
       Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
-      Assert.assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
+      assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
       // get attribute percentremaining
       Float percentremaining = (Float) (mbs.getAttribute(mxbeanName,
           "PercentRemaining"));
-      Assert.assertEquals(fsn.getPercentRemaining(), percentremaining
-          .floatValue());
+      assertEquals(fsn.getPercentRemaining(), percentremaining
+          .floatValue(), DELTA);
       // get attribute Totalblocks
       Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
-      Assert.assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
+      assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
       // get attribute alivenodeinfo
       String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName,
           "LiveNodes"));
@@ -103,15 +107,15 @@ public class TestNameNodeMXBean {
         assertTrue(liveNode.containsKey("numBlocks"));
         assertTrue(((Long)liveNode.get("numBlocks")) == 0);
       }
-      Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo);
+      assertEquals(fsn.getLiveNodes(), alivenodeinfo);
       // get attribute deadnodeinfo
       String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
           "DeadNodes"));
-      Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+      assertEquals(fsn.getDeadNodes(), deadnodeinfo);
       // get attribute NameDirStatuses
       String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
           "NameDirStatuses"));
-      Assert.assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
+      assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
       Map<String, Map<String, String>> statusMap =
         (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
       Collection<URI> nameDirUris = cluster.getNameDirs(0);
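
The new DELTA constant exists because org.junit.Assert treats floating-point assertEquals without a tolerance as deprecated; the three-argument overload takes the allowed absolute difference between expected and actual. A one-method illustration (a sketch in the same class, not part of the commit):

    // Sketch: comparing floating-point values with an explicit tolerance.
    @Test
    public void testFloatToleranceSketch() {
      // Passes: the two values differ by far less than DELTA (1e-6).
      assertEquals(1.0f / 3, 0.3333333f, DELTA);
    }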

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java Mon Jul 30 23:31:42 2012
@@ -26,8 +26,6 @@ import java.io.File;
 import java.util.Collections;
 import java.util.List;
 
-import junit.framework.AssertionFailedError;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -132,7 +130,7 @@ public class TestParallelImageWrite {
    * @param fsn - the FSNamesystem being checked.
    * @param numImageDirs - the configured number of StorageDirectory of type IMAGE. 
    * @return - the md5 hash of the most recent FSImage files, which must all be the same.
-   * @throws AssertionFailedError if image files are empty or different,
+   * @throws AssertionError if image files are empty or different,
    *     if less than two StorageDirectory are provided, or if the
    *     actual number of StorageDirectory is less than configured.
    */
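
The javadoc change above reflects the JUnit migration as well: org.junit.Assert methods signal failure by throwing java.lang.AssertionError rather than JUnit 3's junit.framework.AssertionFailedError. A small sketch of what a caller of the documented helper can therefore expect to catch (hypothetical snippet, not from the commit):

    // Sketch: JUnit 4 assertions fail with java.lang.AssertionError.
    try {
      assertTrue("image files are empty or different", false);
    } catch (AssertionError expected) {
      // This is the type the updated @throws clause documents.
    }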

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Mon Jul 30 23:31:42 2012
@@ -443,16 +443,15 @@ public class TestStartup {
 
   private void testImageChecksum(boolean compress) throws Exception {
     MiniDFSCluster cluster = null;
-    Configuration conf = new HdfsConfiguration();
     if (compress) {
-      conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, true);
+      config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, true);
     }
 
     try {
         LOG.info("\n===========================================\n" +
                  "Starting empty cluster");
         
-        cluster = new MiniDFSCluster.Builder(conf)
+        cluster = new MiniDFSCluster.Builder(config)
           .numDataNodes(0)
           .format(true)
           .build();
@@ -479,7 +478,7 @@ public class TestStartup {
         LOG.info("\n===========================================\n" +
         "Starting same cluster after simulated crash");
         try {
-          cluster = new MiniDFSCluster.Builder(conf)
+          cluster = new MiniDFSCluster.Builder(config)
             .numDataNodes(0)
             .format(false)
             .build();
@@ -507,19 +506,18 @@ public class TestStartup {
     FileSystem localFileSys;
     Path hostsFile;
     Path excludeFile;
-    Configuration conf = new HdfsConfiguration();
     int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
     // Set up the hosts/exclude files.
-    localFileSys = FileSystem.getLocal(conf);
+    localFileSys = FileSystem.getLocal(config);
     Path workingDir = localFileSys.getWorkingDirectory();
     Path dir = new Path(workingDir, "build/test/data/work-dir/restartnn");
     hostsFile = new Path(dir, "hosts");
     excludeFile = new Path(dir, "exclude");
 
     // Setup conf
-    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+    config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     writeConfigFile(localFileSys, excludeFile, null);
-    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
+    config.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     // write into hosts file
     ArrayList<String>list = new ArrayList<String>();
     byte b[] = {127, 0, 0, 1};
@@ -529,7 +527,7 @@ public class TestStartup {
     int numDatanodes = 1;
     
     try {
-      cluster = new MiniDFSCluster.Builder(conf)
+      cluster = new MiniDFSCluster.Builder(config)
       .numDataNodes(numDatanodes).setupHostsFile(true).build();
       cluster.waitActive();
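
The edits to TestStartup stop building a throwaway local HdfsConfiguration and instead push the per-test keys into the class-level config field (assumed here to be initialized elsewhere in the class, outside these hunks), so the MiniDFSCluster and the rest of the test see the same settings. A minimal sketch of that shared-configuration pattern:

    // Sketch: one Configuration per test, shared by every helper in the class.
    private Configuration config;            // assumed pre-existing field

    @Before
    public void setUp() throws Exception {
      config = new HdfsConfiguration();
    }

    @Test
    public void testStartsEmptyClusterSketch() throws Exception {
      // Per-test keys go into the shared config rather than a new local one.
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(true)
          .build();
      try {
        cluster.waitActive();
      } finally {
        cluster.shutdown();
      }
    }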
   

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Mon Jul 30 23:31:42 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertTrue;
 
@@ -63,6 +64,9 @@ public class TestNameNodeMetrics {
   // Number of datanodes in the cluster
   private static final int DATANODE_COUNT = 3; 
   private static final int WAIT_GAUGE_VALUE_RETRIES = 20;
+  
+  // Rollover interval of percentile metrics (in seconds)
+  private static final int PERCENTILES_INTERVAL = 1;
 
   static {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
@@ -71,6 +75,8 @@ public class TestNameNodeMetrics {
         DFS_REPLICATION_INTERVAL);
     CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
         DFS_REPLICATION_INTERVAL);
+    CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, 
+        "" + PERCENTILES_INTERVAL);
 
     ((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
       .getLogger().setLevel(Level.DEBUG);
@@ -352,4 +358,24 @@ public class TestNameNodeMetrics {
     assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
     assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
   }
+  
+  /**
+   * Tests that the sync and block report metrics get updated on cluster
+   * startup.
+   */
+  @Test
+  public void testSyncAndBlockReportMetric() throws Exception {
+    MetricsRecordBuilder rb = getMetrics(NN_METRICS);
+    // We have one sync when the cluster starts up, just opening the journal
+    assertCounter("SyncsNumOps", 1L, rb);
+    // Each datanode reports in when the cluster comes up
+    assertCounter("BlockReportNumOps", (long)DATANODE_COUNT, rb);
+    
+    // Sleep for an interval+slop to let the percentiles roll over
+    Thread.sleep((PERCENTILES_INTERVAL+1)*1000);
+    
+    // Check that the percentiles were updated
+    assertQuantileGauges("Syncs1s", rb);
+    assertQuantileGauges("BlockReport1s", rb);
+  }
 }
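
The gauge names asserted above follow the same convention as the datanode tests earlier in this commit: the base metric name, then the configured interval in seconds, then "s", so with PERCENTILES_INTERVAL = 1 the gauges are "Syncs1s" and "BlockReport1s". A trivial helper making that rule explicit (a sketch, not part of the commit):

    // Sketch: how the quantile-gauge names checked above are formed.
    private static String quantileGaugeName(String metric, int intervalSecs) {
      return metric + intervalSecs + "s";    // "Syncs" + 1 + "s" -> "Syncs1s"
    }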

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java Mon Jul 30 23:31:42 2012
@@ -18,22 +18,10 @@
 package org.apache.hadoop.hdfs.web;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
-import java.net.URI;
 import java.net.URL;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.TestByteRangeInputStream.MockHttpURLConnection;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.OffsetUrlInputStream;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.OffsetUrlOpener;
 import org.junit.Test;
 
 public class TestOffsetUrlInputStream {
@@ -73,65 +61,4 @@ public class TestOffsetUrlInputStream {
           WebHdfsFileSystem.removeOffsetParam(new URL(s)).toString());
     }
   }
-  
-  @Test
-  public void testByteRange() throws Exception {
-    final Configuration conf = new Configuration(); 
-    final String uri = WebHdfsFileSystem.SCHEME  + "://localhost:50070/";
-    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
-
-    OffsetUrlOpener ospy = spy(webhdfs.new OffsetUrlOpener(new URL("http://test/")));
-    doReturn(new MockHttpURLConnection(ospy.getURL())).when(ospy)
-        .openConnection();
-    OffsetUrlOpener rspy = spy(webhdfs.new OffsetUrlOpener((URL) null));
-    doReturn(new MockHttpURLConnection(rspy.getURL())).when(rspy)
-        .openConnection();
-    final OffsetUrlInputStream is = new OffsetUrlInputStream(ospy, rspy);
-
-    assertEquals("getPos wrong", 0, is.getPos());
-
-    is.read();
-
-    assertNull("Initial call made incorrectly (Range Check)", ospy
-        .openConnection().getRequestProperty("Range"));
-
-    assertEquals("getPos should be 1 after reading one byte", 1, is.getPos());
-
-    is.read();
-
-    assertEquals("getPos should be 2 after reading two bytes", 2, is.getPos());
-
-    // No additional connections should have been made (no seek)
-
-    rspy.setURL(new URL("http://resolvedurl/"));
-
-    is.seek(100);
-    is.read();
-
-    assertEquals("getPos should be 101 after reading one byte", 101,
-        is.getPos());
-
-    verify(rspy, times(1)).openConnection();
-
-    is.seek(101);
-    is.read();
-
-    verify(rspy, times(1)).openConnection();
-
-    // Seek to 101 should not result in another request"
-
-    is.seek(2500);
-    is.read();
-
-    ((MockHttpURLConnection) rspy.openConnection()).setResponseCode(206);
-    is.seek(0);
-
-    try {
-      is.read();
-      fail("Exception should be thrown when 206 response is given "
-           + "but 200 is expected");
-    } catch (IOException e) {
-      WebHdfsFileSystem.LOG.info(e.toString());
-    }
-  }
 }
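
With testByteRange() removed, the class no longer needs the WebHdfsFileSystem internals (OffsetUrlOpener, OffsetUrlInputStream) or the Mockito imports stripped above; what remains is the offset-parameter check built on WebHdfsFileSystem.removeOffsetParam. A standalone sketch in the same spirit, under the assumption (not stated in the diff) that a URL with no offset parameter comes back unchanged:

    // Sketch: removeOffsetParam should be a no-op when no offset is present.
    @Test
    public void testRemoveOffsetParamNoOffsetSketch() throws IOException {
      final String s = "http://localhost:50070/webhdfs/v1/f?op=OPEN&length=10";
      assertEquals(s, WebHdfsFileSystem.removeOffsetParam(new URL(s)).toString());
    }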

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java Mon Jul 30 23:31:42 2012
@@ -79,13 +79,9 @@ public class WebHdfsTestUtil {
     return WebHdfsFileSystem.jsonParse(conn, false);
   }
   
-  public static HttpURLConnection twoStepWrite(HttpURLConnection conn,
-      final HttpOpParam.Op op) throws IOException {
-    conn.setRequestMethod(op.getType().toString());
-    conn = WebHdfsFileSystem.twoStepWrite(conn, op);
-    conn.setDoOutput(true);
-    conn.connect();
-    return conn;
+  public static HttpURLConnection twoStepWrite(final WebHdfsFileSystem webhdfs,
+      final HttpOpParam.Op op, HttpURLConnection conn) throws IOException {
+    return webhdfs.new Runner(op, conn).twoStepWrite();
   }
 
   public static FSDataOutputStream write(final WebHdfsFileSystem webhdfs,
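
The helper's contract changes here: the old version drove the HttpURLConnection directly (setRequestMethod, WebHdfsFileSystem.twoStepWrite, setDoOutput, connect), while the new version delegates to the WebHdfsFileSystem.Runner inner class and therefore needs the filesystem instance from the caller. A sketch of updating a call site, where webhdfs, op, and conn are assumed to already exist in the calling test:

    // Old form (removed above):
    //   conn = WebHdfsTestUtil.twoStepWrite(conn, op);
    // New form: the filesystem's Runner performs the two-step write.
    HttpURLConnection conn2 = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);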