Posted to commits@hbase.apache.org by st...@apache.org on 2010/08/11 17:03:22 UTC

svn commit: r984433 - in /hbase/trunk: CHANGES.txt src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java

Author: stack
Date: Wed Aug 11 15:03:21 2010
New Revision: 984433

URL: http://svn.apache.org/viewvc?rev=984433&view=rev
Log:
HBASE-2868 Do some small cleanups in org.apache.hadoop.hbase.regionserver.wal

Modified:
    hbase/trunk/CHANGES.txt
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java

Modified: hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/trunk/CHANGES.txt?rev=984433&r1=984432&r2=984433&view=diff
==============================================================================
--- hbase/trunk/CHANGES.txt (original)
+++ hbase/trunk/CHANGES.txt Wed Aug 11 15:03:21 2010
@@ -833,6 +833,8 @@ Release 0.21.0 - Unreleased
                (Chongxin Li via Stack)
    HBASE-2844  Capping the number of regions (Pranav Khaitan via Stack)
    HBASE-2870  Add Backup CLI Option to HMaster (Nicolas Spiegelberg via Stack)
+   HBASE-2868  Do some small cleanups in org.apache.hadoop.hbase.regionserver.wal
+               (Alex Newman via Stack)
 
   NEW FEATURES
    HBASE-1961  HBase EC2 scripts

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=984433&r1=984432&r2=984433&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Wed Aug 11 15:03:21 2010
@@ -28,6 +28,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.UnsupportedEncodingException;
+import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.URLEncoder;
 import java.util.ArrayList;
@@ -1001,10 +1002,13 @@ public class HLog implements Syncable {
    * If the pipeline isn't started yet or is empty, you will get the default
    * replication factor.  Therefore, if this function returns 0, it means you
    * are not properly running with the HDFS-826 patch.
+   * @throws InvocationTargetException
+   * @throws IllegalAccessException
+   * @throws IllegalArgumentException
    *
    * @throws Exception
    */
-  int getLogReplication() throws Exception {
+  int getLogReplication() throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
     if(this.getNumCurrentReplicas != null && this.hdfs_out != null) {
       Object repl = this.getNumCurrentReplicas.invoke(this.hdfs_out, NO_ARGS);
       if (repl instanceof Integer) {

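Note on the HLog.java change above: it narrows a blanket "throws Exception" to the three exceptions that Method.invoke() actually declares. For context, here is a minimal sketch of the reflection pattern HLog relies on, probing the HDFS output stream once for getNumCurrentReplicas() (present only on clients carrying HDFS-826) and invoking it per call. The surrounding class name ReplicationProbe is illustrative, not part of the commit.

import java.io.OutputStream;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

class ReplicationProbe {
  private static final Object[] NO_ARGS = new Object[] {};
  private final OutputStream hdfsOut;
  private final Method getNumCurrentReplicas; // null when HDFS-826 is absent

  ReplicationProbe(OutputStream hdfsOut) {
    this.hdfsOut = hdfsOut;
    Method m = null;
    try {
      m = hdfsOut.getClass().getMethod("getNumCurrentReplicas", new Class<?>[] {});
      // The method is public but DFSOutputStream itself is not, so make it
      // accessible before invoking it through the OutputStream reference.
      m.setAccessible(true);
    } catch (NoSuchMethodException e) {
      // Older HDFS jar without HDFS-826; getLogReplication() will return 0.
    }
    this.getNumCurrentReplicas = m;
  }

  // Mirrors HLog.getLogReplication(): a return of 0 signals that the
  // HDFS-826 patch is not available on this client.
  int getLogReplication() throws IllegalArgumentException,
      IllegalAccessException, InvocationTargetException {
    if (this.getNumCurrentReplicas != null && this.hdfsOut != null) {
      Object repl = this.getNumCurrentReplicas.invoke(this.hdfsOut, NO_ARGS);
      if (repl instanceof Integer) {
        return ((Integer) repl).intValue();
      }
    }
    return 0;
  }
}
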
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java?rev=984433&r1=984432&r2=984433&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java Wed Aug 11 15:03:21 2010
@@ -19,7 +19,9 @@
  */
 package org.apache.hadoop.hbase.regionserver.wal;
 
+import java.io.IOException;
 import java.io.OutputStream;
+import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.List;
@@ -27,10 +29,13 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.hbase.HBaseClusterTestCase;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -39,22 +44,33 @@ import org.apache.hadoop.hbase.client.Pu
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test log deletion as logs are rolled.
  */
-public class TestLogRolling extends HBaseClusterTestCase {
+public class TestLogRolling {
   private static final Log LOG = LogFactory.getLog(TestLogRolling.class);
   private HRegionServer server;
   private HLog log;
   private String tableName;
   private byte[] value;
+  private static FileSystem fs;
+  private static MiniDFSCluster dfsCluster;
+  private static HBaseAdmin admin;
+  private static MiniHBaseCluster cluster;
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
  // verbose logging on classes that are touched in these tests
  {
@@ -71,10 +87,9 @@ public class TestLogRolling extends HBas
    * constructor
    * @throws Exception
    */
-  public TestLogRolling() throws Exception {
+  public TestLogRolling() {
     // start one regionserver and a minidfs.
     super();
-    try {
       this.server = null;
       this.log = null;
       this.tableName = null;
@@ -86,63 +101,67 @@ public class TestLogRolling extends HBas
         v.append(className);
       }
       value = Bytes.toBytes(v.toString());
-
-    } catch (Exception e) {
-      LOG.fatal("error in constructor", e);
-      throw e;
-    }
   }
 
   // Need to override this setup so we can edit the config before it gets sent
  // to the HDFS & HBase cluster startup.
-  @Override
-  protected void setUp() throws Exception {
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
     /**** configuration for testLogRolling ****/
     // Force a region split after every 768KB
-    conf.setLong("hbase.hregion.max.filesize", 768L * 1024L);
+    TEST_UTIL.getConfiguration().setLong("hbase.hregion.max.filesize", 768L * 1024L);
 
     // We roll the log after every 32 writes
-    conf.setInt("hbase.regionserver.maxlogentries", 32);
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32);
 
     // For less frequently updated regions flush after every 2 flushes
-    conf.setInt("hbase.hregion.memstore.optionalflushcount", 2);
+    TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.optionalflushcount", 2);
 
     // We flush the cache after every 8192 bytes
-    conf.setInt("hbase.hregion.memstore.flush.size", 8192);
+    TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.flush.size", 8192);
 
     // Increase the amount of time between client retries
-    conf.setLong("hbase.client.pause", 15 * 1000);
+    TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 15 * 1000);
 
     // Reduce thread wake frequency so that other threads can get
     // a chance to run.
-    conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
+    TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
 
    /**** configuration for testLogRollOnDatanodeDeath ****/
    // make sure log.hflush() calls syncFs() to open a pipeline
-   conf.setBoolean("dfs.support.append", true);
-   // lower the namenode & datanode heartbeat so the namenode 
+    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
+   // lower the namenode & datanode heartbeat so the namenode
    // quickly detects datanode failures
-   conf.setInt("heartbeat.recheck.interval", 5000);
-   conf.setInt("dfs.heartbeat.interval", 1);
-   // the namenode might still try to choose the recently-dead datanode 
+    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
+    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
+   // the namenode might still try to choose the recently-dead datanode
    // for a pipeline, so try to a new pipeline multiple times
-   conf.setInt("dfs.client.block.write.retries", 30);
-   
-   super.setUp();
+    TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30);
+    TEST_UTIL.startMiniCluster(2);
+
+    cluster = TEST_UTIL.getHBaseCluster();
+    dfsCluster = TEST_UTIL.getDFSCluster();
+    fs = TEST_UTIL.getTestFileSystem();
+    admin = TEST_UTIL.getHBaseAdmin();
   }
 
-  private void startAndWriteData() throws Exception {
+  @AfterClass
+  public static void tearDownAfterClass() throws IOException {
+    TEST_UTIL.cleanupTestDir();
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private void startAndWriteData() throws IOException {
     // When the META table can be opened, the region servers are running
-    new HTable(conf, HConstants.META_TABLE_NAME);
+    new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
     this.server = cluster.getRegionServerThreads().get(0).getRegionServer();
     this.log = server.getLog();
 
     // Create the test table and open it
     HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
-    HBaseAdmin admin = new HBaseAdmin(conf);
     admin.createTable(desc);
-    HTable table = new HTable(conf, tableName);
+    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
     for (int i = 1; i <= 256; i++) {    // 256 writes should cause 8 log rolls
       Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", i)));
       put.add(HConstants.CATALOG_FAMILY, null, value);
@@ -160,12 +179,12 @@ public class TestLogRolling extends HBas
 
   /**
    * Tests that logs are deleted
-   *
-   * @throws Exception
+   * @throws IOException
+   * @throws FailedLogCloseException
    */
-  public void testLogRolling() throws Exception {
+  @Test
+  public void testLogRolling() throws FailedLogCloseException, IOException {
     this.tableName = getName();
-    try {
       startAndWriteData();
       LOG.info("after writing there are " + log.getNumLogFiles() + " log files");
 
@@ -184,13 +203,13 @@ public class TestLogRolling extends HBas
       LOG.info("after flushing all regions and rolling logs there are " +
           log.getNumLogFiles() + " log files");
       assertTrue(("actual count: " + count), count <= 2);
-    } catch (Exception e) {
-      LOG.fatal("unexpected exception", e);
-      throw e;
-    }
   }
 
-  void writeData(HTable table, int rownum) throws Exception {
+  private static String getName() {
+    return "TestLogRolling";
+  }
+
+  void writeData(HTable table, int rownum) throws IOException {
     Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", rownum)));
     put.add(HConstants.CATALOG_FAMILY, null, value);
     table.put(put);
@@ -202,80 +221,101 @@ public class TestLogRolling extends HBas
       // continue
     }
   }
-  
+
+  /**
+   * Give me the HDFS pipeline for this log file
+   */
+  @SuppressWarnings("null")
+  DatanodeInfo[] getPipeline(HLog log) throws IllegalArgumentException,
+      IllegalAccessException, InvocationTargetException {
+
+    // kill a datanode in the pipeline to force a log roll on the next sync()
+    OutputStream stm = log.getOutputStream();
+    Method getPipeline = null;
+    for (Method m : stm.getClass().getDeclaredMethods()) {
+      if (m.getName().endsWith("getPipeline")) {
+        getPipeline = m;
+        getPipeline.setAccessible(true);
+        break;
+      }
+    }
+
+    assertTrue("Need DFSOutputStream.getPipeline() for this test",
+        null != getPipeline);
+    Object repl = getPipeline.invoke(stm, new Object[] {} /* NO_ARGS */);
+    return (DatanodeInfo[]) repl;
+  }
+
   /**
    * Tests that logs are rolled upon detecting datanode death
    * Requires an HDFS jar with HDFS-826 & syncFs() support (HDFS-200)
-   * 
-   * @throws Exception
-   */
-  public void testLogRollOnDatanodeDeath() throws Exception {
-    assertTrue("This test requires HLog file replication.", 
-        fs.getDefaultReplication() > 1);
-    
+   * @throws IOException
+   * @throws InterruptedException
+   * @throws InvocationTargetException 
+   * @throws IllegalAccessException
+   * @throws IllegalArgumentException 
+    */
+  @Test
+  public void testLogRollOnDatanodeDeath() throws IOException,
+      InterruptedException, IllegalArgumentException, IllegalAccessException,
+      InvocationTargetException {
+    assertTrue("This test requires HLog file replication.", fs
+        .getDefaultReplication() > 1);
     // When the META table can be opened, the region servers are running
-    new HTable(conf, HConstants.META_TABLE_NAME);
+    new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
     this.server = cluster.getRegionServer(0);
     this.log = server.getLog();
-    
+
     assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
     // don't run this test without append support (HDFS-200 & HDFS-142)
-    assertTrue("Need append support for this test", FSUtils.isAppendSupported(conf));
+    assertTrue("Need append support for this test", FSUtils
+        .isAppendSupported(TEST_UTIL.getConfiguration()));
 
     // add up the datanode count, to ensure proper replication when we kill 1
-    dfsCluster.startDataNodes(conf, 1, true, null, null);
+    dfsCluster
+        .startDataNodes(TEST_UTIL.getConfiguration(), 1, true, null, null);
     dfsCluster.waitActive();
-    assertTrue(dfsCluster.getDataNodes().size() >= 
-               fs.getDefaultReplication() + 1);
+    assertTrue(dfsCluster.getDataNodes().size() >= fs.getDefaultReplication() + 1);
 
     // Create the test table and open it
     String tableName = getName();
     HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
-    HBaseAdmin admin = new HBaseAdmin(conf);
+
+    if (admin.tableExists(tableName)) {
+      admin.disableTable(tableName);
+      admin.deleteTable(tableName);
+    }
     admin.createTable(desc);
-    HTable table = new HTable(conf, tableName);
+
+    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    writeData(table, 2);
+
     table.setAutoFlush(true);
 
     long curTime = System.currentTimeMillis();
     long oldFilenum = log.getFilenum();
-    assertTrue("Log should have a timestamp older than now", 
-             curTime > oldFilenum && oldFilenum != -1);
+    assertTrue("Log should have a timestamp older than now",
+        curTime > oldFilenum && oldFilenum != -1);
 
-    // normal write
-    writeData(table, 1);
-    assertTrue("The log shouldn't have rolled yet", 
-              oldFilenum == log.getFilenum());
-
-    // kill a datanode in the pipeline to force a log roll on the next sync()
-    OutputStream stm = log.getOutputStream();
-    Method getPipeline = null;
-    for (Method m : stm.getClass().getDeclaredMethods()) {
-      if(m.getName().endsWith("getPipeline")) {
-        getPipeline = m;
-        getPipeline.setAccessible(true);
-        break;
-      }
-    }
-    assertTrue("Need DFSOutputStream.getPipeline() for this test", 
-                getPipeline != null);
-    Object repl = getPipeline.invoke(stm, new Object []{} /*NO_ARGS*/);
-    DatanodeInfo[] pipeline = (DatanodeInfo[]) repl;
+    assertTrue("The log shouldn't have rolled yet", oldFilenum == log
+        .getFilenum());
+    DatanodeInfo[] pipeline = getPipeline(log);
     assertTrue(pipeline.length == fs.getDefaultReplication());
-    DataNodeProperties dnprop = dfsCluster.stopDataNode(pipeline[0].getName());
-    assertTrue(dnprop != null);
 
+    assertTrue(dfsCluster.stopDataNode(pipeline[0].getName()) != null);
+    Thread.sleep(10000);
     // this write should succeed, but trigger a log roll
     writeData(table, 2);
     long newFilenum = log.getFilenum();
-    assertTrue("Missing datanode should've triggered a log roll", 
-              newFilenum > oldFilenum && newFilenum > curTime);
-    
+
+    assertTrue("Missing datanode should've triggered a log roll",
+        newFilenum > oldFilenum && newFilenum > curTime);
+
     // write some more log data (this should use a new hdfs_out)
     writeData(table, 3);
-    assertTrue("The log should not roll again.", 
-              log.getFilenum() == newFilenum);
-    assertTrue("New log file should have the default replication",
-              log.getLogReplication() == fs.getDefaultReplication());
+    assertTrue("The log should not roll again.", log.getFilenum() == newFilenum);
+    assertTrue("New log file should have the default replication", log
+        .getLogReplication() == fs.getDefaultReplication());
   }
 }
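
Beyond the exception-signature cleanups, the bulk of the TestLogRolling change is a migration from the JUnit 3-style HBaseClusterTestCase base class to JUnit 4 annotations with a class-scoped HBaseTestingUtility, so the mini HDFS/HBase cluster is started once per class instead of once per test. Below is a minimal sketch of that skeleton, using only the TEST_UTIL calls that appear in the diff above; the class name MiniClusterSkeleton and the placeholder test are illustrative, not part of the commit.

import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterSkeleton {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Edit the shared Configuration before startup; once startMiniCluster()
    // runs, the HDFS and HBase daemons have already read these values.
    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32);
    TEST_UTIL.startMiniCluster(2); // two region servers plus a mini-DFS
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster(); // single teardown for the whole class
  }

  @Test
  public void clusterIsUp() {
    assertTrue(TEST_UTIL.getHBaseCluster().getRegionServerThreads().size() >= 1);
  }
}

Because the cluster now outlives individual test methods, tables created by one test persist into the next; that is why testLogRollOnDatanodeDeath() above guards admin.createTable() with a tableExists()/disableTable()/deleteTable() check.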