Posted to common-commits@hadoop.apache.org by st...@apache.org on 2007/09/15 17:14:55 UTC

svn commit: r575928 [2/2] - in /lucene/hadoop/trunk/src/contrib/hbase: ./ bin/ src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java?rev=575928&r1=575927&r2=575928&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java Sat Sep 15 08:14:53 2007
@@ -23,6 +23,7 @@
 import org.apache.commons.logging.LogFactory;
 import java.io.*;
 import java.util.*;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * Leases
@@ -41,14 +42,13 @@
 public class Leases {
   protected static final Log LOG = LogFactory.getLog(Leases.class.getName());
 
-  protected final long leasePeriod;
-  protected final long leaseCheckFrequency;
-  private final LeaseMonitor leaseMonitor;
+  protected final int leasePeriod;
+  protected final int leaseCheckFrequency;
   private final Thread leaseMonitorThread;
   protected final Map<LeaseName, Lease> leases =
     new HashMap<LeaseName, Lease>();
   protected final TreeSet<Lease> sortedLeases = new TreeSet<Lease>();
-  protected boolean running = true;
+  protected AtomicBoolean stop = new AtomicBoolean(false);
 
   /**
    * Creates a lease
@@ -57,18 +57,25 @@
    * @param leaseCheckFrequency - how often the lease should be checked
    * (milliseconds)
    */
-  public Leases(long leasePeriod, long leaseCheckFrequency) {
+  public Leases(final int leasePeriod, final int leaseCheckFrequency) {
     this.leasePeriod = leasePeriod;
     this.leaseCheckFrequency = leaseCheckFrequency;
-    this.leaseMonitor = new LeaseMonitor();
-    this.leaseMonitorThread = new Thread(leaseMonitor);
-    this.leaseMonitorThread.setName("Lease.monitor");
+    this.leaseMonitorThread =
+      new LeaseMonitor(this.leaseCheckFrequency, this.stop);
+    this.leaseMonitorThread.setDaemon(true);
   }
   
   /** Starts the lease monitor */
   public void start() {
     leaseMonitorThread.start();
   }
+  
+  /**
+   * @param name Set name on the lease checking daemon thread.
+   */
+  public void setName(final String name) {
+    this.leaseMonitorThread.setName(name);
+  }
 
   /**
    * Shuts down this lease instance when all outstanding leases expire.
@@ -99,8 +106,7 @@
    */
   public void close() {
     LOG.info("closing leases");
-
-    this.running = false;
+    this.stop.set(true);
     try {
       this.leaseMonitorThread.interrupt();
       this.leaseMonitorThread.join();
@@ -196,36 +202,32 @@
         sortedLeases.remove(lease);
         leases.remove(name);
       }
-    }     
-//    if (LOG.isDebugEnabled()) {
-//      LOG.debug("Cancel lease " + name);
-//    }
+    }
   }
 
-  /** LeaseMonitor is a thread that expires Leases that go on too long. */
-  class LeaseMonitor implements Runnable {
-    /** {@inheritDoc} */
-    public void run() {
-      while(running) {
-        synchronized(leases) {
-          synchronized(sortedLeases) {
-            Lease top;
-            while((sortedLeases.size() > 0)
-                && ((top = sortedLeases.first()) != null)) {
-              if(top.shouldExpire()) {
-                leases.remove(top.getLeaseName());
-                sortedLeases.remove(top);
-                top.expired();
-              } else {
-                break;
-              }
+  /**
+   * LeaseMonitor is a thread that expires Leases that go on too long.
+   * It's a daemon thread.
+   */
+  class LeaseMonitor extends Chore {
+    public LeaseMonitor(int p, AtomicBoolean s) {
+      super(p, s);
+    }
+
+    protected void chore() {
+      synchronized(leases) {
+        synchronized(sortedLeases) {
+          Lease top;
+          while((sortedLeases.size() > 0)
+              && ((top = sortedLeases.first()) != null)) {
+            if(top.shouldExpire()) {
+              leases.remove(top.getLeaseName());
+              sortedLeases.remove(top);
+              top.expired();
+            } else {
+              break;
             }
           }
-        }
-        try {
-          Thread.sleep(leaseCheckFrequency);
-        } catch (InterruptedException ie) {
-          // continue
         }
       }
     }
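
  [The rewritten LeaseMonitor extends Chore, which is not part of this diff. Assuming, as the
  LeaseMonitor constructor's super(p, s) call suggests, that Chore is a Thread which repeatedly
  runs chore() and then sleeps for the given period until the shared AtomicBoolean is set, a
  minimal sketch of that pattern looks like the following. The class and field names here are
  illustrative only, not the actual Chore implementation.]

  import java.util.concurrent.atomic.AtomicBoolean;

  /**
   * Sketch of a Chore-style periodic worker: a Thread that calls chore()
   * every sleepPeriod milliseconds until the shared stop flag is set.
   */
  abstract class PeriodicWorker extends Thread {
    private final int sleepPeriod;        // milliseconds between chore() calls
    private final AtomicBoolean stop;     // shared shutdown flag, set by close()

    PeriodicWorker(final int sleepPeriod, final AtomicBoolean stop) {
      this.sleepPeriod = sleepPeriod;
      this.stop = stop;
    }

    /** The periodic work; a subclass such as LeaseMonitor implements this. */
    protected abstract void chore();

    @Override
    public void run() {
      while (!stop.get()) {
        chore();
        try {
          Thread.sleep(sleepPeriod);
        } catch (InterruptedException e) {
          // Interrupted (e.g. by close()); the loop re-checks the stop flag.
        }
      }
    }
  }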

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java?rev=575928&r1=575927&r2=575928&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java Sat Sep 15 08:14:53 2007
@@ -30,7 +30,29 @@
  * org.apache.hadoop.ipc.RemoteException exceptions.
  */
 public class RemoteExceptionHandler {
-  private RemoteExceptionHandler(){}                    // not instantiable
+  /* Not instantiable */
+  private RemoteExceptionHandler() {super();}
+  
+  /**
+   * Examine passed IOException.  See if it's carrying a RemoteException. If so,
+   * run {@link #decodeRemoteException(RemoteException)} on it.  Otherwise,
+   * pass back <code>e</code> unaltered.
+   * @param e Exception to examine.
+   * @return Decoded RemoteException carried by <code>e</code> or
+   * <code>e</code> unaltered.
+   */
+  public static IOException checkIOException(final IOException e) {
+    IOException result = e;
+    if (e instanceof RemoteException) {
+      try {
+        result = RemoteExceptionHandler.decodeRemoteException(
+            (RemoteException) e);
+      } catch (IOException ex) {
+        result = ex;
+      }
+    }
+    return result;
+  }
   
   /**
    * Converts org.apache.hadoop.ipc.RemoteException into original exception,
@@ -69,10 +91,15 @@
       }
 
     } catch (ClassNotFoundException x) {
+      // continue
     } catch (NoSuchMethodException x) {
+      // continue
     } catch (IllegalAccessException x) {
+      // continue
     } catch (InvocationTargetException x) {
+      // continue
     } catch (InstantiationException x) {
+      // continue
     }
     return i;
   }
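
  [The new checkIOException helper lets callers unwrap a RemoteException without writing the
  decode-and-fallback logic at every call site. A hedged sketch of how a caller might use it;
  ExampleCaller and doIpcCall are placeholders for illustration, not code from this commit.]

  import java.io.IOException;

  /** Hypothetical caller showing the intended use of checkIOException. */
  class ExampleCaller {
    void callRegionServer() throws IOException {
      try {
        doIpcCall();                                       // placeholder for any IPC call
      } catch (IOException e) {
        // Rethrow the decoded server-side exception, or e unaltered if it was local.
        throw RemoteExceptionHandler.checkIOException(e);
      }
    }

    private void doIpcCall() throws IOException {
      // placeholder body
    }
  }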

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=575928&r1=575927&r2=575928&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Sat Sep 15 08:14:53 2007
@@ -77,7 +77,6 @@
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
       final boolean miniHdfsFilesystem) throws IOException {
-    
     this(conf, nRegionNodes, miniHdfsFilesystem, true, true);
   }
 
@@ -127,7 +126,6 @@
       fs.mkdirs(parentdir);
       this.masterThread = startMaster(this.conf);
       this.regionThreads = startRegionServers(this.conf, nRegionNodes);
-
     } catch(IOException e) {
       shutdown();
       throw e;
@@ -233,18 +231,22 @@
    * Starts a region server thread running
    * 
    * @throws IOException
+   * @return Name of regionserver started.
    */
-  public void startRegionServer() throws IOException {
+  public String startRegionServer() throws IOException {
     RegionServerThread t =
       startRegionServer(this.conf, this.regionThreads.size());
     this.regionThreads.add(t);
+    return t.getName();
   }
   
   private static RegionServerThread startRegionServer(final Configuration c,
-    final int index) throws IOException {
-    
-    final HRegionServer hsr = new HRegionServer(c);
-    RegionServerThread t = new RegionServerThread(hsr, index);
+    final int index)
+  throws IOException {  
+    final HRegionServer hrs = new HRegionServer(c);
+    RegionServerThread t = new RegionServerThread(hrs, index);
+    t.setName("regionserver" +
+      t.getRegionServer().server.getListenerAddress().toString());
     t.start();
     return t;
   }
@@ -296,8 +298,9 @@
    * Wait for the specified region server to stop
    * Removes this thread from list of running threads.
    * @param serverNumber
+   * @return Name of region server that just went down.
    */
-  public void waitOnRegionServer(int serverNumber) {
+  public String waitOnRegionServer(int serverNumber) {
     RegionServerThread regionServerThread =
       this.regionThreads.remove(serverNumber);
     try {
@@ -307,6 +310,7 @@
     } catch (InterruptedException e) {
       e.printStackTrace();
     }
+    return regionServerThread.getName();
   }
   
   /**
@@ -353,14 +357,16 @@
     if(masterThread != null) {
       masterThread.getMaster().shutdown();
     }
-    synchronized(regionServerThreads) {
-      if (regionServerThreads != null) {
-        for(Thread t: regionServerThreads) {
-          if (t.isAlive()) {
-            try {
-              t.join();
-            } catch (InterruptedException e) {
-              // continue
+    if (regionServerThreads != null) {
+      synchronized(regionServerThreads) {
+        if (regionServerThreads != null) {
+          for(Thread t: regionServerThreads) {
+            if (t.isAlive()) {
+              try {
+                t.join();
+              } catch (InterruptedException e) {
+                // continue
+              }
             }
           }
         }
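
  [startRegionServer() and waitOnRegionServer() now return the name of the regionserver thread
  involved, so tests can log which server came up or went down. This is the pattern the
  TestCleanRegionServerExit change below adopts; a minimal sketch, assuming a running
  MiniHBaseCluster named "cluster" and a commons-logging Log named "LOG":]

  LOG.info("Started " + cluster.startRegionServer());    // name of the new regionserver thread
  cluster.stopRegionServer(0);
  LOG.info(cluster.waitOnRegionServer(0) + " is down");  // name of the server that stopped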

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java?rev=575928&r1=575927&r2=575928&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java Sat Sep 15 08:14:53 2007
@@ -1,3 +1,22 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java?rev=575928&r1=575927&r2=575928&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java Sat Sep 15 08:14:53 2007
@@ -22,6 +22,8 @@
 import java.io.IOException;
 import java.util.TreeMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -30,6 +32,7 @@
  * Tests region server failover when a region server exits.
  */
 public class TestCleanRegionServerExit extends HBaseClusterTestCase {
+  private final Log LOG = LogFactory.getLog(this.getClass());
   private HTable table;
 
   /** constructor */
@@ -65,14 +68,13 @@
     table.commit(lockid);
     // Start up a new region server to take over serving of root and meta
     // after we shut down the current meta/root host.
-    this.cluster.startRegionServer();
+    LOG.info("Started " + this.cluster.startRegionServer());
     // Now shutdown the region server and wait for it to go down.
     this.cluster.stopRegionServer(0);
-    this.cluster.waitOnRegionServer(0);
+    LOG.info(this.cluster.waitOnRegionServer(0) + " is down");
     
     // Verify that the client can find the data after the region has been moved
     // to a different server
-
     HScannerInterface scanner =
       table.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
 

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestDFSAbort.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestDFSAbort.java?rev=575928&r1=575927&r2=575928&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestDFSAbort.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestDFSAbort.java Sat Sep 15 08:14:53 2007
@@ -19,6 +19,9 @@
  */
 package org.apache.hadoop.hbase;
 
+import junit.framework.TestSuite;
+import junit.textui.TestRunner;
+
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 
@@ -40,10 +43,8 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    
     HTableDescriptor desc = new HTableDescriptor(getName());
     desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY_STR));
-    
     HBaseAdmin admin = new HBaseAdmin(conf);
     admin.createTable(desc);
   }
@@ -52,14 +53,14 @@
    * @throws Exception
    */
   public void testDFSAbort() throws Exception {
-    
     // By now the Mini DFS is running, Mini HBase is running and we have
     // created a table. Now let's yank the rug out from HBase
-    
     cluster.getDFSCluster().shutdown();
-    
     // Now wait for Mini HBase Cluster to shut down
-    
     cluster.join();
+  }
+  
+  public static void main(String[] args) {
+    TestRunner.run(new TestSuite(TestDFSAbort.class));
   }
 }