Posted to commits@hbase.apache.org by la...@apache.org on 2012/12/23 20:34:56 UTC

svn commit: r1425513 [5/7] - in /hbase/branches/0.94-test: ./ bin/ conf/ security/src/main/java/org/apache/hadoop/hbase/ipc/ security/src/main/java/org/apache/hadoop/hbase/security/access/ security/src/test/java/org/apache/hadoop/hbase/security/access/...

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Sun Dec 23 19:34:53 2012
@@ -39,6 +39,7 @@ import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Random;
 import java.util.UUID;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -96,9 +97,7 @@ import org.apache.zookeeper.ZooKeeper;
  * old HBaseTestCase and HBaseClusterTestCase functionality.
  * Create an instance and keep it around testing HBase.  This class is
  * meant to be your one-stop shop for anything you might need testing.  Manages
- * one cluster at a time only. Managed cluster can be an in-process
- * {@link MiniHBaseCluster}, or a deployed cluster of type {@link DistributedHBaseCluster}.
- * Not all methods work with the real cluster.
+ * one cluster at a time only.
  * Depends on log4j being on classpath and
  * hbase-site.xml for logging and test-run configuration.  It does not set
  * logging levels nor make changes to configuration parameters.
@@ -121,7 +120,7 @@ public class HBaseTestingUtility {
   private boolean passedZkCluster = false;
   private MiniDFSCluster dfsCluster = null;
 
-  private HBaseCluster hbaseCluster = null;
+  private MiniHBaseCluster hbaseCluster = null;
   private MiniMRCluster mrCluster = null;
 
   // Directory where we put the data for this instance of HBaseTestingUtility
@@ -214,10 +213,6 @@ public class HBaseTestingUtility {
     return this.conf;
   }
 
-  public void setHBaseCluster(HBaseCluster hbaseCluster) {
-    this.hbaseCluster = hbaseCluster;
-  }
-
   /**
    * @return Where to write test data on local filesystem; usually
    * {@link #DEFAULT_BASE_TEST_DIRECTORY}
@@ -655,7 +650,7 @@ public class HBaseTestingUtility {
 
     getHBaseAdmin(); // create immediately the hbaseAdmin
     LOG.info("Minicluster is up");
-    return (MiniHBaseCluster)this.hbaseCluster;
+    return this.hbaseCluster;
   }
 
   /**
@@ -683,11 +678,7 @@ public class HBaseTestingUtility {
    * @see #startMiniCluster()
    */
   public MiniHBaseCluster getMiniHBaseCluster() {
-    if (this.hbaseCluster instanceof MiniHBaseCluster) {
-      return (MiniHBaseCluster)this.hbaseCluster;
-    }
-    throw new RuntimeException(hbaseCluster + " not an instance of " +
-                               MiniHBaseCluster.class.getName());
+    return this.hbaseCluster;
   }
 
   /**
@@ -730,7 +721,7 @@ public class HBaseTestingUtility {
     if (this.hbaseCluster != null) {
       this.hbaseCluster.shutdown();
       // Wait till hbase is down before going on to shutdown zk.
-      this.hbaseCluster.waitUntilShutDown();
+      this.hbaseCluster.join();
       this.hbaseCluster = null;
     }
   }
@@ -768,7 +759,7 @@ public class HBaseTestingUtility {
    * @throws IOException
    */
   public void flush() throws IOException {
-    getMiniHBaseCluster().flushcache();
+    this.hbaseCluster.flushcache();
   }
 
   /**
@@ -776,7 +767,7 @@ public class HBaseTestingUtility {
    * @throws IOException
    */
   public void flush(byte [] tableName) throws IOException {
-    getMiniHBaseCluster().flushcache(tableName);
+    this.hbaseCluster.flushcache(tableName);
   }
 
   /**
@@ -784,7 +775,7 @@ public class HBaseTestingUtility {
    * @throws IOException
    */
   public void compact(boolean major) throws IOException {
-    getMiniHBaseCluster().compact(major);
+    this.hbaseCluster.compact(major);
   }
 
   /**
@@ -792,7 +783,7 @@ public class HBaseTestingUtility {
    * @throws IOException
    */
   public void compact(byte [] tableName, boolean major) throws IOException {
-    getMiniHBaseCluster().compact(tableName, major);
+    this.hbaseCluster.compact(tableName, major);
   }
 
 
@@ -1012,37 +1003,6 @@ public class HBaseTestingUtility {
     t.flushCommits();
     return rowCount;
   }
-
-  /**
-   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
-   * @param t Table
-   * @param f Array of Families to load
-   * @return Count of rows loaded.
-   * @throws IOException
-   */
-  public int loadTable(final HTable t, final byte[][] f) throws IOException {
-    t.setAutoFlush(false);
-    byte[] k = new byte[3];
-    int rowCount = 0;
-    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
-      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
-        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
-          k[0] = b1;
-          k[1] = b2;
-          k[2] = b3;
-          Put put = new Put(k);
-          for (int i = 0; i < f.length; i++) {
-            put.add(f[i], null, k);
-          }
-          t.put(put);
-          rowCount++;
-        }
-      }
-    }
-    t.flushCommits();
-    return rowCount;
-  }
-
   /**
    * Load region with rows from 'aaa' to 'zzz'.
    * @param r Region
@@ -1109,7 +1069,7 @@ public class HBaseTestingUtility {
    */
   public int createMultiRegions(HTable table, byte[] columnFamily)
   throws IOException {
-    return createMultiRegions(table, columnFamily, true);
+    return createMultiRegions(getConfiguration(), table, columnFamily);
   }
 
   public static final byte[][] KEYS = {
@@ -1126,16 +1086,16 @@ public class HBaseTestingUtility {
 
   /**
    * Creates many regions names "aaa" to "zzz".
-   *
+   * @param c Configuration to use.
    * @param table  The table to use for the data.
    * @param columnFamily  The family to insert the data into.
-   * @param cleanupFS  True if a previous region should be remove from the FS  
    * @return count of regions created.
    * @throws IOException When creating the regions fails.
    */
-  public int createMultiRegions(HTable table, byte[] columnFamily, boolean cleanupFS)
+  public int createMultiRegions(final Configuration c, final HTable table,
+      final byte[] columnFamily)
   throws IOException {
-    return createMultiRegions(getConfiguration(), table, columnFamily, KEYS, cleanupFS);
+    return createMultiRegions(c, table, columnFamily, KEYS);
   }
 
   /**
@@ -1163,12 +1123,7 @@ public class HBaseTestingUtility {
   }
 
   public int createMultiRegions(final Configuration c, final HTable table,
-      final byte[] columnFamily, byte [][] startKeys) throws IOException {
-    return createMultiRegions(c, table, columnFamily, startKeys, true);
-  }
-  
-  public int createMultiRegions(final Configuration c, final HTable table,
-          final byte[] columnFamily, byte [][] startKeys, boolean cleanupFS)
+      final byte[] columnFamily, byte [][] startKeys)
   throws IOException {
     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
     HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
@@ -1182,9 +1137,6 @@ public class HBaseTestingUtility {
     // and end key. Adding the custom regions below adds those blindly,
     // including the new start region from empty to "bbb". lg
     List<byte[]> rows = getMetaTableRows(htd.getName());
-    String regionToDeleteInFS = table
-        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
-        .getRegionInfo().getEncodedName();
     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
     // add custom ones
     int count = 0;
@@ -1206,22 +1158,13 @@ public class HBaseTestingUtility {
         Bytes.toStringBinary(row));
       meta.delete(new Delete(row));
     }
-    if (cleanupFS) {
-      // see HBASE-7417 - this confused TestReplication
-      // remove the "old" region from FS
-      Path tableDir = new Path(getDefaultRootDirPath().toString()
-          + System.getProperty("file.separator") + htd.getNameAsString()
-          + System.getProperty("file.separator") + regionToDeleteInFS);
-      getDFSCluster().getFileSystem().delete(tableDir);
-    }
     // flush cache of regions
     HConnection conn = table.getConnection();
     conn.clearRegionCache();
     // assign all the new regions IF table is enabled.
-    HBaseAdmin admin = getHBaseAdmin();
-    if (admin.isTableEnabled(table.getTableName())) {
+    if (getHBaseAdmin().isTableEnabled(table.getTableName())) {
       for(HRegionInfo hri : newRegions) {
-        admin.assign(hri.getRegionName());
+        hbaseCluster.getMaster().assignRegion(hri);
       }
     }
 
@@ -1332,8 +1275,8 @@ public class HBaseTestingUtility {
       Bytes.toString(tableName));
     byte [] firstrow = metaRows.get(0);
     LOG.debug("FirstRow=" + Bytes.toString(firstrow));
-    int index = getMiniHBaseCluster().getServerWith(firstrow);
-    return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
+    int index = hbaseCluster.getServerWith(firstrow);
+    return hbaseCluster.getRegionServerThreads().get(index).getRegionServer();
   }
 
   /**
@@ -1414,7 +1357,7 @@ public class HBaseTestingUtility {
    * @throws Exception
    */
   public void expireMasterSession() throws Exception {
-    HMaster master = getMiniHBaseCluster().getMaster();
+    HMaster master = hbaseCluster.getMaster();
     expireSession(master.getZooKeeper(), false);
   }
 
@@ -1424,7 +1367,7 @@ public class HBaseTestingUtility {
    * @throws Exception
    */
   public void expireRegionServerSession(int index) throws Exception {
-    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
+    HRegionServer rs = hbaseCluster.getRegionServer(index);
     expireSession(rs.getZooKeeper(), false);
     decrementMinRegionServerCount();
   }
@@ -1498,27 +1441,13 @@ public class HBaseTestingUtility {
     }
   }
 
+
   /**
-   * Get the Mini HBase cluster.
+   * Get the HBase cluster.
    *
    * @return hbase cluster
-   * @see #getHBaseClusterInterface()
    */
   public MiniHBaseCluster getHBaseCluster() {
-    return getMiniHBaseCluster();
-  }
-
-  /**
-   * Returns the HBaseCluster instance.
-   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
-   * tests referring this should not assume that the cluster is a mini cluster or a
-   * distributed one. If the test only works on a mini cluster, then specific
-   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
-   * need to type-cast.
-   */
-  public HBaseCluster getHBaseClusterInterface() {
-    //implementation note: we should rename this method as #getHBaseCluster(),
-    //but this would require refactoring 90+ calls.
     return hbaseCluster;
   }
 
@@ -1680,8 +1609,8 @@ public class HBaseTestingUtility {
   public boolean ensureSomeRegionServersAvailable(final int num)
       throws IOException {
     boolean startedServer = false;
-    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
-    for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
+
+    for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i){
       LOG.info("Started new server=" + hbaseCluster.startRegionServer());
       startedServer = true;
     }
@@ -1703,12 +1632,12 @@ public class HBaseTestingUtility {
     boolean startedServer = ensureSomeRegionServersAvailable(num);
 
     for (JVMClusterUtil.RegionServerThread rst :
-      getMiniHBaseCluster().getRegionServerThreads()) {
+      hbaseCluster.getRegionServerThreads()) {
 
       HRegionServer hrs = rst.getRegionServer();
       if (hrs.isStopping() || hrs.isStopped()) {
         LOG.info("A region server is stopped or stopping:"+hrs);
-        LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
+        LOG.info("Started new server=" + hbaseCluster.startRegionServer());
         startedServer = true;
       }
     }
@@ -1978,9 +1907,7 @@ public class HBaseTestingUtility {
         Bytes.toBytes(String.format(keyFormat, splitStartKey)),
         Bytes.toBytes(String.format(keyFormat, splitEndKey)),
         numRegions);
-    if (hbaseCluster != null) {
-      getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
-    }
+    hbaseCluster.flushcache(HConstants.META_TABLE_NAME);
 
     for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
       for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
@@ -2015,9 +1942,7 @@ public class HBaseTestingUtility {
       }
       LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
       table.flushCommits();
-      if (hbaseCluster != null) {
-        getMiniHBaseCluster().flushcache(tableNameBytes);
-      }
+      hbaseCluster.flushcache(tableNameBytes);
     }
 
     return table;
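
For context on the class changed above: HBaseTestingUtility is the test harness's single entry point for an in-process cluster, and after this revert it manages only a MiniHBaseCluster. A minimal usage sketch, not part of the commit, using only methods that appear in the 0.94 test code above:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.MiniHBaseCluster;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Starts ZooKeeper, DFS and HBase mini clusters in this JVM.
        MiniHBaseCluster cluster = util.startMiniCluster();
        try {
          System.out.println("region servers: " + cluster.getRegionServerThreads().size());
          // Create a table with one column family, load rows 'aaa'..'zzz', then flush.
          HTable table = util.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
          util.loadTable(table, Bytes.toBytes("f"));
          util.flush();
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }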

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/LargeTests.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/LargeTests.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/LargeTests.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/LargeTests.java Sun Dec 23 19:34:53 2012
@@ -33,7 +33,6 @@ package org.apache.hadoop.hbase;
  *
  * @see SmallTests
  * @see MediumTests
- * @see IntegrationTests
  */
 public interface LargeTests {
 }
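
SmallTests, MediumTests and LargeTests are empty JUnit category markers; the @see IntegrationTests cross-reference disappears here (and in MediumTests.java and SmallTests.java below) along with the rest of the pluggable-cluster infrastructure this commit removes. A test declares its bucket with JUnit's @Category annotation, the same pattern used by the test classes later in this diff; a minimal illustration, not part of the commit:

    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    import org.apache.hadoop.hbase.SmallTests;

    @Category(SmallTests.class)   // the build selects which tests to run by this category
    public class ExampleSmallTest {
      @Test
      public void runsQuicklyWithoutACluster() {
        // fast, cluster-free assertions go here
      }
    }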

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/MediumTests.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/MediumTests.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/MediumTests.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/MediumTests.java Sun Dec 23 19:34:53 2012
@@ -32,7 +32,6 @@ package org.apache.hadoop.hbase;
  *
  * @see SmallTests
  * @see LargeTests
- * @see IntegrationTests
  */
 public interface MediumTests {
 }

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java Sun Dec 23 19:34:53 2012
@@ -28,8 +28,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.ipc.HMasterInterface;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -37,8 +35,6 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
-import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
-import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.io.MapWritable;
 
@@ -48,8 +44,9 @@ import org.apache.hadoop.io.MapWritable;
  * if we are running on DistributedFilesystem, create a FileSystem instance
  * each and will close down their instance on the way out.
  */
-public class MiniHBaseCluster extends HBaseCluster {
+public class MiniHBaseCluster {
   static final Log LOG = LogFactory.getLog(MiniHBaseCluster.class.getName());
+  private Configuration conf;
   public LocalHBaseCluster hbaseCluster;
   private static int index;
 
@@ -72,19 +69,11 @@ public class MiniHBaseCluster extends HB
    * @throws IOException
    */
   public MiniHBaseCluster(Configuration conf, int numMasters,
-                             int numRegionServers)
-      throws IOException, InterruptedException {
-    this(conf, numMasters, numRegionServers, null, null);
-  }
-
-  public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers,
-         Class<? extends HMaster> masterClass,
-         Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
-      throws IOException, InterruptedException {
-    super(conf);
+      int numRegionServers)
+  throws IOException, InterruptedException {
+    this.conf = conf;
     conf.set(HConstants.MASTER_PORT, "0");
-    init(numMasters, numRegionServers, masterClass, regionserverClass);
-    this.initialClusterStatus = getClusterStatus();
+    init(numMasters, numRegionServers);
   }
 
   public Configuration getConfiguration() {
@@ -189,21 +178,12 @@ public class MiniHBaseCluster extends HB
     }
   }
 
-  private void init(final int nMasterNodes, final int nRegionNodes,
-          Class<? extends HMaster> masterClass,
-          Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
+  private void init(final int nMasterNodes, final int nRegionNodes)
   throws IOException, InterruptedException {
     try {
-      if (masterClass == null){
-       masterClass =  HMaster.class;
-      }
-      if (regionserverClass == null){
-       regionserverClass = MiniHBaseCluster.MiniHBaseClusterRegionServer.class;
-      }
-
       // start up a LocalHBaseCluster
       hbaseCluster = new LocalHBaseCluster(conf, nMasterNodes, 0,
-         masterClass, regionserverClass);
+        HMaster.class, MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
 
       // manually add the regionservers as other users
       for (int i=0; i<nRegionNodes; i++) {
@@ -224,54 +204,6 @@ public class MiniHBaseCluster extends HB
     }
   }
 
-  @Override
-  public void startRegionServer(String hostname) throws IOException {
-    this.startRegionServer();
-  }
-
-  @Override
-  public void killRegionServer(ServerName serverName) throws IOException {
-    HRegionServer server = getRegionServer(getRegionServerIndex(serverName));
-    if (server instanceof MiniHBaseClusterRegionServer) {
-      LOG.info("Killing " + server.toString());
-      ((MiniHBaseClusterRegionServer) server).kill();
-    } else {
-      abortRegionServer(getRegionServerIndex(serverName));
-    }
-  }
-
-  @Override
-  public void stopRegionServer(ServerName serverName) throws IOException {
-    stopRegionServer(getRegionServerIndex(serverName));
-  }
-
-  @Override
-  public void waitForRegionServerToStop(ServerName serverName, long timeout) throws IOException {
-    //ignore timeout for now
-    waitOnRegionServer(getRegionServerIndex(serverName));
-  }
-
-  @Override
-  public void startMaster(String hostname) throws IOException {
-    this.startMaster();
-  }
-
-  @Override
-  public void killMaster(ServerName serverName) throws IOException {
-    abortMaster(getMasterIndex(serverName));
-  }
-
-  @Override
-  public void stopMaster(ServerName serverName) throws IOException {
-    stopMaster(getMasterIndex(serverName));
-  }
-
-  @Override
-  public void waitForMasterToStop(ServerName serverName, long timeout) throws IOException {
-    //ignore timeout for now
-    waitOnMaster(getMasterIndex(serverName));
-  }
-
   /**
    * Starts a region server thread running
    *
@@ -367,11 +299,6 @@ public class MiniHBaseCluster extends HB
     return t;
   }
 
-  @Override
-  public HMasterInterface getMasterAdmin() {
-    return this.hbaseCluster.getActiveMaster();
-  }
-
   /**
    * Returns the current active master, if available.
    * @return the active HMaster, null if none is active.
@@ -446,18 +373,15 @@ public class MiniHBaseCluster extends HB
    *         masters left.
    * @throws InterruptedException
    */
-  public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
+  public boolean waitForActiveAndReadyMaster() throws InterruptedException {
     List<JVMClusterUtil.MasterThread> mts;
-    long start = System.currentTimeMillis();
-    while (!(mts = getMasterThreads()).isEmpty()
-        && (System.currentTimeMillis() - start) < timeout) {
+    while (!(mts = getMasterThreads()).isEmpty()) {
       for (JVMClusterUtil.MasterThread mt : mts) {
         if (mt.getMaster().isActiveMaster() && mt.getMaster().isInitialized()) {
           return true;
         }
       }
-
-      Threads.sleep(100);
+      Thread.sleep(100);
     }
     return false;
   }
@@ -494,16 +418,6 @@ public class MiniHBaseCluster extends HB
     HConnectionManager.deleteAllConnections(false);
   }
 
-  @Override
-  public void close() throws IOException {
-  }
-
-  @Override
-  public ClusterStatus getClusterStatus() throws IOException {
-    HMaster master = getMaster();
-    return master == null ? null : master.getClusterStatus();
-  }
-
   /**
    * Call flushCache on all regions on all participating regionservers.
    * @throws IOException
@@ -626,15 +540,6 @@ public class MiniHBaseCluster extends HB
     return index;
   }
 
-  @Override
-  public ServerName getServerHoldingRegion(byte[] regionName) throws IOException {
-    int index = getServerWith(regionName);
-    if (index < 0) {
-      return null;
-    }
-    return getRegionServer(index).getServerName();
-  }
-
   /**
    * Counts the total numbers of regions being served by the currently online
    * region servers by asking each how many regions they have.  Does not look
@@ -648,30 +553,4 @@ public class MiniHBaseCluster extends HB
     }
     return count;
   }
-
-  @Override
-  public void waitUntilShutDown() {
-    this.hbaseCluster.join();
-  }
-
-  protected int getRegionServerIndex(ServerName serverName) {
-    //we have a small number of region servers, this should be fine for now.
-    List<RegionServerThread> servers = getRegionServerThreads();
-    for (int i=0; i < servers.size(); i++) {
-      if (servers.get(i).getRegionServer().getServerName().equals(serverName)) {
-        return i;
-      }
-    }
-    return -1;
-  }
-
-  protected int getMasterIndex(ServerName serverName) {
-    List<MasterThread> masters = getMasterThreads();
-    for (int i = 0; i < masters.size(); i++) {
-      if (masters.get(i).getMaster().getServerName().equals(serverName)) {
-        return i;
-      }
-    }
-    return -1;
-  }
 }
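
With the HBaseCluster superclass and its ServerName-based overrides gone, MiniHBaseCluster is again a plain in-process cluster driven by index-based methods. A hypothetical master-failover fragment from inside a test, using only calls visible elsewhere in this commit (TEST_UTIL is assumed to be an HBaseTestingUtility):

    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    cluster.stopMaster(0, false);     // stop master 0 without aborting it
    cluster.waitOnMaster(0);          // block until that master thread exits
    cluster.startMaster();            // launch a replacement master thread
    // After this change waitForActiveAndReadyMaster() polls with no timeout and
    // declares InterruptedException instead of IOException.
    assertTrue(cluster.waitForActiveAndReadyMaster());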

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/SmallTests.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/SmallTests.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/SmallTests.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/SmallTests.java Sun Dec 23 19:34:53 2012
@@ -29,7 +29,6 @@ package org.apache.hadoop.hbase;
  *
  * @see MediumTests
  * @see LargeTests
- * @see IntegrationTests
  */
 public interface SmallTests {
 }

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java Sun Dec 23 19:34:53 2012
@@ -54,14 +54,13 @@ public class TestDrainingServer {
   private static final byte [] TABLENAME = Bytes.toBytes("t");
   private static final byte [] FAMILY = Bytes.toBytes("f");
   private static final int COUNT_OF_REGIONS = HBaseTestingUtility.KEYS.length;
-  private static final int NB_SLAVES = 5;
 
   /**
    * Spin up a cluster with a bunch of regions on it.
    */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniCluster(NB_SLAVES);
+    TEST_UTIL.startMiniCluster(5);
     TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
     ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
     HTableDescriptor htd = new HTableDescriptor(TABLENAME);
@@ -74,25 +73,14 @@ public class TestDrainingServer {
       createTableDescriptor(fs, FSUtils.getRootDir(TEST_UTIL.getConfiguration()), htd);
     // Assign out the regions we just created.
     HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
-    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
     admin.disableTable(TABLENAME);
     admin.enableTable(TABLENAME);
-    boolean ready = false;
-    while (!ready) {
-      ZKAssign.blockUntilNoRIT(zkw);
-      // Assert that every regionserver has some regions on it, else invoke the balancer.
-      ready = true;
-      for (int i = 0; i < NB_SLAVES; i++) {
-        HRegionServer hrs = cluster.getRegionServer(i);
-        if (hrs.getOnlineRegions().isEmpty()) {
-          ready = false;
-          break;
-        }
-      }
-      if (!ready) {
-        admin.balancer();
-        Thread.sleep(100);
-      }
+    ZKAssign.blockUntilNoRIT(zkw);
+    // Assert that every regionserver has some regions on it.
+    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
+    for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
+      HRegionServer hrs = cluster.getRegionServer(i);
+      Assert.assertFalse(hrs.getOnlineRegions().isEmpty());
     }
   }
 

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java Sun Dec 23 19:34:53 2012
@@ -218,10 +218,9 @@ public class TestRegionRebalancing {
    * Wait until all the regions are assigned.
    */
   private void waitForAllRegionsAssigned() throws IOException {
-    int totalRegions = HBaseTestingUtility.KEYS.length+2;
-    while (getRegionCount() < totalRegions) {
+    while (getRegionCount() < 22) {
     // while (!cluster.getMaster().allRegionsAssigned()) {
-      LOG.debug("Waiting for there to be "+ totalRegions +" regions, but there are " + getRegionCount() + " right now.");
+      LOG.debug("Waiting for there to be 22 regions, but there are " + getRegionCount() + " right now.");
       try {
         Thread.sleep(200);
       } catch (InterruptedException e) {}

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java Sun Dec 23 19:34:53 2012
@@ -35,7 +35,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -55,7 +55,7 @@ import org.junit.experimental.categories
  * Test that the {@link HFileArchiver} correctly removes all the parts of a region when cleaning up
  * a region
  */
-@Category(MediumTests.class)
+@Category(LargeTests.class)
 public class TestHFileArchiving {
 
   private static final String STRING_TABLE_NAME = "test_table";
@@ -230,70 +230,18 @@ public class TestHFileArchiving {
 
     // then get the current store files
     Path regionDir = region.getRegionDir();
-    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
-
-    // then delete the table so the hfiles get archived
-    UTIL.deleteTable(TABLE_NAME);
-
-    // then get the files in the archive directory.
-    Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration());
-    List<String> archivedFiles = getAllFileNames(fs, archiveDir);
-    Collections.sort(storeFiles);
-    Collections.sort(archivedFiles);
-
-    LOG.debug("Store files:");
+    List<String> storeFiles = getAllFileNames(fs, regionDir);
+    // remove all the non-storefile named files for the region
     for (int i = 0; i < storeFiles.size(); i++) {
-      LOG.debug(i + " - " + storeFiles.get(i));
-    }
-    LOG.debug("Archive files:");
-    for (int i = 0; i < archivedFiles.size(); i++) {
-      LOG.debug(i + " - " + archivedFiles.get(i));
+      String file = storeFiles.get(i);
+      if (file.contains(HRegion.REGIONINFO_FILE) || file.contains("hlog")) {
+        storeFiles.remove(i--);
+      }
     }
-
-    assertTrue("Archived files are missing some of the store files!",
-      archivedFiles.containsAll(storeFiles));
-  }
-
-  /**
-   * Test that the store files are archived when a column family is removed.
-   * @throws Exception
-   */
-  @Test
-  public void testArchiveOnTableFamilyDelete() throws Exception {
-    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
-    // make sure we only have 1 region serving this table
-    assertEquals(1, servingRegions.size());
-    HRegion region = servingRegions.get(0);
-
-    // get the parent RS and monitor
-    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
-    FileSystem fs = hrs.getFileSystem();
-
-    // put some data on the region
-    LOG.debug("-------Loading table");
-    UTIL.loadRegion(region, TEST_FAM);
-
-    // get the hfiles in the region
-    List<HRegion> regions = hrs.getOnlineRegions(TABLE_NAME);
-    assertEquals("More that 1 region for test table.", 1, regions.size());
-
-    region = regions.get(0);
-    // wait for all the compactions to complete
-    region.waitForFlushesAndCompactions();
-
-    // disable table to prevent new updates
-    UTIL.getHBaseAdmin().disableTable(TABLE_NAME);
-    LOG.debug("Disabled table");
-
-    // remove all the files from the archive to get a fair comparison
-    clearArchiveDirectory();
-
-    // then get the current store files
-    Path regionDir = region.getRegionDir();
-    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
+    storeFiles.remove(HRegion.REGIONINFO_FILE);
 
     // then delete the table so the hfiles get archived
-    UTIL.getHBaseAdmin().deleteColumn(TABLE_NAME, TEST_FAM);
+    UTIL.deleteTable(TABLE_NAME);
 
     // then get the files in the archive directory.
     Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration());
@@ -342,18 +290,4 @@ public class TestHFileArchiving {
     }
     return fileNames;
   }
-
-  private List<String> getRegionStoreFiles(final FileSystem fs, final Path regionDir) 
-      throws IOException {
-    List<String> storeFiles = getAllFileNames(fs, regionDir);
-    // remove all the non-storefile named files for the region
-    for (int i = 0; i < storeFiles.size(); i++) {
-      String file = storeFiles.get(i);
-      if (file.contains(HRegion.REGIONINFO_FILE) || file.contains("hlog")) {
-        storeFiles.remove(i--);
-      }
-    }
-    storeFiles.remove(HRegion.REGIONINFO_FILE);
-    return storeFiles;
-  }
-}
+}
\ No newline at end of file

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Sun Dec 23 19:34:53 2012
@@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.HServerAd
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.LargeTests;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -3890,24 +3889,6 @@ public class TestFromClientSide {
   }
 
   /**
-   * creates an HTable for tableName using an unmanaged HConnection.
-   *
-   * @param tableName - table to create
-   * @return the created HTable object
-   * @throws IOException
-   */
-  HTable createUnmangedHConnectionHTable(final byte [] tableName) throws IOException {
-    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
-    HConnection conn = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
-    ExecutorService pool = new ThreadPoolExecutor(1, Integer.MAX_VALUE,
-      60, TimeUnit.SECONDS,
-      new SynchronousQueue<Runnable>(),
-      Threads.newDaemonThreadFactory("test-from-client-table"));
-    ((ThreadPoolExecutor)pool).allowCoreThreadTimeOut(true);
-    return new HTable(tableName, conn, pool);
-  }
-
-  /**
    * simple test that just executes parts of the client
    * API that accept a pre-created HConnction instance
    *
@@ -3916,41 +3897,18 @@ public class TestFromClientSide {
   @Test
   public void testUnmanagedHConnection() throws IOException {
     final byte[] tableName = Bytes.toBytes("testUnmanagedHConnection");
-    HTable t = createUnmangedHConnectionHTable(tableName);
-    HBaseAdmin ha = new HBaseAdmin(t.getConnection());
-    assertTrue(ha.tableExists(tableName));
-    assertTrue(t.get(new Get(ROW)).isEmpty());
-  }
-
-  /**
-   * test of that unmanaged HConnections are able to reconnect
-   * properly (see HBASE-5058)
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testUnmanagedHConnectionReconnect() throws Exception {
-    final byte[] tableName = Bytes.toBytes("testUnmanagedHConnectionReconnect");
-    HTable t = createUnmangedHConnectionHTable(tableName);
-    HConnection conn = t.getConnection();
+    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
+    HConnection conn = HConnectionManager.createConnection(TEST_UTIL
+        .getConfiguration());
+    ExecutorService pool = new ThreadPoolExecutor(1, Integer.MAX_VALUE,
+        60, TimeUnit.SECONDS,
+        new SynchronousQueue<Runnable>(),
+        Threads.newDaemonThreadFactory("test-from-client-table"));
+    ((ThreadPoolExecutor)pool).allowCoreThreadTimeOut(true);
+    HTable t = new HTable(tableName, conn, pool);
     HBaseAdmin ha = new HBaseAdmin(conn);
     assertTrue(ha.tableExists(tableName));
     assertTrue(t.get(new Get(ROW)).isEmpty());
-
-    // stop the master
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-    cluster.stopMaster(0, false);
-    cluster.waitOnMaster(0);
-
-    // start up a new master
-    cluster.startMaster();
-    assertTrue(cluster.waitForActiveAndReadyMaster());
-
-    // test that the same unmanaged connection works with a new
-    // HBaseAdmin and can connect to the new master;
-    HBaseAdmin newAdmin = new HBaseAdmin(conn);
-    assertTrue(newAdmin.tableExists(tableName));
-    assert(newAdmin.getClusterStatus().getServersSize() == SLAVES);
   }
 
   @Test

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java Sun Dec 23 19:34:53 2012
@@ -60,7 +60,6 @@ public class TestClassLoading {
   static final String cpName4 = "TestCP4";
   static final String cpName5 = "TestCP5";
   static final String cpName6 = "TestCP6";
-  static final String cpNameInvalid = "TestCPInvalid";
 
   private static Class<?> regionCoprocessor1 = ColumnAggregationEndpoint.class;
   private static Class<?> regionCoprocessor2 = GenericEndpoint.class;
@@ -69,6 +68,14 @@ public class TestClassLoading {
 
   private static final String[] regionServerSystemCoprocessors =
       new String[]{
+      regionCoprocessor1.getSimpleName(),
+      regionServerCoprocessor.getSimpleName()
+  };
+
+  private static final String[] regionServerSystemAndUserCoprocessors =
+      new String[] {
+      regionCoprocessor1.getSimpleName(),
+      regionCoprocessor2.getSimpleName(),
       regionServerCoprocessor.getSimpleName()
   };
 
@@ -200,18 +207,16 @@ public class TestClassLoading {
       new Path(fs.getUri().toString() + Path.SEPARATOR));
     String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR +
       jarFile1.getName();
-    Path pathOnHDFS1 = new Path(jarFileOnHDFS1);
     assertTrue("Copy jar file to HDFS failed.",
-      fs.exists(pathOnHDFS1));
+      fs.exists(new Path(jarFileOnHDFS1)));
     LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1);
 
     fs.copyFromLocalFile(new Path(jarFile2.getPath()),
         new Path(fs.getUri().toString() + Path.SEPARATOR));
     String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR +
       jarFile2.getName();
-    Path pathOnHDFS2 = new Path(jarFileOnHDFS2);
     assertTrue("Copy jar file to HDFS failed.",
-      fs.exists(pathOnHDFS2));
+      fs.exists(new Path(jarFileOnHDFS2)));
     LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2);
 
     // create a table that references the coprocessors
@@ -223,78 +228,41 @@ public class TestClassLoading {
       // with configuration values
     htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + cpName2 +
       "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
-    // same jar but invalid class name (should fail to load this class)
-    htd.setValue("COPROCESSOR$3", jarFileOnHDFS2.toString() + "|" + cpNameInvalid +
-      "|" + Coprocessor.PRIORITY_USER);
     HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
     if (admin.tableExists(tableName)) {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
     }
-    CoprocessorHost.classLoadersCache.clear();
-    byte[] startKey = {10, 63};
-    byte[] endKey = {12, 43};
-    admin.createTable(htd, startKey, endKey, 4);
+    admin.createTable(htd);
     waitForTable(htd.getName());
 
     // verify that the coprocessors were loaded
-    boolean foundTableRegion=false;
-    boolean found_invalid = true, found1 = true, found2 = true, found2_k1 = true,
-        found2_k2 = true, found2_k3 = true;
-    Map<HRegion, Set<ClassLoader>> regionsActiveClassLoaders =
-        new HashMap<HRegion, Set<ClassLoader>>();
+    boolean found1 = false, found2 = false, found2_k1 = false,
+        found2_k2 = false, found2_k3 = false;
     MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
     for (HRegion region:
         hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
       if (region.getRegionNameAsString().startsWith(tableName)) {
-        foundTableRegion = true;
         CoprocessorEnvironment env;
         env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
-        found1 = found1 && (env != null);
+        if (env != null) {
+          found1 = true;
+        }
         env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
-        found2 = found2 && (env != null);
         if (env != null) {
+          found2 = true;
           Configuration conf = env.getConfiguration();
-          found2_k1 = found2_k1 && (conf.get("k1") != null);
-          found2_k2 = found2_k2 && (conf.get("k2") != null);
-          found2_k3 = found2_k3 && (conf.get("k3") != null);
-        } else {
-          found2_k1 = found2_k2 = found2_k3 = false;
+          found2_k1 = conf.get("k1") != null;
+          found2_k2 = conf.get("k2") != null;
+          found2_k3 = conf.get("k3") != null;
         }
-        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpNameInvalid);
-        found_invalid = found_invalid && (env != null);
-
-        regionsActiveClassLoaders
-            .put(region, ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders());
       }
     }
-
-    assertTrue("No region was found for table " + tableName, foundTableRegion);
     assertTrue("Class " + cpName1 + " was missing on a region", found1);
     assertTrue("Class " + cpName2 + " was missing on a region", found2);
-    //an invalid CP class name is defined for this table, validate that it is not loaded
-    assertFalse("Class " + cpNameInvalid + " was found on a region", found_invalid);
     assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
     assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
     assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
-    // check if CP classloaders are cached
-    assertTrue(jarFileOnHDFS1 + " was not cached",
-      CoprocessorHost.classLoadersCache.containsKey(pathOnHDFS1));
-    assertTrue(jarFileOnHDFS2 + " was not cached",
-      CoprocessorHost.classLoadersCache.containsKey(pathOnHDFS2));
-    //two external jar used, should be one classloader per jar
-    assertEquals("The number of cached classloaders should be equal to the number" +
-      " of external jar files",
-      2, CoprocessorHost.classLoadersCache.size());
-    //check if region active classloaders are shared across all RS regions
-    Set<ClassLoader> externalClassLoaders = new HashSet<ClassLoader>(
-        CoprocessorHost.classLoadersCache.values());
-    for (Map.Entry<HRegion, Set<ClassLoader>> regionCP : regionsActiveClassLoaders.entrySet()) {
-      assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached."
-            + " ClassLoader Cache:" + externalClassLoaders
-            + " Region ClassLoaders:" + regionCP.getValue(),
-            externalClassLoaders.containsAll(regionCP.getValue()));
-    }
   }
 
   @Test
@@ -456,8 +424,6 @@ public class TestClassLoading {
     File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
 
     byte buffer[] = new byte[BUFFER_SIZE];
-    // TODO: code here and elsewhere in this file is duplicated w/TestClassFinder.
-    //       Some refactoring may be in order...
     // Open archive file
     FileOutputStream stream = new FileOutputStream(outerJarFile);
     JarOutputStream out = new JarOutputStream(stream, new Manifest());
@@ -467,7 +433,7 @@ public class TestClassLoading {
       JarEntry jarAdd = new JarEntry("/lib/" + jarFile.getName());
       jarAdd.setTime(jarFile.lastModified());
       out.putNextEntry(jarAdd);
-
+  
       // Write file to archive
       FileInputStream in = new FileInputStream(jarFile);
       while (true) {
@@ -539,12 +505,82 @@ public class TestClassLoading {
 
   @Test
   public void testRegionServerCoprocessorsReported() throws Exception {
-    // This was a test for HBASE-4070.
-    // We are removing coprocessors from region load in HBASE-5258.
-    // Therefore, this test now only checks system coprocessors.
+    // HBASE 4070: Improve region server metrics to report loaded coprocessors
+    // to master: verify that each regionserver is reporting the correct set of
+    // loaded coprocessors.
+
+    // We rely on the fact that getCoprocessors() will return a sorted
+    // display of the coprocessors' names, so for example, regionCoprocessor1's
+    // name "ColumnAggregationEndpoint" will appear before regionCoprocessor2's
+    // name "GenericEndpoint" because "C" is before "G" lexicographically.
 
     HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+
+    // disable all user tables, if any are loaded.
+    for (HTableDescriptor htd: admin.listTables()) {
+      if (!htd.isMetaTable()) {
+        String tableName = htd.getNameAsString();
+        if (admin.isTableEnabled(tableName)) {
+          try {
+            admin.disableTable(htd.getNameAsString());
+          } catch (TableNotEnabledException e) {
+            // ignoring this exception for now : not sure why it's happening.
+          }
+        }
+      }
+    }
+
+    // should only be system coprocessors loaded at this point.
     assertAllRegionServers(regionServerSystemCoprocessors,null);
+
+    // The next two tests enable and disable user tables to see if coprocessor
+    // load reporting changes as coprocessors are loaded and unloaded.
+    //
+
+    // Create a table.
+    // should cause regionCoprocessor2 to be loaded, since we've specified it
+    // for loading on any user table with USER_REGION_COPROCESSOR_CONF_KEY
+    // in setUpBeforeClass().
+    String userTable1 = "userTable1";
+    HTableDescriptor userTD1 = new HTableDescriptor(userTable1);
+    admin.createTable(userTD1);
+    waitForTable(userTD1.getName());
+
+    // table should be enabled now.
+    assertTrue(admin.isTableEnabled(userTable1));
+    assertAllRegionServers(regionServerSystemAndUserCoprocessors, userTable1);
+
+    // unload and make sure we're back to only system coprocessors again.
+    admin.disableTable(userTable1);
+    assertAllRegionServers(regionServerSystemCoprocessors,null);
+
+    // create another table, with its own specified coprocessor.
+    String userTable2 = "userTable2";
+    HTableDescriptor htd2 = new HTableDescriptor(userTable2);
+
+    String userTableCP = "userTableCP";
+    File jarFile1 = buildCoprocessorJar(userTableCP);
+    htd2.addFamily(new HColumnDescriptor("myfamily"));
+    htd2.setValue("COPROCESSOR$1", jarFile1.toString() + "|" + userTableCP +
+      "|" + Coprocessor.PRIORITY_USER);
+    admin.createTable(htd2);
+    waitForTable(htd2.getName());
+    // table should be enabled now.
+    assertTrue(admin.isTableEnabled(userTable2));
+
+    ArrayList<String> existingCPsPlusNew =
+        new ArrayList<String>(Arrays.asList(regionServerSystemAndUserCoprocessors));
+    existingCPsPlusNew.add(userTableCP);
+    String[] existingCPsPlusNewArray = new String[existingCPsPlusNew.size()];
+    assertAllRegionServers(existingCPsPlusNew.toArray(existingCPsPlusNewArray),
+        userTable2);
+
+    admin.disableTable(userTable2);
+    assertTrue(admin.isTableDisabled(userTable2));
+
+    // we should be back to only system coprocessors again.
+    assertAllRegionServers(regionServerSystemCoprocessors, null);
+
   }
 
   /**
@@ -591,7 +627,7 @@ public class TestClassLoading {
       }
       boolean any_failed = false;
       for(Map.Entry<ServerName,HServerLoad> server: servers.entrySet()) {
-        actualCoprocessors = server.getValue().getRsCoprocessors();
+        actualCoprocessors = server.getValue().getCoprocessors();
         if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) {
           LOG.debug("failed comparison: actual: " +
               Arrays.toString(actualCoprocessors) +
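
TestClassLoading exercises the table-descriptor convention for attaching a coprocessor: the value of a COPROCESSOR$N attribute is the jar path, class name and priority joined by '|', optionally followed by key=value configuration. Reduced to the attachment step, and with jarFileOnHDFS, "TestCP1" and admin as placeholders mirroring names in the test, it looks roughly like:

    HTableDescriptor htd = new HTableDescriptor("myTable");
    htd.addFamily(new HColumnDescriptor("myfamily"));
    // COPROCESSOR$1 = <jar path> | <coprocessor class> | <priority> [| k1=v1,k2=v2,...]
    htd.setValue("COPROCESSOR$1",
        jarFileOnHDFS + "|" + "TestCP1" + "|" + Coprocessor.PRIORITY_USER);
    admin.createTable(htd);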

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java Sun Dec 23 19:34:53 2012
@@ -85,18 +85,6 @@ public class TestCoprocessorInterface ex
     }
 
     @Override
-    public boolean nextRaw(List<KeyValue> result, int limit, String metric) 
-        throws IOException {
-      return delegate.nextRaw(result, limit, metric);
-    }
-
-    @Override
-    public boolean nextRaw(List<KeyValue> result, String metric) 
-        throws IOException {
-      return delegate.nextRaw(result, metric);
-    }
-
-    @Override
     public void close() throws IOException {
       delegate.close();
     }
@@ -116,10 +104,6 @@ public class TestCoprocessorInterface ex
       return false;
     }
 
-    @Override
-    public long getMvccReadPoint() {
-      return delegate.getMvccReadPoint();
-    }
   }
 
   public static class CoprocessorImpl extends BaseRegionObserver {

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java Sun Dec 23 19:34:53 2012
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.master.HM
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -542,7 +541,7 @@ public class TestMasterObserver {
 
     // modify table
     htd.setMaxFileSize(512 * 1024 * 1024);
-    modifyTableSync(admin, TEST_TABLE, htd);
+    admin.modifyTable(TEST_TABLE, htd);
     // preModifyTable can't bypass default action.
     assertTrue("Test table should have been modified",
       cp.wasModifyTableCalled());
@@ -585,7 +584,7 @@ public class TestMasterObserver {
 
     // modify table
     htd.setMaxFileSize(512 * 1024 * 1024);
-    modifyTableSync(admin, TEST_TABLE, htd);
+    admin.modifyTable(TEST_TABLE, htd);
     assertTrue("Test table should have been modified",
         cp.wasModifyTableCalled());
 
@@ -630,19 +629,6 @@ public class TestMasterObserver {
         cp.wasDeleteTableCalled());
   }
 
-  private void modifyTableSync(HBaseAdmin admin, byte[] tableName, HTableDescriptor htd)
-      throws IOException {
-    admin.modifyTable(tableName, htd);
-    //wait until modify table finishes
-    for (int t = 0; t < 100; t++) { //10 sec timeout
-      HTableDescriptor td = admin.getTableDescriptor(htd.getName());
-      if (td.equals(htd)) {
-        break;
-      }
-      Threads.sleep(100);
-    }
-  }
-
   @Test
   public void testRegionTransitionOperations() throws Exception {
     MiniHBaseCluster cluster = UTIL.getHBaseCluster();

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java Sun Dec 23 19:34:53 2012
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.Compressor;
 
 import static org.apache.hadoop.hbase.io.hfile.Compression.Algorithm.*;
@@ -718,8 +719,7 @@ public class TestHFileBlock {
       }
       BlockType bt = BlockType.values()[blockTypeOrdinal];
       DataOutputStream dos = hbw.startWriting(bt);
-      int size = rand.nextInt(500);
-      for (int j = 0; j < size; ++j) {
+      for (int j = 0; j < rand.nextInt(500); ++j) {
         // This might compress well.
         dos.writeShort(i + 1);
         dos.writeInt(j + 1);

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/ipc/TestPBOnWritableRpc.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/ipc/TestPBOnWritableRpc.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/ipc/TestPBOnWritableRpc.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/ipc/TestPBOnWritableRpc.java Sun Dec 23 19:34:53 2012
@@ -25,17 +25,14 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
 import com.google.protobuf.DescriptorProtos;
 import com.google.protobuf.DescriptorProtos.EnumDescriptorProto;
 
 /** Unit tests to test PB-based types on WritableRpcEngine. */
-@Category(MediumTests.class)
 public class TestPBOnWritableRpc {
 
   private static Configuration conf = new Configuration();

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java Sun Dec 23 19:34:53 2012
@@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerLoad;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.client.Get;
@@ -61,12 +61,12 @@ import org.apache.hadoop.hbase.util.Thre
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZKTable.TableState;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKTable.TableState;
 import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -74,7 +74,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
-import org.mockito.internal.util.reflection.Whitebox;
 
 import com.google.protobuf.ServiceException;
 
@@ -82,7 +81,7 @@ import com.google.protobuf.ServiceExcept
 /**
  * Test {@link AssignmentManager}
  */
-@Category(MediumTests.class)
+@Category(SmallTests.class)
 public class TestAssignmentManager {
   private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
   private static final ServerName SERVERNAME_A =
@@ -92,10 +91,6 @@ public class TestAssignmentManager {
   private static final HRegionInfo REGIONINFO =
     new HRegionInfo(Bytes.toBytes("t"),
       HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
-  private static final HRegionInfo REGIONINFO_2 = new HRegionInfo(Bytes.toBytes("t"),
-      Bytes.toBytes("a"),Bytes.toBytes( "b"));
-  private static int assignmentCount;
-  private static boolean enabling = false;  
 
   // Mocked objects or; get redone for each test.
   private Server server;
@@ -162,7 +157,7 @@ public class TestAssignmentManager {
 
   /**
    * Test a balance going on at same time as a master failover
-   *
+   * 
    * @throws IOException
    * @throws KeeperException
    * @throws InterruptedException
@@ -184,8 +179,10 @@ public class TestAssignmentManager {
       int versionid =
         ZKAssign.transitionNodeClosed(this.watcher, REGIONINFO, SERVERNAME_A, -1);
       assertNotSame(versionid, -1);
-      Mocking.waitForRegionPendingOpenInRIT(am, REGIONINFO.getEncodedName());
-
+      while (!ZKAssign.verifyRegionState(this.watcher, REGIONINFO,
+          EventType.M_ZK_REGION_OFFLINE)) {
+        Threads.sleep(1);
+      }
       // Get current versionid else will fail on transition from OFFLINE to
       // OPENING below
       versionid = ZKAssign.getVersion(this.watcher, REGIONINFO);
@@ -226,8 +223,10 @@ public class TestAssignmentManager {
         ZKAssign.transitionNodeClosed(this.watcher, REGIONINFO, SERVERNAME_A, -1);
       assertNotSame(versionid, -1);
       am.gate.set(false);
-      Mocking.waitForRegionPendingOpenInRIT(am, REGIONINFO.getEncodedName());
-
+      while (!ZKAssign.verifyRegionState(this.watcher, REGIONINFO,
+          EventType.M_ZK_REGION_OFFLINE)) {
+        Threads.sleep(1);
+      }
       // Get current versionid else will fail on transition from OFFLINE to
       // OPENING below
       versionid = ZKAssign.getVersion(this.watcher, REGIONINFO);
@@ -267,8 +266,10 @@ public class TestAssignmentManager {
       int versionid =
         ZKAssign.transitionNodeClosed(this.watcher, REGIONINFO, SERVERNAME_A, -1);
       assertNotSame(versionid, -1);
-      Mocking.waitForRegionPendingOpenInRIT(am, REGIONINFO.getEncodedName());
-
+      while (!ZKAssign.verifyRegionState(this.watcher, REGIONINFO,
+          EventType.M_ZK_REGION_OFFLINE)) {
+        Threads.sleep(1);
+      }
       am.gate.set(false);
       // Get current versionid else will fail on transition from OFFLINE to
       // OPENING below
@@ -307,11 +308,10 @@ public class TestAssignmentManager {
    * from one server to another mocking regionserver responding over zk.
    * @throws IOException
    * @throws KeeperException
-   * @throws InterruptedException
    */
-  @Test(timeout = 10000)
+  @Test
   public void testBalance()
-  throws IOException, KeeperException, InterruptedException {
+  throws IOException, KeeperException {
     // Create and startup an executor.  This is used by AssignmentManager
     // handling zk callbacks.
     ExecutorService executor = startupMasterExecutor("testBalanceExecutor");
@@ -345,9 +345,11 @@ public class TestAssignmentManager {
       // AM is going to notice above CLOSED and queue up a new assign.  The
       // assign will go to open the region in the new location set by the
       // balancer.  The zk node will be OFFLINE waiting for regionserver to
-      // transition it through OPENING, OPENED.  Wait till we see the RIT
-      // before we proceed.
-      Mocking.waitForRegionPendingOpenInRIT(am, REGIONINFO.getEncodedName());
+      // transition it through OPENING, OPENED.  Wait till we see the OFFLINE
+      // zk node before we proceed.
+      while (!ZKAssign.verifyRegionState(this.watcher, REGIONINFO, EventType.M_ZK_REGION_OFFLINE)) {
+        Threads.sleep(1);
+      }
       // Get current versionid else will fail on transition from OFFLINE to OPENING below
       versionid = ZKAssign.getVersion(this.watcher, REGIONINFO);
       assertNotSame(-1, versionid);
@@ -400,7 +402,7 @@ public class TestAssignmentManager {
 
   /**
    * To test closed region handler to remove rit and delete corresponding znode if region in pending
-   * close or closing while processing shutdown of a region server.(HBASE-5927).
+   * close or closing while processing shutdown of a region server.(HBASE-5927). 
    * @throws KeeperException
    * @throws IOException
    */
@@ -410,7 +412,7 @@ public class TestAssignmentManager {
     testCaseWithPartiallyDisabledState(TableState.DISABLING);
     testCaseWithPartiallyDisabledState(TableState.DISABLED);
   }
-
+  
   /**
    * To test if the split region is removed from RIT if the region was in SPLITTING state
    * but the RS has actually completed the splitting in META but went down. See HBASE-6070
@@ -444,7 +446,7 @@ public class TestAssignmentManager {
     am.regionsInTransition.put(REGIONINFO.getEncodedName(), new RegionState(REGIONINFO,
         State.SPLITTING, System.currentTimeMillis(), SERVERNAME_A));
     am.getZKTable().setEnabledTable(REGIONINFO.getTableNameAsString());
-
+    
     RegionTransitionData data = new RegionTransitionData(EventType.RS_ZK_REGION_SPLITTING,
         REGIONINFO.getRegionName(), SERVERNAME_A);
     String node = ZKAssign.getNodeName(this.watcher, REGIONINFO.getEncodedName());
@@ -452,11 +454,11 @@ public class TestAssignmentManager {
     ZKUtil.createAndWatch(this.watcher, node, data.getBytes());
 
     try {
-
+      
       processServerShutdownHandler(ct, am, regionSplitDone);
       // check znode deleted or not.
       // In both cases the znode should be deleted.
-
+      
       if(regionSplitDone){
         assertTrue("Region state of region in SPLITTING should be removed from rit.",
             am.regionsInTransition.isEmpty());
@@ -499,7 +501,7 @@ public class TestAssignmentManager {
     } else {
       am.getZKTable().setDisabledTable(REGIONINFO.getTableNameAsString());
     }
-
+    
     RegionTransitionData data = new RegionTransitionData(EventType.M_ZK_REGION_CLOSING,
         REGIONINFO.getRegionName(), SERVERNAME_A);
     String node = ZKAssign.getNodeName(this.watcher, REGIONINFO.getEncodedName());
@@ -574,7 +576,7 @@ public class TestAssignmentManager {
    * @param hri Region to serialize into HRegionInfo
    * @return A mocked up Result that fakes a Get on a row in the
    * <code>.META.</code> table.
-   * @throws IOException
+   * @throws IOException 
    */
   private Result getMetaTableRowResult(final HRegionInfo hri,
       final ServerName sn)
@@ -593,13 +595,13 @@ public class TestAssignmentManager {
       Bytes.toBytes(sn.getStartcode())));
     return new Result(kvs);
   }
-
+  
   /**
    * @param sn ServerName to use making startcode and server in meta
    * @param hri Region to serialize into HRegionInfo
    * @return A mocked up Result that fakes a Get on a row in the
    * <code>.META.</code> table.
-   * @throws IOException
+   * @throws IOException 
    */
   private Result getMetaTableRowResultAsSplitRegion(final HRegionInfo hri, final ServerName sn)
       throws IOException {
@@ -661,12 +663,12 @@ public class TestAssignmentManager {
       am.shutdown();
     }
   }
-
+  
   /**
    * Tests the processDeadServersAndRegionsInTransition should not fail with NPE
    * when it failed to get the children. Let's abort the system in this
    * situation
-   * @throws ServiceException
+   * @throws ServiceException 
    */
   @Test(timeout = 5000)
   public void testProcessDeadServersAndRegionsInTransitionShouldNotFailWithNPE()
@@ -706,8 +708,8 @@ public class TestAssignmentManager {
    * @param region region to be created as offline
    * @param serverName server event originates from
    * @return Version of znode created.
-   * @throws KeeperException
-   * @throws IOException
+   * @throws KeeperException 
+   * @throws IOException 
    */
   // Copied from SplitTransaction rather than open the method over there in
   // the regionserver package.
@@ -766,27 +768,14 @@ public class TestAssignmentManager {
     // with an encoded name by doing a Get on .META.
     HRegionInterface ri = Mockito.mock(HRegionInterface.class);
     // Get a meta row result that has region up on SERVERNAME_A for REGIONINFO
-    Result[] result = null;
-    if (enabling) {
-      result = new Result[2];
-      result[0] = getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
-      result[1] = getMetaTableRowResult(REGIONINFO_2, SERVERNAME_A);
-    }
     Result r = getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
     Mockito.when(ri .openScanner((byte[]) Mockito.any(), (Scan) Mockito.any())).
       thenReturn(System.currentTimeMillis());
-   if (enabling) {
-      Mockito.when(ri.next(Mockito.anyLong(), Mockito.anyInt())).thenReturn(result, result, result,
-          (Result[]) null);
-      // If a get, return the above result too for REGIONINFO_2
-      Mockito.when(ri.get((byte[]) Mockito.any(), (Get) Mockito.any())).thenReturn(
-          getMetaTableRowResult(REGIONINFO_2, SERVERNAME_A));
-    } else {
-      // Return good result 'r' first and then return null to indicate end of scan
-      Mockito.when(ri.next(Mockito.anyLong(), Mockito.anyInt())).thenReturn(new Result[] { r });
-      // If a get, return the above result too for REGIONINFO
-      Mockito.when(ri.get((byte[]) Mockito.any(), (Get) Mockito.any())).thenReturn(r);
-    }
+    // Return good result 'r' first and then return null to indicate end of scan
+    Mockito.when(ri.next(Mockito.anyLong(), Mockito.anyInt())).thenReturn(new Result[] { r });
+    // If a get, return the above result too for REGIONINFO
+    Mockito.when(ri.get((byte[]) Mockito.any(), (Get) Mockito.any())).
+      thenReturn(r);
     // Get a connection w/ mocked up common methods.
     HConnection connection = HConnectionTestingUtility.
       getMockedConnectionAndDecorate(HTU.getConfiguration(), ri, SERVERNAME_B,
@@ -800,9 +789,9 @@ public class TestAssignmentManager {
         server, manager, ct, balancer, executor);
     return am;
   }
-
+  
   /**
-   * TestCase verifies that the regionPlan is updated whenever a region fails to open
+   * TestCase verifies that the regionPlan is updated whenever a region fails to open 
    * and the master tries to process RS_ZK_FAILED_OPEN state.(HBASE-5546).
    */
   @Test
@@ -850,18 +839,17 @@ public class TestAssignmentManager {
       assertNotSame("Same region plan should not come", regionPlan, newRegionPlan);
       assertTrue("Destnation servers should be different.", !(regionPlan.getDestination().equals(
         newRegionPlan.getDestination())));
-      Mocking.waitForRegionPendingOpenInRIT(am, REGIONINFO.getEncodedName());
     } finally {
       this.server.getConfiguration().setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
         DefaultLoadBalancer.class, LoadBalancer.class);
       am.shutdown();
     }
   }
-
+  
   /**
    * Test verifies whether assignment is skipped for regions of tables in DISABLING state during
    * clean cluster startup. See HBASE-6281.
-   *
+   * 
    * @throws KeeperException
    * @throws IOException
    * @throws Exception
@@ -904,53 +892,6 @@ public class TestAssignmentManager {
   }
 
   /**
-   * Test verifies whether all the enabling table regions assigned only once during master startup.
-   * 
-   * @throws KeeperException
-   * @throws IOException
-   * @throws Exception
-   */
-  @Test
-  public void testMasterRestartWhenTableInEnabling() throws KeeperException, IOException, Exception {
-    enabling = true;
-    this.server.getConfiguration().setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
-        DefaultLoadBalancer.class, LoadBalancer.class);
-    Map<ServerName, HServerLoad> serverAndLoad = new HashMap<ServerName, HServerLoad>();
-    serverAndLoad.put(SERVERNAME_A, null);
-    Mockito.when(this.serverManager.getOnlineServers()).thenReturn(serverAndLoad);
-    Mockito.when(this.serverManager.isServerOnline(SERVERNAME_B)).thenReturn(false);
-    Mockito.when(this.serverManager.isServerOnline(SERVERNAME_A)).thenReturn(true);
-    HTU.getConfiguration().setInt(HConstants.MASTER_PORT, 0);
-    Server server = new HMaster(HTU.getConfiguration());
-    Whitebox.setInternalState(server, "serverManager", this.serverManager);
-    assignmentCount = 0;
-    AssignmentManagerWithExtrasForTesting am = setUpMockedAssignmentManager(server,
-        this.serverManager);
-    am.regionOnline(new HRegionInfo("t1".getBytes(), HConstants.EMPTY_START_ROW,
-        HConstants.EMPTY_END_ROW), SERVERNAME_A);
-    am.gate.set(false);
-    try {
-      // set table in enabling state.
-      am.getZKTable().setEnablingTable(REGIONINFO.getTableNameAsString());
-      ZKAssign.createNodeOffline(this.watcher, REGIONINFO_2, SERVERNAME_B);
-
-      am.joinCluster();
-      while (!am.getZKTable().isEnabledTable(REGIONINFO.getTableNameAsString())) {
-        Thread.sleep(10);
-      }
-      assertEquals("Number of assignments should be equal.", 2, assignmentCount);
-      assertTrue("Table should be enabled.",
-          am.getZKTable().isEnabledTable(REGIONINFO.getTableNameAsString()));
-    } finally {
-      enabling = false;
-      am.getZKTable().setEnabledTable(REGIONINFO.getTableNameAsString());
-      am.shutdown();
-      ZKAssign.deleteAllNodes(this.watcher);
-      assignmentCount = 0;
-    }
-  }
-
-  /**
    * Mocked load balancer class used in the testcase to make sure that the testcase waits until
    * random assignment is called and the gate variable is set to true.
    */
@@ -967,7 +908,7 @@ public class TestAssignmentManager {
       this.gate.set(true);
       return randomServerName;
     }
-
+    
     @Override
     public Map<ServerName, List<HRegionInfo>> retainAssignment(
         Map<HRegionInfo, ServerName> regions, List<ServerName> servers) {
@@ -1019,13 +960,8 @@ public class TestAssignmentManager {
     @Override
     public void assign(HRegionInfo region, boolean setOfflineInZK, boolean forceNewPlan,
         boolean hijack) {
-      if (enabling) {
-        assignmentCount++;
-        this.regionOnline(region, SERVERNAME_A);
-      } else {
-        assignInvoked = true;
-        super.assign(region, setOfflineInZK, forceNewPlan, hijack);
-      }
+      assignInvoked = true;
+      super.assign(region, setOfflineInZK, forceNewPlan, hijack);
     }
     
     @Override
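
The TestAssignmentManager hunks above replace the Mocking.waitForRegionPendingOpenInRIT helper with raw spin loops on ZKAssign.verifyRegionState that sleep 1 ms between checks. A bounded variant of that polling pattern is sketched below; the helper name, timeout, and exception choice are assumptions for illustration and not part of this patch. Unlike the unbounded loops in the hunks, it fails the test if the expected state never appears instead of spinning until the build's own timeout.

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeUnit;

    /** Illustrative test utility: poll a condition until it holds or a deadline passes. */
    final class WaitUtil {
      static void waitFor(long timeoutMs, long pollMs, Callable<Boolean> condition)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.call()) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("condition not met within " + timeoutMs + " ms");
          }
          TimeUnit.MILLISECONDS.sleep(pollMs);
        }
      }
    }

A test would wrap the ZKAssign.verifyRegionState call in an anonymous Callable<Boolean> and pass it here with, say, a 10 second deadline and a 10 ms poll interval.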

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java Sun Dec 23 19:34:53 2012
@@ -241,11 +241,13 @@ public class TestCatalogJanitor {
       return new TableDescriptors() {
         @Override
         public HTableDescriptor remove(String tablename) throws IOException {
+          // TODO Auto-generated method stub
           return null;
         }
         
         @Override
         public Map<String, HTableDescriptor> getAll() throws IOException {
+          // TODO Auto-generated method stub
           return null;
         }
         
@@ -263,6 +265,8 @@ public class TestCatalogJanitor {
         
         @Override
         public void add(HTableDescriptor htd) throws IOException {
+          // TODO Auto-generated method stub
+          
         }
       };
     }
@@ -281,34 +285,6 @@ public class TestCatalogJanitor {
     public <T extends CoprocessorProtocol> boolean registerProtocol(Class<T> protocol, T handler) {
       return false;
     }
-
-    @Override
-    public void deleteTable(byte[] tableName) throws IOException {
-    }
-
-    @Override
-    public void modifyTable(byte[] tableName, HTableDescriptor descriptor) throws IOException {
-    }
-
-    @Override
-    public void enableTable(byte[] tableName) throws IOException {
-    }
-
-    @Override
-    public void disableTable(byte[] tableName) throws IOException {
-    }
-
-    @Override
-    public void addColumn(byte[] tableName, HColumnDescriptor column) throws IOException {
-    }
-
-    @Override
-    public void modifyColumn(byte[] tableName, HColumnDescriptor descriptor) throws IOException {
-    }
-
-    @Override
-    public void deleteColumn(byte[] tableName, byte[] columnName) throws IOException {
-    }
   }
 
   @Test
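
The TestCatalogJanitor hunk above restores a hand-written stub: empty overrides with auto-generated TODO comments whose only job is to satisfy an interface and return null. As a design note, the same effect is often achieved with a Mockito mock, which returns null/false/0 for any method that is not explicitly stubbed. The interface and names below are hypothetical stand-ins used only to show the shape of that alternative, not the classes in this patch.

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    // Hypothetical service interface standing in for the one stubbed by hand above.
    interface TableDescriptorSource {
      String get(String tableName);
      void add(String descriptor);
    }

    public class MockStubSketch {
      static TableDescriptorSource newStub() {
        // Every unstubbed method already returns null (or false/0), matching the
        // empty hand-written overrides; stub explicitly only where a test cares.
        TableDescriptorSource stub = mock(TableDescriptorSource.class);
        when(stub.get("t")).thenReturn("descriptor-for-t");
        return stub;
      }
    }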

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestMXBean.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestMXBean.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestMXBean.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestMXBean.java Sun Dec 23 19:34:53 2012
@@ -25,14 +25,11 @@ import junit.framework.Assert;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HServerLoad;
-import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
-@Category(MediumTests.class)
 public class TestMXBean {
 
   private static final HBaseTestingUtility TEST_UTIL =

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java Sun Dec 23 19:34:53 2012
@@ -35,18 +35,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.LargeTests;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.executor.EventHandler.EventType;
 import org.apache.hadoop.hbase.executor.RegionTransitionData;
 import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
@@ -55,9 +44,9 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKTable;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -393,7 +382,7 @@ public class TestMasterFailover {
     enabledAndAssignedRegions.add(enabledRegions.remove(0));
     enabledAndAssignedRegions.add(enabledRegions.remove(0));
     enabledAndAssignedRegions.add(closingRegion);
-
+    
     List<HRegionInfo> disabledAndAssignedRegions = new ArrayList<HRegionInfo>();
     disabledAndAssignedRegions.add(disabledRegions.remove(0));
     disabledAndAssignedRegions.add(disabledRegions.remove(0));
@@ -631,18 +620,18 @@ public class TestMasterFailover {
     // Create a ZKW to use in the test
     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
         "unittest", new Abortable() {
-
+          
           @Override
           public void abort(String why, Throwable e) {
             LOG.error("Fatal ZK Error: " + why, e);
             org.junit.Assert.assertFalse("Fatal ZK error", true);
           }
-
+          
           @Override
           public boolean isAborted() {
             return false;
           }
-
+          
     });
 
     // get all the master threads

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java Sun Dec 23 19:34:53 2012
@@ -65,7 +65,6 @@ public class TestSplitLogManager {
   private static boolean stopped = false;
   private SplitLogManager slm;
   private Configuration conf;
-  private int to;
 
   private final static HBaseTestingUtility TEST_UTIL =
     new HBaseTestingUtility();
@@ -106,11 +105,6 @@ public class TestSplitLogManager {
 
     stopped = false;
     resetCounters();
-    to = 4000;
-    conf.setInt("hbase.splitlog.manager.timeout", to);
-    conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);
-    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
-    to = to + 4 * 100;
   }
 
   @After
@@ -200,20 +194,26 @@ public class TestSplitLogManager {
         TaskState.TASK_OWNED.get("dummy-worker"), Ids.OPEN_ACL_UNSAFE,
         CreateMode.PERSISTENT);
 
+    int to = 1000;
+    conf.setInt("hbase.splitlog.manager.timeout", to);
+    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
+    to = to + 2 * 100;
+
+
     slm = new SplitLogManager(zkw, conf, stopper, "dummy-master", null);
     slm.finishInitialization();
     waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, 100);
     Task task = slm.findOrCreateOrphanTask(tasknode);
     assertTrue(task.isOrphan());
-    waitForCounter(tot_mgr_heartbeat, 0, 1, to/2);
+    waitForCounter(tot_mgr_heartbeat, 0, 1, 100);
     assertFalse(task.isUnassigned());
     long curt = System.currentTimeMillis();
     assertTrue((task.last_update <= curt) &&
         (task.last_update > (curt - 1000)));
     LOG.info("waiting for manager to resubmit the orphan task");
-    waitForCounter(tot_mgr_resubmit, 0, 1, to + to/2);
+    waitForCounter(tot_mgr_resubmit, 0, 1, to + 100);
     assertTrue(task.isUnassigned());
-    waitForCounter(tot_mgr_rescan, 0, 1, to + to/2);
+    waitForCounter(tot_mgr_rescan, 0, 1, to + 100);
   }
 
   @Test
@@ -229,12 +229,12 @@ public class TestSplitLogManager {
 
     slm = new SplitLogManager(zkw, conf, stopper, "dummy-master", null);
     slm.finishInitialization();
-    waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to/2);
+    waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, 100);
     Task task = slm.findOrCreateOrphanTask(tasknode);
     assertTrue(task.isOrphan());
     assertTrue(task.isUnassigned());
     // wait for RESCAN node to be created
-    waitForCounter(tot_mgr_rescan, 0, 1, to/2);
+    waitForCounter(tot_mgr_rescan, 0, 1, 500);
     Task task2 = slm.findOrCreateOrphanTask(tasknode);
     assertTrue(task == task2);
     LOG.debug("task = " + task);
@@ -250,6 +250,11 @@ public class TestSplitLogManager {
   public void testMultipleResubmits() throws Exception {
     LOG.info("TestMultipleResbmits - no indefinite resubmissions");
 
+    int to = 1000;
+    conf.setInt("hbase.splitlog.manager.timeout", to);
+    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
+    to = to + 2 * 100;
+
     conf.setInt("hbase.splitlog.max.resubmit", 2);
     slm = new SplitLogManager(zkw, conf, stopper, "dummy-master", null);
     slm.finishInitialization();
@@ -259,19 +264,19 @@ public class TestSplitLogManager {
     int version = ZKUtil.checkExists(zkw, tasknode);
 
     ZKUtil.setData(zkw, tasknode, TaskState.TASK_OWNED.get("worker1"));
-    waitForCounter(tot_mgr_heartbeat, 0, 1, to/2);
-    waitForCounter(tot_mgr_resubmit, 0, 1, to + to/2);
+    waitForCounter(tot_mgr_heartbeat, 0, 1, 1000);
+    waitForCounter(tot_mgr_resubmit, 0, 1, to + 100);
     int version1 = ZKUtil.checkExists(zkw, tasknode);
     assertTrue(version1 > version);
     ZKUtil.setData(zkw, tasknode, TaskState.TASK_OWNED.get("worker2"));
-    waitForCounter(tot_mgr_heartbeat, 1, 2, to/2);
-    waitForCounter(tot_mgr_resubmit, 1, 2, to + to/2);
+    waitForCounter(tot_mgr_heartbeat, 1, 2, 1000);
+    waitForCounter(tot_mgr_resubmit, 1, 2, to + 100);
     int version2 = ZKUtil.checkExists(zkw, tasknode);
     assertTrue(version2 > version1);
     ZKUtil.setData(zkw, tasknode, TaskState.TASK_OWNED.get("worker3"));
-    waitForCounter(tot_mgr_heartbeat, 1, 2, to/2);
-    waitForCounter(tot_mgr_resubmit_threshold_reached, 0, 1, to + to/2);
-    Thread.sleep(to + to/2);
+    waitForCounter(tot_mgr_heartbeat, 1, 2, 1000);
+    waitForCounter(tot_mgr_resubmit_threshold_reached, 0, 1, to + 100);
+    Thread.sleep(to + 100);
     assertEquals(2L, tot_mgr_resubmit.get());
   }
 
@@ -279,6 +284,8 @@ public class TestSplitLogManager {
   public void testRescanCleanup() throws Exception {
     LOG.info("TestRescanCleanup - ensure RESCAN nodes are cleaned up");
 
+    conf.setInt("hbase.splitlog.manager.timeout", 1000);
+    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
     slm = new SplitLogManager(zkw, conf, stopper, "dummy-master", null);
     slm.finishInitialization();
     TaskBatch batch = new TaskBatch();
@@ -287,7 +294,7 @@ public class TestSplitLogManager {
     int version = ZKUtil.checkExists(zkw, tasknode);
 
     ZKUtil.setData(zkw, tasknode, TaskState.TASK_OWNED.get("worker1"));
-    waitForCounter(tot_mgr_heartbeat, 0, 1, to/2);
+    waitForCounter(tot_mgr_heartbeat, 0, 1, 1000);
     waitForCounter(new Expr() {
       @Override
       public long eval() {
@@ -301,7 +308,7 @@ public class TestSplitLogManager {
       assertTrue(Arrays.equals(TaskState.TASK_UNASSIGNED.get("dummy-master"),
           taskstate));
       
-      waitForCounter(tot_mgr_rescan_deleted, 0, 1, to/2);
+      waitForCounter(tot_mgr_rescan_deleted, 0, 1, 1000);
     } else {
       LOG.warn("Could not run test. Lost ZK connection?");
     }
@@ -323,7 +330,7 @@ public class TestSplitLogManager {
         batch.wait();
       }
     }
-    waitForCounter(tot_mgr_task_deleted, 0, 1, to/2);
+    waitForCounter(tot_mgr_task_deleted, 0, 1, 1000);
     assertTrue(ZKUtil.checkExists(zkw, tasknode) == -1);
   }
 
@@ -343,7 +350,7 @@ public class TestSplitLogManager {
         batch.wait();
       }
     }
-    waitForCounter(tot_mgr_task_deleted, 0, 1, to/2);
+    waitForCounter(tot_mgr_task_deleted, 0, 1, 1000);
     assertTrue(ZKUtil.checkExists(zkw, tasknode) == -1);
     conf.setInt("hbase.splitlog.max.resubmit", ZKSplitLog.DEFAULT_MAX_RESUBMIT);
   }
@@ -359,7 +366,7 @@ public class TestSplitLogManager {
     ZKUtil.setData(zkw, tasknode, TaskState.TASK_RESIGNED.get("worker"));
     int version = ZKUtil.checkExists(zkw, tasknode);
 
-    waitForCounter(tot_mgr_resubmit, 0, 1, to/2);
+    waitForCounter(tot_mgr_resubmit, 0, 1, 1000);
     int version1 = ZKUtil.checkExists(zkw, tasknode);
     assertTrue(version1 > version);
 
@@ -379,9 +386,15 @@ public class TestSplitLogManager {
         TaskState.TASK_OWNED.get("dummy-worker"), Ids.OPEN_ACL_UNSAFE,
         CreateMode.PERSISTENT);
 
+    int to = 4000;
+    conf.setInt("hbase.splitlog.manager.timeout", to);
+    conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);
+    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
+
+
     slm = new SplitLogManager(zkw, conf, stopper, "dummy-master", null);
     slm.finishInitialization();
-    waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to/2);
+    waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, 100);
 
 
     // submit another task which will stay in unassigned mode
@@ -417,10 +430,10 @@ public class TestSplitLogManager {
     int version = ZKUtil.checkExists(zkw, tasknode);
 
     ZKUtil.setData(zkw, tasknode, TaskState.TASK_OWNED.get("worker1"));
-    waitForCounter(tot_mgr_heartbeat, 0, 1, to/2);
+    waitForCounter(tot_mgr_heartbeat, 0, 1, 1000);
     slm.handleDeadWorker("worker1");
-    waitForCounter(tot_mgr_resubmit, 0, 1, to/2);
-    waitForCounter(tot_mgr_resubmit_dead_server_task, 0, 1, to + to/2);
+    waitForCounter(tot_mgr_resubmit, 0, 1, 3000);
+    waitForCounter(tot_mgr_resubmit_dead_server_task, 0, 1, 3000);
 
     int version1 = ZKUtil.checkExists(zkw, tasknode);
     assertTrue(version1 > version);
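
The TestSplitLogManager hunks above move the split-log timeout settings out of the shared setup and back into individual tests, and turn waits that were expressed relative to the configured timeout (to/2, to + to/2) back into literal millisecond values. The sketch below shows the wiring pattern the per-test versions use, deriving a single wait budget from the configured timeout so that changing the timeout rescales every wait; the method name and slack margin are illustrative assumptions, while the property keys are the ones used in the hunks.

    import org.apache.hadoop.conf.Configuration;

    // Illustrative sketch: configure the split-log manager timeout and derive a
    // wait budget from it in one place.
    public class SplitLogTimeoutWiring {
      static long configureAndGetWaitBudget(Configuration conf) {
        int timeoutMs = 1000;
        conf.setInt("hbase.splitlog.manager.timeout", timeoutMs);
        conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
        // Leave a couple of monitor periods of slack for the manager to notice
        // the expired task before the test gives up waiting on its counters.
        return timeoutMs + 2 * 100;
      }
    }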

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java Sun Dec 23 19:34:53 2012
@@ -88,7 +88,7 @@ public class TestHFileCleaner {
         + status.getAccessTime();
   }
 
-  @Test(timeout = 60 *1000)
+  @Test
   public void testHFileCleaning() throws Exception {
     final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
     String prefix = "someHFileThatWouldBeAUUID";
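
The TestHFileCleaner change above drops the per-test timeout from the @Test annotation. For reference, the two standard JUnit 4 ways of bounding a test's run time are sketched below; the class name and sleep are illustrative only.

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.Timeout;

    public class TimeoutSketch {
      // Class-wide cap: every test in this class fails if it runs longer than 60 s.
      @Rule
      public Timeout globalTimeout = new Timeout(60 * 1000);

      // Per-test cap, the form removed in the hunk above.
      @Test(timeout = 60 * 1000)
      public void boundedTest() throws InterruptedException {
        Thread.sleep(10);
      }
    }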

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestExactCounterMetric.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestExactCounterMetric.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestExactCounterMetric.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestExactCounterMetric.java Sun Dec 23 19:34:53 2012
@@ -22,12 +22,9 @@ import java.util.List;
 
 import junit.framework.Assert;
 
-import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
-@Category(SmallTests.class)
 public class TestExactCounterMetric {
 
   @Test

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestExponentiallyDecayingSample.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestExponentiallyDecayingSample.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestExponentiallyDecayingSample.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestExponentiallyDecayingSample.java Sun Dec 23 19:34:53 2012
@@ -22,13 +22,8 @@ import junit.framework.Assert;
 
 import com.yammer.metrics.stats.ExponentiallyDecayingSample;
 import com.yammer.metrics.stats.Snapshot;
-
-import org.apache.hadoop.hbase.SmallTests;
-
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
-@Category(SmallTests.class)
 public class TestExponentiallyDecayingSample {
   
   @Test

Modified: hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsHistogram.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsHistogram.java?rev=1425513&r1=1425512&r2=1425513&view=diff
==============================================================================
--- hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsHistogram.java (original)
+++ hbase/branches/0.94-test/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsHistogram.java Sun Dec 23 19:34:53 2012
@@ -22,13 +22,10 @@ import java.util.Arrays;
 import java.util.Random;
 
 import org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram;
-import org.apache.hadoop.hbase.SmallTests;
 import com.yammer.metrics.stats.Snapshot;
 import org.junit.Assert;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
-@Category(SmallTests.class)
 public class TestMetricsHistogram {
 
   @Test