Posted to commits@hbase.apache.org by st...@apache.org on 2012/04/23 05:57:41 UTC
svn commit: r1329052 [1/3] - in /hbase/branches/0.92: ./
src/main/java/org/apache/hadoop/hbase/client/
src/main/java/org/apache/hadoop/hbase/io/hfile/
src/main/java/org/apache/hadoop/hbase/metrics/histogram/
src/main/java/org/apache/hadoop/hbase/region...
Author: stack
Date: Mon Apr 23 03:57:40 2012
New Revision: 1329052
URL: http://svn.apache.org/viewvc?rev=1329052&view=rev
Log:
HBASE-5833 0.92 build has been failing pretty consistently on TestMasterFailover; PART2
Modified:
hbase/branches/0.92/CHANGES.txt
hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/client/HTable.java
hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/metrics/histogram/ExponentiallyDecayingSample.java
hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
Modified: hbase/branches/0.92/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/CHANGES.txt?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/CHANGES.txt (original)
+++ hbase/branches/0.92/CHANGES.txt Mon Apr 23 03:57:40 2012
@@ -44,7 +44,7 @@ Release 0.92.2 - Unreleased
HBASE-5823 Hbck should be able to print help (Enis Soztutar)
HBASE-5787 Table owner can't disable/delete its own table (Matteo)
HBASE-5821 Incorrect handling of null value in Coprocessor aggregation function min() (Maryann Xue)
- HBASE-5833 0.92 build has been failing pretty consistently on TestMasterFailover....
+ HBASE-5833 0.92 build has been failing pretty consistently on TestMasterFailover
IMPROVEMENTS
HBASE-5592 Make it easier to get a table from shell (Ben West)
Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/client/HTable.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/client/HTable.java Mon Apr 23 03:57:40 2012
@@ -1447,7 +1447,7 @@ public class HTable implements HTableInt
SecurityManager s = System.getSecurityManager();
group = (s != null)? s.getThreadGroup() :
Thread.currentThread().getThreadGroup();
- namePrefix = "pool-" +
+ namePrefix = "htable-pool-" +
poolNumber.getAndIncrement() +
"-thread-";
}
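The rename above gives HTable's pool threads a distinctive prefix, so a leaked thread is attributable in a jstack dump instead of showing up as a generic "pool-N-thread-M". For illustration, a minimal sketch of that naming-ThreadFactory pattern (standalone, not code from this patch; the class and prefix names here are made up):

  import java.util.concurrent.ThreadFactory;
  import java.util.concurrent.atomic.AtomicInteger;

  // Names every created thread with a component-specific prefix so a
  // thread dump shows which component leaked it. Threads are daemons so
  // they cannot keep the JVM alive on their own.
  class NamedDaemonThreadFactory implements ThreadFactory {
    private static final AtomicInteger POOL_NUMBER = new AtomicInteger(1);
    private final AtomicInteger threadNumber = new AtomicInteger(1);
    private final String namePrefix;

    NamedDaemonThreadFactory(final String component) {
      this.namePrefix = component + "-pool-" +
        POOL_NUMBER.getAndIncrement() + "-thread-";
    }

    public Thread newThread(Runnable r) {
      Thread t = new Thread(r, namePrefix + threadNumber.getAndIncrement());
      t.setDaemon(true);
      return t;
    }
  }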
Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Mon Apr 23 03:57:40 2012
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.util.Byte
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HasThread;
+import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.util.StringUtils;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -551,16 +552,17 @@ public class LruBlockCache implements Bl
*/
private static class EvictionThread extends HasThread {
private WeakReference<LruBlockCache> cache;
+ private boolean go = true;
public EvictionThread(LruBlockCache cache) {
- super("LruBlockCache.EvictionThread");
+ super(Thread.currentThread().getName() + ".LruBlockCache.EvictionThread");
setDaemon(true);
this.cache = new WeakReference<LruBlockCache>(cache);
}
@Override
public void run() {
- while(true) {
+ while (this.go) {
synchronized(this) {
try {
this.wait();
@@ -571,11 +573,17 @@ public class LruBlockCache implements Bl
cache.evict();
}
}
+
public void evict() {
synchronized(this) {
this.notify(); // FindBugs NN_NAKED_NOTIFY
}
}
+
+ void shutdown() {
+ this.go = false;
+ interrupt();
+ }
}
/*
@@ -699,5 +707,13 @@ public class LruBlockCache implements Bl
public void shutdown() {
this.scheduleThreadPool.shutdown();
+ for (int i = 0; i < 10; i++) {
+ if (!this.scheduleThreadPool.isShutdown()) Threads.sleep(10);
+ }
+ if (!this.scheduleThreadPool.isShutdown()) {
+ List<Runnable> runnables = this.scheduleThreadPool.shutdownNow();
+ LOG.debug("Still running " + runnables);
+ }
+ this.evictionThread.shutdown();
}
}
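The new shutdown() above polls isShutdown() for roughly 100ms (10 x 10ms) before forcing shutdownNow(), and stops the eviction thread via a flag plus interrupt(). A sketch of the same idea written against the standard ExecutorService API, using awaitTermination() instead of a sleep loop (an illustrative alternative, not the committed code; note the stop flag is declared volatile here for cross-thread visibility, an assumption this sketch adds):

  import java.util.List;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.TimeUnit;

  class StoppableWorker extends Thread {
    private volatile boolean go = true; // written by shutdown(), read by run()

    @Override
    public void run() {
      while (go) {
        synchronized (this) {
          try {
            wait();                     // woken by notify() or interrupt()
          } catch (InterruptedException e) {
            // fall through and re-check the flag
          }
        }
      }
    }

    void shutdown() {
      go = false;
      interrupt();                      // unblock wait() so the flag is seen promptly
    }

    static void shutdownPool(ExecutorService pool) throws InterruptedException {
      pool.shutdown();                  // stop accepting new tasks
      if (!pool.awaitTermination(100, TimeUnit.MILLISECONDS)) {
        List<Runnable> pending = pool.shutdownNow(); // cancel what is still queued
        System.err.println("Still running " + pending);
      }
    }
  }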
Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/metrics/histogram/ExponentiallyDecayingSample.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/metrics/histogram/ExponentiallyDecayingSample.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/metrics/histogram/ExponentiallyDecayingSample.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/metrics/histogram/ExponentiallyDecayingSample.java Mon Apr 23 03:57:40 2012
@@ -44,7 +44,8 @@ public class ExponentiallyDecayingSample
private static final ScheduledExecutorService TICK_SERVICE =
Executors.newScheduledThreadPool(1,
- Threads.getNamedThreadFactory("decayingSampleTick", true));
+ Threads.getNamedThreadFactory(Thread.currentThread().getName() +
+ ".decayingSampleTick.", true));
private static volatile long CURRENT_TICK =
TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
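Equivalently, with the Guava ThreadFactoryBuilder already imported elsewhere in this codebase, a scheduled executor whose worker threads carry the creating thread's name could be built as below (an illustrative sketch only, not this patch's code; since TICK_SERVICE is static, the prefix captured is that of whichever thread first loads the class):

  import java.util.concurrent.Executors;
  import java.util.concurrent.ScheduledExecutorService;
  import com.google.common.util.concurrent.ThreadFactoryBuilder;

  class NamedTickService {
    // Daemon scheduler whose worker threads are named after their creator,
    // e.g. "main.decayingSampleTick.0", so stray timers are traceable in a
    // thread dump.
    static ScheduledExecutorService create() {
      return Executors.newScheduledThreadPool(1,
          new ThreadFactoryBuilder()
              .setNameFormat(Thread.currentThread().getName() +
                  ".decayingSampleTick.%d")
              .setDaemon(true)
              .build());
    }
  }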
Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Mon Apr 23 03:57:40 2012
@@ -61,13 +61,13 @@ import org.apache.hadoop.hbase.DroppedSn
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
@@ -78,11 +78,10 @@ import org.apache.hadoop.hbase.client.Ro
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.Exec;
import org.apache.hadoop.hbase.client.coprocessor.ExecResult;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-import org.apache.hadoop.hbase.filter.NullComparator;
import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -3080,7 +3079,11 @@ public class HRegion implements HeapSize
* bootstrap code in the HMaster constructor.
* Note, this method creates an {@link HLog} for the created region. It
* needs to be closed explicitly. Use {@link HRegion#getLog()} to get
- * access.
+ * access. <b>When done with a region created using this method, you will
+ * need to explicitly close the {@link HLog} it created too; it will not be
+ * done for you. Not closing the log will leave at least a daemon thread
+ * running.</b> Call {@link #closeHRegion(HRegion)} and it will do the
+ * necessary cleanup for you.
* @param info Info for region to create.
* @param rootDir Root directory for HBase instance
* @param conf
@@ -3096,6 +3099,23 @@ public class HRegion implements HeapSize
}
/**
+ * This will do the necessary cleanup a call to {@link #createHRegion(HRegionInfo, Path, Configuration, HTableDescriptor)}
+ * requires. This method will close the region and then close its
+ * associated {@link HLog} file. You can use it even if you called the other
+ * createHRegion, the one that takes an {@link HLog} instance, but don't be
+ * surprised by the call to {@link HLog#closeAndDelete()} on the {@link HLog}
+ * the HRegion was carrying.
+ * @param r
+ * @throws IOException
+ */
+ public static void closeHRegion(final HRegion r) throws IOException {
+ if (r == null) return;
+ r.close();
+ if (r.getLog() == null) return;
+ r.getLog().closeAndDelete();
+ }
+
+ /**
* Convenience method creating new HRegions. Used by createTable.
* The {@link HLog} for the created region needs to be closed explicitly.
* Use {@link HRegion#getLog()} to get access.
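With closeHRegion in place, callers can pair the two calls in the usual try/finally shape. A minimal usage sketch (hypothetical snippet, not from this commit; it assumes info, rootDir, conf and htd of the types named in the javadoc above are in scope):

  static void exerciseRegion(HRegionInfo info, Path rootDir,
      Configuration conf, HTableDescriptor htd) throws IOException {
    HRegion region = HRegion.createHRegion(info, rootDir, conf, htd);
    try {
      // ... operate on the region ...
    } finally {
      // Closes the region, then closeAndDelete()s the HLog that
      // createHRegion opened, so no sync thread is left running.
      HRegion.closeHRegion(region);
    }
  }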
Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java Mon Apr 23 03:57:40 2012
@@ -192,13 +192,13 @@ public class JVMClusterUtil {
}
// Wait for an active master
while (true) {
- for (JVMClusterUtil.MasterThread t : masters) {
+ for (JVMClusterUtil.MasterThread t: masters) {
if (t.master.isActiveMaster()) {
return t.master.getServerName().toString();
}
}
try {
- Thread.sleep(1000);
+ Thread.sleep(100);
} catch(InterruptedException e) {
// Keep waiting
}
@@ -213,13 +213,17 @@ public class JVMClusterUtil {
final List<RegionServerThread> regionservers) {
LOG.debug("Shutting down HBase Cluster");
if (masters != null) {
+ // Do backups first.
+ JVMClusterUtil.MasterThread activeMaster = null;
for (JVMClusterUtil.MasterThread t : masters) {
- if (t.master.isActiveMaster()) {
- t.master.shutdown();
- } else {
+ if (!t.master.isActiveMaster()) {
t.master.stopMaster();
+ } else {
+ activeMaster = t;
}
}
+ // Do active after.
+ if (activeMaster != null) activeMaster.master.shutdown();
}
// regionServerThreads can never be null because they are initialized when
// the class is constructed.
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java Mon Apr 23 03:57:40 2012
@@ -170,10 +170,19 @@ public abstract class HBaseTestCase exte
);
}
+ /**
+ * You must call close on the returned region and then close the log file
+ * it created: do {@link HRegion#close()}, then {@link HRegion#getLog()},
+ * and call close on the log it returns.
+ * @param desc
+ * @param startKey
+ * @param endKey
+ * @return An {@link HRegion}
+ * @throws IOException
+ */
protected HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey,
byte [] endKey)
throws IOException {
- FileSystem filesystem = FileSystem.get(conf);
HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
return HRegion.createHRegion(hri, testDir, conf, desc);
}
@@ -679,6 +688,11 @@ public abstract class HBaseTestCase exte
}
}
+ /**
+ * You must call {@link #closeRootAndMeta()} when done after calling this
+ * method. It does cleanup.
+ * @throws IOException
+ */
protected void createRootAndMetaRegions() throws IOException {
root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir,
conf, HTableDescriptor.ROOT_TABLEDESC);
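A sketch of the pairing the new javadoc asks for, as it might look inside a test (illustrative only; assumes an HBaseTestCase subclass where both helpers are in scope):

  createRootAndMetaRegions();
  try {
    // ... test logic that needs the ROOT and META regions ...
  } finally {
    closeRootAndMeta();  // the cleanup the javadoc above requires
  }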
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java Mon Apr 23 03:57:40 2012
@@ -53,52 +53,55 @@ public class TestColumnPrefixFilter {
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
-
- List<String> rows = generateRandomWords(100, "row");
- List<String> columns = generateRandomWords(10000, "column");
- long maxTimestamp = 2;
-
- List<KeyValue> kvList = new ArrayList<KeyValue>();
-
- Map<String, List<KeyValue>> prefixMap = new HashMap<String,
- List<KeyValue>>();
-
- prefixMap.put("p", new ArrayList<KeyValue>());
- prefixMap.put("s", new ArrayList<KeyValue>());
-
- String valueString = "ValueString";
-
- for (String row: rows) {
- Put p = new Put(Bytes.toBytes(row));
- p.setWriteToWAL(false);
- for (String column: columns) {
- for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
- KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
- valueString);
- p.add(kv);
- kvList.add(kv);
- for (String s: prefixMap.keySet()) {
- if (column.startsWith(s)) {
- prefixMap.get(s).add(kv);
+ try {
+ List<String> rows = generateRandomWords(100, "row");
+ List<String> columns = generateRandomWords(10000, "column");
+ long maxTimestamp = 2;
+
+ List<KeyValue> kvList = new ArrayList<KeyValue>();
+
+ Map<String, List<KeyValue>> prefixMap = new HashMap<String,
+ List<KeyValue>>();
+
+ prefixMap.put("p", new ArrayList<KeyValue>());
+ prefixMap.put("s", new ArrayList<KeyValue>());
+
+ String valueString = "ValueString";
+
+ for (String row: rows) {
+ Put p = new Put(Bytes.toBytes(row));
+ p.setWriteToWAL(false);
+ for (String column: columns) {
+ for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+ KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
+ valueString);
+ p.add(kv);
+ kvList.add(kv);
+ for (String s: prefixMap.keySet()) {
+ if (column.startsWith(s)) {
+ prefixMap.get(s).add(kv);
+ }
}
}
}
+ region.put(p);
}
- region.put(p);
- }
- ColumnPrefixFilter filter;
- Scan scan = new Scan();
- scan.setMaxVersions();
- for (String s: prefixMap.keySet()) {
- filter = new ColumnPrefixFilter(Bytes.toBytes(s));
-
- scan.setFilter(filter);
-
- InternalScanner scanner = region.getScanner(scan);
- List<KeyValue> results = new ArrayList<KeyValue>();
- while(scanner.next(results));
- assertEquals(prefixMap.get(s).size(), results.size());
+ ColumnPrefixFilter filter;
+ Scan scan = new Scan();
+ scan.setMaxVersions();
+ for (String s: prefixMap.keySet()) {
+ filter = new ColumnPrefixFilter(Bytes.toBytes(s));
+
+ scan.setFilter(filter);
+
+ InternalScanner scanner = region.getScanner(scan);
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ while(scanner.next(results));
+ assertEquals(prefixMap.get(s).size(), results.size());
+ }
+ } finally {
+ HRegion.closeHRegion(region);
}
}
@@ -110,55 +113,59 @@ public class TestColumnPrefixFilter {
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
-
- List<String> rows = generateRandomWords(100, "row");
- List<String> columns = generateRandomWords(10000, "column");
- long maxTimestamp = 2;
-
- List<KeyValue> kvList = new ArrayList<KeyValue>();
-
- Map<String, List<KeyValue>> prefixMap = new HashMap<String,
- List<KeyValue>>();
-
- prefixMap.put("p", new ArrayList<KeyValue>());
- prefixMap.put("s", new ArrayList<KeyValue>());
-
- String valueString = "ValueString";
-
- for (String row: rows) {
- Put p = new Put(Bytes.toBytes(row));
- p.setWriteToWAL(false);
- for (String column: columns) {
- for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
- KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
- valueString);
- p.add(kv);
- kvList.add(kv);
- for (String s: prefixMap.keySet()) {
- if (column.startsWith(s)) {
- prefixMap.get(s).add(kv);
+ try {
+ List<String> rows = generateRandomWords(100, "row");
+ List<String> columns = generateRandomWords(10000, "column");
+ long maxTimestamp = 2;
+
+ List<KeyValue> kvList = new ArrayList<KeyValue>();
+
+ Map<String, List<KeyValue>> prefixMap = new HashMap<String,
+ List<KeyValue>>();
+
+ prefixMap.put("p", new ArrayList<KeyValue>());
+ prefixMap.put("s", new ArrayList<KeyValue>());
+
+ String valueString = "ValueString";
+
+ for (String row: rows) {
+ Put p = new Put(Bytes.toBytes(row));
+ p.setWriteToWAL(false);
+ for (String column: columns) {
+ for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+ KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
+ valueString);
+ p.add(kv);
+ kvList.add(kv);
+ for (String s: prefixMap.keySet()) {
+ if (column.startsWith(s)) {
+ prefixMap.get(s).add(kv);
+ }
}
}
}
+ region.put(p);
}
- region.put(p);
- }
- ColumnPrefixFilter filter;
- Scan scan = new Scan();
- scan.setMaxVersions();
- for (String s: prefixMap.keySet()) {
- filter = new ColumnPrefixFilter(Bytes.toBytes(s));
-
- //this is how this test differs from the one above
- FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
- filterList.addFilter(filter);
- scan.setFilter(filterList);
-
- InternalScanner scanner = region.getScanner(scan);
- List<KeyValue> results = new ArrayList<KeyValue>();
- while(scanner.next(results));
- assertEquals(prefixMap.get(s).size(), results.size());
+ ColumnPrefixFilter filter;
+ Scan scan = new Scan();
+ scan.setMaxVersions();
+ for (String s: prefixMap.keySet()) {
+ filter = new ColumnPrefixFilter(Bytes.toBytes(s));
+
+ //this is how this test differs from the one above
+ FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
+ filterList.addFilter(filter);
+ scan.setFilter(filterList);
+
+ InternalScanner scanner = region.getScanner(scan);
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ while(scanner.next(results));
+ assertEquals(prefixMap.get(s).size(), results.size());
+ }
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
}
}
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java Mon Apr 23 03:57:40 2012
@@ -85,6 +85,7 @@ public class TestDependentColumnFilter e
protected void tearDown() throws Exception {
super.tearDown();
this.region.close();
+ this.region.getLog().closeAndDelete();
}
private void addData() throws IOException {
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java Mon Apr 23 03:57:40 2012
@@ -167,6 +167,7 @@ public class TestFilter extends HBaseTes
protected void tearDown() throws Exception {
this.region.close();
+ this.region.getLog().closeAndDelete();
super.tearDown();
}
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java Mon Apr 23 03:57:40 2012
@@ -54,54 +54,58 @@ public class TestMultipleColumnPrefixFil
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
-
- List<String> rows = generateRandomWords(100, "row");
- List<String> columns = generateRandomWords(10000, "column");
- long maxTimestamp = 2;
-
- List<KeyValue> kvList = new ArrayList<KeyValue>();
-
- Map<String, List<KeyValue>> prefixMap = new HashMap<String,
- List<KeyValue>>();
-
- prefixMap.put("p", new ArrayList<KeyValue>());
- prefixMap.put("q", new ArrayList<KeyValue>());
- prefixMap.put("s", new ArrayList<KeyValue>());
-
- String valueString = "ValueString";
-
- for (String row: rows) {
- Put p = new Put(Bytes.toBytes(row));
- p.setWriteToWAL(false);
- for (String column: columns) {
- for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
- KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
- valueString);
- p.add(kv);
- kvList.add(kv);
- for (String s: prefixMap.keySet()) {
- if (column.startsWith(s)) {
- prefixMap.get(s).add(kv);
+ try {
+ List<String> rows = generateRandomWords(100, "row");
+ List<String> columns = generateRandomWords(10000, "column");
+ long maxTimestamp = 2;
+
+ List<KeyValue> kvList = new ArrayList<KeyValue>();
+
+ Map<String, List<KeyValue>> prefixMap = new HashMap<String,
+ List<KeyValue>>();
+
+ prefixMap.put("p", new ArrayList<KeyValue>());
+ prefixMap.put("q", new ArrayList<KeyValue>());
+ prefixMap.put("s", new ArrayList<KeyValue>());
+
+ String valueString = "ValueString";
+
+ for (String row: rows) {
+ Put p = new Put(Bytes.toBytes(row));
+ p.setWriteToWAL(false);
+ for (String column: columns) {
+ for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+ KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
+ valueString);
+ p.add(kv);
+ kvList.add(kv);
+ for (String s: prefixMap.keySet()) {
+ if (column.startsWith(s)) {
+ prefixMap.get(s).add(kv);
+ }
}
}
}
+ region.put(p);
}
- region.put(p);
- }
- MultipleColumnPrefixFilter filter;
- Scan scan = new Scan();
- scan.setMaxVersions();
- byte [][] filter_prefix = new byte [2][];
- filter_prefix[0] = new byte [] {'p'};
- filter_prefix[1] = new byte [] {'q'};
-
- filter = new MultipleColumnPrefixFilter(filter_prefix);
- scan.setFilter(filter);
- List<KeyValue> results = new ArrayList<KeyValue>();
- InternalScanner scanner = region.getScanner(scan);
- while(scanner.next(results));
- assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());
+ MultipleColumnPrefixFilter filter;
+ Scan scan = new Scan();
+ scan.setMaxVersions();
+ byte [][] filter_prefix = new byte [2][];
+ filter_prefix[0] = new byte [] {'p'};
+ filter_prefix[1] = new byte [] {'q'};
+
+ filter = new MultipleColumnPrefixFilter(filter_prefix);
+ scan.setFilter(filter);
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ InternalScanner scanner = region.getScanner(scan);
+ while(scanner.next(results));
+ assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
}
@Test
@@ -114,60 +118,64 @@ public class TestMultipleColumnPrefixFil
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
-
- List<String> rows = generateRandomWords(100, "row");
- List<String> columns = generateRandomWords(10000, "column");
- long maxTimestamp = 3;
-
- List<KeyValue> kvList = new ArrayList<KeyValue>();
-
- Map<String, List<KeyValue>> prefixMap = new HashMap<String,
- List<KeyValue>>();
-
- prefixMap.put("p", new ArrayList<KeyValue>());
- prefixMap.put("q", new ArrayList<KeyValue>());
- prefixMap.put("s", new ArrayList<KeyValue>());
-
- String valueString = "ValueString";
-
- for (String row: rows) {
- Put p = new Put(Bytes.toBytes(row));
- p.setWriteToWAL(false);
- for (String column: columns) {
- for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
- double rand = Math.random();
- KeyValue kv;
- if (rand < 0.5)
- kv = KeyValueTestUtil.create(row, family1, column, timestamp,
- valueString);
- else
- kv = KeyValueTestUtil.create(row, family2, column, timestamp,
- valueString);
- p.add(kv);
- kvList.add(kv);
- for (String s: prefixMap.keySet()) {
- if (column.startsWith(s)) {
- prefixMap.get(s).add(kv);
+ try {
+ List<String> rows = generateRandomWords(100, "row");
+ List<String> columns = generateRandomWords(10000, "column");
+ long maxTimestamp = 3;
+
+ List<KeyValue> kvList = new ArrayList<KeyValue>();
+
+ Map<String, List<KeyValue>> prefixMap = new HashMap<String,
+ List<KeyValue>>();
+
+ prefixMap.put("p", new ArrayList<KeyValue>());
+ prefixMap.put("q", new ArrayList<KeyValue>());
+ prefixMap.put("s", new ArrayList<KeyValue>());
+
+ String valueString = "ValueString";
+
+ for (String row: rows) {
+ Put p = new Put(Bytes.toBytes(row));
+ p.setWriteToWAL(false);
+ for (String column: columns) {
+ for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+ double rand = Math.random();
+ KeyValue kv;
+ if (rand < 0.5)
+ kv = KeyValueTestUtil.create(row, family1, column, timestamp,
+ valueString);
+ else
+ kv = KeyValueTestUtil.create(row, family2, column, timestamp,
+ valueString);
+ p.add(kv);
+ kvList.add(kv);
+ for (String s: prefixMap.keySet()) {
+ if (column.startsWith(s)) {
+ prefixMap.get(s).add(kv);
+ }
}
}
}
+ region.put(p);
}
- region.put(p);
- }
- MultipleColumnPrefixFilter filter;
- Scan scan = new Scan();
- scan.setMaxVersions();
- byte [][] filter_prefix = new byte [2][];
- filter_prefix[0] = new byte [] {'p'};
- filter_prefix[1] = new byte [] {'q'};
-
- filter = new MultipleColumnPrefixFilter(filter_prefix);
- scan.setFilter(filter);
- List<KeyValue> results = new ArrayList<KeyValue>();
- InternalScanner scanner = region.getScanner(scan);
- while(scanner.next(results));
- assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());
+ MultipleColumnPrefixFilter filter;
+ Scan scan = new Scan();
+ scan.setMaxVersions();
+ byte [][] filter_prefix = new byte [2][];
+ filter_prefix[0] = new byte [] {'p'};
+ filter_prefix[1] = new byte [] {'q'};
+
+ filter = new MultipleColumnPrefixFilter(filter_prefix);
+ scan.setFilter(filter);
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ InternalScanner scanner = region.getScanner(scan);
+ while(scanner.next(results));
+ assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size());
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
}
@Test
@@ -178,49 +186,53 @@ public class TestMultipleColumnPrefixFil
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(),htd);
-
- List<String> rows = generateRandomWords(100, "row");
- List<String> columns = generateRandomWords(10000, "column");
- long maxTimestamp = 2;
-
- String valueString = "ValueString";
-
- for (String row: rows) {
- Put p = new Put(Bytes.toBytes(row));
- p.setWriteToWAL(false);
- for (String column: columns) {
- for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
- KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
- valueString);
- p.add(kv);
+ try {
+ List<String> rows = generateRandomWords(100, "row");
+ List<String> columns = generateRandomWords(10000, "column");
+ long maxTimestamp = 2;
+
+ String valueString = "ValueString";
+
+ for (String row: rows) {
+ Put p = new Put(Bytes.toBytes(row));
+ p.setWriteToWAL(false);
+ for (String column: columns) {
+ for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+ KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
+ valueString);
+ p.add(kv);
+ }
}
+ region.put(p);
}
- region.put(p);
- }
- MultipleColumnPrefixFilter multiplePrefixFilter;
- Scan scan1 = new Scan();
- scan1.setMaxVersions();
- byte [][] filter_prefix = new byte [1][];
- filter_prefix[0] = new byte [] {'p'};
-
- multiplePrefixFilter = new MultipleColumnPrefixFilter(filter_prefix);
- scan1.setFilter(multiplePrefixFilter);
- List<KeyValue> results1 = new ArrayList<KeyValue>();
- InternalScanner scanner1 = region.getScanner(scan1);
- while(scanner1.next(results1));
-
- ColumnPrefixFilter singlePrefixFilter;
- Scan scan2 = new Scan();
- scan2.setMaxVersions();
- singlePrefixFilter = new ColumnPrefixFilter(Bytes.toBytes("p"));
-
- scan2.setFilter(singlePrefixFilter);
- List<KeyValue> results2 = new ArrayList<KeyValue>();
- InternalScanner scanner2 = region.getScanner(scan1);
- while(scanner2.next(results2));
-
- assertEquals(results1.size(), results2.size());
+ MultipleColumnPrefixFilter multiplePrefixFilter;
+ Scan scan1 = new Scan();
+ scan1.setMaxVersions();
+ byte [][] filter_prefix = new byte [1][];
+ filter_prefix[0] = new byte [] {'p'};
+
+ multiplePrefixFilter = new MultipleColumnPrefixFilter(filter_prefix);
+ scan1.setFilter(multiplePrefixFilter);
+ List<KeyValue> results1 = new ArrayList<KeyValue>();
+ InternalScanner scanner1 = region.getScanner(scan1);
+ while(scanner1.next(results1));
+
+ ColumnPrefixFilter singlePrefixFilter;
+ Scan scan2 = new Scan();
+ scan2.setMaxVersions();
+ singlePrefixFilter = new ColumnPrefixFilter(Bytes.toBytes("p"));
+
+ scan2.setFilter(singlePrefixFilter);
+ List<KeyValue> results2 = new ArrayList<KeyValue>();
+ InternalScanner scanner2 = region.getScanner(scan1);
+ while(scanner2.next(results2));
+
+ assertEquals(results1.size(), results2.size());
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
}
List<String> generateRandomWords(int numberOfWords, String suffix) {
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java Mon Apr 23 03:57:40 2012
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertFal
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
@@ -63,6 +64,55 @@ import org.junit.Test;
public class TestMasterFailover {
private static final Log LOG = LogFactory.getLog(TestMasterFailover.class);
+ @Test (timeout=180000)
+ public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
+ throws Exception {
+ LOG.info("Starting testShouldCheckMasterFailOverWhenMETAIsInOpenedState");
+ final int NUM_MASTERS = 1;
+ final int NUM_RS = 2;
+
+ Configuration conf = HBaseConfiguration.create();
+ conf.setInt("hbase.master.assignment.timeoutmonitor.period", 2000);
+ conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 8000);
+ // Start the cluster
+ HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
+
+ TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
+ MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+
+ // Find regionserver carrying meta.
+ List<RegionServerThread> regionServerThreads =
+ cluster.getRegionServerThreads();
+ int count = -1;
+ HRegion metaRegion = null;
+ for (RegionServerThread regionServerThread : regionServerThreads) {
+ HRegionServer regionServer = regionServerThread.getRegionServer();
+ metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
+ count++;
+ regionServer.abort("");
+ if (null != metaRegion) break;
+ }
+ HRegionServer regionServer = cluster.getRegionServer(count);
+
+ TEST_UTIL.shutdownMiniHBaseCluster();
+
+ // Create a ZKW to use in the test
+ ZooKeeperWatcher zkw =
+ HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
+ metaRegion, regionServer.getServerName());
+
+ LOG.info("Staring cluster for second time");
+ TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);
+
+ // Failover should be completed, now wait for no RIT
+ log("Waiting for no more RIT");
+ ZKAssign.blockUntilNoRIT(zkw);
+
+ zkw.close();
+ // Stop the cluster
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
/**
* Simple test of master failover.
* <p>
@@ -108,6 +158,7 @@ public class TestMasterFailover {
}
assertEquals(1, numActive);
assertEquals(NUM_MASTERS, masterThreads.size());
+ LOG.info("Active master " + activeName);
// Check that ClusterStatus reports the correct active and backup masters
assertNotNull(active);
@@ -117,16 +168,16 @@ public class TestMasterFailover {
assertEquals(2, status.getBackupMasters().size());
// attempt to stop one of the inactive masters
- LOG.debug("\n\nStopping a backup master\n");
int backupIndex = (activeIndex == 0 ? 1 : activeIndex - 1);
+ HMaster master = cluster.getMaster(backupIndex);
+ LOG.debug("\n\nStopping a backup master: " + master.getServerName() + "\n");
cluster.stopMaster(backupIndex, false);
cluster.waitOnMaster(backupIndex);
- // verify still one active master and it's the same
+ // Verify still one active master and it's the same
for (int i = 0; i < masterThreads.size(); i++) {
if (masterThreads.get(i).getMaster().isActiveMaster()) {
- assertTrue(activeName.equals(
- masterThreads.get(i).getMaster().getServerName()));
+ assertTrue(activeName.equals(masterThreads.get(i).getMaster().getServerName()));
activeIndex = i;
active = masterThreads.get(activeIndex).getMaster();
}
@@ -134,7 +185,7 @@ public class TestMasterFailover {
assertEquals(1, numActive);
assertEquals(2, masterThreads.size());
int rsCount = masterThreads.get(activeIndex).getMaster().getClusterStatus().getServersSize();
- LOG.info("Active master managing " + rsCount + " regions servers");
+ LOG.info("Active master " + active.getServerName() + " managing " + rsCount + " regions servers");
assertEquals(3, rsCount);
// Check that ClusterStatus reports the correct active and backup masters
@@ -145,7 +196,7 @@ public class TestMasterFailover {
assertEquals(1, status.getBackupMasters().size());
// kill the active master
- LOG.debug("\n\nStopping the active master\n");
+ LOG.debug("\n\nStopping the active master " + active.getServerName() + "\n");
cluster.stopMaster(activeIndex, false);
cluster.waitOnMaster(activeIndex);
@@ -166,7 +217,7 @@ public class TestMasterFailover {
assertEquals(0, status.getBackupMastersSize());
assertEquals(0, status.getBackupMasters().size());
int rss = status.getServersSize();
- LOG.info("Active master " + mastername.getHostname() + " managing " +
+ LOG.info("Active master " + mastername.getServerName() + " managing " +
rss + " region servers");
assertEquals(3, rss);
@@ -174,83 +225,6 @@ public class TestMasterFailover {
TEST_UTIL.shutdownMiniCluster();
}
- @Test
- public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
- throws Exception {
- final int NUM_MASTERS = 1;
- final int NUM_RS = 2;
-
- Configuration conf = HBaseConfiguration.create();
- conf.setInt("hbase.master.assignment.timeoutmonitor.period", 2000);
- conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 8000);
- // Start the cluster
- HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
- TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
- MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-
- // get all the master threads
- List<MasterThread> masterThreads = cluster.getMasterThreads();
-
- // wait for each to come online
- for (MasterThread mt : masterThreads) {
- assertTrue(mt.isAlive());
- }
-
- // verify only one is the active master and we have right number
- int numActive = 0;
- ServerName activeName = null;
- for (int i = 0; i < masterThreads.size(); i++) {
- if (masterThreads.get(i).getMaster().isActiveMaster()) {
- numActive++;
- activeName = masterThreads.get(i).getMaster().getServerName();
- }
- }
- assertEquals(1, numActive);
- assertEquals(NUM_MASTERS, masterThreads.size());
-
- // verify still one active master and it's the same
- for (int i = 0; i < masterThreads.size(); i++) {
- if (masterThreads.get(i).getMaster().isActiveMaster()) {
- assertTrue(activeName.equals(masterThreads.get(i).getMaster()
- .getServerName()));
- }
- }
- assertEquals(1, numActive);
- assertEquals(1, masterThreads.size());
-
- List<RegionServerThread> regionServerThreads = cluster
- .getRegionServerThreads();
- int count = -1;
- HRegion metaRegion = null;
- for (RegionServerThread regionServerThread : regionServerThreads) {
- HRegionServer regionServer = regionServerThread.getRegionServer();
- metaRegion = regionServer
- .getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
- count++;
- regionServer.abort("");
- if (null != metaRegion) {
- break;
- }
- }
- HRegionServer regionServer = cluster.getRegionServer(count);
-
- cluster.shutdown();
- // Create a ZKW to use in the test
- ZooKeeperWatcher zkw =
- HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
- metaRegion, regionServer.getServerName());
-
- TEST_UTIL.startMiniHBaseCluster(1, 1);
-
- // Failover should be completed, now wait for no RIT
- log("Waiting for no more RIT");
- ZKAssign.blockUntilNoRIT(zkw);
-
- // Stop the cluster
- TEST_UTIL.shutdownMiniCluster();
- }
-
-
/**
* Complex test of master failover that tests as many permutations of the
* different possible states that regions in transition could be in within ZK.
@@ -386,7 +360,7 @@ public class TestMasterFailover {
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
- HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
+ createRegion(hriEnabled, rootdir, conf, htdEnabled);
List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);
@@ -397,7 +371,7 @@ public class TestMasterFailover {
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
- HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
+ createRegion(hriDisabled, rootdir, conf, htdDisabled);
List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
@@ -699,7 +673,7 @@ public class TestMasterFailover {
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
null, null);
- HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
+ createRegion(hriEnabled, rootdir, conf, htdEnabled);
List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);
@@ -710,7 +684,7 @@ public class TestMasterFailover {
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
- HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
+ createRegion(hriDisabled, rootdir, conf, htdDisabled);
List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
@@ -1031,6 +1005,19 @@ public class TestMasterFailover {
TEST_UTIL.shutdownMiniCluster();
}
+ HRegion createRegion(final HRegionInfo hri, final Path rootdir, final Configuration c,
+ final HTableDescriptor htd)
+ throws IOException {
+ HRegion r = HRegion.createHRegion(hri, rootdir, c, htd);
+ // The above call to create a region also creates an hlog file, and each
+ // log file created starts a running thread to do its syncing. We need
+ // to close out this log, or else we will have a running thread trying to
+ // sync the file system continuously, which is ugly when dfs is taken away
+ // at the end of the test.
+ HRegion.closeHRegion(r);
+ return r;
+ }
+
// TODO: Next test to add is with testing permutations of the RIT or the RS
// killed are hosting ROOT and META regions.
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java Mon Apr 23 03:57:40 2012
@@ -116,6 +116,7 @@ public class TestOpenedRegionHandler {
@Test
public void testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches()
throws Exception {
+ HRegion region = null;
try {
int testIndex = 0;
TEST_UTIL.startMiniZKCluster();
@@ -124,8 +125,7 @@ public class TestOpenedRegionHandler {
"testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches");
HRegionInfo hri = new HRegionInfo(htd.getName(),
Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1));
- HRegion region = HRegion.createHRegion(hri, TEST_UTIL
- .getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
+ region = HRegion.createHRegion(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
assertNotNull(region);
AssignmentManager am = Mockito.mock(AssignmentManager.class);
when(am.isRegionInTransition(hri)).thenReturn(
@@ -164,6 +164,8 @@ public class TestOpenedRegionHandler {
assertEquals("The region should not be opened successfully.", regionName,
region.getRegionInfo().getEncodedName());
} finally {
+ region.close();
+ region.getLog().closeAndDelete();
TEST_UTIL.shutdownMiniZKCluster();
}
}
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java Mon Apr 23 03:57:40 2012
@@ -78,7 +78,16 @@ public class TestBlocksRead extends HBas
EnvironmentEdgeManagerTestHelper.reset();
}
- private void initHRegion (byte [] tableName, String callingMethod,
+ /**
+ * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
+ * @param tableName
+ * @param callingMethod
+ * @param conf
+ * @param families
+ * @throws IOException
+ * @return created and initialized region.
+ */
+ private HRegion initHRegion (byte [] tableName, String callingMethod,
HBaseConfiguration conf, byte [] ... families)
throws IOException {
HTableDescriptor htd = new HTableDescriptor(tableName);
@@ -97,8 +106,9 @@ public class TestBlocksRead extends HBas
}
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
Path path = new Path(DIR + callingMethod);
- region = HRegion.createHRegion(info, path, conf, htd);
+ HRegion r = HRegion.createHRegion(info, path, conf, htd);
blockCache = new CacheConfig(conf).getBlockCache();
+ return r;
}
private void putData(byte[] cf, String row, String col, long version)
@@ -195,38 +205,41 @@ public class TestBlocksRead extends HBas
KeyValue kvs[];
HBaseConfiguration conf = getConf();
- initHRegion(TABLE, getName(), conf, FAMILIES);
-
- putData(FAMILY, "row", "col1", 1);
- putData(FAMILY, "row", "col2", 2);
- putData(FAMILY, "row", "col3", 3);
- putData(FAMILY, "row", "col4", 4);
- putData(FAMILY, "row", "col5", 5);
- putData(FAMILY, "row", "col6", 6);
- putData(FAMILY, "row", "col7", 7);
- region.flushcache();
-
- // Expected block reads: 1
- kvs = getData(FAMILY, "row", "col1", 1);
- assertEquals(1, kvs.length);
- verifyData(kvs[0], "row", "col1", 1);
-
- // Expected block reads: 2
- kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2"), 2);
- assertEquals(2, kvs.length);
- verifyData(kvs[0], "row", "col1", 1);
- verifyData(kvs[1], "row", "col2", 2);
-
- // Expected block reads: 3
- kvs = getData(FAMILY, "row", Arrays.asList("col2", "col3"), 3);
- assertEquals(2, kvs.length);
- verifyData(kvs[0], "row", "col2", 2);
- verifyData(kvs[1], "row", "col3", 3);
-
- // Expected block reads: 3
- kvs = getData(FAMILY, "row", Arrays.asList("col5"), 3);
- assertEquals(1, kvs.length);
- verifyData(kvs[0], "row", "col5", 5);
+ this.region = initHRegion(TABLE, getName(), conf, FAMILIES);
+ try {
+ putData(FAMILY, "row", "col1", 1);
+ putData(FAMILY, "row", "col2", 2);
+ putData(FAMILY, "row", "col3", 3);
+ putData(FAMILY, "row", "col4", 4);
+ putData(FAMILY, "row", "col5", 5);
+ putData(FAMILY, "row", "col6", 6);
+ putData(FAMILY, "row", "col7", 7);
+ region.flushcache();
+
+ // Expected block reads: 1
+ kvs = getData(FAMILY, "row", "col1", 1);
+ assertEquals(1, kvs.length);
+ verifyData(kvs[0], "row", "col1", 1);
+
+ // Expected block reads: 2
+ kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2"), 2);
+ assertEquals(2, kvs.length);
+ verifyData(kvs[0], "row", "col1", 1);
+ verifyData(kvs[1], "row", "col2", 2);
+
+ // Expected block reads: 3
+ kvs = getData(FAMILY, "row", Arrays.asList("col2", "col3"), 3);
+ assertEquals(2, kvs.length);
+ verifyData(kvs[0], "row", "col2", 2);
+ verifyData(kvs[1], "row", "col3", 3);
+
+ // Expected block reads: 3
+ kvs = getData(FAMILY, "row", Arrays.asList("col5"), 3);
+ assertEquals(1, kvs.length);
+ verifyData(kvs[0], "row", "col5", 5);
+ } finally {
+ HRegion.closeHRegion(this.region);
+ }
}
/**
@@ -241,85 +254,88 @@ public class TestBlocksRead extends HBas
KeyValue kvs[];
HBaseConfiguration conf = getConf();
- initHRegion(TABLE, getName(), conf, FAMILIES);
-
- // File 1
- putData(FAMILY, "row", "col1", 1);
- putData(FAMILY, "row", "col2", 2);
- region.flushcache();
-
- // File 2
- putData(FAMILY, "row", "col1", 3);
- putData(FAMILY, "row", "col2", 4);
- region.flushcache();
-
- // Baseline expected blocks read: 2
- kvs = getData(FAMILY, "row", Arrays.asList("col1"), 2);
- assertEquals(1, kvs.length);
- verifyData(kvs[0], "row", "col1", 3);
-
- // Baseline expected blocks read: 4
- kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2"), 4);
- assertEquals(2, kvs.length);
- verifyData(kvs[0], "row", "col1", 3);
- verifyData(kvs[1], "row", "col2", 4);
-
- // File 3: Add another column
- putData(FAMILY, "row", "col3", 5);
- region.flushcache();
-
- // Baseline expected blocks read: 5
- kvs = getData(FAMILY, "row", "col3", 5);
- assertEquals(1, kvs.length);
- verifyData(kvs[0], "row", "col3", 5);
-
- // Get a column from older file.
- // Baseline expected blocks read: 3
- kvs = getData(FAMILY, "row", Arrays.asList("col1"), 3);
- assertEquals(1, kvs.length);
- verifyData(kvs[0], "row", "col1", 3);
-
- // File 4: Delete the entire row.
- deleteFamily(FAMILY, "row", 6);
- region.flushcache();
-
- // Baseline expected blocks read: 6.
- kvs = getData(FAMILY, "row", "col1", 6);
- assertEquals(0, kvs.length);
- kvs = getData(FAMILY, "row", "col2", 6);
- assertEquals(0, kvs.length);
- kvs = getData(FAMILY, "row", "col3", 6);
- assertEquals(0, kvs.length);
- kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 6);
- assertEquals(0, kvs.length);
-
- // File 5: Delete
- deleteFamily(FAMILY, "row", 10);
- region.flushcache();
-
- // File 6: some more puts, but with timestamps older than the
- // previous delete.
- putData(FAMILY, "row", "col1", 7);
- putData(FAMILY, "row", "col2", 8);
- putData(FAMILY, "row", "col3", 9);
- region.flushcache();
-
- // Baseline expected blocks read: 10
- kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 10);
- assertEquals(0, kvs.length);
-
- // File 7: Put back new data
- putData(FAMILY, "row", "col1", 11);
- putData(FAMILY, "row", "col2", 12);
- putData(FAMILY, "row", "col3", 13);
- region.flushcache();
-
- // Baseline expected blocks read: 13
- kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 13);
- assertEquals(3, kvs.length);
- verifyData(kvs[0], "row", "col1", 11);
- verifyData(kvs[1], "row", "col2", 12);
- verifyData(kvs[2], "row", "col3", 13);
+ this.region = initHRegion(TABLE, getName(), conf, FAMILIES);
+ try {
+ // File 1
+ putData(FAMILY, "row", "col1", 1);
+ putData(FAMILY, "row", "col2", 2);
+ region.flushcache();
+
+ // File 2
+ putData(FAMILY, "row", "col1", 3);
+ putData(FAMILY, "row", "col2", 4);
+ region.flushcache();
+
+ // Baseline expected blocks read: 2
+ kvs = getData(FAMILY, "row", Arrays.asList("col1"), 2);
+ assertEquals(1, kvs.length);
+ verifyData(kvs[0], "row", "col1", 3);
+
+ // Baseline expected blocks read: 4
+ kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2"), 4);
+ assertEquals(2, kvs.length);
+ verifyData(kvs[0], "row", "col1", 3);
+ verifyData(kvs[1], "row", "col2", 4);
+
+ // File 3: Add another column
+ putData(FAMILY, "row", "col3", 5);
+ region.flushcache();
+
+ // Baseline expected blocks read: 5
+ kvs = getData(FAMILY, "row", "col3", 5);
+ assertEquals(1, kvs.length);
+ verifyData(kvs[0], "row", "col3", 5);
+
+ // Get a column from older file.
+ // Baseline expected blocks read: 3
+ kvs = getData(FAMILY, "row", Arrays.asList("col1"), 3);
+ assertEquals(1, kvs.length);
+ verifyData(kvs[0], "row", "col1", 3);
+
+ // File 4: Delete the entire row.
+ deleteFamily(FAMILY, "row", 6);
+ region.flushcache();
+
+ // Baseline expected blocks read: 6.
+ kvs = getData(FAMILY, "row", "col1", 6);
+ assertEquals(0, kvs.length);
+ kvs = getData(FAMILY, "row", "col2", 6);
+ assertEquals(0, kvs.length);
+ kvs = getData(FAMILY, "row", "col3", 6);
+ assertEquals(0, kvs.length);
+ kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 6);
+ assertEquals(0, kvs.length);
+
+ // File 5: Delete
+ deleteFamily(FAMILY, "row", 10);
+ region.flushcache();
+
+ // File 6: some more puts, but with timestamps older than the
+ // previous delete.
+ putData(FAMILY, "row", "col1", 7);
+ putData(FAMILY, "row", "col2", 8);
+ putData(FAMILY, "row", "col3", 9);
+ region.flushcache();
+
+ // Baseline expected blocks read: 10
+ kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 10);
+ assertEquals(0, kvs.length);
+
+ // File 7: Put back new data
+ putData(FAMILY, "row", "col1", 11);
+ putData(FAMILY, "row", "col2", 12);
+ putData(FAMILY, "row", "col3", 13);
+ region.flushcache();
+
+ // Baseline expected blocks read: 13
+ kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 13);
+ assertEquals(3, kvs.length);
+ verifyData(kvs[0], "row", "col1", 11);
+ verifyData(kvs[1], "row", "col2", 12);
+ verifyData(kvs[2], "row", "col3", 13);
+ } finally {
+ HRegion.closeHRegion(this.region);
+ }
}
/**
@@ -333,37 +349,40 @@ public class TestBlocksRead extends HBas
byte [][] FAMILIES = new byte[][] { FAMILY };
HBaseConfiguration conf = getConf();
- initHRegion(TABLE, getName(), conf, FAMILIES);
-
- putData(FAMILY, "row", "col1", 1);
- putData(FAMILY, "row", "col2", 2);
- region.flushcache();
-
- // Execute a scan with caching turned off
- // Expected blocks stored: 0
- long blocksStart = getBlkCount();
- Scan scan = new Scan();
- scan.setCacheBlocks(false);
- RegionScanner rs = region.getScanner(scan);
- List<KeyValue> result = new ArrayList<KeyValue>(2);
- rs.next(result);
- assertEquals(2, result.size());
- rs.close();
- long blocksEnd = getBlkCount();
-
- assertEquals(blocksStart, blocksEnd);
-
- // Execute with caching turned on
- // Expected blocks stored: 2
- blocksStart = blocksEnd;
- scan.setCacheBlocks(true);
- rs = region.getScanner(scan);
- result = new ArrayList<KeyValue>(2);
- rs.next(result);
- assertEquals(2, result.size());
- rs.close();
- blocksEnd = getBlkCount();
-
- assertEquals(2, blocksEnd - blocksStart);
+ this.region = initHRegion(TABLE, getName(), conf, FAMILIES);
+ try {
+ putData(FAMILY, "row", "col1", 1);
+ putData(FAMILY, "row", "col2", 2);
+ region.flushcache();
+
+ // Execute a scan with caching turned off
+ // Expected blocks stored: 0
+ long blocksStart = getBlkCount();
+ Scan scan = new Scan();
+ scan.setCacheBlocks(false);
+ RegionScanner rs = region.getScanner(scan);
+ List<KeyValue> result = new ArrayList<KeyValue>(2);
+ rs.next(result);
+ assertEquals(2, result.size());
+ rs.close();
+ long blocksEnd = getBlkCount();
+
+ assertEquals(blocksStart, blocksEnd);
+
+ // Execute with caching turned on
+ // Expected blocks stored: 2
+ blocksStart = blocksEnd;
+ scan.setCacheBlocks(true);
+ rs = region.getScanner(scan);
+ result = new ArrayList<KeyValue>(2);
+ rs.next(result);
+ assertEquals(2, result.size());
+ rs.close();
+ blocksEnd = getBlkCount();
+
+ assertEquals(2, blocksEnd - blocksStart);
+ } finally {
+ HRegion.closeHRegion(this.region);
+ }
}
}
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java Mon Apr 23 03:57:40 2012
@@ -70,95 +70,98 @@ public class TestColumnSeeking {
HRegion region =
HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL
.getConfiguration(), htd);
-
- List<String> rows = generateRandomWords(10, "row");
- List<String> allColumns = generateRandomWords(10, "column");
- List<String> values = generateRandomWords(100, "value");
-
- long maxTimestamp = 2;
- double selectPercent = 0.5;
- int numberOfTests = 5;
- double flushPercentage = 0.2;
- double minorPercentage = 0.2;
- double majorPercentage = 0.2;
- double putPercentage = 0.2;
-
- HashMap<String, KeyValue> allKVMap = new HashMap<String, KeyValue>();
-
- HashMap<String, KeyValue>[] kvMaps = new HashMap[numberOfTests];
- ArrayList<String>[] columnLists = new ArrayList[numberOfTests];
-
- for (int i = 0; i < numberOfTests; i++) {
- kvMaps[i] = new HashMap<String, KeyValue>();
- columnLists[i] = new ArrayList<String>();
- for (String column : allColumns) {
- if (Math.random() < selectPercent) {
- columnLists[i].add(column);
+ try {
+ List<String> rows = generateRandomWords(10, "row");
+ List<String> allColumns = generateRandomWords(10, "column");
+ List<String> values = generateRandomWords(100, "value");
+
+ long maxTimestamp = 2;
+ double selectPercent = 0.5;
+ int numberOfTests = 5;
+ double flushPercentage = 0.2;
+ double minorPercentage = 0.2;
+ double majorPercentage = 0.2;
+ double putPercentage = 0.2;
+
+ HashMap<String, KeyValue> allKVMap = new HashMap<String, KeyValue>();
+
+ HashMap<String, KeyValue>[] kvMaps = new HashMap[numberOfTests];
+ ArrayList<String>[] columnLists = new ArrayList[numberOfTests];
+
+ for (int i = 0; i < numberOfTests; i++) {
+ kvMaps[i] = new HashMap<String, KeyValue>();
+ columnLists[i] = new ArrayList<String>();
+ for (String column : allColumns) {
+ if (Math.random() < selectPercent) {
+ columnLists[i].add(column);
+ }
}
}
- }
- for (String value : values) {
- for (String row : rows) {
- Put p = new Put(Bytes.toBytes(row));
- p.setWriteToWAL(false);
- for (String column : allColumns) {
- for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
- KeyValue kv =
- KeyValueTestUtil.create(row, family, column, timestamp, value);
- if (Math.random() < putPercentage) {
- p.add(kv);
- allKVMap.put(kv.getKeyString(), kv);
- for (int i = 0; i < numberOfTests; i++) {
- if (columnLists[i].contains(column)) {
- kvMaps[i].put(kv.getKeyString(), kv);
+ for (String value : values) {
+ for (String row : rows) {
+ Put p = new Put(Bytes.toBytes(row));
+ p.setWriteToWAL(false);
+ for (String column : allColumns) {
+ for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+ KeyValue kv =
+ KeyValueTestUtil.create(row, family, column, timestamp, value);
+ if (Math.random() < putPercentage) {
+ p.add(kv);
+ allKVMap.put(kv.getKeyString(), kv);
+ for (int i = 0; i < numberOfTests; i++) {
+ if (columnLists[i].contains(column)) {
+ kvMaps[i].put(kv.getKeyString(), kv);
+ }
}
}
}
}
- }
- region.put(p);
- if (Math.random() < flushPercentage) {
- LOG.info("Flushing... ");
- region.flushcache();
- }
+ region.put(p);
+ if (Math.random() < flushPercentage) {
+ LOG.info("Flushing... ");
+ region.flushcache();
+ }
- if (Math.random() < minorPercentage) {
- LOG.info("Minor compacting... ");
- region.compactStores(false);
- }
+ if (Math.random() < minorPercentage) {
+ LOG.info("Minor compacting... ");
+ region.compactStores(false);
+ }
- if (Math.random() < majorPercentage) {
- LOG.info("Major compacting... ");
- region.compactStores(true);
+ if (Math.random() < majorPercentage) {
+ LOG.info("Major compacting... ");
+ region.compactStores(true);
+ }
}
}
- }
-
- for (int i = 0; i < numberOfTests + 1; i++) {
- Collection<KeyValue> kvSet;
- Scan scan = new Scan();
- scan.setMaxVersions();
- if (i < numberOfTests) {
- kvSet = kvMaps[i].values();
- for (String column : columnLists[i]) {
- scan.addColumn(familyBytes, Bytes.toBytes(column));
- }
- LOG.info("ExplicitColumns scanner");
- LOG.info("Columns: " + columnLists[i].size() + " Keys: "
- + kvSet.size());
- } else {
- kvSet = allKVMap.values();
- LOG.info("Wildcard scanner");
- LOG.info("Columns: " + allColumns.size() + " Keys: " + kvSet.size());
+ for (int i = 0; i < numberOfTests + 1; i++) {
+ Collection<KeyValue> kvSet;
+ Scan scan = new Scan();
+ scan.setMaxVersions();
+ if (i < numberOfTests) {
+ kvSet = kvMaps[i].values();
+ for (String column : columnLists[i]) {
+ scan.addColumn(familyBytes, Bytes.toBytes(column));
+ }
+ LOG.info("ExplicitColumns scanner");
+ LOG.info("Columns: " + columnLists[i].size() + " Keys: "
+ + kvSet.size());
+ } else {
+ kvSet = allKVMap.values();
+ LOG.info("Wildcard scanner");
+ LOG.info("Columns: " + allColumns.size() + " Keys: " + kvSet.size());
+
+ }
+ InternalScanner scanner = region.getScanner(scan);
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ while (scanner.next(results))
+ ;
+ assertEquals(kvSet.size(), results.size());
+ assertTrue(results.containsAll(kvSet));
}
- InternalScanner scanner = region.getScanner(scan);
- List<KeyValue> results = new ArrayList<KeyValue>();
- while (scanner.next(results))
- ;
- assertEquals(kvSet.size(), results.size());
- assertTrue(results.containsAll(kvSet));
+ } finally {
+ HRegion.closeHRegion(region);
}
}
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java Mon Apr 23 03:57:40 2012
@@ -84,9 +84,10 @@ public class TestCompactSelection extend
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
- HRegion.createHRegion(info, basedir, conf, htd);
+ HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
+ HRegion.closeHRegion(region);
Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));
- HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
+ region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
store = new Store(basedir, region, hcd, fs, conf);
TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());