Posted to commits@hbase.apache.org by st...@apache.org on 2012/04/23 05:57:41 UTC
svn commit: r1329052 [3/3] - in /hbase/branches/0.92: ./
src/main/java/org/apache/hadoop/hbase/client/
src/main/java/org/apache/hadoop/hbase/io/hfile/
src/main/java/org/apache/hadoop/hbase/metrics/histogram/
src/main/java/org/apache/hadoop/hbase/region...
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java Mon Apr 23 03:57:40 2012
@@ -53,37 +53,41 @@ public class TestMinVersions extends HBa
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis() - 2000; // 2s in the past
-
- Put p = new Put(T1, ts);
- p.add(c0, c0, T1);
- region.put(p);
-
- p = new Put(T1, ts+1);
- p.add(c0, c0, T4);
- region.put(p);
-
- p = new Put(T3, ts);
- p.add(c0, c0, T3);
- region.put(p);
-
- // now make sure that getClosestBefore(...) get can
- // rows that would be expired without minVersion.
- // also make sure it gets the latest version
- Result r = region.getClosestRowBefore(T1, c0);
- checkResult(r, c0, T4);
-
- r = region.getClosestRowBefore(T2, c0);
- checkResult(r, c0, T4);
-
- // now flush/compact
- region.flushcache();
- region.compactStores(true);
-
- r = region.getClosestRowBefore(T1, c0);
- checkResult(r, c0, T4);
-
- r = region.getClosestRowBefore(T2, c0);
- checkResult(r, c0, T4);
+ try {
+ Put p = new Put(T1, ts);
+ p.add(c0, c0, T1);
+ region.put(p);
+
+ p = new Put(T1, ts+1);
+ p.add(c0, c0, T4);
+ region.put(p);
+
+ p = new Put(T3, ts);
+ p.add(c0, c0, T3);
+ region.put(p);
+
+      // now make sure that getClosestBefore(...) can get
+ // rows that would be expired without minVersion.
+ // also make sure it gets the latest version
+ Result r = region.getClosestRowBefore(T1, c0);
+ checkResult(r, c0, T4);
+
+ r = region.getClosestRowBefore(T2, c0);
+ checkResult(r, c0, T4);
+
+ // now flush/compact
+ region.flushcache();
+ region.compactStores(true);
+
+ r = region.getClosestRowBefore(T1, c0);
+ checkResult(r, c0, T4);
+
+ r = region.getClosestRowBefore(T2, c0);
+ checkResult(r, c0, T4);
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
}
/**
@@ -97,44 +101,49 @@ public class TestMinVersions extends HBa
long ts = System.currentTimeMillis() - 2000; // 2s in the past
- Put p = new Put(T1, ts-1);
- p.add(c0, c0, T2);
- region.put(p);
-
- p = new Put(T1, ts-3);
- p.add(c0, c0, T0);
- region.put(p);
-
- // now flush/compact
- region.flushcache();
- region.compactStores(true);
-
- p = new Put(T1, ts);
- p.add(c0, c0, T3);
- region.put(p);
-
- p = new Put(T1, ts-2);
- p.add(c0, c0, T1);
- region.put(p);
-
- p = new Put(T1, ts-3);
- p.add(c0, c0, T0);
- region.put(p);
-
- // newest version in the memstore
- // the 2nd oldest in the store file
- // and the 3rd, 4th oldest also in the memstore
-
- Get g = new Get(T1);
- g.setMaxVersions();
- Result r = region.get(g, null); // this'll use ScanWildcardColumnTracker
- checkResult(r, c0, T3,T2,T1);
-
- g = new Get(T1);
- g.setMaxVersions();
- g.addColumn(c0, c0);
- r = region.get(g, null); // this'll use ExplicitColumnTracker
- checkResult(r, c0, T3,T2,T1);
+ try {
+ Put p = new Put(T1, ts-1);
+ p.add(c0, c0, T2);
+ region.put(p);
+
+ p = new Put(T1, ts-3);
+ p.add(c0, c0, T0);
+ region.put(p);
+
+ // now flush/compact
+ region.flushcache();
+ region.compactStores(true);
+
+ p = new Put(T1, ts);
+ p.add(c0, c0, T3);
+ region.put(p);
+
+ p = new Put(T1, ts-2);
+ p.add(c0, c0, T1);
+ region.put(p);
+
+ p = new Put(T1, ts-3);
+ p.add(c0, c0, T0);
+ region.put(p);
+
+ // newest version in the memstore
+ // the 2nd oldest in the store file
+ // and the 3rd, 4th oldest also in the memstore
+
+ Get g = new Get(T1);
+ g.setMaxVersions();
+ Result r = region.get(g, null); // this'll use ScanWildcardColumnTracker
+ checkResult(r, c0, T3,T2,T1);
+
+ g = new Get(T1);
+ g.setMaxVersions();
+ g.addColumn(c0, c0);
+ r = region.get(g, null); // this'll use ExplicitColumnTracker
+ checkResult(r, c0, T3,T2,T1);
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
}
/**
@@ -146,47 +155,52 @@ public class TestMinVersions extends HBa
long ts = System.currentTimeMillis() - 2000; // 2s in the past
- Put p = new Put(T1, ts-2);
- p.add(c0, c0, T1);
- region.put(p);
-
- p = new Put(T1, ts-1);
- p.add(c0, c0, T2);
- region.put(p);
-
- p = new Put(T1, ts);
- p.add(c0, c0, T3);
- region.put(p);
-
- Delete d = new Delete(T1, ts-1, null);
- region.delete(d, null, true);
-
- Get g = new Get(T1);
- g.setMaxVersions();
- Result r = region.get(g, null); // this'll use ScanWildcardColumnTracker
- checkResult(r, c0, T3);
-
- g = new Get(T1);
- g.setMaxVersions();
- g.addColumn(c0, c0);
- r = region.get(g, null); // this'll use ExplicitColumnTracker
- checkResult(r, c0, T3);
-
- // now flush/compact
- region.flushcache();
- region.compactStores(true);
-
- // try again
- g = new Get(T1);
- g.setMaxVersions();
- r = region.get(g, null); // this'll use ScanWildcardColumnTracker
- checkResult(r, c0, T3);
-
- g = new Get(T1);
- g.setMaxVersions();
- g.addColumn(c0, c0);
- r = region.get(g, null); // this'll use ExplicitColumnTracker
- checkResult(r, c0, T3);
+ try {
+ Put p = new Put(T1, ts-2);
+ p.add(c0, c0, T1);
+ region.put(p);
+
+ p = new Put(T1, ts-1);
+ p.add(c0, c0, T2);
+ region.put(p);
+
+ p = new Put(T1, ts);
+ p.add(c0, c0, T3);
+ region.put(p);
+
+ Delete d = new Delete(T1, ts-1, null);
+ region.delete(d, null, true);
+
+ Get g = new Get(T1);
+ g.setMaxVersions();
+ Result r = region.get(g, null); // this'll use ScanWildcardColumnTracker
+ checkResult(r, c0, T3);
+
+ g = new Get(T1);
+ g.setMaxVersions();
+ g.addColumn(c0, c0);
+ r = region.get(g, null); // this'll use ExplicitColumnTracker
+ checkResult(r, c0, T3);
+
+ // now flush/compact
+ region.flushcache();
+ region.compactStores(true);
+
+ // try again
+ g = new Get(T1);
+ g.setMaxVersions();
+ r = region.get(g, null); // this'll use ScanWildcardColumnTracker
+ checkResult(r, c0, T3);
+
+ g = new Get(T1);
+ g.setMaxVersions();
+ g.addColumn(c0, c0);
+ r = region.get(g, null); // this'll use ExplicitColumnTracker
+ checkResult(r, c0, T3);
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
}
/**
@@ -198,63 +212,68 @@ public class TestMinVersions extends HBa
long ts = System.currentTimeMillis() - 2000; // 2s in the past
- // 2nd version
- Put p = new Put(T1, ts-2);
- p.add(c0, c0, T2);
- region.put(p);
-
- // 3rd version
- p = new Put(T1, ts-1);
- p.add(c0, c0, T3);
- region.put(p);
-
- // 4th version
- p = new Put(T1, ts);
- p.add(c0, c0, T4);
- region.put(p);
-
- // now flush/compact
- region.flushcache();
- region.compactStores(true);
-
- // now put the first version (backdated)
- p = new Put(T1, ts-3);
- p.add(c0, c0, T1);
- region.put(p);
-
- // now the latest change is in the memstore,
- // but it is not the latest version
-
- Result r = region.get(new Get(T1), null);
- checkResult(r, c0, T4);
-
- Get g = new Get(T1);
- g.setMaxVersions();
- r = region.get(g, null); // this'll use ScanWildcardColumnTracker
- checkResult(r, c0, T4,T3);
-
- g = new Get(T1);
- g.setMaxVersions();
- g.addColumn(c0, c0);
- r = region.get(g, null); // this'll use ExplicitColumnTracker
- checkResult(r, c0, T4,T3);
-
- p = new Put(T1, ts+1);
- p.add(c0, c0, T5);
- region.put(p);
-
- // now the latest version is in the memstore
-
- g = new Get(T1);
- g.setMaxVersions();
- r = region.get(g, null); // this'll use ScanWildcardColumnTracker
- checkResult(r, c0, T5,T4);
-
- g = new Get(T1);
- g.setMaxVersions();
- g.addColumn(c0, c0);
- r = region.get(g, null); // this'll use ExplicitColumnTracker
- checkResult(r, c0, T5,T4);
+ try {
+ // 2nd version
+ Put p = new Put(T1, ts-2);
+ p.add(c0, c0, T2);
+ region.put(p);
+
+ // 3rd version
+ p = new Put(T1, ts-1);
+ p.add(c0, c0, T3);
+ region.put(p);
+
+ // 4th version
+ p = new Put(T1, ts);
+ p.add(c0, c0, T4);
+ region.put(p);
+
+ // now flush/compact
+ region.flushcache();
+ region.compactStores(true);
+
+ // now put the first version (backdated)
+ p = new Put(T1, ts-3);
+ p.add(c0, c0, T1);
+ region.put(p);
+
+ // now the latest change is in the memstore,
+ // but it is not the latest version
+
+ Result r = region.get(new Get(T1), null);
+ checkResult(r, c0, T4);
+
+ Get g = new Get(T1);
+ g.setMaxVersions();
+ r = region.get(g, null); // this'll use ScanWildcardColumnTracker
+ checkResult(r, c0, T4,T3);
+
+ g = new Get(T1);
+ g.setMaxVersions();
+ g.addColumn(c0, c0);
+ r = region.get(g, null); // this'll use ExplicitColumnTracker
+ checkResult(r, c0, T4,T3);
+
+ p = new Put(T1, ts+1);
+ p.add(c0, c0, T5);
+ region.put(p);
+
+ // now the latest version is in the memstore
+
+ g = new Get(T1);
+ g.setMaxVersions();
+ r = region.get(g, null); // this'll use ScanWildcardColumnTracker
+ checkResult(r, c0, T5,T4);
+
+ g = new Get(T1);
+ g.setMaxVersions();
+ g.addColumn(c0, c0);
+ r = region.get(g, null); // this'll use ExplicitColumnTracker
+ checkResult(r, c0, T5,T4);
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
}
/**
@@ -265,81 +284,86 @@ public class TestMinVersions extends HBa
HTableDescriptor htd = createTableDescriptor(getName(), 2, 1000, 1);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis() - 2000; // 2s in the past
+ try {
+ long ts = System.currentTimeMillis() - 2000; // 2s in the past
- // 1st version
- Put p = new Put(T1, ts-3);
- p.add(c0, c0, T1);
- region.put(p);
-
- // 2nd version
- p = new Put(T1, ts-2);
- p.add(c0, c0, T2);
- region.put(p);
-
- // 3rd version
- p = new Put(T1, ts-1);
- p.add(c0, c0, T3);
- region.put(p);
-
- // 4th version
- p = new Put(T1, ts);
- p.add(c0, c0, T4);
- region.put(p);
-
- Result r = region.get(new Get(T1), null);
- checkResult(r, c0, T4);
-
- Get g = new Get(T1);
- g.setTimeRange(0L, ts+1);
- r = region.get(g, null);
- checkResult(r, c0, T4);
-
- // oldest version still exists
- g.setTimeRange(0L, ts-2);
- r = region.get(g, null);
- checkResult(r, c0, T1);
-
- // gets see only available versions
- // even before compactions
- g = new Get(T1);
- g.setMaxVersions();
- r = region.get(g, null); // this'll use ScanWildcardColumnTracker
- checkResult(r, c0, T4,T3);
-
- g = new Get(T1);
- g.setMaxVersions();
- g.addColumn(c0, c0);
- r = region.get(g, null); // this'll use ExplicitColumnTracker
- checkResult(r, c0, T4,T3);
-
- // now flush
- region.flushcache();
-
- // with HBASE-4241 a flush will eliminate the expired rows
- g = new Get(T1);
- g.setTimeRange(0L, ts-2);
- r = region.get(g, null);
- assertTrue(r.isEmpty());
-
- // major compaction
- region.compactStores(true);
-
- // after compaction the 4th version is still available
- g = new Get(T1);
- g.setTimeRange(0L, ts+1);
- r = region.get(g, null);
- checkResult(r, c0, T4);
-
- // so is the 3rd
- g.setTimeRange(0L, ts);
- r = region.get(g, null);
- checkResult(r, c0, T3);
-
- // but the 2nd and earlier versions are gone
- g.setTimeRange(0L, ts-1);
- r = region.get(g, null);
- assertTrue(r.isEmpty());
+ // 1st version
+ Put p = new Put(T1, ts-3);
+ p.add(c0, c0, T1);
+ region.put(p);
+
+ // 2nd version
+ p = new Put(T1, ts-2);
+ p.add(c0, c0, T2);
+ region.put(p);
+
+ // 3rd version
+ p = new Put(T1, ts-1);
+ p.add(c0, c0, T3);
+ region.put(p);
+
+ // 4th version
+ p = new Put(T1, ts);
+ p.add(c0, c0, T4);
+ region.put(p);
+
+ Result r = region.get(new Get(T1), null);
+ checkResult(r, c0, T4);
+
+ Get g = new Get(T1);
+ g.setTimeRange(0L, ts+1);
+ r = region.get(g, null);
+ checkResult(r, c0, T4);
+
+ // oldest version still exists
+ g.setTimeRange(0L, ts-2);
+ r = region.get(g, null);
+ checkResult(r, c0, T1);
+
+ // gets see only available versions
+ // even before compactions
+ g = new Get(T1);
+ g.setMaxVersions();
+ r = region.get(g, null); // this'll use ScanWildcardColumnTracker
+ checkResult(r, c0, T4,T3);
+
+ g = new Get(T1);
+ g.setMaxVersions();
+ g.addColumn(c0, c0);
+ r = region.get(g, null); // this'll use ExplicitColumnTracker
+ checkResult(r, c0, T4,T3);
+
+ // now flush
+ region.flushcache();
+
+ // with HBASE-4241 a flush will eliminate the expired rows
+ g = new Get(T1);
+ g.setTimeRange(0L, ts-2);
+ r = region.get(g, null);
+ assertTrue(r.isEmpty());
+
+ // major compaction
+ region.compactStores(true);
+
+ // after compaction the 4th version is still available
+ g = new Get(T1);
+ g.setTimeRange(0L, ts+1);
+ r = region.get(g, null);
+ checkResult(r, c0, T4);
+
+ // so is the 3rd
+ g.setTimeRange(0L, ts);
+ r = region.get(g, null);
+ checkResult(r, c0, T3);
+
+ // but the 2nd and earlier versions are gone
+ g.setTimeRange(0L, ts-1);
+ r = region.get(g, null);
+ assertTrue(r.isEmpty());
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
}
/**
@@ -353,61 +377,66 @@ public class TestMinVersions extends HBa
long ts = System.currentTimeMillis() - 2000; // 2s in the past
- Put p = new Put(T1, ts-3);
- p.add(c0, c0, T0);
- p.add(c1, c1, T0);
- region.put(p);
-
- p = new Put(T1, ts-2);
- p.add(c0, c0, T1);
- p.add(c1, c1, T1);
- region.put(p);
-
- p = new Put(T1, ts-1);
- p.add(c0, c0, T2);
- p.add(c1, c1, T2);
- region.put(p);
-
- p = new Put(T1, ts);
- p.add(c0, c0, T3);
- p.add(c1, c1, T3);
- region.put(p);
-
- List<Long> tss = new ArrayList<Long>();
- tss.add(ts-1);
- tss.add(ts-2);
-
- Get g = new Get(T1);
- g.addColumn(c1,c1);
- g.setFilter(new TimestampsFilter(tss));
- g.setMaxVersions();
- Result r = region.get(g, null);
- checkResult(r, c1, T2,T1);
-
- g = new Get(T1);
- g.addColumn(c0,c0);
- g.setFilter(new TimestampsFilter(tss));
- g.setMaxVersions();
- r = region.get(g, null);
- checkResult(r, c0, T2,T1);
-
- // now flush/compact
- region.flushcache();
- region.compactStores(true);
-
- g = new Get(T1);
- g.addColumn(c1,c1);
- g.setFilter(new TimestampsFilter(tss));
- g.setMaxVersions();
- r = region.get(g, null);
- checkResult(r, c1, T2);
-
- g = new Get(T1);
- g.addColumn(c0,c0);
- g.setFilter(new TimestampsFilter(tss));
- g.setMaxVersions();
- r = region.get(g, null);
- checkResult(r, c0, T2);
+ try {
+ Put p = new Put(T1, ts-3);
+ p.add(c0, c0, T0);
+ p.add(c1, c1, T0);
+ region.put(p);
+
+ p = new Put(T1, ts-2);
+ p.add(c0, c0, T1);
+ p.add(c1, c1, T1);
+ region.put(p);
+
+ p = new Put(T1, ts-1);
+ p.add(c0, c0, T2);
+ p.add(c1, c1, T2);
+ region.put(p);
+
+ p = new Put(T1, ts);
+ p.add(c0, c0, T3);
+ p.add(c1, c1, T3);
+ region.put(p);
+
+ List<Long> tss = new ArrayList<Long>();
+ tss.add(ts-1);
+ tss.add(ts-2);
+
+ Get g = new Get(T1);
+ g.addColumn(c1,c1);
+ g.setFilter(new TimestampsFilter(tss));
+ g.setMaxVersions();
+ Result r = region.get(g, null);
+ checkResult(r, c1, T2,T1);
+
+ g = new Get(T1);
+ g.addColumn(c0,c0);
+ g.setFilter(new TimestampsFilter(tss));
+ g.setMaxVersions();
+ r = region.get(g, null);
+ checkResult(r, c0, T2,T1);
+
+ // now flush/compact
+ region.flushcache();
+ region.compactStores(true);
+
+ g = new Get(T1);
+ g.addColumn(c1,c1);
+ g.setFilter(new TimestampsFilter(tss));
+ g.setMaxVersions();
+ r = region.get(g, null);
+ checkResult(r, c1, T2);
+
+ g = new Get(T1);
+ g.addColumn(c0,c0);
+ g.setFilter(new TimestampsFilter(tss));
+ g.setMaxVersions();
+ r = region.get(g, null);
+ checkResult(r, c0, T2);
+ } finally {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
}
private void checkResult(Result r, byte[] col, byte[] ... vals) {
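
Every hunk in TestMinVersions above applies the same shape: create the region, run the puts, flushes, compactions and assertions inside a try block, and release the region and its write-ahead log in a finally block so they are cleaned up even when an assertion fails. A minimal sketch of that shape (the method name is illustrative; createTableDescriptor and createNewHRegion come from HBaseTestCase, and the descriptor arguments are taken verbatim from the tests above):

    public void testMinVersionsCleanupShape() throws Exception {
      HTableDescriptor htd = createTableDescriptor(getName(), 2, 1000, 1);
      HRegion region = createNewHRegion(htd, null, null);
      try {
        // puts, flushcache()/compactStores(true) and the assertions go here
      } finally {
        region.close();                   // release memstore and store files
        region.getLog().closeAndDelete(); // close and remove the region's HLog
      }
    }
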
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java Mon Apr 23 03:57:40 2012
@@ -131,140 +131,143 @@ public class TestMultiColumnScanner {
@Test
public void testMultiColumnScanner() throws IOException {
HRegion region = createRegion(TABLE_NAME, comprAlgo, bloomType);
- List<String> rows = sequentialStrings("row", NUM_ROWS);
- List<String> qualifiers = sequentialStrings("qual", NUM_COLUMNS);
- List<KeyValue> kvs = new ArrayList<KeyValue>();
- Set<String> keySet = new HashSet<String>();
-
- // A map from <row>_<qualifier> to the most recent delete timestamp for
- // that column.
- Map<String, Long> lastDelTimeMap = new HashMap<String, Long>();
-
- Random rand = new Random(29372937L);
- Set<String> rowQualSkip = new HashSet<String>();
-
- // Skip some columns in some rows. We need to test scanning over a set
- // of columns when some of the columns are not there.
- for (String row : rows)
+ try {
+ List<String> rows = sequentialStrings("row", NUM_ROWS);
+ List<String> qualifiers = sequentialStrings("qual", NUM_COLUMNS);
+ List<KeyValue> kvs = new ArrayList<KeyValue>();
+ Set<String> keySet = new HashSet<String>();
+
+ // A map from <row>_<qualifier> to the most recent delete timestamp for
+ // that column.
+ Map<String, Long> lastDelTimeMap = new HashMap<String, Long>();
+
+ Random rand = new Random(29372937L);
+ Set<String> rowQualSkip = new HashSet<String>();
+
+ // Skip some columns in some rows. We need to test scanning over a set
+ // of columns when some of the columns are not there.
+ for (String row : rows)
+ for (String qual : qualifiers)
+ if (rand.nextDouble() < COLUMN_SKIP_IN_ROW_PROB) {
+ LOG.info("Skipping " + qual + " in row " + row);
+ rowQualSkip.add(rowQualKey(row, qual));
+ }
+
+ // Also skip some columns in all rows.
for (String qual : qualifiers)
- if (rand.nextDouble() < COLUMN_SKIP_IN_ROW_PROB) {
- LOG.info("Skipping " + qual + " in row " + row);
- rowQualSkip.add(rowQualKey(row, qual));
+ if (rand.nextDouble() < COLUMN_SKIP_EVERYWHERE_PROB) {
+ LOG.info("Skipping " + qual + " in all rows");
+ for (String row : rows)
+ rowQualSkip.add(rowQualKey(row, qual));
}
- // Also skip some columns in all rows.
- for (String qual : qualifiers)
- if (rand.nextDouble() < COLUMN_SKIP_EVERYWHERE_PROB) {
- LOG.info("Skipping " + qual + " in all rows");
- for (String row : rows)
- rowQualSkip.add(rowQualKey(row, qual));
- }
-
- for (int iFlush = 0; iFlush < NUM_FLUSHES; ++iFlush) {
- for (String qual : qualifiers) {
- // This is where we decide to include or not include this column into
- // this store file, regardless of row and timestamp.
- if (rand.nextDouble() < COLUMN_SKIP_IN_STORE_FILE_PROB)
- continue;
-
- byte[] qualBytes = Bytes.toBytes(qual);
- for (String row : rows) {
- Put p = new Put(Bytes.toBytes(row));
- for (long ts : TIMESTAMPS) {
- String value = createValue(row, qual, ts);
- KeyValue kv = KeyValueTestUtil.create(row, FAMILY, qual, ts,
- value);
- assertEquals(kv.getTimestamp(), ts);
- p.add(kv);
- String keyAsString = kv.toString();
- if (!keySet.contains(keyAsString)) {
- keySet.add(keyAsString);
- kvs.add(kv);
+ for (int iFlush = 0; iFlush < NUM_FLUSHES; ++iFlush) {
+ for (String qual : qualifiers) {
+ // This is where we decide to include or not include this column into
+ // this store file, regardless of row and timestamp.
+ if (rand.nextDouble() < COLUMN_SKIP_IN_STORE_FILE_PROB)
+ continue;
+
+ byte[] qualBytes = Bytes.toBytes(qual);
+ for (String row : rows) {
+ Put p = new Put(Bytes.toBytes(row));
+ for (long ts : TIMESTAMPS) {
+ String value = createValue(row, qual, ts);
+ KeyValue kv = KeyValueTestUtil.create(row, FAMILY, qual, ts,
+ value);
+ assertEquals(kv.getTimestamp(), ts);
+ p.add(kv);
+ String keyAsString = kv.toString();
+ if (!keySet.contains(keyAsString)) {
+ keySet.add(keyAsString);
+ kvs.add(kv);
+ }
}
- }
- region.put(p);
+ region.put(p);
- Delete d = new Delete(Bytes.toBytes(row));
- boolean deletedSomething = false;
- for (long ts : TIMESTAMPS)
- if (rand.nextDouble() < DELETE_PROBABILITY) {
- d.deleteColumns(FAMILY_BYTES, qualBytes, ts);
- String rowAndQual = row + "_" + qual;
- Long whenDeleted = lastDelTimeMap.get(rowAndQual);
- lastDelTimeMap.put(rowAndQual, whenDeleted == null ? ts
- : Math.max(ts, whenDeleted));
- deletedSomething = true;
- }
- if (deletedSomething)
- region.delete(d, null, true);
+ Delete d = new Delete(Bytes.toBytes(row));
+ boolean deletedSomething = false;
+ for (long ts : TIMESTAMPS)
+ if (rand.nextDouble() < DELETE_PROBABILITY) {
+ d.deleteColumns(FAMILY_BYTES, qualBytes, ts);
+ String rowAndQual = row + "_" + qual;
+ Long whenDeleted = lastDelTimeMap.get(rowAndQual);
+ lastDelTimeMap.put(rowAndQual, whenDeleted == null ? ts
+ : Math.max(ts, whenDeleted));
+ deletedSomething = true;
+ }
+ if (deletedSomething)
+ region.delete(d, null, true);
+ }
}
+ region.flushcache();
}
- region.flushcache();
- }
- Collections.sort(kvs, KeyValue.COMPARATOR);
- for (int maxVersions = 1; maxVersions <= TIMESTAMPS.length; ++maxVersions) {
- for (int columnBitMask = 1; columnBitMask <= MAX_COLUMN_BIT_MASK; ++columnBitMask) {
- Scan scan = new Scan();
- scan.setMaxVersions(maxVersions);
- Set<String> qualSet = new TreeSet<String>();
- {
- int columnMaskTmp = columnBitMask;
- for (String qual : qualifiers) {
- if ((columnMaskTmp & 1) != 0) {
- scan.addColumn(FAMILY_BYTES, Bytes.toBytes(qual));
- qualSet.add(qual);
+ Collections.sort(kvs, KeyValue.COMPARATOR);
+ for (int maxVersions = 1; maxVersions <= TIMESTAMPS.length; ++maxVersions) {
+ for (int columnBitMask = 1; columnBitMask <= MAX_COLUMN_BIT_MASK; ++columnBitMask) {
+ Scan scan = new Scan();
+ scan.setMaxVersions(maxVersions);
+ Set<String> qualSet = new TreeSet<String>();
+ {
+ int columnMaskTmp = columnBitMask;
+ for (String qual : qualifiers) {
+ if ((columnMaskTmp & 1) != 0) {
+ scan.addColumn(FAMILY_BYTES, Bytes.toBytes(qual));
+ qualSet.add(qual);
+ }
+ columnMaskTmp >>= 1;
}
- columnMaskTmp >>= 1;
+ assertEquals(0, columnMaskTmp);
}
- assertEquals(0, columnMaskTmp);
- }
- InternalScanner scanner = region.getScanner(scan);
- List<KeyValue> results = new ArrayList<KeyValue>();
+ InternalScanner scanner = region.getScanner(scan);
+ List<KeyValue> results = new ArrayList<KeyValue>();
- int kvPos = 0;
- int numResults = 0;
- String queryInfo = "columns queried: " + qualSet + " (columnBitMask="
- + columnBitMask + "), maxVersions=" + maxVersions;
-
- while (scanner.next(results) || results.size() > 0) {
- for (KeyValue kv : results) {
- while (kvPos < kvs.size()
- && !matchesQuery(kvs.get(kvPos), qualSet, maxVersions,
- lastDelTimeMap)) {
+ int kvPos = 0;
+ int numResults = 0;
+ String queryInfo = "columns queried: " + qualSet + " (columnBitMask="
+ + columnBitMask + "), maxVersions=" + maxVersions;
+
+ while (scanner.next(results) || results.size() > 0) {
+ for (KeyValue kv : results) {
+ while (kvPos < kvs.size()
+ && !matchesQuery(kvs.get(kvPos), qualSet, maxVersions,
+ lastDelTimeMap)) {
+ ++kvPos;
+ }
+ String rowQual = getRowQualStr(kv);
+ String deleteInfo = "";
+ Long lastDelTS = lastDelTimeMap.get(rowQual);
+ if (lastDelTS != null) {
+ deleteInfo = "; last timestamp when row/column " + rowQual
+ + " was deleted: " + lastDelTS;
+ }
+ assertTrue("Scanner returned additional key/value: " + kv + ", "
+ + queryInfo + deleteInfo + ";", kvPos < kvs.size());
+ assertEquals("Scanner returned wrong key/value; " + queryInfo
+ + deleteInfo + ";", kvs.get(kvPos), kv);
++kvPos;
+ ++numResults;
}
- String rowQual = getRowQualStr(kv);
- String deleteInfo = "";
- Long lastDelTS = lastDelTimeMap.get(rowQual);
- if (lastDelTS != null) {
- deleteInfo = "; last timestamp when row/column " + rowQual
- + " was deleted: " + lastDelTS;
- }
- assertTrue("Scanner returned additional key/value: " + kv + ", "
- + queryInfo + deleteInfo + ";", kvPos < kvs.size());
- assertEquals("Scanner returned wrong key/value; " + queryInfo
- + deleteInfo + ";", kvs.get(kvPos), kv);
- ++kvPos;
- ++numResults;
+ results.clear();
+ }
+ for (; kvPos < kvs.size(); ++kvPos) {
+ KeyValue remainingKV = kvs.get(kvPos);
+ assertFalse("Matching column not returned by scanner: "
+ + remainingKV + ", " + queryInfo + ", results returned: "
+ + numResults, matchesQuery(remainingKV, qualSet, maxVersions,
+ lastDelTimeMap));
}
- results.clear();
- }
- for (; kvPos < kvs.size(); ++kvPos) {
- KeyValue remainingKV = kvs.get(kvPos);
- assertFalse("Matching column not returned by scanner: "
- + remainingKV + ", " + queryInfo + ", results returned: "
- + numResults, matchesQuery(remainingKV, qualSet, maxVersions,
- lastDelTimeMap));
}
}
+ assertTrue("This test is supposed to delete at least some row/column " +
+ "pairs", lastDelTimeMap.size() > 0);
+ LOG.info("Number of row/col pairs deleted at least once: " +
+ lastDelTimeMap.size());
+ } finally {
+ HRegion.closeHRegion(region);
}
- assertTrue("This test is supposed to delete at least some row/column " +
- "pairs", lastDelTimeMap.size() > 0);
- LOG.info("Number of row/col pairs deleted at least once: " +
- lastDelTimeMap.size());
- region.close();
}
static HRegion createRegion(String tableName,
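
TestMultiColumnScanner (and several tests further down) make the same move, but close through the static helper: the unconditional region.close() at the end of the method becomes HRegion.closeHRegion(region) inside a finally block, so cleanup also runs when an assertion fails partway through. Assuming closeHRegion is the null-safe wrapper its call sites here imply, the resulting shape is roughly:

    @Test
    public void testMultiColumnScanner() throws IOException {
      HRegion region = createRegion(TABLE_NAME, comprAlgo, bloomType);
      try {
        // write the rows, flush between batches, then scan and verify the results
      } finally {
        // assumed to be equivalent to: if (region != null) region.close();
        HRegion.closeHRegion(region);
      }
    }
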
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java Mon Apr 23 03:57:40 2012
@@ -70,31 +70,34 @@ public class TestResettingCounters {
}
}
HRegion region = HRegion.createHRegion(hri, path, conf, htd);
+ try {
+ Increment odd = new Increment(rows[0]);
+ Increment even = new Increment(rows[0]);
+ Increment all = new Increment(rows[0]);
+ for (int i=0;i<numQualifiers;i++) {
+ if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
+ else odd.addColumn(families[0], qualifiers[i], 1);
+ all.addColumn(families[0], qualifiers[i], 1);
+ }
- Increment odd = new Increment(rows[0]);
- Increment even = new Increment(rows[0]);
- Increment all = new Increment(rows[0]);
- for (int i=0;i<numQualifiers;i++) {
- if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
- else odd.addColumn(families[0], qualifiers[i], 1);
- all.addColumn(families[0], qualifiers[i], 1);
- }
-
- // increment odd qualifiers 5 times and flush
- for (int i=0;i<5;i++) region.increment(odd, null, false);
- region.flushcache();
+ // increment odd qualifiers 5 times and flush
+ for (int i=0;i<5;i++) region.increment(odd, null, false);
+ region.flushcache();
- // increment even qualifiers 5 times
- for (int i=0;i<5;i++) region.increment(even, null, false);
+ // increment even qualifiers 5 times
+ for (int i=0;i<5;i++) region.increment(even, null, false);
- // increment all qualifiers, should have value=6 for all
- Result result = region.increment(all, null, false);
- assertEquals(numQualifiers, result.size());
- KeyValue [] kvs = result.raw();
- for (int i=0;i<kvs.length;i++) {
- System.out.println(kvs[i].toString());
- assertTrue(Bytes.equals(kvs[i].getQualifier(), qualifiers[i]));
- assertEquals(6, Bytes.toLong(kvs[i].getValue()));
+ // increment all qualifiers, should have value=6 for all
+ Result result = region.increment(all, null, false);
+ assertEquals(numQualifiers, result.size());
+ KeyValue [] kvs = result.raw();
+ for (int i=0;i<kvs.length;i++) {
+ System.out.println(kvs[i].toString());
+ assertTrue(Bytes.equals(kvs[i].getQualifier(), qualifiers[i]));
+ assertEquals(6, Bytes.toLong(kvs[i].getValue()));
+ }
+ } finally {
+ HRegion.closeHRegion(region);
}
}
}
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java Mon Apr 23 03:57:40 2012
@@ -96,13 +96,16 @@ public class TestScanWithBloomError {
@Test
public void testThreeStoreFiles() throws IOException {
- region = createRegion(TABLE_NAME, Compression.Algorithm.GZ, bloomType);
- createStoreFile(new int[] {1, 2, 6});
- createStoreFile(new int[] {1, 2, 3, 7});
- createStoreFile(new int[] {1, 9});
- scanColSet(new int[]{1, 4, 6, 7}, new int[]{1, 6, 7});
-
- region.close();
+ this.region = createRegion(TABLE_NAME, Compression.Algorithm.GZ, bloomType);
+ try {
+ createStoreFile(new int[] {1, 2, 6});
+ createStoreFile(new int[] {1, 2, 3, 7});
+ createStoreFile(new int[] {1, 9});
+ scanColSet(new int[]{1, 4, 6, 7}, new int[]{1, 6, 7});
+ } finally {
+ HRegion.closeHRegion(this.region);
+ this.region = null;
+ }
}
private void scanColSet(int[] colSet, int[] expectedResultCols)
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java Mon Apr 23 03:57:40 2012
@@ -321,7 +321,9 @@ public class TestSplitTransaction {
HColumnDescriptor hcd = new HColumnDescriptor(CF);
htd.addFamily(hcd);
HRegionInfo hri = new HRegionInfo(htd.getName(), STARTROW, ENDROW);
- HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
+ HRegion r = HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
+ r.close();
+ r.getLog().closeAndDelete();
return HRegion.openHRegion(testdir, hri, htd, wal,
TEST_UTIL.getConfiguration());
}
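
In TestSplitTransaction the region returned by HRegion.createHRegion is only needed to bootstrap the region directory on disk; the test wants to exercise the region against the WAL it passes in itself. The fix therefore closes the bootstrap region, deletes the log createHRegion opened for it, and only then reopens the region on the supplied WAL. A sketch of that sequence (variable names are illustrative; hri, htd, testdir and wal are prepared exactly as in the hunk above):

    HRegion bootstrap = HRegion.createHRegion(hri, testdir,
        TEST_UTIL.getConfiguration(), htd);
    bootstrap.close();                    // close the region createHRegion opened
    bootstrap.getLog().closeAndDelete();  // and drop the log it created alongside
    // Reopen the same region, this time on the WAL supplied by the test.
    HRegion region = HRegion.openHRegion(testdir, hri, htd, wal,
        TEST_UTIL.getConfiguration());

The TestWALReplay hunks further down apply the same close-before-reopen treatment to the regions that are created only to lay down table metadata.
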
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java Mon Apr 23 03:57:40 2012
@@ -97,28 +97,32 @@ public class TestCloseRegionHandler {
HRegion region =
HRegion.createHRegion(hri, HTU.getDataTestDir(),
HTU.getConfiguration(), htd);
- assertNotNull(region);
- // Spy on the region so can throw exception when close is called.
- HRegion spy = Mockito.spy(region);
- final boolean abort = false;
- Mockito.when(spy.close(abort)).
- thenThrow(new RuntimeException("Mocked failed close!"));
- // The CloseRegionHandler will try to get an HRegion that corresponds
- // to the passed hri -- so insert the region into the online region Set.
- rss.addToOnlineRegions(spy);
- // Assert the Server is NOT stopped before we call close region.
- assertFalse(server.isStopped());
- CloseRegionHandler handler =
- new CloseRegionHandler(server, rss, hri, false, false, -1);
- boolean throwable = false;
try {
- handler.process();
- } catch (Throwable t) {
- throwable = true;
+ assertNotNull(region);
+ // Spy on the region so can throw exception when close is called.
+ HRegion spy = Mockito.spy(region);
+ final boolean abort = false;
+ Mockito.when(spy.close(abort)).
+ thenThrow(new RuntimeException("Mocked failed close!"));
+ // The CloseRegionHandler will try to get an HRegion that corresponds
+ // to the passed hri -- so insert the region into the online region Set.
+ rss.addToOnlineRegions(spy);
+ // Assert the Server is NOT stopped before we call close region.
+ assertFalse(server.isStopped());
+ CloseRegionHandler handler =
+ new CloseRegionHandler(server, rss, hri, false, false, -1);
+ boolean throwable = false;
+ try {
+ handler.process();
+ } catch (Throwable t) {
+ throwable = true;
+ } finally {
+ assertTrue(throwable);
+ // Abort calls stop so stopped flag should be set.
+ assertTrue(server.isStopped());
+ }
} finally {
- assertTrue(throwable);
- // Abort calls stop so stopped flag should be set.
- assertTrue(server.isStopped());
+ HRegion.closeHRegion(region);
}
}
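
TestCloseRegionHandler keeps its existing inner try/catch/finally, which asserts that handler.process() threw and that the server ended up stopped, and wraps it in an outer try whose finally closes the region created at the top of the test. The nesting means the assertions still run first, and the region is released regardless of how they turn out. Condensed, the structure is:

    HRegion region = HRegion.createHRegion(hri, HTU.getDataTestDir(),
        HTU.getConfiguration(), htd);
    try {
      // spy on the region, add it to rss's online regions, and build
      // the CloseRegionHandler as in the hunk above
      boolean throwable = false;
      try {
        handler.process();
      } catch (Throwable t) {
        throwable = true;
      } finally {
        assertTrue(throwable);           // the mocked close was expected to fail
        assertTrue(server.isStopped());  // abort calls stop
      }
    } finally {
      HRegion.closeHRegion(region);      // release the region either way
    }
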
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java Mon Apr 23 03:57:40 2012
@@ -99,30 +99,34 @@ public class TestOpenRegionHandler {
HRegion.createHRegion(hri, HTU.getDataTestDir(), HTU
.getConfiguration(), htd);
assertNotNull(region);
- OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd) {
- HRegion openRegion() {
- // Open region first, then remove znode as though it'd been hijacked.
- HRegion region = super.openRegion();
-
- // Don't actually open region BUT remove the znode as though it'd
- // been hijacked on us.
- ZooKeeperWatcher zkw = this.server.getZooKeeper();
- String node = ZKAssign.getNodeName(zkw, hri.getEncodedName());
- try {
- ZKUtil.deleteNodeFailSilent(zkw, node);
- } catch (KeeperException e) {
- throw new RuntimeException("Ugh failed delete of " + node, e);
+ try {
+ OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd) {
+ HRegion openRegion() {
+ // Open region first, then remove znode as though it'd been hijacked.
+ HRegion region = super.openRegion();
+
+ // Don't actually open region BUT remove the znode as though it'd
+ // been hijacked on us.
+ ZooKeeperWatcher zkw = this.server.getZooKeeper();
+ String node = ZKAssign.getNodeName(zkw, hri.getEncodedName());
+ try {
+ ZKUtil.deleteNodeFailSilent(zkw, node);
+ } catch (KeeperException e) {
+ throw new RuntimeException("Ugh failed delete of " + node, e);
+ }
+ return region;
}
- return region;
- }
- };
- // Call process without first creating OFFLINE region in zk, see if
- // exception or just quiet return (expected).
- handler.process();
- ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
- // Call process again but this time yank the zk znode out from under it
- // post OPENING; again will expect it to come back w/o NPE or exception.
- handler.process();
+ };
+ // Call process without first creating OFFLINE region in zk, see if
+ // exception or just quiet return (expected).
+ handler.process();
+ ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
+ // Call process again but this time yank the zk znode out from under it
+ // post OPENING; again will expect it to come back w/o NPE or exception.
+ handler.process();
+ } finally {
+ HRegion.closeHRegion(region);
+ }
}
@Test
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java Mon Apr 23 03:57:40 2012
@@ -137,7 +137,8 @@ public class TestWALReplay {
HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region2 = HRegion.createHRegion(hri,
hbaseRootDir, this.conf, htd);
-
+ region2.close();
+ region2.getLog().closeAndDelete();
final byte [] tableName = Bytes.toBytes(tableNameStr);
final byte [] rowName = tableName;
@@ -197,6 +198,8 @@ public class TestWALReplay {
final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region2 = HRegion.createHRegion(hri,
hbaseRootDir, this.conf, htd);
+ region2.close();
+ region2.getLog().closeAndDelete();
HLog wal = createWAL(this.conf);
HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
Path f = new Path(basedir, "hfile");
@@ -256,7 +259,8 @@ public class TestWALReplay {
final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region3 = HRegion.createHRegion(hri,
hbaseRootDir, this.conf, htd);
-
+ region3.close();
+ region3.getLog().closeAndDelete();
// Write countPerFamily edits into the three families. Do a flush on one
// of the families during the load of edits so its seqid is not same as
// others to test we do right thing when different seqids.
@@ -373,7 +377,8 @@ public class TestWALReplay {
final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region3 = HRegion.createHRegion(hri,
hbaseRootDir, this.conf, htd);
-
+ region3.close();
+ region3.getLog().closeAndDelete();
// Write countPerFamily edits into the three families. Do a flush on one
// of the families during the load of edits so its seqid is not same as
// others to test we do right thing when different seqids.
@@ -439,7 +444,8 @@ public class TestWALReplay {
final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region2 = HRegion.createHRegion(hri,
hbaseRootDir, this.conf, htd);
-
+ region2.close();
+ region2.getLog().closeAndDelete();
final HLog wal = createWAL(this.conf);
final byte[] tableName = Bytes.toBytes(tableNameStr);
final byte[] rowName = tableName;
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java?rev=1329052&r1=1329051&r2=1329052&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java Mon Apr 23 03:57:40 2012
@@ -167,6 +167,13 @@ public class TestMergeTool extends HBase
@Override
public void tearDown() throws Exception {
super.tearDown();
+ for (int i = 0; i < sourceRegions.length; i++) {
+ HRegion r = regions[i];
+ if (r != null) {
+ r.close();
+ r.getLog().closeAndDelete();
+ }
+ }
TEST_UTIL.shutdownMiniCluster();
}