Posted to commits@hbase.apache.org by st...@apache.org on 2012/05/19 00:20:34 UTC
svn commit: r1340285 - in /hbase/branches/0.92: ./
src/main/java/org/apache/hadoop/hbase/regionserver/
src/main/java/org/apache/hadoop/hbase/regionserver/compactions/
src/test/java/org/apache/hadoop/hbase/regionserver/
Author: stack
Date: Fri May 18 22:20:33 2012
New Revision: 1340285
URL: http://svn.apache.org/viewvc?rev=1340285&view=rev
Log:
HBASE-5920 New Compactions Logic can silently prevent user-initiated compactions from occurring
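In short: before this patch, Store.compactSelection() applied the
"hbase.hstore.compaction.max" file cap to every request, so a user-initiated
major compaction on a store holding more than that many files was silently
downgraded to a minor compaction. The fix moves the PRIORITY_USER and
NO_PRIORITY constants from CompactSplitThread into Store and threads the
request priority through requestCompaction(), compactSelection(), and
getCompactPriority(), so a PRIORITY_USER major compaction request is always
honored.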
Modified:
hbase/branches/0.92/CHANGES.txt
hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
Modified: hbase/branches/0.92/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/CHANGES.txt?rev=1340285&r1=1340284&r2=1340285&view=diff
==============================================================================
--- hbase/branches/0.92/CHANGES.txt (original)
+++ hbase/branches/0.92/CHANGES.txt Fri May 18 22:20:33 2012
@@ -67,6 +67,8 @@ Release 0.92.2 - Unreleased
HBASE-6021 NullPointerException when running LoadTestTool without specifying compression type
HBASE-5342 Grant/Revoke global permissions (Matteo)
HBASE-6054 0.92 failing because of missing commons-io after upgrade to hadoop 1.0.3
+ HBASE-5920 New Compactions Logic can silently prevent user-initiated compactions from occurring
+ (Derek Wollenstein)
IMPROVEMENTS
HBASE-5592 Make it easier to get a table from shell (Ben West)
Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=1340285&r1=1340284&r2=1340285&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Fri May 18 22:20:33 2012
@@ -49,12 +49,6 @@ public class CompactSplitThread implemen
private final ThreadPoolExecutor splits;
private final long throttleSize;
- /* The default priority for user-specified compaction requests.
- * The user gets top priority unless we have blocking compactions. (Pri <= 0)
- */
- public static final int PRIORITY_USER = 1;
- public static final int NO_PRIORITY = Integer.MIN_VALUE;
-
/**
* Splitting should not take place if the total number of regions exceed this.
* This is not a hard limit to the number of regions but it is a guideline to
@@ -145,7 +139,7 @@ public class CompactSplitThread implemen
public synchronized boolean requestSplit(final HRegion r) {
// don't split regions that are blocking
- if (shouldSplitRegion() && r.getCompactPriority() >= PRIORITY_USER) {
+ if (shouldSplitRegion() && r.getCompactPriority() >= Store.PRIORITY_USER) {
byte[] midKey = r.checkSplit();
if (midKey != null) {
requestSplit(r, midKey);
@@ -174,13 +168,13 @@ public class CompactSplitThread implemen
public synchronized void requestCompaction(final HRegion r,
final String why) {
for(Store s : r.getStores().values()) {
- requestCompaction(r, s, why, NO_PRIORITY);
+ requestCompaction(r, s, why, Store.NO_PRIORITY);
}
}
public synchronized void requestCompaction(final HRegion r, final Store s,
final String why) {
- requestCompaction(r, s, why, NO_PRIORITY);
+ requestCompaction(r, s, why, Store.NO_PRIORITY);
}
public synchronized void requestCompaction(final HRegion r, final String why,
@@ -201,10 +195,10 @@ public class CompactSplitThread implemen
if (this.server.isStopped()) {
return;
}
- CompactionRequest cr = s.requestCompaction();
+ CompactionRequest cr = s.requestCompaction(priority);
if (cr != null) {
cr.setServer(server);
- if (priority != NO_PRIORITY) {
+ if (priority != Store.NO_PRIORITY) {
cr.setPriority(priority);
}
ThreadPoolExecutor pool = largeCompactions;
@@ -222,6 +216,10 @@ public class CompactSplitThread implemen
+ (why != null && !why.isEmpty() ? "; Because: " + why : "")
+ "; " + this);
}
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Not compacting " + r.getRegionNameAsString() + " because the compaction request selected no files");
+ }
}
}
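
The change above is a sentinel-overload pattern: the existing requestCompaction() entry points keep their signatures and forward Store.NO_PRIORITY, while priority-aware callers pass a real value. A minimal sketch of the pattern under simplified, hypothetical names (not the actual HBase classes):

    // Hypothetical simplified sketch of the sentinel-overload pattern.
    class StoreSketch {
      // User requests get top priority unless compactions are blocking (pri <= 0).
      static final int PRIORITY_USER = 1;
      static final int NO_PRIORITY = Integer.MIN_VALUE;

      // Legacy entry point: callers with no preference forward the sentinel.
      CompactionRequestSketch requestCompaction() {
        return requestCompaction(NO_PRIORITY);
      }

      // Priority-aware entry point: selection can now tell a user request
      // (PRIORITY_USER) apart from a system request (NO_PRIORITY).
      CompactionRequestSketch requestCompaction(int priority) {
        return new CompactionRequestSketch(priority);
      }
    }

    class CompactionRequestSketch {
      final int priority;
      CompactionRequestSketch(int priority) { this.priority = priority; }
    }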
Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1340285&r1=1340284&r2=1340285&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Fri May 18 22:20:33 2012
@@ -38,7 +38,6 @@ import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.SortedMap;
@@ -152,8 +151,6 @@ import org.apache.hadoop.net.DNS;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
import org.codehaus.jackson.map.ObjectMapper;
-import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
@@ -2672,9 +2669,10 @@ public class HRegionServer implements HR
if (major) {
region.triggerMajorCompaction();
}
+ LOG.trace("User-triggered compaction requested for region " + region.getRegionNameAsString());
compactSplitThread.requestCompaction(region, "User-triggered "
+ (major ? "major " : "") + "compaction",
- CompactSplitThread.PRIORITY_USER);
+ Store.PRIORITY_USER);
}
/** @return the info server */
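
For context, the hunk above is the server-side end of a client majorCompact() call. A hedged usage sketch against the 0.92-era client API (assumes a running cluster; the table name "t1" is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class TriggerUserCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        // The region server handles this RPC by calling triggerMajorCompaction()
        // and then requestCompaction(..., Store.PRIORITY_USER), as in the hunk above.
        admin.majorCompact("t1");
      }
    }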
Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1340285&r1=1340284&r2=1340285&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Fri May 18 22:20:33 2012
@@ -88,6 +88,13 @@ import com.google.common.collect.Lists;
*/
public class Store implements HeapSize {
static final Log LOG = LogFactory.getLog(Store.class);
+
+ /* The default priority for user-specified compaction requests.
+ * The user gets top priority unless we have blocking compactions. (Pri <= 0)
+ */
+ public static final int PRIORITY_USER = 1;
+ public static final int NO_PRIORITY = Integer.MIN_VALUE;
+
protected final MemStore memstore;
// This stores directory in the filesystem.
private final Path homedir;
@@ -970,6 +977,11 @@ public class Store implements HeapSize {
}
public CompactionRequest requestCompaction() {
+ return requestCompaction(NO_PRIORITY);
+ }
+
+
+ public CompactionRequest requestCompaction(int priority) {
// don't even select for compaction if writes are disabled
if (!this.region.areWritesEnabled()) {
return null;
@@ -1000,7 +1012,7 @@ public class Store implements HeapSize {
// coprocessor is overriding normal file selection
filesToCompact = candidates;
} else {
- filesToCompact = compactSelection(candidates);
+ filesToCompact = compactSelection(candidates, priority);
}
if (region.getCoprocessorHost() != null) {
@@ -1031,7 +1043,7 @@ public class Store implements HeapSize {
}
// everything went better than expected. create a compaction request
- int pri = getCompactPriority();
+ int pri = getCompactPriority(priority);
ret = new CompactionRequest(region, this, filesToCompact, isMajor, pri);
}
} catch (IOException ex) {
@@ -1049,6 +1061,18 @@ public class Store implements HeapSize {
}
}
+
+ /**
+ * Default priority version of {@link #compactSelection(java.util.List, int)}
+ * @param candidates candidate files, ordered from oldest to newest
+ * @return subset copy of the candidate list that meets compaction criteria
+ * @throws IOException
+ */
+ List<StoreFile> compactSelection(List<StoreFile> candidates) throws IOException {
+ return compactSelection(candidates, NO_PRIORITY);
+ }
+
+
/**
* Algorithm to choose which files to compact
*
@@ -1064,11 +1088,13 @@ public class Store implements HeapSize {
* "hbase.hstore.compaction.max"
* max files to compact at once (avoids OOM)
*
+ *
* @param candidates candidate files, ordered from oldest to newest
+ * @param priority the priority of this request; PRIORITY_USER marks a user-initiated compaction
* @return subset copy of candidate list that meets compaction criteria
* @throws IOException
*/
- List<StoreFile> compactSelection(List<StoreFile> candidates)
+ List<StoreFile> compactSelection(List<StoreFile> candidates, int priority)
throws IOException {
// ASSUMPTION!!! filesCompacting is locked when calling this function
@@ -1103,8 +1129,14 @@ public class Store implements HeapSize {
}
// major compact on user action or age (caveat: we have too many files)
- boolean majorcompaction = filesToCompact.size() < this.maxFilesToCompact
- && (forcemajor || isMajorCompaction(filesToCompact));
+ // Force a major compaction if this is a user-requested major compaction,
+ // or if we do not have too many files to compact and this was requested
+ // as a major compaction
+ boolean majorcompaction = (forcemajor && priority == PRIORITY_USER) ||
+ (filesToCompact.size() < this.maxFilesToCompact
+ && (forcemajor || isMajorCompaction(filesToCompact)));
+ LOG.debug(this.getHRegionInfo().getEncodedName() + " - " +
+ this.storeNameStr + ": Initiating " + (majorcompaction ? "major" : "minor") + " compaction");
if (!majorcompaction && !hasReferences(filesToCompact)) {
// we're doing a minor compaction, let's see what files are applicable
@@ -1113,6 +1145,11 @@ public class Store implements HeapSize {
// skip selection algorithm if we don't have enough files
if (filesToCompact.size() < this.minFilesToCompact) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Not compacting files because we only have " + filesToCompact.size() +
+ " files ready for compaction. Need " + this.minFilesToCompact + " to initiate.");
+
+ }
return Collections.emptyList();
}
@@ -1169,10 +1206,17 @@ public class Store implements HeapSize {
return Collections.emptyList();
}
} else {
- // all files included in this compaction, up to max
- if (filesToCompact.size() > this.maxFilesToCompact) {
- int pastMax = filesToCompact.size() - this.maxFilesToCompact;
- filesToCompact.subList(0, pastMax).clear();
+ if (!majorcompaction) {
+ // all files included in this compaction, up to max
+ if (filesToCompact.size() > this.maxFilesToCompact) {
+ int pastMax = filesToCompact.size() - this.maxFilesToCompact;
+ filesToCompact.subList(0, pastMax).clear();
+ }
+ } else if (filesToCompact.size() > this.maxFilesToCompact) {
+ LOG.debug("Warning, compacting more than " + this.maxFilesToCompact + " files, probably because of a user-requested major compaction");
+ if (priority != PRIORITY_USER) {
+ LOG.error("Compacting more than max files on a non user-requested compaction");
+ }
}
}
return filesToCompact;
@@ -1807,10 +1851,23 @@ public class Store implements HeapSize {
}
/**
- * @return The priority that this store should have in the compaction queue
+ * Same as {@link #getCompactPriority(int)} with a default priority
*/
public int getCompactPriority() {
- return this.blockingStoreFileCount - this.storefiles.size();
+ return getCompactPriority(NO_PRIORITY);
+ }
+
+ /**
+ * @param priority the requested priority; PRIORITY_USER keeps a user request at top priority
+ * @return The priority that this store should have in the compaction queue
+ */
+ public int getCompactPriority(int priority) {
+ // If this is a user-requested compaction, leave this at the highest priority
+ if (priority == PRIORITY_USER) {
+ return PRIORITY_USER;
+ } else {
+ return this.blockingStoreFileCount - this.storefiles.size();
+ }
}
HRegion getHRegion() {
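
The two decisions above carry the whole fix: compactSelection() only lifts the file cap for PRIORITY_USER major compactions, and getCompactPriority(int) pins user requests at top priority. Distilled into a standalone sketch (hypothetical names; the real methods also handle references, TTL, and coprocessor hooks):

    // Hypothetical standalone distillation of the patched decision logic.
    class CompactionPolicySketch {
      static final int PRIORITY_USER = 1;

      int maxFilesToCompact = 10;     // "hbase.hstore.compaction.max"
      int blockingStoreFileCount = 7; // "hbase.hstore.blockingStoreFiles"

      // A user-requested major compaction stays major even past the file cap;
      // system requests keep the old size-capped rule.
      boolean isMajor(boolean forcemajor, int priority, int nFiles, boolean majorDue) {
        return (forcemajor && priority == PRIORITY_USER)
            || (nFiles < maxFilesToCompact && (forcemajor || majorDue));
      }

      // User requests keep top priority; otherwise priority reflects how close
      // the store is to its blocking file count (<= 0 means blocking).
      int getCompactPriority(int priority, int storefileCount) {
        return priority == PRIORITY_USER
            ? PRIORITY_USER
            : blockingStoreFileCount - storefileCount;
      }
    }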
Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java?rev=1340285&r1=1340284&r2=1340285&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java Fri May 18 22:20:33 2012
@@ -181,7 +181,7 @@ public class CompactionRequest implement
if (completed) {
server.getMetrics().addCompaction(now - start, this.totalSize);
// degenerate case: blocked regions require recursive enqueues
- if (s.getCompactPriority() <= 0) {
+ if (s.getCompactPriority(Store.NO_PRIORITY) <= 0) {
server.compactSplitThread
.requestCompaction(r, s, "Recursive enqueue");
} else {
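
Note that the recursive enqueue deliberately passes Store.NO_PRIORITY: a follow-up compaction scheduled because the store is still blocked is a system request, so its queue priority is recomputed from the store file backlog rather than inherited from the request that just completed.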
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1340285&r1=1340284&r2=1340285&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Fri May 18 22:20:33 2012
@@ -19,19 +19,6 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.spy;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static org.junit.Assert.fail;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -39,22 +26,23 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
+
/**
* Test compactions
@@ -66,22 +54,26 @@ public class TestCompaction extends HBas
private HRegion r = null;
private Path compactionDir = null;
private Path regionCompactionDir = null;
- private static final byte [] COLUMN_FAMILY = fam1;
- private final byte [] STARTROW = Bytes.toBytes(START_KEY);
- private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
+ private static final byte[] COLUMN_FAMILY = fam1;
+ private final byte[] STARTROW = Bytes.toBytes(START_KEY);
+ private static final byte[] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
private int compactionThreshold;
private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
final private byte[] col1, col2;
+ private static final long MAX_FILES_TO_COMPACT = 10;
- /** constructor */
+ /**
+ * constructor
+ */
public TestCompaction() throws Exception {
super();
// Set cache flush size to 1MB
- conf.setInt("hbase.hregion.memstore.flush.size", 1024*1024);
+ conf.setInt("hbase.hregion.memstore.flush.size", 1024 * 1024);
conf.setInt("hbase.hregion.memstore.block.multiplier", 100);
compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
+ this.conf.setLong("hbase.hstore.compaction.max", MAX_FILES_TO_COMPACT);
firstRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
@@ -110,9 +102,9 @@ public class TestCompaction extends HBas
}
/**
- * Test that on a major compaction, if all cells are expired or deleted, then
- * we'll end up with no product. Make sure scanner over region returns
- * right answer in this case - and that it just basically works.
+ * Test that on a major compaction, if all cells are expired or deleted, then we'll end up with no product. Make sure
+ * scanner over region returns right answer in this case - and that it just basically works.
+ *
* @throws IOException
*/
public void testMajorCompactingToNoOutput() throws IOException {
@@ -127,7 +119,7 @@ public class TestCompaction extends HBas
boolean result = s.next(results);
r.delete(new Delete(results.get(0).getRow()), null, false);
if (!result) break;
- } while(true);
+ } while (true);
// Flush
r.flushcache();
// Major compact.
@@ -139,13 +131,13 @@ public class TestCompaction extends HBas
boolean result = s.next(results);
if (!result) break;
counter++;
- } while(true);
+ } while (true);
assertEquals(0, counter);
}
/**
- * Run compaction and flushing memstore
- * Assert deletes get cleaned up.
+ * Run compaction and flushing memstore. Assert deletes get cleaned up.
+ *
* @throws Exception
*/
public void testMajorCompaction() throws Exception {
@@ -164,7 +156,7 @@ public class TestCompaction extends HBas
assertEquals(compactionThreshold, result.size());
// see if CompactionProgress is in place but null
- for (Store store: this.r.stores.values()) {
+ for (Store store : this.r.stores.values()) {
assertNull(store.getCompactionProgress());
}
@@ -173,19 +165,19 @@ public class TestCompaction extends HBas
// see if CompactionProgress has done its thing on at least one store
int storeCount = 0;
- for (Store store: this.r.stores.values()) {
+ for (Store store : this.r.stores.values()) {
CompactionProgress progress = store.getCompactionProgress();
- if( progress != null ) {
+ if (progress != null) {
++storeCount;
- assert(progress.currentCompactedKVs > 0);
- assert(progress.totalCompactingKVs > 0);
+ assert (progress.currentCompactedKVs > 0);
+ assert (progress.totalCompactingKVs > 0);
}
- assert(storeCount > 0);
+ assert (storeCount > 0);
}
// look at the second row
// Increment the least significant character so we get to next row.
- byte [] secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
+ byte[] secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
secondRowBytes[START_KEY_BYTES.length - 1]++;
// Always 3 versions if that is what max versions is.
@@ -198,42 +190,42 @@ public class TestCompaction extends HBas
// should result in a compacted store file that has no references to the
// deleted row.
Delete delete = new Delete(secondRowBytes, System.currentTimeMillis(), null);
- byte [][] famAndQf = {COLUMN_FAMILY, null};
+ byte[][] famAndQf = {COLUMN_FAMILY, null};
delete.deleteFamily(famAndQf[0]);
r.delete(delete, null, true);
// Assert deleted.
- result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
+ result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
assertTrue("Second row should have been deleted", result.isEmpty());
r.flushcache();
- result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
+ result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
assertTrue("Second row should have been deleted", result.isEmpty());
// Add a bit of data and flush. Start adding at 'bbb'.
createSmallerStoreFile(this.r);
r.flushcache();
// Assert that the second row is still deleted.
- result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
+ result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
assertTrue("Second row should still be deleted", result.isEmpty());
// Force major compaction.
r.compactStores(true);
assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 1);
- result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null );
+ result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
assertTrue("Second row should still be deleted", result.isEmpty());
// Make sure the store files do have some 'aaa' keys in them -- exactly 3.
// Also, that compacted store files do not have any secondRowBytes because
// they were deleted.
- verifyCounts(3,0);
+ verifyCounts(3, 0);
// Multiple versions allowed for an entry, so the delete isn't enough
// Lower TTL and expire to ensure that all our entries have been wiped
final int ttlInSeconds = 1;
- for (Store store: this.r.stores.values()) {
+ for (Store store : this.r.stores.values()) {
store.ttl = ttlInSeconds * 1000;
}
Thread.sleep(ttlInSeconds * 1000);
@@ -247,12 +239,14 @@ public class TestCompaction extends HBas
Delete deleteRow = new Delete(secondRowBytes);
testMinorCompactionWithDelete(deleteRow);
}
+
public void testMinorCompactionWithDeleteColumn1() throws Exception {
Delete dc = new Delete(secondRowBytes);
/* delete all timestamps in the column */
dc.deleteColumns(fam2, col2);
testMinorCompactionWithDelete(dc);
}
+
public void testMinorCompactionWithDeleteColumn2() throws Exception {
Delete dc = new Delete(secondRowBytes);
dc.deleteColumn(fam2, col2);
@@ -265,11 +259,13 @@ public class TestCompaction extends HBas
//testMinorCompactionWithDelete(dc, 2);
testMinorCompactionWithDelete(dc, 3);
}
+
public void testMinorCompactionWithDeleteColumnFamily() throws Exception {
Delete deleteCF = new Delete(secondRowBytes);
deleteCF.deleteFamily(fam2);
testMinorCompactionWithDelete(deleteCF);
}
+
public void testMinorCompactionWithDeleteVersion1() throws Exception {
Delete deleteVersion = new Delete(secondRowBytes);
deleteVersion.deleteColumns(fam2, col2, 2);
@@ -278,6 +274,7 @@ public class TestCompaction extends HBas
*/
testMinorCompactionWithDelete(deleteVersion, 1);
}
+
public void testMinorCompactionWithDeleteVersion2() throws Exception {
Delete deleteVersion = new Delete(secondRowBytes);
deleteVersion.deleteColumn(fam2, col2, 1);
@@ -299,6 +296,7 @@ public class TestCompaction extends HBas
private void testMinorCompactionWithDelete(Delete delete) throws Exception {
testMinorCompactionWithDelete(delete, 0);
}
+
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
HRegionIncommon loader = new HRegionIncommon(r);
for (int i = 0; i < compactionThreshold + 1; i++) {
@@ -360,25 +358,25 @@ public class TestCompaction extends HBas
private void verifyCounts(int countRow1, int countRow2) throws Exception {
int count1 = 0;
int count2 = 0;
- for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
+ for (StoreFile f : this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
HFileScanner scanner = f.getReader().getScanner(false, false);
scanner.seekTo();
do {
- byte [] row = scanner.getKeyValue().getRow();
+ byte[] row = scanner.getKeyValue().getRow();
if (Bytes.equals(row, STARTROW)) {
count1++;
- } else if(Bytes.equals(row, secondRowBytes)) {
+ } else if (Bytes.equals(row, secondRowBytes)) {
count2++;
}
- } while(scanner.next());
+ } while (scanner.next());
}
- assertEquals(countRow1,count1);
- assertEquals(countRow2,count2);
+ assertEquals(countRow1, count1);
+ assertEquals(countRow2, count2);
}
/**
- * Verify that you can stop a long-running compaction
- * (used during RS shutdown)
+ * Verify that you can stop a long-running compaction (used during RS shutdown)
+ *
* @throws Exception
*/
public void testInterruptCompaction() throws Exception {
@@ -386,12 +384,12 @@ public class TestCompaction extends HBas
// lower the polling interval for this test
int origWI = Store.closeCheckInterval;
- Store.closeCheckInterval = 10*1000; // 10 KB
+ Store.closeCheckInterval = 10 * 1000; // 10 KB
try {
// Create a couple store files w/ 15KB (over 10KB interval)
- int jmax = (int) Math.ceil(15.0/compactionThreshold);
- byte [] pad = new byte[1000]; // 1 KB chunk
+ int jmax = (int) Math.ceil(15.0 / compactionThreshold);
+ byte[] pad = new byte[1000]; // 1 KB chunk
for (int i = 0; i < compactionThreshold; i++) {
HRegionIncommon loader = new HRegionIncommon(r);
Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
@@ -418,7 +416,7 @@ public class TestCompaction extends HBas
// ensure that the compaction stopped, all old files are intact,
Store s = r.stores.get(COLUMN_FAMILY);
assertEquals(compactionThreshold, s.getStorefilesCount());
- assertTrue(s.getStorefilesSize() > 15*1000);
+ assertTrue(s.getStorefilesSize() > 15 * 1000);
// and no new store files persisted past compactStores()
FileStatus[] ls = FileSystem.get(conf).listStatus(r.getTmpDir());
assertEquals(0, ls.length);
@@ -431,7 +429,7 @@ public class TestCompaction extends HBas
// Delete all Store information once done using
for (int i = 0; i < compactionThreshold; i++) {
Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
- byte [][] famAndQf = {COLUMN_FAMILY, null};
+ byte[][] famAndQf = {COLUMN_FAMILY, null};
delete.deleteFamily(famAndQf[0]);
r.delete(delete, null, true);
}
@@ -440,7 +438,7 @@ public class TestCompaction extends HBas
// Multiple versions allowed for an entry, so the delete isn't enough
// Lower TTL and expire to ensure that all our entries have been wiped
final int ttlInSeconds = 1;
- for (Store store: this.r.stores.values()) {
+ for (Store store : this.r.stores.values()) {
store.ttl = ttlInSeconds * 1000;
}
Thread.sleep(ttlInSeconds * 1000);
@@ -452,7 +450,7 @@ public class TestCompaction extends HBas
private int count() throws IOException {
int count = 0;
- for (StoreFile f: this.r.stores.
+ for (StoreFile f : this.r.stores.
get(COLUMN_FAMILY_TEXT).getStorefiles()) {
HFileScanner scanner = f.getReader().getScanner(false, false);
if (!scanner.seekTo()) {
@@ -460,7 +458,7 @@ public class TestCompaction extends HBas
}
do {
count++;
- } while(scanner.next());
+ } while (scanner.next());
}
return count;
}
@@ -474,7 +472,7 @@ public class TestCompaction extends HBas
private void createSmallerStoreFile(final HRegion region) throws IOException {
HRegionIncommon loader = new HRegionIncommon(region);
addContent(loader, Bytes.toString(COLUMN_FAMILY), ("" +
- "bbb").getBytes(), null);
+ "bbb").getBytes(), null);
loader.flushcache();
}
@@ -514,4 +512,41 @@ public class TestCompaction extends HBas
fail("testCompactionWithCorruptResult failed since no exception was" +
"thrown while completing a corrupt file");
}
+
+ /**
+ * Test for HBASE-5920 - verify that a system-requested major compaction is demoted to minor when there are too many store files
+ */
+ public void testNonUserMajorCompactionRequest() throws Exception {
+ Store store = r.getStore(COLUMN_FAMILY);
+ createStoreFile(r);
+ for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
+ createStoreFile(r);
+ }
+ store.triggerMajorCompaction();
+
+ CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY);
+ assertNotNull("Expected to receive a compaction request", request);
+ assertEquals(
+ "System-requested major compaction should not occur if there are too many store files",
+ false,
+ request.isMajor());
+ }
+
+ /**
+ * Test for HBASE-5920 - verify that a user-requested major compaction always occurs, even with too many store files
+ */
+ public void testUserMajorCompactionRequest() throws IOException {
+ Store store = r.getStore(COLUMN_FAMILY);
+ createStoreFile(r);
+ for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
+ createStoreFile(r);
+ }
+ store.triggerMajorCompaction();
+ CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER);
+ assertNotNull("Expected to receive a compaction request", request);
+ assertEquals(
+ "User-requested major compaction should always occur, even if there are too many store files",
+ true,
+ request.isMajor());
+ }
}
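
Taken together, the two new tests pin the fix from both sides: with more than MAX_FILES_TO_COMPACT store files and triggerMajorCompaction() set, a Store.NO_PRIORITY request is downgraded to a minor compaction, while a Store.PRIORITY_USER request stays major.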