Posted to commits@hbase.apache.org by mb...@apache.org on 2012/08/28 23:13:39 UTC
svn commit: r1378348 [2/2] - in /hbase/branches/0.89-fb/src:
main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/client/
main/java/org/apache/hadoop/hbase/ipc/
main/java/org/apache/hadoop/hbase/mapreduce/
main/java/org/apache/hadoop/hb...
Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/TierCompactionManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/TierCompactionManager.java?rev=1378348&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/TierCompactionManager.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/TierCompactionManager.java Tue Aug 28 21:13:38 2012
@@ -0,0 +1,252 @@
+/**
+ * Copyright 2012 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.util.StringUtils;
+
+import java.io.IOException;
+
+public class TierCompactionManager extends CompactionManager {
+
+ private static final Log LOG = LogFactory.getLog(TierCompactionManager.class);
+
+ private int[] endInTier;
+ private int[] tierOf;
+
+ private TierCompactionConfiguration tierConf;
+
+ TierCompactionManager(Configuration configuration, Store store) {
+ super(configuration, store);
+ comConf = new TierCompactionConfiguration(configuration, store);
+ tierConf = (TierCompactionConfiguration) comConf;
+ }
+
+ /**
+ * @param candidates pre-filtered list of candidate store files
+ * @return filtered subset
+ * -- Tier Based minor compaction selection algorithm: choose a CompactSelection from candidates --
+ * <p/>
+ * First exclude bulk-load files if indicated in configuration.
+ * Arrange files from oldest to newest, then select an appropriate ['start','end') pair:
+ * try 'start' from oldest to newest (smallest to largest fileIndex);
+ * for each value, identify the 'end' fileIndex;
+ * stop when the range ['start','end') is an admissible compaction.
+ * <p/>
+ * Notes:
+ * <p/>
+ * a compaction is admissible if
+ * - fileSize[start] is at most maxCompactSize AND
+ * - the number of files is at least currentTier.minFilesToCompact AND
+ * - (fileSize[start] is at most ratio times the rest of the files in the compaction OR
+ *   fileSize[start] is at most minCompactSize)
+ * <p/>
+ * end is endInTier[tierOf[start].endingIndexForTier]
+ * By default currentTier.endingIndexForTier = currentTier, so 'end' is always
+ * 1 + the last fileIndex in currentTier, ensuring that files from different
+ * tiers are never selected together in the default case.
+ * normal skew:
+ *
+ * older ----> newer (increasing seqID, increasing minFlushTime)
+ *
+ * Tier 2 | Tier 1 | Tier 0
+ * | |
+ * _ | |
+ * | | | _ |
+ * | | | | | _ |
+ * --|-|-|-|-|- |-|-|--_-------_------- minCompactSize
+ * | | | | | | | | | | _ | |
+ * | | | | | | | | | | | | | |
+ * | | | | | | | | | | | | | |
+ */
+ @Override
+ CompactSelection applyCompactionPolicy(CompactSelection candidates) throws IOException {
+ // we're doing a minor compaction, let's see what files are applicable
+ int start = -1;
+ int end = -1;
+
+ // skip the selection algorithm if there are no candidate files
+ if (candidates.getFilesToCompact().isEmpty()) {
+ candidates.emptyFileList();
+ return candidates;
+ }
+
+ // get store file sizes for incremental compacting selection.
+ int countOfFiles = candidates.getFilesToCompact().size();
+ long[] fileSizes = new long[countOfFiles];
+ StoreFile file;
+ long[] sumSize = new long[countOfFiles + 1];
+ sumSize[countOfFiles] = 0;
+ for (int i = countOfFiles - 1; i >= 0; --i) {
+ file = candidates.getFilesToCompact().get(i);
+ fileSizes[i] = file.getReader().length();
+ // sumSize[i] is the suffix sum fileSizes[i] + fileSizes[i+1] + ... + fileSizes[countOfFiles-1]
+ sumSize[i] = fileSizes[i] + sumSize[i + 1];
+ }
+
+ /**
+ * divide into tiers:
+ * assign tierOf[fileIndex] = tierIndex
+ * assign endInTier[tierIndex] = 1 + index of the last file in tierIndex
+ */
+ // Backward compatibility - if files with indices < i don't have the minFlushTime
+ // field, then all of them get tierOf[i]. If no file has minFlushTime, all of them get tier zero.
+ int numTiers = tierConf.getNumCompactionTiers();
+ TierCompactionConfiguration.CompactionTier tier;
+ tierOf = new int[countOfFiles];
+ endInTier = new int[numTiers + 1];
+ endInTier[numTiers] = 0;
+
+ LOG.info("Applying TierCompactionPolicy with " + countOfFiles + " files");
+
+ int i;
+ int j = countOfFiles;
+
+ for (i = 0; i < numTiers; i++) {
+ tier = tierConf.getCompactionTier(i);
+ endInTier[i] = j;
+ while (j > 0) {
+ file = candidates.getFilesToCompact().get(j - 1);
+ if (!isInTier(file, tier)) {
+ break;
+ }
+ j--;
+ tierOf[j] = i;
+ }
+ }
+
+ long restSize;
+ double ratio;
+
+ //Main algorithm
+ for (j = 0; j < countOfFiles; j++) {
+ start = next(start);
+ tier = tierConf.getCompactionTier(tierOf[start]);
+ end = endInTier[tier.getEndingIndexForTier()];
+ restSize = sumSize[start + 1] - sumSize[end];
+ ratio = tier.getCompactionRatio();
+ if (fileSizes[start] <= tierConf.getMaxCompactSize() &&
+ end - start >= tier.getMinFilesToCompact() &&
+ (fileSizes[start] <= tierConf.getMinCompactSize() ||
+ (fileSizes[start] <= restSize * ratio))) {
+ break;
+ }
+ }
+ String tab = " ";
+ for (i = 0; i < numTiers; i++) {
+ LOG.info("Tier " + i + " : " + tierConf.getCompactionTier(i).getDescription());
+ if (endInTier[i] == endInTier[i+1]) {
+ LOG.info(tab + "No file is assigned to this tier.");
+ } else {
+ LOG.info(tab + (endInTier[i] - endInTier[i+1])
+ + " file(s) are assigned to this tier with serial number(s) "
+ + endInTier[i + 1] + " to " + (endInTier[i] - 1));
+ }
+ for (j = endInTier[i + 1]; j < endInTier[i]; j++) {
+ file = candidates.getFilesToCompact().get(j);
+ LOG.info(tab + tab + "SeqID = " + file.getMaxSequenceId()
+ + ", Age = " + StringUtils.formatTimeDiff(
+ EnvironmentEdgeManager.currentTimeMillis(), file.getMinFlushTime())
+ + ", Size = " + StringUtils.humanReadableInt(fileSizes[j])
+ + ", Path = " + file.getPath());
+ }
+ }
+ if (start < countOfFiles) {
+ end = Math.min(end, start
+ + tierConf.getCompactionTier(tierOf[start]).getMaxFilesToCompact());
+ }
+ if (start < end) {
+ String strTier = String.valueOf(tierOf[start]);
+ if (tierOf[end - 1] != tierOf[start]) {
+ strTier += " to " + tierOf[end - 1];
+ }
+ LOG.info("Tier Based compaction algorithm has selected " + (end - start)
+ + " files from tier " + strTier + " out of " + countOfFiles + " candidates");
+ }
+
+ candidates = candidates.getSubList(start, end);
+ return candidates;
+ }
+
+ private boolean isInTier(StoreFile file, TierCompactionConfiguration.CompactionTier tier) {
+ return file.getReader().length() <= tier.getMaxSize() &&
+ EnvironmentEdgeManager.currentTimeMillis() - file.getMinFlushTime() <= tier.getMaxAgeInDisk();
+ }
+
+ /**
+ * This function iterates over the start values in order.
+ * Whenever an admissible compaction is found, we return the selection.
+ * Hence the order is important if there is more than one admissible compaction.
+ * @param start current value
+ * @return next value
+ */
+ private int next(int start) {
+ if (tierConf.isRecentFirstOrder()) {
+ return backNext(start);
+ }
+ return fwdNext(start);
+ }
+
+ /**
+ * This function iterates over the start values in newer-first order of tiers,
+ * but older-first order of files within a tier.
+ * For example, suppose the tiers are:
+ * Tier 3 - files 0,1,2
+ * Tier 2 - files 3,4
+ * Tier 1 - no files
+ * Tier 0 - files 5,6,7
+ * Then the order of 'start' files will be:
+ * 5,6,7,3,4,0,1,2
+ * @param start current value
+ * @return next value
+ */
+ private int backNext(int start) {
+ int tier = 0;
+ if (start == -1) {
+ while (endInTier[tier] >= endInTier[0]) {
+ tier++;
+ }
+ return endInTier[tier];
+ }
+ tier = tierOf[start];
+ if (endInTier[tier] == start + 1) {
+ tier++;
+ start = endInTier[tier];
+ while (endInTier[tier] >= start) {
+ tier++;
+ }
+ return endInTier[tier];
+ }
+ return start + 1;
+ }
+
+ /**
+ * This function iterates over the start values in older-first order of files.
+ * @param start current value
+ * @return next value
+ */
+ private int fwdNext(int start) {
+ return start + 1;
+ }
+
+}
\ No newline at end of file
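
For reference, the admissibility test applied inside applyCompactionPolicy
above reduces to a single predicate over a candidate range ['start','end').
The following standalone sketch restates it; the method name and signature are
hypothetical, but the inequalities mirror the committed code:

    // Sketch: the admissibility check from applyCompactionPolicy, extracted.
    // sumSize[i] holds the suffix sum fileSizes[i] + ... + fileSizes[n - 1],
    // so restSize is the combined size of the other files in the range.
    static boolean isAdmissible(long[] fileSizes, long[] sumSize, int start,
                                int end, double ratio, int minFilesToCompact,
                                long minCompactSize, long maxCompactSize) {
      long restSize = sumSize[start + 1] - sumSize[end];
      return fileSizes[start] <= maxCompactSize
          && end - start >= minFilesToCompact
          && (fileSizes[start] <= minCompactSize
              || fileSizes[start] <= restSize * ratio);
    }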
Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/UpdateConfigTool.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/UpdateConfigTool.java?rev=1378348&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/UpdateConfigTool.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/UpdateConfigTool.java Tue Aug 28 21:13:38 2012
@@ -0,0 +1,37 @@
+/**
+ * Copyright 2012 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+
+public class UpdateConfigTool {
+
+ public static void main(String[] args) {
+ try {
+ Configuration conf = HBaseConfiguration.create();
+ new HBaseAdmin(conf).updateConfiguration();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+}
\ No newline at end of file
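
UpdateConfigTool is a thin command-line wrapper around
HBaseAdmin.updateConfiguration(). Assuming the stock bin/hbase launcher
script, which falls back to running an arbitrary class by name when its first
argument is not a built-in command, the tool can be invoked as:

    bin/hbase org.apache.hadoop.hbase.util.UpdateConfigTool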
Added: hbase/branches/0.89-fb/src/main/resources/hbase-compactions.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/hbase-compactions.xml?rev=1378348&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/hbase-compactions.xml (added)
+++ hbase/branches/0.89-fb/src/main/resources/hbase-compactions.xml Tue Aug 28 21:13:38 2012
@@ -0,0 +1,162 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2012 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.hstore.compactionThreshold</name>
+ <value>3</value>
+ <description>
+ If more than this number of HStoreFiles exist in any one HStore
+ (one HStoreFile is written per flush of memstore), then a compaction
+ is run to rewrite all HStoreFiles as one. Larger numbers
+ put off compaction, but when it runs, it takes longer to complete.
+ During a compaction, updates cannot be flushed to disk. Long
+ compactions require memory sufficient to carry the logging of
+ all updates across the duration of the compaction.
+
+ If too large, clients time out during compaction.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.max</name>
+ <value>10</value>
+ <description>Max number of HStoreFiles to compact per 'minor' compaction.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.majorcompaction</name>
+ <value>86400000</value>
+ <description>The time (in milliseconds) between 'major' compactions of all
+ HStoreFiles in a region. Default: 1 day.
+ Set to 0 to disable automated major compactions.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.hstore.compaction.CompactionPolicy</name>
+ <value>TierBasedCompactionPolicy</value>
+ <description>The compaction policy to use.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.tbl.cluster_test.cf.test_cf.NumCompactionTiers</name>
+ <value>4</value>
+ <description>The number of tiers into which the files are assigned.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.hstore.compaction.Default.Tier.0.MaxAgeInDisk</name>
+ <value>3600000</value>
+ <description>Maximum age of a file to remain in the first tier;
+ value is one hour.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.tbl.cluster_test.cf.test_cf.Tier.1.MaxAgeInDisk</name>
+ <value>10800000</value>
+ <description>Maximum age of a file to be in the second tier;
+ value is 3 hours.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.Default.Tier.2.MaxAgeInDisk</name>
+ <value>36000000</value>
+ <description>Maximum age of a file to be in the third tier;
+ value is 10 hours.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.hstore.compaction.Default.CompactionRatio</name>
+ <value>0.0</value>
+ <description>The default compaction ratio used if unspecified;
+ value is 0.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.Default.Tier.1.CompactionRatio</name>
+ <value>1.0</value>
+ <description>The compaction ratio for the second tier;
+ value is 1.0.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.Default.Tier.2.CompactionRatio</name>
+ <value>0.75</value>
+ <description>The compaction ratio for the third tier;
+ value is 0.75.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.Default.Tier.3.CompactionRatio</name>
+ <value>0.2</value>
+ <description>The compaction ratio for the fourth tier;
+ value is 0.2.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.hstore.compaction.min</name>
+ <value>2</value>
+ <description>Default minimum number of files to compact;
+ value is 2.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.tbl.cluster_test.cf.test_cf.MinFilesToCompact</name>
+ <value>3</value>
+ <description>Per-family override of the default minimum number of files to compact;
+ value is 3.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.max</name>
+ <value>7</value>
+ <description>Default maximum number of files to compact;
+ value is 7.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.Default.Tier.1.MinFilesToCompact</name>
+ <value>2</value>
+ <description>Minimum number of files to compact in the second tier;
+ value is 2.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compaction.Default.Tier.3.MaxFilesToCompact</name>
+ <value>6</value>
+ <description>Maximum number of files to compact in the fourth tier;
+ value is 6.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.hstore.compaction.Default.Tier.2.EndingIndexForTier</name>
+ <value>1</value>
+ <description>The lowest tier whose files may be selected together with this tier;
+ value is 1.
+ </description>
+ </property>
+</configuration>
\ No newline at end of file
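
The tier settings above follow the key pattern
hbase.hstore.compaction.<scope>.<setting>, where <scope> is either "Default"
or "tbl.<table>.cf.<family>", and <setting> is either a scope-wide default
(e.g. CompactionRatio) or a tier-specific value (e.g. Tier.2.CompactionRatio).
The committed resolution logic lives in TierCompactionConfiguration, which is
not part of this half of the diff; the following is only a minimal sketch of
such a lookup, with a hypothetical helper name, using Hadoop's Configuration
API:

    import org.apache.hadoop.conf.Configuration;

    // Sketch: resolve a per-tier ratio, falling back from the tier-specific
    // key to the scope-wide default, then to a caller-supplied global default.
    public final class TierSettingLookup {
      private static final String PREFIX = "hbase.hstore.compaction.";

      // scope is "Default." or "tbl.<table>.cf.<family>."
      static float compactionRatio(Configuration conf, String scope, int tier,
                                   float globalDefault) {
        float scopeDefault =
            conf.getFloat(PREFIX + scope + "CompactionRatio", globalDefault);
        return conf.getFloat(PREFIX + scope + "Tier." + tier + ".CompactionRatio",
                             scopeDefault);
      }
    }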
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java?rev=1378348&r1=1378347&r2=1378348&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java Tue Aug 28 21:13:38 2012
@@ -1,277 +0,0 @@
-/**
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Calendar;
-import java.util.GregorianCalendar;
-import java.util.List;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import com.google.common.collect.Lists;
-
-public class TestCompactSelection extends TestCase {
- private final static Log LOG = LogFactory.getLog(TestCompactSelection.class);
- private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
- private Configuration conf;
- private Store store;
- private static final String DIR
- = TEST_UTIL.getTestDir() + "/TestCompactSelection/";
- private static Path TEST_FILE;
-
- private static final int minFiles = 3;
- private static final int maxFiles = 5;
-
- private static final long minSize = 10;
- private static final long maxSize = 1000;
-
-
- @Override
- public void setUp() throws Exception {
- // setup config values necessary for store
- this.conf = TEST_UTIL.getConfiguration();
- this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
- this.conf.setInt("hbase.hstore.compaction.min", minFiles);
- this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
- this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
- this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
- this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);
-
- //Setting up a Store
- Path basedir = new Path(DIR);
- Path logdir = new Path(DIR+"/logs");
- Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
- HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
- FileSystem fs = FileSystem.get(conf);
-
- fs.delete(logdir, true);
-
- HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("table"));
- htd.addFamily(hcd);
- HRegionInfo info = new HRegionInfo(htd, null, null, false);
- HLog hlog = new HLog(fs, logdir, oldLogDir, conf, null);
- HRegion region = new HRegion(basedir, hlog, fs, conf, info, null);
-
- store = new Store(basedir, region, hcd, fs, conf);
- TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
- fs.create(TEST_FILE);
- }
-
- // used so our tests don't deal with actual StoreFiles
- static class MockStoreFile extends StoreFile {
- long length = 0;
- boolean isRef = false;
-
- MockStoreFile(long length, boolean isRef) throws IOException {
- super(TEST_UTIL.getTestFileSystem(), TEST_FILE,
- TEST_UTIL.getConfiguration(),
- new CacheConfig(TEST_UTIL.getConfiguration()), BloomType.NONE,
- NoOpDataBlockEncoder.INSTANCE);
- this.length = length;
- this.isRef = isRef;
- }
-
- void setLength(long newLen) {
- this.length = newLen;
- }
-
- @Override
- boolean isMajorCompaction() {
- return false;
- }
-
- @Override
- boolean isReference() {
- return this.isRef;
- }
-
- @Override
- public StoreFile.Reader getReader() {
- final long len = this.length;
- return new StoreFile.Reader() {
- @Override
- public long length() {
- return len;
- }
- };
- }
- }
-
- List<StoreFile> sfCreate(long ... sizes) throws IOException {
- return sfCreate(false, sizes);
- }
-
- List<StoreFile> sfCreate(boolean isReference, long ... sizes)
- throws IOException {
- List<StoreFile> ret = Lists.newArrayList();
- for (long i : sizes) {
- ret.add(new MockStoreFile(i, isReference));
- }
- return ret;
- }
-
- long[] getSizes(List<StoreFile> sfList) {
- long[] aNums = new long[sfList.size()];
- for (int i=0; i <sfList.size(); ++i) {
- aNums[i] = sfList.get(i).getReader().length();
- }
- return aNums;
- }
-
- void compactEquals(List<StoreFile> candidates, long ... expected)
- throws IOException {
- compactEquals(candidates, false, expected);
- }
-
- void compactEquals(List<StoreFile> candidates, boolean forcemajor,
- long ... expected)
- throws IOException {
- store.forceMajor = forcemajor;
- List<StoreFile> actual = store.compactSelection(candidates).getFilesToCompact();
- store.forceMajor = false;
- assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
- }
-
- public void testCompactionRatio() throws IOException {
- /*
- * NOTE: these tests are specific to describe the implementation of the
- * current compaction algorithm. Developed to ensure that refactoring
- * doesn't implicitly alter this.
- */
- long tooBig = maxSize + 1;
-
- // default case. preserve user ratio on size
- compactEquals(sfCreate(100,50,23,12,12), 23, 12, 12);
- // less than compact threshold = don't compact
- compactEquals(sfCreate(100,50,25,12,12) /* empty */);
- // greater than compact size = skip those
- compactEquals(sfCreate(tooBig, tooBig, 700, 700, 700), 700, 700, 700);
- // big size + threshold
- compactEquals(sfCreate(tooBig, tooBig, 700,700) /* empty */);
- // small files = don't care about ratio
- compactEquals(sfCreate(8,3,1), 8,3,1);
- /* TODO: add sorting + unit test back in when HBASE-2856 is fixed
- // sort first so you don't include huge file the tail end.
- // happens with HFileOutputFormat bulk migration
- compactEquals(sfCreate(100,50,23,12,12, 500), 23, 12, 12);
- */
- // don't exceed max file compact threshold
- assertEquals(maxFiles,
- store.compactSelection(sfCreate(7,6,5,4,3,2,1)).getFilesToCompact().size());
-
- /* MAJOR COMPACTION */
- // if a major compaction has been forced, then compact everything
- compactEquals(sfCreate(50,25,12,12), true, 50, 25, 12, 12);
- // also choose files < threshold on major compaction
- compactEquals(sfCreate(12,12), true, 12, 12);
- // even if one of those files is too big
- compactEquals(sfCreate(tooBig, 12,12), true, tooBig, 12, 12);
- // don't exceed max file compact threshold, even with major compaction
- store.forceMajor = true;
- assertEquals(maxFiles,
- store.compactSelection(sfCreate(7,6,5,4,3,2,1)).getFilesToCompact().size());
- store.forceMajor = false;
- // if we exceed maxCompactSize, downgrade to minor
- // if not, it creates a 'snowball effect' when files >> maxCompactSize:
- // the last file in compaction is the aggregate of all previous compactions
- compactEquals(sfCreate(100,50,23,12,12), true, 23, 12, 12);
- conf.setInt(HConstants.MAJOR_COMPACTION_PERIOD, 1);
- conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
- try {
- // trigger an aged major compaction
- compactEquals(sfCreate(50,25,12,12), 50, 25, 12, 12);
- // make sure exceeding maxCompactSize also downgrades aged minors
- compactEquals(sfCreate(100,50,23,12,12), 23, 12, 12);
- } finally {
- conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
- conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
- }
-
- /* REFERENCES == file is from a region that was split */
- // treat storefiles that have references like a major compaction
- compactEquals(sfCreate(true, 100,50,25,12,12), 100, 50, 25, 12, 12);
- // reference files shouldn't obey max threshold
- compactEquals(sfCreate(true, tooBig, 12,12), tooBig, 12, 12);
- // reference files should obey max file compact to avoid OOM
- assertEquals(maxFiles,
- store.compactSelection(sfCreate(true, 7,6,5,4,3,2,1)).getFilesToCompact().size());
-
- // empty case
- compactEquals(new ArrayList<StoreFile>() /* empty */);
- // empty case (because all files are too big)
- compactEquals(sfCreate(tooBig, tooBig) /* empty */);
- }
-
- public void testOffPeakCompactionRatio() throws IOException {
- /*
- * NOTE: these tests are specific to describe the implementation of the
- * current compaction algorithm. Developed to ensure that refactoring
- * doesn't implicitly alter this.
- */
- long tooBig = maxSize + 1;
-
- Calendar calendar = new GregorianCalendar();
- int hourOfDay = calendar.get(Calendar.HOUR_OF_DAY);
- LOG.debug("Hour of day = " + hourOfDay);
- int hourPlusOne = ((hourOfDay+1)%24);
- int hourMinusOne = ((hourOfDay-1+24)%24);
- int hourMinusTwo = ((hourOfDay-2+24)%24);
-
- // check compact selection without peak hour setting
- LOG.debug("Testing compact selection without off-peak settings...");
- compactEquals(sfCreate(999,50,12,12,1), 12, 12, 1);
-
- // set an off-peak compaction threshold
- this.conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
-
- // set peak hour to current time and check compact selection
- this.conf.setLong("hbase.offpeak.start.hour", hourMinusOne);
- this.conf.setLong("hbase.offpeak.end.hour", hourPlusOne);
- LOG.debug("Testing compact selection with off-peak settings (" +
- hourMinusOne + ", " + hourPlusOne + ")");
- compactEquals(sfCreate(999,50,12,12, 1), 50, 12, 12, 1);
-
- // set peak hour outside current selection and check compact selection
- this.conf.setLong("hbase.offpeak.start.hour", hourMinusTwo);
- this.conf.setLong("hbase.offpeak.end.hour", hourMinusOne);
- LOG.debug("Testing compact selection with off-peak settings (" +
- hourMinusTwo + ", " + hourMinusOne + ")");
- compactEquals(sfCreate(999,50,12,12, 1), 12, 12, 1);
- }
-}
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1378348&r1=1378347&r2=1378348&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Tue Aug 28 21:13:38 2012
@@ -303,6 +303,7 @@ public class TestCompaction extends HBas
float jitterPct = 0.20f; // 20%
conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);
+ r.updateConfiguration();
Store s = r.getStore(COLUMN_FAMILY);
try {
@@ -316,9 +317,10 @@ public class TestCompaction extends HBas
assertEquals(2, s.getStorefilesCount());
// ensure that major compaction time is deterministic
- long mcTime = s.getNextMajorCompactTime();
+ CompactionManager c = s.compactionManager;
+ long mcTime = c.getNextMajorCompactTime();
for (int i = 0; i < 10; ++i) {
- assertEquals(mcTime, s.getNextMajorCompactTime());
+ assertEquals(mcTime, c.getNextMajorCompactTime());
}
// ensure that the major compaction time is within the variance
Copied: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java (from r1378342, hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java)
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java?p2=hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java&p1=hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java&r1=1378342&r2=1378348&rev=1378348&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java Tue Aug 28 21:13:38 2012
@@ -42,26 +42,28 @@ import org.apache.hadoop.hbase.io.hfile.
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.collect.Lists;
-public class TestCompactSelection extends TestCase {
- private final static Log LOG = LogFactory.getLog(TestCompactSelection.class);
- private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
- private Configuration conf;
- private Store store;
- private static final String DIR
+public class TestDefaultCompactSelection extends TestCase {
+ private final static Log LOG = LogFactory.getLog(TestDefaultCompactSelection.class);
+ final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ Configuration conf;
+ Store store;
+ private CompactionManager manager;
+ static final String DIR
= TEST_UTIL.getTestDir() + "/TestCompactSelection/";
- private static Path TEST_FILE;
-
- private static final int minFiles = 3;
- private static final int maxFiles = 5;
-
- private static final long minSize = 10;
- private static final long maxSize = 1000;
+ static Path TEST_FILE;
+
+ static final int minFiles = 3;
+ static final int maxFiles = 5;
+ static final long minSize = 10;
+ static final long maxSize = 1000;
+
@Override
public void setUp() throws Exception {
// setup config values necessary for store
@@ -89,22 +91,26 @@ public class TestCompactSelection extend
HRegion region = new HRegion(basedir, hlog, fs, conf, info, null);
store = new Store(basedir, region, hcd, fs, conf);
+ manager = store.compactionManager;
TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
fs.create(TEST_FILE);
}
-
+
// used so our tests don't deal with actual StoreFiles
static class MockStoreFile extends StoreFile {
long length = 0;
boolean isRef = false;
+ long ageInDisk;
+ long sequenceid;
- MockStoreFile(long length, boolean isRef) throws IOException {
- super(TEST_UTIL.getTestFileSystem(), TEST_FILE,
- TEST_UTIL.getConfiguration(),
+ MockStoreFile(long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
+ super(TEST_UTIL.getTestFileSystem(), TEST_FILE, TEST_UTIL.getConfiguration(),
new CacheConfig(TEST_UTIL.getConfiguration()), BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE);
this.length = length;
- this.isRef = isRef;
+ this.isRef = isRef;
+ this.ageInDisk = ageInDisk;
+ this.sequenceid = sequenceid;
}
void setLength(long newLen) {
@@ -112,6 +118,24 @@ public class TestCompactSelection extend
}
@Override
+ public boolean hasMinFlushTime() {
+ return ageInDisk != 0;
+ }
+
+ @Override
+ public long getMinFlushTime() {
+ if (ageInDisk < 0) {
+ return ageInDisk;
+ }
+ return EnvironmentEdgeManager.currentTimeMillis() - ageInDisk;
+ }
+
+ @Override
+ public long getMaxSequenceId() {
+ return sequenceid;
+ }
+
+ @Override
boolean isMajorCompaction() {
return false;
}
@@ -133,29 +157,54 @@ public class TestCompactSelection extend
}
}
- List<StoreFile> sfCreate(long ... sizes) throws IOException {
- return sfCreate(false, sizes);
+ ArrayList<Long> toArrayList(long... numbers) {
+ ArrayList<Long> result = new ArrayList<Long>();
+ for (long i : numbers) {
+ result.add(i);
+ }
+ return result;
}
- List<StoreFile> sfCreate(boolean isReference, long ... sizes)
- throws IOException {
+ List<StoreFile> sfCreate(long... sizes) throws IOException {
+ ArrayList<Long> ageInDisk = new ArrayList<Long>();
+ for (int i = 0; i < sizes.length; i++) {
+ ageInDisk.add(0L);
+ }
+ return sfCreate(toArrayList(sizes), ageInDisk);
+ }
+
+ List<StoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
+ throws IOException {
+ return sfCreate(false, sizes, ageInDisk);
+ }
+
+ List<StoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
+ ArrayList<Long> ageInDisk = new ArrayList<Long>(sizes.length);
+ for (int i = 0; i < sizes.length; i++) {
+ ageInDisk.add(0L);
+ }
+ return sfCreate(isReference, toArrayList(sizes), ageInDisk);
+ }
+
+ List<StoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
+ throws IOException {
List<StoreFile> ret = Lists.newArrayList();
- for (long i : sizes) {
- ret.add(new MockStoreFile(i, isReference));
+ for (int i = 0; i < sizes.size(); i++) {
+ ret.add(new MockStoreFile(sizes.get(i), ageInDisk.get(i), isReference, i));
}
return ret;
}
long[] getSizes(List<StoreFile> sfList) {
long[] aNums = new long[sfList.size()];
- for (int i=0; i <sfList.size(); ++i) {
+ for (int i = 0; i < sfList.size(); ++i) {
aNums[i] = sfList.get(i).getReader().length();
}
return aNums;
}
- void compactEquals(List<StoreFile> candidates, long ... expected)
- throws IOException {
+ void compactEquals(List<StoreFile> candidates, long... expected)
+ throws IOException {
compactEquals(candidates, false, expected);
}
@@ -163,13 +212,15 @@ public class TestCompactSelection extend
long ... expected)
throws IOException {
store.forceMajor = forcemajor;
- List<StoreFile> actual = store.compactSelection(candidates).getFilesToCompact();
- store.forceMajor = false;
+ //Test Default compactions
+ List<StoreFile> actual = store.compactionManager
+ .selectCompaction(candidates, forcemajor).getFilesToCompact();
assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
+ store.forceMajor = false;
}
public void testCompactionRatio() throws IOException {
- /*
+ /**
* NOTE: these tests are specific to describe the implementation of the
* current compaction algorithm. Developed to ensure that refactoring
* doesn't implicitly alter this.
@@ -193,7 +244,7 @@ public class TestCompactSelection extend
*/
// don't exceed max file compact threshold
assertEquals(maxFiles,
- store.compactSelection(sfCreate(7,6,5,4,3,2,1)).getFilesToCompact().size());
+ manager.selectCompaction(sfCreate(7, 6, 5, 4, 3, 2, 1), false).getFilesToCompact().size());
/* MAJOR COMPACTION */
// if a major compaction has been forced, then compact everything
@@ -205,7 +256,7 @@ public class TestCompactSelection extend
// don't exceed max file compact threshold, even with major compaction
store.forceMajor = true;
assertEquals(maxFiles,
- store.compactSelection(sfCreate(7,6,5,4,3,2,1)).getFilesToCompact().size());
+ manager.selectCompaction(sfCreate(7, 6, 5, 4, 3, 2, 1), false).getFilesToCompact().size());
store.forceMajor = false;
// if we exceed maxCompactSize, downgrade to minor
// if not, it creates a 'snowball effect' when files >> maxCompactSize:
@@ -213,6 +264,7 @@ public class TestCompactSelection extend
compactEquals(sfCreate(100,50,23,12,12), true, 23, 12, 12);
conf.setInt(HConstants.MAJOR_COMPACTION_PERIOD, 1);
conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
+ store.updateConfiguration();
try {
// trigger an aged major compaction
compactEquals(sfCreate(50,25,12,12), 50, 25, 12, 12);
@@ -229,8 +281,8 @@ public class TestCompactSelection extend
// reference files shouldn't obey max threshold
compactEquals(sfCreate(true, tooBig, 12,12), tooBig, 12, 12);
// reference files should obey max file compact to avoid OOM
- assertEquals(maxFiles,
- store.compactSelection(sfCreate(true, 7,6,5,4,3,2,1)).getFilesToCompact().size());
+ assertEquals(maxFiles, manager.selectCompaction(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), false)
+ .getFilesToCompact().size());
// empty case
compactEquals(new ArrayList<StoreFile>() /* empty */);
@@ -240,19 +292,19 @@ public class TestCompactSelection extend
public void testOffPeakCompactionRatio() throws IOException {
/*
- * NOTE: these tests are specific to describe the implementation of the
- * current compaction algorithm. Developed to ensure that refactoring
+ * NOTE: these tests are specific to describe the implementation of the
+ * current compaction algorithm. Developed to ensure that refactoring
* doesn't implicitly alter this.
*/
long tooBig = maxSize + 1;
-
+
Calendar calendar = new GregorianCalendar();
int hourOfDay = calendar.get(Calendar.HOUR_OF_DAY);
LOG.debug("Hour of day = " + hourOfDay);
int hourPlusOne = ((hourOfDay+1)%24);
int hourMinusOne = ((hourOfDay-1+24)%24);
- int hourMinusTwo = ((hourOfDay-2+24)%24);
-
+ int hourMinusTwo = ((hourOfDay-2+24)%24);
+
// check compact selection without peak hour setting
LOG.debug("Testing compact selection without off-peak settings...");
compactEquals(sfCreate(999,50,12,12,1), 12, 12, 1);
@@ -262,15 +314,18 @@ public class TestCompactSelection extend
// set peak hour to current time and check compact selection
this.conf.setLong("hbase.offpeak.start.hour", hourMinusOne);
- this.conf.setLong("hbase.offpeak.end.hour", hourPlusOne);
- LOG.debug("Testing compact selection with off-peak settings (" +
+ this.conf.setLong("hbase.offpeak.end.hour", hourPlusOne);
+ LOG.debug("Testing compact selection with off-peak settings (" +
hourMinusOne + ", " + hourPlusOne + ")");
- compactEquals(sfCreate(999,50,12,12, 1), 50, 12, 12, 1);
-
+ // update the compaction policy to include conf changes
+ store.setCompactionPolicy(CompactionManager.class.getName());
+ compactEquals(sfCreate(999, 50, 12, 12, 1), 50, 12, 12, 1);
+
// set peak hour outside current selection and check compact selection
this.conf.setLong("hbase.offpeak.start.hour", hourMinusTwo);
this.conf.setLong("hbase.offpeak.end.hour", hourMinusOne);
- LOG.debug("Testing compact selection with off-peak settings (" +
+ store.setCompactionPolicy(CompactionManager.class.getName());
+ LOG.debug("Testing compact selection with off-peak settings (" +
hourMinusTwo + ", " + hourMinusOne + ")");
compactEquals(sfCreate(999,50,12,12, 1), 12, 12, 1);
}
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1378348&r1=1378347&r2=1378348&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Tue Aug 28 21:13:38 2012
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -196,17 +195,15 @@ public class TestStore extends TestCase
flush(i);
}
// after flush; check the lowest time stamp
- long lowestTimeStampFromStore =
- Store.getLowestTimestamp(store.getStorefiles());
- long lowestTimeStampFromFS =
- getLowestTimeStampFromFS(fs,store.getStorefiles());
- assertEquals(lowestTimeStampFromStore,lowestTimeStampFromFS);
+ long lowestTimestampFromManager = CompactionManager.getLowestTimestamp(store.getStorefiles());
+ long lowestTimeStampFromFS = getLowestTimeStampFromFS(fs,store.getStorefiles());
+ assertEquals(lowestTimestampFromManager, lowestTimeStampFromFS);
// after compact; check the lowest time stamp
store.compact(store.requestCompaction());
- lowestTimeStampFromStore = Store.getLowestTimestamp(store.getStorefiles());
+ lowestTimestampFromManager = CompactionManager.getLowestTimestamp(store.getStorefiles());
lowestTimeStampFromFS = getLowestTimeStampFromFS(fs,store.getStorefiles());
- assertEquals(lowestTimeStampFromStore,lowestTimeStampFromFS);
+ assertEquals(lowestTimestampFromManager,lowestTimeStampFromFS);
}
private static long getLowestTimeStampFromFS(FileSystem fs,
Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestTierCompactSelection.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestTierCompactSelection.java?rev=1378348&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestTierCompactSelection.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestTierCompactSelection.java Tue Aug 28 21:13:38 2012
@@ -0,0 +1,315 @@
+/**
+ * Copyright 2012 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+public class TestTierCompactSelection extends TestDefaultCompactSelection {
+ private final static Log LOG = LogFactory.getLog(TestTierCompactSelection.class);
+
+ private static final int numTiers = 4;
+
+ private String strPrefix, strSchema, strTier;
+
+
+ @Override
+ public void setUp() throws Exception {
+
+ super.setUp();
+
+ // setup config values necessary for store
+ strPrefix = "hbase.hstore.compaction.";
+ strSchema = "tbl." + store.getHRegion().getTableDesc().getNameAsString()
+ + "cf." + store.getFamily().getNameAsString() + ".";
+
+ this.conf.setStrings(strPrefix + "CompactionPolicy", "TierBasedCompactionPolicy");
+
+ this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
+
+ // The following parameters are for default compaction
+ // Some of them are used as default values of tier based compaction
+ this.conf.setInt(strPrefix + "min", 2);
+ this.conf.setInt(strPrefix + "max", 10);
+ this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 0);
+ this.conf.setLong(strPrefix + "max.size", 10000);
+ this.conf.setFloat(strPrefix + "ratio", 10.0F);
+
+ // Specifying the family parameters here
+ conf.setInt(strPrefix + strSchema + "NumCompactionTiers", numTiers);
+ conf.setLong(strPrefix + strSchema + "MinCompactSize", minSize);
+ conf.setLong(strPrefix + strSchema + "MaxCompactSize", maxSize);
+
+ // Specifying parameters for the default tier
+ strTier = "";
+ conf.setFloat(strPrefix + strSchema + strTier + "CompactionRatio", 0.1F);
+ conf.setInt(strPrefix + strSchema + strTier + "MinFilesToCompact", minFiles);
+ conf.setInt(strPrefix + strSchema + strTier + "MaxFilesToCompact", maxFiles);
+
+ // Specifying parameters for individual tiers here
+
+ // Don't compact in this tier (likely to be in block cache)
+ strTier = "Tier.0.";
+ conf.setFloat(strPrefix + strSchema + strTier + "CompactionRatio", 0.0F);
+
+ // Most aggressive tier
+ strTier = "Tier.1.";
+ conf.setFloat(strPrefix + strSchema + strTier + "CompactionRatio", 2.0F);
+ conf.setInt(strPrefix + strSchema + strTier + "MinFilesToCompact", 2);
+ conf.setInt(strPrefix + strSchema + strTier + "MaxFilesToCompact", 10);
+
+ // Medium tier
+ strTier = "Tier.2.";
+ conf.setFloat(strPrefix + strSchema + strTier + "CompactionRatio", 1.0F);
+ // Also include files in tier 1 here
+ conf.setInt(strPrefix + strSchema + strTier + "EndingIndexForTier", 1);
+
+ // Last tier - least aggressive compaction
+ // has default tier settings only
+ // Max Time elapsed is Infinity by default
+
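+ // Summary of the effective tier table configured above:
+ // Tier 0: ratio 0.0 (compacts only files at or below MinCompactSize)
+ // Tier 1: ratio 2.0, MinFilesToCompact 2, MaxFilesToCompact 10
+ // Tier 2: ratio 1.0, EndingIndexForTier 1 (may pull in tier-1 files)
+ // Tier 3: family defaults (ratio 0.1, MinFilesToCompact 3, MaxFilesToCompact 5)
+ // Family-level bounds: MinCompactSize 10, MaxCompactSize 1000.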
+ }
+
+ @Override
+ void compactEquals(List<StoreFile> candidates, boolean forcemajor, long... expected)
+ throws IOException {
+ store.forceMajor = forcemajor;
+ // re-read the compaction policy in case the configuration has changed
+ store.setCompactionPolicy(TierCompactionManager.class.getName());
+ List<StoreFile> actual =
+ store.compactionManager.selectCompaction(candidates, forcemajor).getFilesToCompact();
+ store.forceMajor = false;
+ assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
+ }
+
+ public void testAgeBasedAssignment() throws IOException {
+
+ conf.setLong(strPrefix + strSchema + "Tier.0.MaxAgeInDisk", 10L);
+ conf.setLong(strPrefix + strSchema + "Tier.1.MaxAgeInDisk", 100L);
+ conf.setLong(strPrefix + strSchema + "Tier.2.MaxAgeInDisk", 1000L);
+ conf.setLong(strPrefix + strSchema + "Tier.0.MaxSize", Long.MAX_VALUE);
+ conf.setLong(strPrefix + strSchema + "Tier.1.MaxSize", Long.MAX_VALUE);
+ conf.setLong(strPrefix + strSchema + "Tier.2.MaxSize", Long.MAX_VALUE);
+
+ //everything in first tier, don't compact!
+ compactEquals(sfCreate(toArrayList(
+ 151, 30, 13, 12, 11 ), toArrayList( // Sizes
+ 8, 5, 4, 2, 1 )) // ageInDisk ( = currentTime - minFlushTime)
+ /* empty expected */ ); // Selected sizes
+
+ //below minSize should compact
+ compactEquals(sfCreate(toArrayList(
+ 12, 11, 8, 3, 1 ), toArrayList(
+ 8, 5, 4, 2, 1 )),
+ 8, 3, 1 );
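+ // Worked trace of the case above, assuming the setUp defaults (tier-0
+ // ratio 0.0, MinCompactSize 10, MinFilesToCompact 3): every age is within
+ // Tier.0.MaxAgeInDisk = 10, so all five files land in tier 0. start = 0
+ // and start = 1 fail because 12 and 11 exceed MinCompactSize and ratio 0.0
+ // admits nothing by size; start = 2 succeeds since fileSizes[2] = 8 <= 10
+ // and end - start = 5 - 2 = 3 >= MinFilesToCompact, selecting [8, 3, 1].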
+
+ //everything in second tier
+ compactEquals(sfCreate(toArrayList(
+ 251, 70, 13, 12, 11 ), toArrayList(
+ 80, 50, 40, 20, 11 )),
+ 70, 13, 12, 11 );
+
+ //everything in third tier
+ compactEquals(sfCreate(toArrayList(
+ 251, 70, 13, 12, 11 ), toArrayList(
+ 800, 500, 400, 200, 110 )),
+ 13, 12, 11 );
+
+ //everything in fourth tier
+ compactEquals(sfCreate(toArrayList(
+ 251, 70, 13, 12, 11 ), toArrayList(
+ 8000, 5000, 4000, 2000, 1100 ))
+ /* empty expected */ );
+
+ //Valid compaction in 4th tier with ratio 0.10, hits maxFilesToCompact
+ compactEquals(sfCreate(toArrayList(
+ 500, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80 ), toArrayList(
+ 5094, 5093, 5092, 5091, 5090, 5089, 5088, 5087, 5086, 5085, 5084, 5083, 5082, 5081, 5080)),
+ 93, 92, 91, 90, 89 );
+
+ //Now mixing tiers 1,0, expected selection in tier 1 only
+ compactEquals(sfCreate(toArrayList(
+ 999, 110, 100, 12, 11 ), toArrayList(
+ 90, 80, 50, 4, 1 )),
+ 110, 100 );
+
+ //Mixing tier 2,1, expected selection in tier 2 including tier 1 but not zero
+ compactEquals(sfCreate(toArrayList(
+ 999, 110, 100, 12, 11 ), toArrayList(
+ 900, 800, 500, 40, 1 )),
+ 110, 100, 12 );
+
+ //Mixing tier 2,1, expected selection in tier 1 because of recentFirstOrder = true
+ compactEquals(sfCreate(toArrayList(
+ 999, 110, 100, 12, 13, 11 ), toArrayList(
+ 900, 800, 500, 40, 30, 1 )),
+ 12, 13 );
+
+ conf.setBoolean(strPrefix + strSchema + "IsRecentFirstOrder", false);
+
+ //Mixing tier 2,1, expected selection in tier 2 (including tier 1) because of recentFirstOrder = false
+ compactEquals(sfCreate(toArrayList(
+ 999, 110, 100, 12, 13, 11 ), toArrayList(
+ 900, 800, 500, 40, 30, 1 )),
+ 110, 100, 12, 13 );
+
+ //Mixing all tier 3,2,1,0 expected selection in tier 1 only
+ compactEquals(sfCreate(toArrayList(
+ 999, 800, 110, 100, 12, 13, 11 ), toArrayList(
+ 9000, 800, 50, 40, 8, 3, 1 )),
+ 110, 100 );
+
+ //Checking backward compatibility, first 3 files don't have minFlushTime,
+ //all should go to tier 1, not tier 0
+ compactEquals(sfCreate(toArrayList(
+ 999, 800, 110, 100, 12, 13, 11 ), toArrayList(
+ 0, 0, 0, 40, 8, 3, 1 )),
+ 999, 800, 110, 100 );
+
+ //make sure too big files don't get compacted
+ compactEquals(sfCreate(toArrayList(
+ 1002, 1001, 999, 800, 700, 12, 13, 11 ), toArrayList(
+ 900, 80, 50, 40, 30, 20, 4, 2 )),
+ 999, 800, 700, 12 );
+
+ }
+
+ public void testSizeBasedAssignment() throws IOException {
+
+ conf.setLong(strPrefix + strSchema + "MinCompactSize", 3);
+
+ conf.setLong(strPrefix + strSchema + "Tier.0.MaxSize", 10L);
+ conf.setLong(strPrefix + strSchema + "Tier.1.MaxSize", 100L);
+ conf.setLong(strPrefix + strSchema + "Tier.2.MaxSize", 1000L);
+ conf.setLong(strPrefix + strSchema + "Tier.0.MaxAgeInDisk", Long.MAX_VALUE);
+ conf.setLong(strPrefix + strSchema + "Tier.1.MaxAgeInDisk", Long.MAX_VALUE);
+ conf.setLong(strPrefix + strSchema + "Tier.2.MaxAgeInDisk", Long.MAX_VALUE);
+
+ compactEquals(sfCreate(false,
+ 500, 3, 2, 1 ),
+ 3, 2, 1 );
+
+ compactEquals(sfCreate(false,
+ 500, 8, 7, 6, 5, 4, 2, 1 )
+ /* empty */ );
+
+ compactEquals(sfCreate(false,
+ 500, 6, 8, 4, 7, 4, 2, 1 )
+ /* empty */ );
+
+ compactEquals(sfCreate(false,
+ 500, 23, 11, 8, 4, 1 )
+ /* empty */ );
+
+ compactEquals(sfCreate(false,
+ 500, 11, 23, 8, 4, 1 ),
+ 11, 23 );
+
+ compactEquals(sfCreate(false,
+ 500, 9, 23, 8, 4, 1 ),
+ 9, 23 );
+
+ compactEquals(sfCreate(false,
+ 500, 70, 23, 11, 8, 4, 1 )
+ /* empty */ );
+
+ compactEquals(sfCreate(false,
+ 500, 60, 23, 11, 8, 4, 1 ),
+ 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 500, 90, 60, 23, 11, 8, 4, 1 ),
+ 90, 60, 23, 11 );
+
+ conf.setBoolean(strPrefix + strSchema + "IsRecentFirstOrder", false);
+
+ compactEquals(sfCreate(false,
+ 500, 450, 60, 23, 11, 8, 4, 1 ),
+ 500, 450, 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 450, 500, 60, 23, 11, 8, 4, 1 ),
+ 450, 500, 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 1013, 1012, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 999, 450, 550 ),
+ 999, 450, 550 );
+
+ conf.setLong(strPrefix + strSchema + "MaxCompactSize", 10000);
+
+ compactEquals(sfCreate(false,
+ 1013, 1012, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 999, 450, 550 ),
+ 1013, 1012, 1011, 1010, 1009 );
+
+ compactEquals(sfCreate(false,
+ 1013, 992, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 999, 450, 550),
+ 1013, 992, 1011, 1010, 1009 );
+
+ compactEquals(sfCreate(false,
+ 992, 993, 1011, 990, 1009, 998, 1007, 996, 1005, 994, 1003, 992, 1001, 999, 450, 550 ),
+ 992, 993, 1011, 990, 1009 );
+
+ conf.setBoolean(strPrefix + strSchema + "IsRecentFirstOrder", true);
+
+ compactEquals(sfCreate(false,
+ 500, 450, 60, 23, 11, 8, 4, 1 ),
+ 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 450, 500, 60, 23, 11, 8, 4, 1 ),
+ 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 1013, 1012, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 999, 450, 550 ),
+ 999, 450, 550 );
+
+ compactEquals(sfCreate(false,
+ 992, 993, 1011, 990, 1009, 998, 1007, 996, 1005, 994, 1003, 992, 1001, 999, 450, 550 ),
+ 999, 450, 550 );
+
+ compactEquals(sfCreate(false,
+ 992, 993, 1011, 990, 1009, 998, 1007, 996, 1005, 994, 1003, 992, 991, 999, 450, 550 ),
+ 992, 991, 999, 450, 550 );
+
+ compactEquals(sfCreate(false,
+ 992, 993, 1011, 990, 1009, 998, 1007, 996, 1005, 994, 1003, 992, 991, 999, 450, 550, 1001),
+ 992, 993, 1011, 990, 1009 );
+
+ }
+
+ @Override
+ public void testCompactionRatio() throws IOException {
+ conf.setInt(strPrefix + strSchema + "NumCompactionTiers", 1);
+ conf.setFloat(strPrefix + strSchema + "Tier.0.CompactionRatio", 1.0F);
+ conf.setInt(strPrefix + "max", 5);
+ super.testCompactionRatio();
+ }
+
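+ // Disabled for the tier-based policy: the selection code in
+ // TierCompactionManager configures a ratio per tier and has no off-peak
+ // branch, so the default policy's off-peak ratio does not apply here.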
+ @Override
+ public void testOffPeakCompactionRatio() throws IOException {}
+
+}