Posted to commits@hbase.apache.org by te...@apache.org on 2012/10/30 21:14:02 UTC
svn commit: r1403852 [2/2] - in /hbase/trunk:
hbase-common/src/main/java/org/apache/hadoop/hbase/
hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/
hbase-server/src/main/java/...
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1403852&r1=1403851&r2=1403852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Tue Oct 30 20:14:01 2012
@@ -303,6 +303,8 @@ public class TestCompaction extends HBas
conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);
HStore s = ((HStore) r.getStore(COLUMN_FAMILY));
+ // TODO: temporary call, until HBASE-3909 is committed in some form.
+ s.updateConfiguration();
try {
createStoreFile(r);
createStoreFile(r);
@@ -314,9 +316,10 @@ public class TestCompaction extends HBas
assertEquals(2, s.getStorefilesCount());
// ensure that major compaction time is deterministic
- long mcTime = s.getNextMajorCompactTime();
+ CompactionManager c = s.compactionManager;
+ long mcTime = c.getNextMajorCompactTime();
for (int i = 0; i < 10; ++i) {
- assertEquals(mcTime, s.getNextMajorCompactTime());
+ assertEquals(mcTime, c.getNextMajorCompactTime());
}
// ensure that the major compaction time is within the variance
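The assertion loop in the hunk above only passes if getNextMajorCompactTime()
is deterministic for a fixed store: the jitter applied to the major compaction
period must come from a stable seed rather than fresh randomness on each call.
A minimal sketch of that contract in Java (hypothetical names and seeding
scheme, not the committed CompactionManager code):

    import java.util.Random;

    final class MajorCompactTimeSketch {
      // Jitter the configured period deterministically: seeding the RNG from
      // something stable per store (e.g. a hash of its store-file names)
      // makes repeated calls return the same value, which is exactly what
      // the assertEquals loop above checks.
      static long nextMajorCompactTime(long periodMs, float jitterPct, long stableSeed) {
        long jitter = Math.round(periodMs * jitterPct);
        Random rnd = new Random(stableSeed); // stable seed => stable jitter
        return periodMs + jitter - Math.round(2.0 * jitter * rnd.nextDouble());
      }
    }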
Added: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java?rev=1403852&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java (added)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java Tue Oct 30 20:14:01 2012
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Calendar;
+import java.util.GregorianCalendar;
+import java.util.List;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import com.google.common.collect.Lists;
+
+@Category(SmallTests.class)
+public class TestDefaultCompactSelection extends TestCase {
+ private final static Log LOG = LogFactory.getLog(TestDefaultCompactSelection.class);
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ protected Configuration conf;
+ protected HStore store;
+ private static final String DIR =
+ TEST_UTIL.getDataTestDir("TestCompactSelection").toString();
+ private static Path TEST_FILE;
+ private CompactionManager manager;
+
+ protected static final int minFiles = 3;
+ protected static final int maxFiles = 5;
+
+ protected static final long minSize = 10;
+ protected static final long maxSize = 1000;
+
+
+ @Override
+ public void setUp() throws Exception {
+ // setup config values necessary for store
+ this.conf = TEST_UTIL.getConfiguration();
+ this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
+ this.conf.setInt("hbase.hstore.compaction.min", minFiles);
+ this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
+ this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
+ this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
+ this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);
+
+ //Setting up a Store
+ Path basedir = new Path(DIR);
+ String logName = "logs";
+ Path logdir = new Path(DIR, logName);
+ Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
+ HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
+ FileSystem fs = FileSystem.get(conf);
+
+ fs.delete(logdir, true);
+
+ HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("table"));
+ htd.addFamily(hcd);
+ HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
+
+ HLog hlog = HLogFactory.createHLog(fs, basedir,
+ logName, conf);
+ HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
+ HRegion.closeHRegion(region);
+ Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));
+ region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
+
+ store = new HStore(basedir, region, hcd, fs, conf);
+ manager = store.compactionManager;
+
+ TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
+ fs.create(TEST_FILE);
+ }
+
+ // used so our tests don't deal with actual StoreFiles
+ static class MockStoreFile extends StoreFile {
+ long length = 0;
+ boolean isRef = false;
+ long ageInDisk;
+ long sequenceid;
+
+ MockStoreFile(long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
+ super(TEST_UTIL.getTestFileSystem(), TEST_FILE, TEST_UTIL.getConfiguration(),
+ new CacheConfig(TEST_UTIL.getConfiguration()), BloomType.NONE,
+ NoOpDataBlockEncoder.INSTANCE);
+ this.length = length;
+ this.isRef = isRef;
+ this.ageInDisk = ageInDisk;
+ this.sequenceid = sequenceid;
+ }
+
+ void setLength(long newLen) {
+ this.length = newLen;
+ }
+
+ @Override
+ public boolean hasMinFlushTime() {
+ return ageInDisk != 0;
+ }
+
+ @Override
+ public long getMinFlushTime() {
+ if (ageInDisk < 0) {
+ return ageInDisk;
+ }
+ return EnvironmentEdgeManager.currentTimeMillis() - ageInDisk;
+ }
+
+ @Override
+ public long getMaxSequenceId() {
+ return sequenceid;
+ }
+
+ @Override
+ boolean isMajorCompaction() {
+ return false;
+ }
+
+ @Override
+ boolean isReference() {
+ return this.isRef;
+ }
+
+ @Override
+ public StoreFile.Reader getReader() {
+ final long len = this.length;
+ return new StoreFile.Reader() {
+ @Override
+ public long length() {
+ return len;
+ }
+ };
+ }
+ }
+
+ ArrayList<Long> toArrayList(long... numbers) {
+ ArrayList<Long> result = new ArrayList<Long>();
+ for (long i : numbers) {
+ result.add(i);
+ }
+ return result;
+ }
+
+ List<StoreFile> sfCreate(long... sizes) throws IOException {
+ ArrayList<Long> ageInDisk = new ArrayList<Long>();
+ for (int i = 0; i < sizes.length; i++) {
+ ageInDisk.add(0L);
+ }
+ return sfCreate(toArrayList(sizes), ageInDisk);
+ }
+
+ List<StoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
+ throws IOException {
+ return sfCreate(false, sizes, ageInDisk);
+ }
+
+ List<StoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
+ ArrayList<Long> ageInDisk = new ArrayList<Long>(sizes.length);
+ for (int i = 0; i < sizes.length; i++) {
+ ageInDisk.add(0L);
+ }
+ return sfCreate(isReference, toArrayList(sizes), ageInDisk);
+ }
+
+ List<StoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
+ throws IOException {
+ List<StoreFile> ret = Lists.newArrayList();
+ for (int i = 0; i < sizes.size(); i++) {
+ ret.add(new MockStoreFile(sizes.get(i), ageInDisk.get(i), isReference, i));
+ }
+ return ret;
+ }
+
+ long[] getSizes(List<StoreFile> sfList) {
+ long[] aNums = new long[sfList.size()];
+ for (int i = 0; i < sfList.size(); ++i) {
+ aNums[i] = sfList.get(i).getReader().length();
+ }
+ return aNums;
+ }
+
+ void compactEquals(List<StoreFile> candidates, long... expected)
+ throws IOException {
+ compactEquals(candidates, false, expected);
+ }
+
+ void compactEquals(List<StoreFile> candidates, boolean forcemajor,
+ long ... expected)
+ throws IOException {
+ store.forceMajor = forcemajor;
+ //Test Default compactions
+ List<StoreFile> actual = store.compactionManager
+ .selectCompaction(candidates, Store.NO_PRIORITY, forcemajor).getFilesToCompact();
+ assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
+ store.forceMajor = false;
+ }
+
+ public void testCompactionRatio() throws IOException {
+ /**
+ * NOTE: these tests are specific to the implementation of the
+ * current compaction algorithm. They exist to ensure that refactoring
+ * doesn't implicitly alter it.
+ */
+ long tooBig = maxSize + 1;
+
+ // default case. preserve user ratio on size
+ compactEquals(sfCreate(100,50,23,12,12), 23, 12, 12);
+ // less than compact threshold = don't compact
+ compactEquals(sfCreate(100,50,25,12,12) /* empty */);
+ // greater than compact size = skip those
+ compactEquals(sfCreate(tooBig, tooBig, 700, 700, 700), 700, 700, 700);
+ // big size + threshold
+ compactEquals(sfCreate(tooBig, tooBig, 700,700) /* empty */);
+ // small files = don't care about ratio
+ compactEquals(sfCreate(8,3,1), 8,3,1);
+ /* TODO: add sorting + unit test back in when HBASE-2856 is fixed
+ // sort first so you don't include a huge file at the tail end.
+ // happens with HFileOutputFormat bulk migration
+ compactEquals(sfCreate(100,50,23,12,12, 500), 23, 12, 12);
+ */
+ // don't exceed max file compact threshold
+ // note: files are considered from largest to smallest.
+ compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);
+
+ /* MAJOR COMPACTION */
+ // if a major compaction has been forced, then compact everything
+ compactEquals(sfCreate(50,25,12,12), true, 50, 25, 12, 12);
+ // also choose files < threshold on major compaction
+ compactEquals(sfCreate(12,12), true, 12, 12);
+ // even if one of those files is too big
+ compactEquals(sfCreate(tooBig, 12,12), true, tooBig, 12, 12);
+ // don't exceed max file compact threshold, even with major compaction
+ store.forceMajor = true;
+ assertEquals(maxFiles,
+ manager.selectCompaction(sfCreate(7, 6, 5, 4, 3, 2, 1), Store.NO_PRIORITY, false)
+ .getFilesToCompact().size());
+ compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);
+ store.forceMajor = false;
+ // if we exceed maxCompactSize, downgrade to minor
+ // if not, it creates a 'snowball effect' when files >> maxCompactSize:
+ // the last file in compaction is the aggregate of all previous compactions
+ compactEquals(sfCreate(100,50,23,12,12), true, 23, 12, 12);
+ conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1);
+ conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
+ store.updateConfiguration();
+ try {
+ // trigger an aged major compaction
+ compactEquals(sfCreate(50,25,12,12), 50, 25, 12, 12);
+ // make sure exceeding maxCompactSize also downgrades aged minors
+ compactEquals(sfCreate(100,50,23,12,12), 23, 12, 12);
+ } finally {
+ conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
+ conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
+ }
+
+ /* REFERENCES == file is from a region that was split */
+ // treat storefiles that have references like a major compaction
+ compactEquals(sfCreate(true, 100,50,25,12,12), 100, 50, 25, 12, 12);
+ // reference files shouldn't obey the max-size threshold
+ compactEquals(sfCreate(true, tooBig, 12,12), tooBig, 12, 12);
+ // reference files should still obey the max-files limit to avoid OOM
+ compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);
+
+ // empty case
+ compactEquals(new ArrayList<StoreFile>() /* empty */);
+ // empty case (because all files are too big)
+ compactEquals(sfCreate(tooBig, tooBig) /* empty */);
+ }
+
+ public void testOffPeakCompactionRatio() throws IOException {
+ /*
+ * NOTE: these tests are specific to the implementation of the
+ * current compaction algorithm. They exist to ensure that refactoring
+ * doesn't implicitly alter it.
+ */
+ long tooBig = maxSize + 1;
+
+ Calendar calendar = new GregorianCalendar();
+ int hourOfDay = calendar.get(Calendar.HOUR_OF_DAY);
+ LOG.debug("Hour of day = " + hourOfDay);
+ int hourPlusOne = ((hourOfDay+1)%24);
+ int hourMinusOne = ((hourOfDay-1+24)%24);
+ int hourMinusTwo = ((hourOfDay-2+24)%24);
+
+ // check compact selection without any off-peak setting
+ LOG.debug("Testing compact selection without off-peak settings...");
+ compactEquals(sfCreate(999,50,12,12,1), 12, 12, 1);
+
+ // set an off-peak compaction threshold
+ this.conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
+
+ // set the off-peak window to include the current hour and check compact selection
+ this.conf.setLong("hbase.offpeak.start.hour", hourMinusOne);
+ this.conf.setLong("hbase.offpeak.end.hour", hourPlusOne);
+ LOG.debug("Testing compact selection with off-peak settings (" +
+ hourMinusOne + ", " + hourPlusOne + ")");
+ // update the compaction policy to include conf changes
+ store.setCompactionPolicy(CompactionManager.class.getName());
+ compactEquals(sfCreate(999, 50, 12, 12, 1), 50, 12, 12, 1);
+
+ // set the off-peak window to exclude the current hour and check compact selection
+ this.conf.setLong("hbase.offpeak.start.hour", hourMinusTwo);
+ this.conf.setLong("hbase.offpeak.end.hour", hourMinusOne);
+ store.setCompactionPolicy(CompactionManager.class.getName());
+ LOG.debug("Testing compact selection with off-peak settings (" +
+ hourMinusTwo + ", " + hourMinusOne + ")");
+ compactEquals(sfCreate(999,50,12,12, 1), 12, 12, 1);
+ }
+}
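The expectations in testCompactionRatio above follow from the default
size-ratio rule: candidates are scanned from oldest (typically largest) to
newest, and the scan stops at the first file whose size is at most ratio
times the sum of the sizes of the newer files; a selection with fewer than
hbase.hstore.compaction.min files is then discarded. A standalone sketch of
that rule (an illustration under those assumptions, not the committed
selection code):

    import java.util.ArrayList;
    import java.util.List;

    public class RatioRuleSketch {
      // With ratio = 1.0 and sizes [100, 50, 23, 12, 12]:
      //   100 > 1.0 * (50+23+12+12) = 97 -> skipped
      //    50 > 1.0 * (23+12+12)    = 47 -> skipped
      //    23 <= 1.0 * (12+12)      = 24 -> kept, so [23, 12, 12] is selected.
      // For [100, 50, 25, 12, 12] only [12, 12] survives the scan, which is
      // below the min-files threshold (3 here), so nothing is compacted.
      static List<Long> select(long[] sizes, double ratio, int minFiles) {
        int start = 0;
        while (start < sizes.length) {
          long sumNewer = 0;
          for (int i = start + 1; i < sizes.length; i++) sumNewer += sizes[i];
          if (sizes[start] <= ratio * sumNewer) break; // ratio satisfied
          start++;                                     // skip the oversized file
        }
        List<Long> picked = new ArrayList<Long>();
        for (int i = start; i < sizes.length; i++) picked.add(sizes[i]);
        return picked.size() >= minFiles ? picked : new ArrayList<Long>();
      }

      public static void main(String[] args) {
        System.out.println(select(new long[] {100, 50, 23, 12, 12}, 1.0, 3)); // [23, 12, 12]
        System.out.println(select(new long[] {100, 50, 25, 12, 12}, 1.0, 3)); // []
      }
    }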
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1403852&r1=1403851&r2=1403852&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Tue Oct 30 20:14:01 2012
@@ -216,17 +216,15 @@ public class TestStore extends TestCase
flush(i);
}
// after flush; check the lowest time stamp
- long lowestTimeStampFromStore =
- HStore.getLowestTimestamp(store.getStorefiles());
- long lowestTimeStampFromFS =
- getLowestTimeStampFromFS(fs,store.getStorefiles());
- assertEquals(lowestTimeStampFromStore,lowestTimeStampFromFS);
-
+ long lowestTimeStampFromManager = CompactionManager.getLowestTimestamp(store.getStorefiles());
+ long lowestTimeStampFromFS = getLowestTimeStampFromFS(fs, store.getStorefiles());
+ assertEquals(lowestTimeStampFromManager,lowestTimeStampFromFS);
+
// after compact; check the lowest time stamp
store.compact(store.requestCompaction());
- lowestTimeStampFromStore = HStore.getLowestTimestamp(store.getStorefiles());
- lowestTimeStampFromFS = getLowestTimeStampFromFS(fs,store.getStorefiles());
- assertEquals(lowestTimeStampFromStore,lowestTimeStampFromFS);
+ lowestTimeStampFromManager = CompactionManager.getLowestTimestamp(store.getStorefiles());
+ lowestTimeStampFromFS = getLowestTimeStampFromFS(fs, store.getStorefiles());
+ assertEquals(lowestTimeStampFromManager, lowestTimeStampFromFS);
}
private static long getLowestTimeStampFromFS(FileSystem fs,
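The hunk above moves getLowestTimestamp from HStore to CompactionManager; the
test keeps asserting that it agrees with the minimum modification time seen
on the filesystem, both before and after a compaction rewrites the files. A
sketch of the invariant being tested (stand-in java.io.File types, not the
HBase signatures):

    import java.io.File;

    final class LowestTimestampSketch {
      // The lowest timestamp over a set of store files should equal the
      // minimum per-file modification time reported by the filesystem.
      static long lowestTimestamp(File[] storeFiles) {
        long min = Long.MAX_VALUE;
        for (File f : storeFiles) {
          min = Math.min(min, f.lastModified());
        }
        return min;
      }
    }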
Added: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTierCompactSelection.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTierCompactSelection.java?rev=1403852&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTierCompactSelection.java (added)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTierCompactSelection.java Tue Oct 30 20:14:01 2012
@@ -0,0 +1,318 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.junit.experimental.categories.Category;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.SmallTests;
+
+@Category(SmallTests.class)
+public class TestTierCompactSelection extends TestDefaultCompactSelection {
+ private final static Log LOG = LogFactory.getLog(TestTierCompactSelection.class);
+
+ private static final int numTiers = 4;
+
+ private String strPrefix, strSchema, strTier;
+
+
+ @Override
+ public void setUp() throws Exception {
+
+ super.setUp();
+
+ // setup config values necessary for store
+ strPrefix = "hbase.hstore.compaction.";
+ strSchema = "tbl." + store.getHRegion().getTableDesc().getNameAsString()
+ + "cf." + store.getFamily().getNameAsString() + ".";
+
+ this.conf.setStrings(strPrefix + "CompactionPolicy", "TierBasedCompactionPolicy");
+
+ this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
+
+ // The following parameters are for default compaction
+ // Some of them are used as default values of tier based compaction
+ this.conf.setInt(strPrefix + "min", 2);
+ this.conf.setInt(strPrefix + "max", 10);
+ this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 0);
+ this.conf.setLong(strPrefix + "max.size", 10000);
+ this.conf.setFloat(strPrefix + "ratio", 10.0F);
+
+ // Specifying the family parameters here
+ conf.setInt(strPrefix + strSchema + "NumCompactionTiers", numTiers);
+ conf.setLong(strPrefix + strSchema + "MinCompactSize", minSize);
+ conf.setLong(strPrefix + strSchema + "MaxCompactSize", maxSize);
+
+ // Specifying parameters for the default tier
+ strTier = "";
+ conf.setFloat(strPrefix + strSchema + strTier + "CompactionRatio", 0.1F);
+ conf.setInt(strPrefix + strSchema + strTier + "MinFilesToCompact", minFiles);
+ conf.setInt(strPrefix + strSchema + strTier + "MaxFilesToCompact", maxFiles);
+
+ // Specifying parameters for individual tiers here
+
+ // Don't compact in this tier (likely to be in block cache)
+ strTier = "Tier.0.";
+ conf.setFloat(strPrefix + strSchema + strTier + "CompactionRatio", 0.0F);
+
+ // Most aggressive tier
+ strTier = "Tier.1.";
+ conf.setFloat(strPrefix + strSchema + strTier + "CompactionRatio", 2.0F);
+ conf.setInt(strPrefix + strSchema + strTier + "MinFilesToCompact", 2);
+ conf.setInt(strPrefix + strSchema + strTier + "MaxFilesToCompact", 10);
+
+ // Medium tier
+ strTier = "Tier.2.";
+ conf.setFloat(strPrefix + strSchema + strTier + "CompactionRatio", 1.0F);
+ // Also include files in tier 1 here
+ conf.setInt(strPrefix + strSchema + strTier + "EndingIndexForTier", 1);
+
+ // Last tier - least aggressive compaction
+ // has default tier settings only
+ // Max Time elapsed is Infinity by default
+
+ }
+
+ @Override
+ void compactEquals(
+ List<StoreFile> candidates, boolean forcemajor,
+ long... expected
+ )
+ throws IOException {
+ store.forceMajor = forcemajor;
+ // update the policy in case the configuration changed
+ store.setCompactionPolicy(TierCompactionManager.class.getName());
+ List<StoreFile> actual =
+ store.compactionManager.selectCompaction(candidates, Store.NO_PRIORITY, forcemajor).getFilesToCompact();
+ store.forceMajor = false;
+ assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
+ }
+
+ public void testAgeBasedAssignment() throws IOException {
+
+ conf.setLong(strPrefix + strSchema + "Tier.0.MaxAgeInDisk", 10L);
+ conf.setLong(strPrefix + strSchema + "Tier.1.MaxAgeInDisk", 100L);
+ conf.setLong(strPrefix + strSchema + "Tier.2.MaxAgeInDisk", 1000L);
+ conf.setLong(strPrefix + strSchema + "Tier.0.MaxSize", Long.MAX_VALUE);
+ conf.setLong(strPrefix + strSchema + "Tier.1.MaxSize", Long.MAX_VALUE);
+ conf.setLong(strPrefix + strSchema + "Tier.2.MaxSize", Long.MAX_VALUE);
+
+ //everything in first tier, don't compact!
+ compactEquals(sfCreate(toArrayList(
+ 151, 30, 13, 12, 11 ), toArrayList( // Sizes
+ 8, 5, 4, 2, 1 )) // ageInDisk ( = currentTime - minFlushTime)
+ /* empty expected */ ); // Selected sizes
+
+ //below minSize should compact
+ compactEquals(sfCreate(toArrayList(
+ 12, 11, 8, 3, 1 ), toArrayList(
+ 8, 5, 4, 2, 1 )),
+ 8, 3, 1 );
+
+ //everything in second tier
+ compactEquals(sfCreate(toArrayList(
+ 251, 70, 13, 12, 11 ), toArrayList(
+ 80, 50, 40, 20, 11 )),
+ 70, 13, 12, 11 );
+
+ //everything in third tier
+ compactEquals(sfCreate(toArrayList(
+ 251, 70, 13, 12, 11 ), toArrayList(
+ 800, 500, 400, 200, 110 )),
+ 13, 12, 11 );
+
+ //everything in fourth tier
+ compactEquals(sfCreate(toArrayList(
+ 251, 70, 13, 12, 11 ), toArrayList(
+ 8000, 5000, 4000, 2000, 1100 ))
+ /* empty expected */ );
+
+ //Valid compaction in 4th tier with ratio 0.10, hits maxFilesToCompact
+ compactEquals(sfCreate(toArrayList(
+ 500, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80 ), toArrayList(
+ 5094, 5093, 5092, 5091, 5090, 5089, 5088, 5087, 5086, 5085, 5084, 5083, 5082, 5081, 5080)),
+ 93, 92, 91, 90, 89 );
+
+ //Now mixing tiers 1,0, expected selection in tier 1 only
+ compactEquals(sfCreate(toArrayList(
+ 999, 110, 100, 12, 11 ), toArrayList(
+ 90, 80, 50, 4, 1 )),
+ 110, 100 );
+
+ //Mixing tier 2,1, expected selection in tier 2 including tier 1 but not zero
+ compactEquals(sfCreate(toArrayList(
+ 999, 110, 100, 12, 11 ), toArrayList(
+ 900, 800, 500, 40, 1 )),
+ 110, 100, 12 );
+
+ //Mixing tier 2,1, expected selection in tier 1 because of recentFirstOrder = true
+ compactEquals(sfCreate(toArrayList(
+ 999, 110, 100, 12, 13, 11 ), toArrayList(
+ 900, 800, 500, 40, 30, 1 )),
+ 12, 13 );
+
+ conf.setBoolean(strPrefix + strSchema + "IsRecentFirstOrder", false);
+
+ //Mixing tier 2,1, expected selection in tier 2 because recentFirstOrder = false
+ compactEquals(sfCreate(toArrayList(
+ 999, 110, 100, 12, 13, 11 ), toArrayList(
+ 900, 800, 500, 40, 30, 1 )),
+ 110, 100, 12, 13 );
+
+ //Mixing all tier 3,2,1,0 expected selection in tier 1 only
+ compactEquals(sfCreate(toArrayList(
+ 999, 800, 110, 100, 12, 13, 11 ), toArrayList(
+ 9000, 800, 50, 40, 8, 3, 1 )),
+ 110, 100 );
+
+ //Checking backward compatibility, first 3 files don't have minFlushTime,
+ //all should go to tier 1, not tier 0
+ compactEquals(sfCreate(toArrayList(
+ 999, 800, 110, 100, 12, 13, 11 ), toArrayList(
+ 0, 0, 0, 40, 8, 3, 1 )),
+ 999, 800, 110, 100 );
+
+ //make sure files that are too big don't get compacted
+ compactEquals(sfCreate(toArrayList(
+ 1002, 1001, 999, 800, 700, 12, 13, 11 ), toArrayList(
+ 900, 80, 50, 40, 30, 20, 4, 2 )),
+ 999, 800, 700, 12 );
+
+ }
+
+ public void testSizeBasedAssignment() throws IOException {
+
+ conf.setLong(strPrefix + strSchema + "MinCompactSize", 3);
+
+ conf.setLong(strPrefix + strSchema + "Tier.0.MaxSize", 10L);
+ conf.setLong(strPrefix + strSchema + "Tier.1.MaxSize", 100L);
+ conf.setLong(strPrefix + strSchema + "Tier.2.MaxSize", 1000L);
+ conf.setLong(strPrefix + strSchema + "Tier.0.MaxAgeInDisk", Long.MAX_VALUE);
+ conf.setLong(strPrefix + strSchema + "Tier.1.MaxAgeInDisk", Long.MAX_VALUE);
+ conf.setLong(strPrefix + strSchema + "Tier.2.MaxAgeInDisk", Long.MAX_VALUE);
+
+ compactEquals(sfCreate(false,
+ 500, 3, 2, 1 ),
+ 3, 2, 1 );
+
+ compactEquals(sfCreate(false,
+ 500, 8, 7, 6, 5, 4, 2, 1 )
+ /* empty */ );
+
+ compactEquals(sfCreate(false,
+ 500, 6, 8, 4, 7, 4, 2, 1 )
+ /* empty */ );
+
+ compactEquals(sfCreate(false,
+ 500, 23, 11, 8, 4, 1 )
+ /* empty */ );
+
+ compactEquals(sfCreate(false,
+ 500, 11, 23, 8, 4, 1 ),
+ 11, 23 );
+
+ compactEquals(sfCreate(false,
+ 500, 9, 23, 8, 4, 1 ),
+ 9, 23 );
+
+ compactEquals(sfCreate(false,
+ 500, 70, 23, 11, 8, 4, 1 )
+ /* empty */ );
+
+ compactEquals(sfCreate(false,
+ 500, 60, 23, 11, 8, 4, 1 ),
+ 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 500, 90, 60, 23, 11, 8, 4, 1 ),
+ 90, 60, 23, 11 );
+
+ conf.setBoolean(strPrefix + strSchema + "IsRecentFirstOrder", false);
+
+ compactEquals(sfCreate(false,
+ 500, 450, 60, 23, 11, 8, 4, 1 ),
+ 500, 450, 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 450, 500, 60, 23, 11, 8, 4, 1 ),
+ 450, 500, 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 1013, 1012, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 999, 450, 550 ),
+ 999, 450, 550 );
+
+ conf.setLong(strPrefix + strSchema + "MaxCompactSize", 10000);
+
+ compactEquals(sfCreate(false,
+ 1013, 1012, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 999, 450, 550 ),
+ 1013, 1012, 1011, 1010, 1009 );
+
+ compactEquals(sfCreate(false,
+ 1013, 992, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 999, 450, 550),
+ 1013, 992, 1011, 1010, 1009 );
+
+ compactEquals(sfCreate(false,
+ 992, 993, 1011, 990, 1009, 998, 1007, 996, 1005, 994, 1003, 992, 1001, 999, 450, 550 ),
+ 992, 993, 1011, 990, 1009 );
+
+ conf.setBoolean(strPrefix + strSchema + "IsRecentFirstOrder", true);
+
+ compactEquals(sfCreate(false,
+ 500, 450, 60, 23, 11, 8, 4, 1 ),
+ 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 450, 500, 60, 23, 11, 8, 4, 1 ),
+ 60, 23, 11 );
+
+ compactEquals(sfCreate(false,
+ 1013, 1012, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 999, 450, 550 ),
+ 999, 450, 550 );
+
+ compactEquals(sfCreate(false,
+ 992, 993, 1011, 990, 1009, 998, 1007, 996, 1005, 994, 1003, 992, 1001, 999, 450, 550 ),
+ 999, 450, 550 );
+
+ compactEquals(sfCreate(false,
+ 992, 993, 1011, 990, 1009, 998, 1007, 996, 1005, 994, 1003, 992, 991, 999, 450, 550 ),
+ 992, 991, 999, 450, 550 );
+
+ compactEquals(sfCreate(false,
+ 992, 993, 1011, 990, 1009, 998, 1007, 996, 1005, 994, 1003, 992, 991, 999, 450, 550, 1001),
+ 992, 993, 1011, 990, 1009 );
+
+ }
+
+ @Override
+ public void testCompactionRatio() throws IOException {
+ conf.setInt(strPrefix + strSchema + "NumCompactionTiers", 1);
+ conf.setFloat(strPrefix + strSchema + "Tier.0.CompactionRatio", 1.0F);
+ conf.setInt(strPrefix + "max", 5);
+ super.testCompactionRatio();
+ }
+
+ @Override
+ public void testOffPeakCompactionRatio() throws IOException {}
+
+}
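A note on the configuration keys built in setUp() above: every per-tier value
composes as prefix + schema + "Tier.<n>." + name, and a lookup falls back
from the tier-specific key to the schema-level default when the tier does not
override it. A minimal sketch of that resolution order (the helper is an
assumption for illustration, not the TierCompactionManager API):

    import org.apache.hadoop.conf.Configuration;

    final class TierConfSketch {
      // Try the tier key first, then the schema-level default, then a
      // hard-coded fallback. The key layout mirrors the test:
      // "hbase.hstore.compaction." + "tbl.<table>cf.<cf>." + "Tier.<n>." + name
      static float tierCompactionRatio(Configuration conf, String prefix,
                                       String schema, int tier, float fallback) {
        String tierKey = prefix + schema + "Tier." + tier + ".CompactionRatio";
        String defaultKey = prefix + schema + "CompactionRatio";
        return conf.getFloat(tierKey, conf.getFloat(defaultKey, fallback));
      }
    }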