You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by li...@apache.org on 2013/04/25 20:18:18 UTC
svn commit: r1475883 - in
/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase:
io/hfile/AbstractHFileReader.java io/hfile/HFile.java regionserver/Store.java
Author: liyin
Date: Thu Apr 25 18:18:18 2013
New Revision: 1475883
URL: http://svn.apache.org/r1475883
Log:
[HBASE-8423] Allow Major Compaction to Use Different Compression
Author: nspiegelberg
Summary:
For Titan, ~90% of the disk utilization is associated with major-compacted files,
but less than 33% of the get IOPS are due to major-compacted (mc) files. Allowing a
different compression algorithm for major-compacted files will allow us
to get ~30% disk savings. Latency should be higher, but storefile
querying is sequential, so only 1/3 of the get latency is from disk access to these
files. We need to deploy this to dark launch to understand the get latency increase.
Test Plan: - mvn test -Dtest=TestCompaction
Reviewers: liyintang, aaiyer, manukranthk
Reviewed By: liyintang
CC: hbase-eng@
Differential Revision: https://phabricator.fb.com/D779262
Modified:
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java?rev=1475883&r1=1475882&r2=1475883&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java Thu Apr 25 18:18:18 2013
@@ -130,10 +130,20 @@ public abstract class AbstractHFileReade
public abstract boolean isFileInfoLoaded();
+ public String toShortString() {
+ return path.toString() +
+ (!isFileInfoLoaded()? "":
+ ", encoding=" + getEncodingOnDisk() +
+ ", compression=" + compressAlgo.getName() +
+ ", entries=" + trailer.getEntryCount() +
+ ", length=" + fileSize);
+ }
+
@Override
public String toString() {
return "reader=" + path.toString() +
(!isFileInfoLoaded()? "":
+ ", encoding=" + getEncodingOnDisk() +
", compression=" + compressAlgo.getName() +
", cacheConf=" + cacheConf +
", firstKey=" + toStringFirstKey() +
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1475883&r1=1475882&r2=1475883&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Thu Apr 25 18:18:18 2013
@@ -499,6 +499,8 @@ public class HFile {
void close(boolean evictOnClose) throws IOException;
DataBlockEncoding getEncodingOnDisk();
+
+ String toShortString();
}
private static Reader pickReaderVersion(Path path, FSDataInputStream fsdis,
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1475883&r1=1475882&r2=1475883&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Thu Apr 25 18:18:18 2013
@@ -145,6 +145,7 @@ public class Store extends SchemaConfigu
private final int blocksize;
private final boolean blockcache;
private final Compression.Algorithm compression;
+ private final Compression.Algorithm mcCompression;
private HFileDataBlockEncoder dataBlockEncoder;
@@ -189,6 +190,8 @@ public class Store extends SchemaConfigu
this.blockcache = family.isBlockCacheEnabled();
this.blocksize = family.getBlocksize();
this.compression = family.getCompression();
+ String mcStr = conf.get("hbase.hstore.majorcompaction.compression", compression.getName());
+ this.mcCompression = Compression.Algorithm.valueOf(mcStr.toUpperCase());
this.dataBlockEncoder =
new HFileDataBlockEncoderImpl(family.getDataBlockEncodingOnDisk(),
@@ -961,8 +964,8 @@ public class Store extends SchemaConfigu
+ StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime()) + ", and took "
+ StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTimeMillis(),
compactionStartTime)
- + " to execute. New storefile name=" + (sf == null ? "none" : sf.toString())
- + ", size=" + (sf == null? "none" : StringUtils.humanReadableInt(sf.getReader().length()))
+ + " to execute. New storefile name="
+ + (sf == null ? "(none)" : sf.getReader().getHFileReader().toShortString())
+ "; total size for store is "
+ StringUtils.humanReadableInt(storeSize));
if (writer != null) {
@@ -1173,11 +1176,7 @@ public class Store extends SchemaConfigu
? r.getFilterEntries() : r.getEntries();
maxKeyCount += keyCount;
if (LOG.isDebugEnabled()) {
- LOG.debug("Compacting " + file +
- ", keycount=" + keyCount +
- ", bloomtype=" + r.getBloomFilterType().toString() +
- ", size=" + StringUtils.humanReadableInt(r.length()) +
- ", encoding=" + r.getHFileReader().getEncodingOnDisk());
+ LOG.debug("Compacting " + r.getHFileReader().toShortString());
}
}
}
@@ -1190,6 +1189,8 @@ public class Store extends SchemaConfigu
// Make the instantiation lazy in case compaction produces no product; i.e.
// where all source cells are expired or deleted.
StoreFile.Writer writer = null;
+ // determine compression type (may be different for major compaction)
+ Compression.Algorithm compression = (majorCompaction) ? this.mcCompression : this.compression;
// Find the smallest read point across all the Scanners.
long smallestReadPoint = region.getSmallestReadPoint();
MultiVersionConsistencyControl.setThreadReadPoint(smallestReadPoint);