Posted to commits@hbase.apache.org by ns...@apache.org on 2011/10/11 04:16:00 UTC
svn commit: r1181517 - in /hbase/branches/0.89/src:
main/java/org/apache/hadoop/hbase/io/
main/java/org/apache/hadoop/hbase/io/hfile/
main/java/org/apache/hadoop/hbase/mapreduce/
main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hado...
Author: nspiegelberg
Date: Tue Oct 11 02:15:59 2011
New Revision: 1181517
URL: http://svn.apache.org/viewvc?rev=1181517&view=rev
Log:
Back-porting the changes to cache blocks on HFile write and evict blocks on HFile close (HBASE-3287, 3417) to hbase-89.
Summary:
This is a straightforward back-port of Jonathan Gray's changes from
https://issues.apache.org/jira/browse/HBASE-3287 and
https://issues.apache.org/jira/browse/HBASE-3417 to hbase-89, with some
trivial conflict resolution. This is being done as a preliminary step toward
adding cache-on-write to Bloom filters (Task #496626).
Two boolean configuration options are added: hbase.rs.cacheblocksonwrite
(populates the HFile block cache as each block is finished during a write) and
hbase.rs.evictblocksonclose (evicts all of an HFile's blocks from the cache
when the file is closed).
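For illustration, here is a minimal sketch of flipping both new flags
programmatically; it assumes nothing beyond the stock Hadoop Configuration
API, and the class name is made up:

  import org.apache.hadoop.conf.Configuration;

  public class CacheFlagsSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Cache each HFile block as it is finished during a write (off by default).
      conf.setBoolean("hbase.rs.cacheblocksonwrite", true);
      // Evict all of an HFile's blocks from the cache when the file is closed.
      conf.setBoolean("hbase.rs.evictblocksonclose", true);
      System.out.println("cacheblocksonwrite = "
          + conf.getBoolean("hbase.rs.cacheblocksonwrite", false));
    }
  }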
Test Plan:
Ran unit tests. TestHFile and TestColumnSeeking initially failed; after my
fixes, all unit tests should pass (I will rerun them to double-check). Not
sure what kind of load testing should be done here -- are there standard
end-to-end integration test suites?
Reviewed By: jgray
Reviewers: kenny, jgray, kannan, kranganathan
Commenters: kannan
CC: kenny, jgray, mbautin, liyintang, kannan
Tasks:
#496626: better handling of large sized blooms
Revert Plan:
OK
Differential Revision: 231742
Modified:
hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
hbase/branches/0.89/src/main/resources/hbase-default.xml
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java Tue Oct 11 02:15:59 2011
@@ -62,7 +62,7 @@ public class HalfStoreFileReader extends
public HalfStoreFileReader(final FileSystem fs, final Path p, final BlockCache c,
final Reference r)
throws IOException {
- super(fs, p, c, false);
+ super(fs, p, c, false, false);
// This is not actual midkey for this half-file; its just border
// around which we split top and bottom. Have to look in files to find
// actual last and first keys for bottom and top halves. Half-files don't
Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java Tue Oct 11 02:15:59 2011
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.io.hfile
import java.nio.ByteBuffer;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;
+
/**
* Block cache interface.
* TODO: Add filename or hash of filename to block cache key.
@@ -49,6 +51,19 @@ public interface BlockCache {
public ByteBuffer getBlock(String blockName);
/**
+ * Evict block from cache.
+ * @param blockName Block name to evict
+ * @return true if block existed and was evicted, false if not
+ */
+ public boolean evictBlock(String blockName);
+
+ /**
+ * Get the statistics for this block cache.
+ * @return the cache statistics
+ */
+ public CacheStats getStats();
+
+ /**
* Shutdown the cache.
*/
public void shutdown();
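For context, a hedged sketch of exercising the extended BlockCache contract
against LruBlockCache; the block name and sizes are illustrative, and the
two-argument LruBlockCache constructor (max size, block size) is an
assumption about this branch:

  import java.nio.ByteBuffer;

  import org.apache.hadoop.hbase.io.hfile.BlockCache;
  import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
  import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;

  public class EvictBlockSketch {
    public static void main(String[] args) {
      // Assumed constructor: (maximum cache size in bytes, expected block size).
      BlockCache cache = new LruBlockCache(1024 * 1024, 64 * 1024);
      cache.cacheBlock("somehfile0", ByteBuffer.wrap(new byte[128]));
      // evictBlock returns true only if the block was present and removed.
      boolean evicted = cache.evictBlock("somehfile0");
      CacheStats stats = cache.getStats();
      System.out.println("evicted=" + evicted + ", hits=" + stats.getHitCount()
          + ", misses=" + stats.getMissCount());
    }
  }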
Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Tue Oct 11 02:15:59 2011
@@ -20,10 +20,11 @@
package org.apache.hadoop.hbase.io.hfile;
import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
-import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -48,10 +49,10 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -283,6 +284,14 @@ public class HFile {
// May be null if we were passed a stream.
private Path path = null;
+ // Block cache to optionally fill on write
+ private BlockCache blockCache;
+
+ // Additional byte array output stream used to fill block cache
+ private ByteArrayOutputStream baos;
+ private DataOutputStream baosDos;
+ private int blockNumber = 0;
+
/**
* Constructor that uses all defaults for compression and block size.
* @param fs
@@ -292,7 +301,7 @@ public class HFile {
public Writer(FileSystem fs, Path path)
throws IOException {
this(fs, path, DEFAULT_BLOCKSIZE, DEFAULT_BYTES_PER_CHECKSUM,
- (Compression.Algorithm) null, null);
+ (Compression.Algorithm) null, null, null);
}
/**
@@ -312,7 +321,7 @@ public class HFile {
this(fs, path, blocksize, bytesPerChecksum,
compress == null? DEFAULT_COMPRESSION_ALGORITHM:
Compression.getCompressionAlgorithmByName(compress),
- comparator);
+ comparator, null);
}
/**
@@ -327,7 +336,7 @@ public class HFile {
*/
public Writer(FileSystem fs, Path path, int blocksize, int bytesPerChecksum,
Compression.Algorithm compress,
- final KeyComparator comparator)
+ final KeyComparator comparator, BlockCache blockCache)
throws IOException {
this(fs.create(path,
FsPermission.getDefault(),
@@ -339,8 +348,9 @@ public class HFile {
null),
blocksize, compress, comparator);
this.closeOutputStream = true;
- this.name = path.toString();
+ this.name = path.getName();
this.path = path;
+ this.blockCache = blockCache;
}
/**
@@ -405,6 +415,17 @@ public class HFile {
writeTime += System.currentTimeMillis() - now;
writeOps++;
+
+ if (blockCache != null) {
+ baosDos.flush();
+ byte [] bytes = baos.toByteArray();
+ ByteBuffer blockToCache = ByteBuffer.wrap(bytes, DATABLOCKMAGIC.length,
+ bytes.length - DATABLOCKMAGIC.length);
+ String blockName = name + blockNumber;
+ blockCache.cacheBlock(blockName, blockToCache);
+ baosDos.close();
+ }
+ blockNumber++;
}
/*
@@ -417,6 +438,11 @@ public class HFile {
this.out = getCompressingStream();
this.out.write(DATABLOCKMAGIC);
firstKey = null;
+ if (blockCache != null) {
+ this.baos = new ByteArrayOutputStream();
+ this.baosDos = new DataOutputStream(baos);
+ this.baosDos.write(DATABLOCKMAGIC);
+ }
}
/*
@@ -519,8 +545,9 @@ public class HFile {
@Override
public String toString() {
- return "writer=" + this.name + ", compression=" +
- this.compressAlgo.getName();
+ return "writer=" + (path != null ? path.toString() : null) +
+ ", name=" + this.name +
+ ", compression=" + this.compressAlgo.getName();
}
/**
@@ -586,6 +613,13 @@ public class HFile {
this.lastKeyOffset = koffset;
this.lastKeyLength = klength;
this.entryCount ++;
+ // If we are pre-caching blocks on write, fill byte array stream
+ if (blockCache != null) {
+ this.baosDos.writeInt(klength);
+ this.baosDos.writeInt(vlength);
+ this.baosDos.write(key, koffset, klength);
+ this.baosDos.write(value, voffset, vlength);
+ }
}
/*
@@ -774,10 +808,12 @@ public class HFile {
// Whether file is from in-memory store
private boolean inMemory = false;
- // Name for this object used when logging or in toString. Is either
- // the result of a toString on the stream or else is toString of passed
- // file Path plus metadata key/value pairs.
- protected String name;
+ // Whether blocks of file should be evicted on close of file
+ private final boolean evictOnClose;
+
+ // Path of file and file name to be used for block names
+ private final Path path;
+ private final String name;
// table qualified cfName for this HFile.
// This is used to report stats on a per-table/CF basis
@@ -837,12 +873,13 @@ public class HFile {
* @param cache block cache. Pass null if none.
* @throws IOException
*/
- public Reader(FileSystem fs, Path path, BlockCache cache, boolean inMemory)
+ public Reader(FileSystem fs, Path path, BlockCache cache, boolean inMemory,
+ boolean evictOnClose)
throws IOException {
- this(fs.open(path), fs.getFileStatus(path).getLen(), cache, inMemory);
+ this(path, fs.open(path), fs.getFileStatus(path).getLen(), cache,
+ inMemory, evictOnClose);
this.closeIStream = true;
- this.name = path.toString();
- this.parsePath(this.name);
+ this.parsePath(this.path.toString());
}
/**
@@ -853,21 +890,26 @@ public class HFile {
* stream.
* @param size Length of the stream.
* @param cache block cache. Pass null if none.
+ * @param inMemory whether blocks should be marked as in-memory in cache
+ * @param evictOnClose whether blocks in cache should be evicted on close
* @throws IOException
*/
- public Reader(final FSDataInputStream fsdis, final long size,
- final BlockCache cache, final boolean inMemory) {
+ public Reader(Path path, final FSDataInputStream fsdis, final long size,
+ final BlockCache cache, final boolean inMemory,
+ final boolean evictOnClose) {
this.cache = cache;
this.fileSize = size;
this.istream = fsdis;
this.closeIStream = false;
- this.name = this.istream == null? "": this.istream.toString();
this.inMemory = inMemory;
+ this.evictOnClose = evictOnClose;
+ this.path = path;
+ this.name = path.getName();
}
@Override
public String toString() {
- return "reader=" + this.name +
+ return "reader=" + this.path.toString() +
(!isFileInfoLoaded()? "":
", compression=" + this.compressAlgo.getName() +
", inMemory=" + this.inMemory +
@@ -1329,6 +1371,14 @@ public class HFile {
}
public void close() throws IOException {
+ if (evictOnClose && this.cache != null) {
+ int numEvicted = 0;
+ for (int i=0; i<blockIndex.count; i++) {
+ if (this.cache.evictBlock(name + i)) numEvicted++;
+ }
+ LOG.debug("On close of file " + name + " evicted " + numEvicted +
+ " block(s) of " + blockIndex.count + " total blocks");
+ }
if (this.closeIStream && this.istream != null) {
this.istream.close();
this.istream = null;
@@ -1339,6 +1389,10 @@ public class HFile {
return name;
}
+ public Path getPath() {
+ return path;
+ }
+
/*
* Implementation of {@link HFileScanner} interface.
*/
@@ -2060,7 +2114,7 @@ public class HFile {
continue;
}
// create reader and load file info
- HFile.Reader reader = new HFile.Reader(fs, file, null, false);
+ HFile.Reader reader = new HFile.Reader(fs, file, null, false, false);
Map<byte[],byte[]> fileInfo = reader.loadFileInfo();
int count = 0;
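The writer above caches each finished block under the key name + blockNumber,
and Reader.close() rebuilds the same keys from blockIndex.count to evict them.
A sketch of that naming round trip (the file name and block count are
illustrative, and the LruBlockCache constructor is assumed as before):

  import java.nio.ByteBuffer;

  import org.apache.hadoop.hbase.io.hfile.BlockCache;
  import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

  public class BlockNameSketch {
    public static void main(String[] args) {
      BlockCache cache = new LruBlockCache(1024 * 1024, 64 * 1024);
      String name = "abc123def";  // the HFile name, i.e. path.getName()
      int blockCount = 3;         // what blockIndex.count would report
      // Write side: block i is cached as name + i ("abc123def0", ...).
      for (int i = 0; i < blockCount; i++) {
        cache.cacheBlock(name + i, ByteBuffer.wrap(new byte[64]));
      }
      // Close side: rebuild the same keys and evict, as Reader.close() does.
      int numEvicted = 0;
      for (int i = 0; i < blockCount; i++) {
        if (cache.evictBlock(name + i)) numEvicted++;
      }
      System.out.println(numEvicted + " of " + blockCount + " block(s) evicted");
    }
  }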
Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Tue Oct 11 02:15:59 2011
@@ -288,6 +288,14 @@ public class LruBlockCache implements Bl
return cb.getBuffer();
}
+ @Override
+ public boolean evictBlock(String blockName) {
+ CachedBlock cb = map.get(blockName);
+ if (cb == null) return false;
+ evictBlock(cb);
+ return true;
+ }
+
protected long evictBlock(CachedBlock block) {
map.remove(block.getName());
size.addAndGet(-1 * block.heapSize());
Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java Tue Oct 11 02:15:59 2011
@@ -25,6 +25,8 @@ import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;
+
/**
* Simple one RFile soft reference cache.
@@ -83,7 +85,18 @@ public class SimpleBlockCache implements
cache.put(blockName, new Ref(blockName, buf, q));
}
+ @Override
+ public boolean evictBlock(String blockName) {
+ return cache.remove(blockName) != null;
+ }
+
public void shutdown() {
// noop
}
+
+ @Override
+ public CacheStats getStats() {
+ // TODO: implement this if we ever actually use this block cache
+ return null;
+ }
}
Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Tue Oct 11 02:15:59 2011
@@ -184,7 +184,7 @@ public class LoadIncrementalHFiles exten
throws IOException {
final Path hfilePath = item.hfilePath;
final FileSystem fs = hfilePath.getFileSystem(getConf());
- HFile.Reader hfr = new HFile.Reader(fs, hfilePath, null, false);
+ HFile.Reader hfr = new HFile.Reader(fs, hfilePath, null, false, false);
final byte[] first, last;
try {
hfr.loadFileInfo();
@@ -276,7 +276,7 @@ public class LoadIncrementalHFiles exten
halfWriter = new StoreFile.Writer(
fs, outFile, blocksize, compression, conf, KeyValue.COMPARATOR,
- bloomFilterType, 0);
+ bloomFilterType, 0, false);
HFileScanner scanner = halfReader.getScanner(false, false, false);
scanner.seekTo();
do {
Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Tue Oct 11 02:15:59 2011
@@ -26,9 +26,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.NavigableSet;
-import java.util.Set;
import java.util.SortedSet;
-import java.util.TreeSet;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -43,9 +41,7 @@ import org.apache.hadoop.hbase.HColumnDe
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.Compression;
@@ -326,7 +322,7 @@ public class Store implements HeapSize {
LOG.info("Validating hfile at " + srcPath + " for inclusion in "
+ "store " + this + " region " + this.region);
reader = new HFile.Reader(srcPath.getFileSystem(conf),
- srcPath, null, false);
+ srcPath, null, false, false);
reader.loadFileInfo();
byte[] firstKey = reader.getFirstRowKey();
@@ -458,7 +454,8 @@ public class Store implements HeapSize {
final long logCacheFlushId,
TimeRangeTracker snapshotTimeRangeTracker)
throws IOException {
- StoreFile.Writer writer = null;
+ StoreFile.Writer writer;
+ String fileName;
long flushed = 0;
// Don't flush if there are no entries.
if (set.size() == 0) {
@@ -472,6 +469,7 @@ public class Store implements HeapSize {
// A. Write the map out to the disk
writer = createWriterInTmp(set.size());
writer.setTimeRangeTracker(snapshotTimeRangeTracker);
+ fileName = writer.getPath().getName();
int entries = 0;
try {
for (KeyValue kv: set) {
@@ -490,7 +488,7 @@ public class Store implements HeapSize {
}
// Write-out finished successfully, move into the right spot
- Path dstPath = StoreFile.getUniqueFile(fs, homedir);
+ Path dstPath = new Path(homedir, fileName);
LOG.info("Renaming flushed file at " + writer.getPath() + " to " + dstPath);
fs.rename(writer.getPath(), dstPath);
@@ -515,7 +513,8 @@ public class Store implements HeapSize {
throws IOException {
return StoreFile.createWriter(this.fs, region.getTmpDir(), this.blocksize,
this.compression, this.comparator, this.conf,
- this.family.getBloomFilterType(), maxKeyCount);
+ this.family.getBloomFilterType(), maxKeyCount,
+ conf.getBoolean("hbase.rs.cacheblocksonwrite", false));
}
/*
@@ -1051,15 +1050,15 @@ public class Store implements HeapSize {
// be if all cells were expired or deleted).
StoreFile result = null;
if (compactedFile != null) {
- Path p = null;
- try {
- p = StoreFile.rename(this.fs, compactedFile.getPath(),
- StoreFile.getRandomFilename(fs, this.homedir));
- } catch (IOException e) {
- LOG.error("Failed move of compacted file " + compactedFile.getPath(), e);
- return null;
+ // Move file into the right spot
+ Path origPath = compactedFile.getPath();
+ Path dstPath = new Path(homedir, origPath.getName());
+ LOG.info("Renaming compacted file at " + origPath + " to " + dstPath);
+ if (!fs.rename(origPath, dstPath)) {
+ LOG.error("Failed move of compacted file " + origPath + " to " +
+ dstPath);
}
- result = new StoreFile(this.fs, p, blockcache, this.conf,
+ result = new StoreFile(this.fs, dstPath, blockcache, this.conf,
this.family.getBloomFilterType(), this.inMemory);
result.createReader();
}
Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Tue Oct 11 02:15:59 2011
@@ -19,6 +19,23 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryUsage;
+import java.nio.ByteBuffer;
+import java.text.NumberFormat;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -48,24 +65,6 @@ import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Ordering;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryUsage;
-import java.nio.ByteBuffer;
-import java.text.NumberFormat;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.SortedSet;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
/**
* A Store data file. Stores usually have one or more of these files. They
* are produced by flushing the memstore to disk. To
@@ -159,7 +158,7 @@ public class StoreFile {
/*
* Regex that will work for straight filenames and for reference names.
* If reference, then the regex has more than just one group. Group 1 is
- * this files id. Group 2 the referenced region name, etc.
+ * this file's id. Group 2 the referenced region name, etc.
*/
private static final Pattern REF_NAME_PARSER =
Pattern.compile("^([0-9a-f]+)(?:\\.(.+))?$");
@@ -167,8 +166,6 @@ public class StoreFile {
// StoreFile.Reader
private volatile Reader reader;
- // Used making file ids.
- private final static Random rand = new Random();
private final Configuration conf;
private final BloomType bloomType;
@@ -398,7 +395,8 @@ public class StoreFile {
getBlockCache(), this.reference);
} else {
this.reader = new Reader(this.fs, this.path, getBlockCache(),
- this.inMemory);
+ this.inMemory,
+ this.conf.getBoolean("hbase.rs.evictblocksonclose", true));
}
// Load up indices and fileinfo.
@@ -409,7 +407,7 @@ public class StoreFile {
// By convention, if halfhfile, top half has a sequence number > bottom
// half. Thats why we add one in below. Its done for case the two halves
// are ever merged back together --rare. Without it, on open of store,
- // since store files are distingushed by sequence id, the one half would
+ // since store files are distinguished by sequence id, the one half would
// subsume the other.
this.sequenceid = Bytes.toLong(b);
if (isReference()) {
@@ -548,7 +546,8 @@ public class StoreFile {
final int blocksize)
throws IOException {
- return createWriter(fs, dir, blocksize, null, null, null, BloomType.NONE, 0);
+ return createWriter(fs, dir, blocksize, null, null, null, BloomType.NONE, 0,
+ false);
}
/**
@@ -573,7 +572,8 @@ public class StoreFile {
final KeyValue.KVComparator c,
final Configuration conf,
BloomType bloomType,
- long maxKeySize)
+ long maxKeySize,
+ final boolean cacheOnWrite)
throws IOException {
if (!fs.exists(dir)) {
@@ -586,7 +586,8 @@ public class StoreFile {
return new Writer(fs, path, blocksize,
algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
- conf, c == null? KeyValue.COMPARATOR: c, bloomType, maxKeySize);
+ conf, c == null? KeyValue.COMPARATOR: c, bloomType, maxKeySize,
+ cacheOnWrite);
}
/**
@@ -600,7 +601,7 @@ public class StoreFile {
throw new IOException("Expecting " + dir.toString() +
" to be a directory");
}
- return fs.getFileStatus(dir).isDir()? getRandomFilename(fs, dir): dir;
+ return getRandomFilename(fs, dir);
}
/**
@@ -627,20 +628,14 @@ public class StoreFile {
final Path dir,
final String suffix)
throws IOException {
- long id = -1;
- Path p = null;
- do {
- id = Math.abs(rand.nextLong());
- p = new Path(dir, Long.toString(id) +
- ((suffix == null || suffix.length() <= 0)? "": suffix));
- } while(fs.exists(p));
- return p;
+ return new Path(dir, UUID.randomUUID().toString().replaceAll("-", "")
+ + ((suffix == null || suffix.length() <= 0) ? "" : suffix));
}
/**
* Write out a split reference.
*
- * Package local so it doesnt leak out of regionserver.
+ * Package local so it does not leak out of regionserver.
*
* @param fs
* @param splitDir Presumes path format is actually
@@ -701,16 +696,19 @@ public class StoreFile {
* @param comparator key comparator
* @param bloomType bloom filter setting
* @param maxKeys maximum amount of keys to add (for blooms)
+ * @param cacheOnWrite whether to cache blocks as we write file
* @throws IOException problem writing to FS
*/
public Writer(FileSystem fs, Path path, int blocksize,
Compression.Algorithm compress, final Configuration conf,
- final KVComparator comparator, BloomType bloomType, long maxKeys)
+ final KVComparator comparator, BloomType bloomType, long maxKeys,
+ boolean cacheOnWrite)
throws IOException {
writer = new HFile.Writer(
fs, path, blocksize, HFile.getBytesPerChecksum(conf, fs.getConf()),
- compress, comparator.getRawComparator());
+ compress, comparator.getRawComparator(),
+ cacheOnWrite ? StoreFile.getBlockCache(conf) : null);
this.kvComparator = comparator;
@@ -918,9 +916,10 @@ public class StoreFile {
private final String bloomAccessedMetric;
private final String bloomSkippedMetric;
- public Reader(FileSystem fs, Path path, BlockCache blockCache, boolean inMemory)
+ public Reader(FileSystem fs, Path path, BlockCache blockCache,
+ boolean inMemory, boolean evictOnClose)
throws IOException {
- reader = new HFile.Reader(fs, path, blockCache, inMemory);
+ reader = new HFile.Reader(fs, path, blockCache, inMemory, evictOnClose);
// prepare the text (key) for the metrics
bloomAccessedMetric = reader.cfName + ".keyMaybeInBloomCnt";
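Mirroring what Store.createWriterInTmp now does, a hedged sketch of opening a
cache-on-write StoreFile.Writer through the widened createWriter signature;
the local path, block size, and key contents are made up:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.regionserver.StoreFile;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CacheOnWriteSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      conf.setBoolean("hbase.rs.cacheblocksonwrite", true);
      FileSystem fs = FileSystem.getLocal(conf);
      // Nulls fall back to the default compression algorithm and comparator.
      StoreFile.Writer w = StoreFile.createWriter(fs, new Path("/tmp/cow-demo"),
          64 * 1024, null, null, conf, StoreFile.BloomType.NONE, 0,
          conf.getBoolean("hbase.rs.cacheblocksonwrite", false));
      byte[] b = Bytes.toBytes("x");
      w.append(new KeyValue(b, b, b, 1L, b));  // row, family, qualifier, ts, value
      w.appendMetadata(0, false);
      w.close();  // the blocks written above are already sitting in the cache
    }
  }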
Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java Tue Oct 11 02:15:59 2011
@@ -70,7 +70,7 @@ public class CompressionTest {
writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
writer.close();
- HFile.Reader reader = new HFile.Reader(dfs, path, null, false);
+ HFile.Reader reader = new HFile.Reader(dfs, path, null, false, false);
reader.loadFileInfo();
byte[] key = reader.getFirstKey();
boolean rc = Bytes.toString(key).equals("testkey");
Modified: hbase/branches/0.89/src/main/resources/hbase-default.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/resources/hbase-default.xml?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/resources/hbase-default.xml (original)
+++ hbase/branches/0.89/src/main/resources/hbase-default.xml Tue Oct 11 02:15:59 2011
@@ -422,6 +422,22 @@
</description>
</property>
<property>
+ <name>hbase.rs.cacheblocksonwrite</name>
+ <value>false</value>
+ <description>
+ Whether an HFile block should be added to the block cache when the
+ block is finished.
+ </description>
+ </property>
+ <property>
+ <name>hbase.rs.evictblocksonclose</name>
+ <value>false</value>
+ <description>
+ Specifies if all blocks in an HFile should be evicted from the block
+ cache at the time the file is closed.
+ </description>
+ </property>
+ <property>
<name>hbase.hash.type</name>
<value>murmur</value>
<description>The hashing algorithm for use in HashFunction. Two values are
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Tue Oct 11 02:15:59 2011
@@ -97,7 +97,7 @@ public class HBaseTestingUtility {
public static final String TEST_DIRECTORY_KEY = "test.build.data";
/**
- * Default parent direccounttory for test output.
+ * Default parent directory for test output.
*/
public static final String DEFAULT_TEST_DIRECTORY = "target/build/data";
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java Tue Oct 11 02:15:59 2011
@@ -189,7 +189,8 @@ public class HFilePerformanceEvaluation
@Override
void setUp() throws Exception {
writer = new HFile.Writer(this.fs, this.mf, RFILE_BLOCKSIZE,
- HFile.DEFAULT_BYTES_PER_CHECKSUM, (Compression.Algorithm) null, null);
+ HFile.DEFAULT_BYTES_PER_CHECKSUM, (Compression.Algorithm) null, null,
+ null);
}
@Override
@@ -225,7 +226,7 @@ public class HFilePerformanceEvaluation
@Override
void setUp() throws Exception {
- reader = new HFile.Reader(this.fs, this.mf, null, false);
+ reader = new HFile.Reader(this.fs, this.mf, null, false, false);
this.reader.loadFileInfo();
}
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java Tue Oct 11 02:15:59 2011
@@ -72,7 +72,7 @@ public class TestHalfStoreFileReader {
}
w.close();
- HFile.Reader r = new HFile.Reader(fs, p, null, false);
+ HFile.Reader r = new HFile.Reader(fs, p, null, false, false);
r.loadFileInfo();
byte [] midkey = r.midkey();
KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java Tue Oct 11 02:15:59 2011
@@ -68,7 +68,7 @@ public class RandomSeek {
long start = System.currentTimeMillis();
SimpleBlockCache cache = new SimpleBlockCache();
//LruBlockCache cache = new LruBlockCache();
- Reader reader = new HFile.Reader(lfs, path, cache, false);
+ Reader reader = new HFile.Reader(lfs, path, cache, false, false);
reader.loadFileInfo();
System.out.println(reader.trailer);
long end = System.currentTimeMillis();
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java Tue Oct 11 02:15:59 2011
@@ -65,7 +65,7 @@ public class TestHFile extends HBaseTest
Path f = new Path(ROOT_DIR, getName());
Writer w = new Writer(this.fs, f);
w.close();
- Reader r = new Reader(fs, f, null, false);
+ Reader r = new Reader(fs, f, null, false, false);
r.loadFileInfo();
assertNull(r.getFirstKey());
assertNull(r.getLastKey());
@@ -140,8 +140,8 @@ public class TestHFile extends HBaseTest
writeRecords(writer);
fout.close();
FSDataInputStream fin = fs.open(ncTFile);
- Reader reader = new Reader(fs.open(ncTFile),
- fs.getFileStatus(ncTFile).getLen(), null, false);
+ Reader reader = new Reader(ncTFile, fs.open(ncTFile),
+ fs.getFileStatus(ncTFile).getLen(), null, false, false);
// Load up the index.
reader.loadFileInfo();
// Get a scanner that caches and that does not use pread.
@@ -215,8 +215,8 @@ public class TestHFile extends HBaseTest
writer.close();
fout.close();
FSDataInputStream fin = fs.open(mFile);
- Reader reader = new Reader(fs.open(mFile), this.fs.getFileStatus(mFile)
- .getLen(), null, false);
+ Reader reader = new Reader(mFile, fs.open(mFile),
+ this.fs.getFileStatus(mFile).getLen(), null, false, false);
reader.loadFileInfo();
// No data -- this should return false.
assertFalse(reader.getScanner(false, false).seekTo());
@@ -240,7 +240,7 @@ public class TestHFile extends HBaseTest
writer.append("foo".getBytes(), "value".getBytes());
writer.close();
fout.close();
- Reader reader = new Reader(fs, mFile, null, false);
+ Reader reader = new Reader(fs, mFile, null, false, false);
reader.loadFileInfo();
assertNull(reader.getMetaBlock("non-existant", false));
}
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java Tue Oct 11 02:15:59 2011
@@ -236,8 +236,8 @@ public class TestHFilePerformance extend
FSDataInputStream fin = fs.open(path);
if ("HFile".equals(fileType)){
- HFile.Reader reader = new HFile.Reader(fs.open(path),
- fs.getFileStatus(path).getLen(), null, false);
+ HFile.Reader reader = new HFile.Reader(path, fs.open(path),
+ fs.getFileStatus(path).getLen(), null, false, false);
reader.loadFileInfo();
switch (method) {
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java Tue Oct 11 02:15:59 2011
@@ -155,8 +155,8 @@ public class TestHFileSeek extends TestC
int miss = 0;
long totalBytes = 0;
FSDataInputStream fsdis = fs.open(path);
- Reader reader =
- new Reader(fsdis, fs.getFileStatus(path).getLen(), null, false);
+ Reader reader = new Reader(path, fsdis, fs.getFileStatus(path).getLen(),
+ null, false, false);
reader.loadFileInfo();
KeySampler kSampler =
new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java Tue Oct 11 02:15:59 2011
@@ -60,7 +60,7 @@ public class TestReseekTo {
fout.close();
HFile.Reader reader = new HFile.Reader(TEST_UTIL.getTestFileSystem(),
- ncTFile, null, false);
+ ncTFile, null, false, false);
reader.loadFileInfo();
HFileScanner scanner = reader.getScanner(false, true);
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java Tue Oct 11 02:15:59 2011
@@ -60,7 +60,7 @@ public class TestSeekTo extends HBaseTes
}
public void testSeekBefore() throws Exception {
Path p = makeNewFile();
- HFile.Reader reader = new HFile.Reader(fs, p, null, false);
+ HFile.Reader reader = new HFile.Reader(fs, p, null, false, false);
reader.loadFileInfo();
HFileScanner scanner = reader.getScanner(false, true);
assertEquals(false, scanner.seekBefore(toKV("a").getKey()));
@@ -93,7 +93,7 @@ public class TestSeekTo extends HBaseTes
public void testSeekTo() throws Exception {
Path p = makeNewFile();
- HFile.Reader reader = new HFile.Reader(fs, p, null, false);
+ HFile.Reader reader = new HFile.Reader(fs, p, null, false, false);
reader.loadFileInfo();
assertEquals(2, reader.blockIndex.count);
HFileScanner scanner = reader.getScanner(false, true);
@@ -113,7 +113,7 @@ public class TestSeekTo extends HBaseTes
public void testBlockContainingKey() throws Exception {
Path p = makeNewFile();
- HFile.Reader reader = new HFile.Reader(fs, p, null, false);
+ HFile.Reader reader = new HFile.Reader(fs, p, null, false, false);
reader.loadFileInfo();
System.out.println(reader.blockIndex.toString());
int klen = toKV("a").getKey().length;
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java Tue Oct 11 02:15:59 2011
@@ -150,7 +150,7 @@ public class TestLoadIncrementalHFiles {
private int verifyHFile(Path p) throws IOException {
Configuration conf = util.getConfiguration();
HFile.Reader reader = new HFile.Reader(
- p.getFileSystem(conf), p, null, false);
+ p.getFileSystem(conf), p, null, false, false);
reader.loadFileInfo();
HFileScanner scanner = reader.getScanner(false, false);
scanner.seekTo();
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Tue Oct 11 02:15:59 2011
@@ -30,6 +30,7 @@ import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
@@ -38,16 +39,15 @@ import org.apache.hadoop.hbase.HConstant
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.Reference.Range;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.util.ByteBloomFilter;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.mockito.Mockito;
import com.google.common.base.Joiner;
-import com.google.common.collect.Collections2;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
@@ -338,7 +338,7 @@ public class TestStoreFile extends HBase
}
writer.close();
- StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false, false);
reader.loadFileInfo();
reader.loadBloomfilter();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -380,7 +380,7 @@ public class TestStoreFile extends HBase
Path f = new Path(ROOT_DIR, getName());
StoreFile.Writer writer = new StoreFile.Writer(fs, f,
StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
- conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000);
+ conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000, false);
bloomWriteRead(writer, fs);
}
@@ -411,7 +411,7 @@ public class TestStoreFile extends HBase
StoreFile.Writer writer = new StoreFile.Writer(fs, f,
StoreFile.DEFAULT_BLOCKSIZE_SMALL,
HFile.DEFAULT_COMPRESSION_ALGORITHM,
- conf, KeyValue.COMPARATOR, bt[x], expKeys[x]);
+ conf, KeyValue.COMPARATOR, bt[x], expKeys[x], false);
long now = System.currentTimeMillis();
for (int i = 0; i < rowCount*2; i += 2) { // rows
@@ -428,7 +428,7 @@ public class TestStoreFile extends HBase
}
writer.close();
- StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false, false);
reader.loadFileInfo();
reader.loadBloomfilter();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -466,7 +466,7 @@ public class TestStoreFile extends HBase
assertTrue(falsePos < 2*expErr[x]);
}
}
-
+
public void testBloomEdgeCases() throws Exception {
float err = (float)0.005;
FileSystem fs = FileSystem.getLocal(conf);
@@ -478,7 +478,7 @@ public class TestStoreFile extends HBase
// this should not create a bloom because the max keys is too small
StoreFile.Writer writer = new StoreFile.Writer(fs, f,
StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
- conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000);
+ conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000, false);
assertFalse(writer.hasBloom());
writer.close();
fs.delete(f, true);
@@ -500,7 +500,8 @@ public class TestStoreFile extends HBase
// because Java can't create a contiguous array > MAX_INT
writer = new StoreFile.Writer(fs, f,
StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
- conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, Integer.MAX_VALUE);
+ conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, Integer.MAX_VALUE,
+ false);
assertFalse(writer.hasBloom());
writer.close();
fs.delete(f, true);
@@ -626,4 +627,146 @@ public class TestStoreFile extends HBase
//scan.setTimeRange(27, 50);
//assertTrue(!scanner.shouldSeek(scan, columns));
}
+
+ public void testCacheOnWriteEvictOnClose() throws Exception {
+ Configuration conf = this.conf;
+ conf.setBoolean("hbase.rs.evictblocksonclose", false);
+
+ // Find a home for our files
+ Path baseDir = new Path(new Path(this.testDir, "regionname"),
+ "twoCOWEOC");
+
+ // Grab the block cache and get the initial hit/miss counts
+ BlockCache bc = StoreFile.getBlockCache(conf);
+ assertNotNull(bc);
+ CacheStats cs = bc.getStats();
+ long startHit = cs.getHitCount();
+ long startMiss = cs.getMissCount();
+ long startEvicted = cs.getEvictedCount();
+
+ // Let's write a StoreFile with three blocks, with cache on write off
+ conf.setBoolean("hbase.rs.cacheblocksonwrite", false);
+ Path pathCowOff = new Path(baseDir, "123456789");
+ StoreFile.Writer writer = writeStoreFile(conf, pathCowOff, 3);
+ StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
+ StoreFile.BloomType.NONE, false);
+ LOG.debug(hsf.getPath().toString());
+
+ // Read this file, we should see 3 misses
+ StoreFile.Reader reader = hsf.createReader();
+ reader.loadFileInfo();
+ StoreFileScanner scanner = reader.getStoreFileScanner(true, true, false);
+ scanner.seek(KeyValue.LOWESTKEY);
+ while (scanner.next() != null);
+ assertEquals(startHit, cs.getHitCount());
+ assertEquals(startMiss + 3, cs.getMissCount());
+ assertEquals(startEvicted, cs.getEvictedCount());
+ startMiss += 3;
+ scanner.close();
+ reader.close();
+
+ // Now write a StoreFile with three blocks, with cache on write on
+ conf.setBoolean("hbase.rs.cacheblocksonwrite", true);
+ Path pathCowOn = new Path(baseDir, "123456788");
+ writer = writeStoreFile(conf, pathCowOn, 3);
+ hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
+ StoreFile.BloomType.NONE, false);
+
+ // Read this file, we should see 3 hits
+ reader = hsf.createReader();
+ scanner = reader.getStoreFileScanner(true, true, false);
+ scanner.seek(KeyValue.LOWESTKEY);
+ while (scanner.next() != null);
+ assertEquals(startHit + 3, cs.getHitCount());
+ assertEquals(startMiss, cs.getMissCount());
+ assertEquals(startEvicted, cs.getEvictedCount());
+ startHit += 3;
+ scanner.close();
+ reader.close();
+
+ // Let's read back the two files to ensure the blocks exactly match
+ hsf = new StoreFile(this.fs, pathCowOff, true, conf,
+ StoreFile.BloomType.NONE, false);
+ StoreFile.Reader readerOne = hsf.createReader();
+ readerOne.loadFileInfo();
+ StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true,
+ false);
+ scannerOne.seek(KeyValue.LOWESTKEY);
+ hsf = new StoreFile(this.fs, pathCowOn, true, conf,
+ StoreFile.BloomType.NONE, false);
+ StoreFile.Reader readerTwo = hsf.createReader();
+ readerTwo.loadFileInfo();
+ StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true,
+ false);
+ scannerTwo.seek(KeyValue.LOWESTKEY);
+ KeyValue kv1 = null;
+ KeyValue kv2 = null;
+ while ((kv1 = scannerOne.next()) != null) {
+ kv2 = scannerTwo.next();
+ assertTrue(kv1.equals(kv2));
+ assertTrue(Bytes.equals(kv1.getBuffer(), kv2.getBuffer()));
+ }
+ assertNull(scannerTwo.next());
+ assertEquals(startHit + 6, cs.getHitCount());
+ assertEquals(startMiss, cs.getMissCount());
+ assertEquals(startEvicted, cs.getEvictedCount());
+ startHit += 6;
+ scannerOne.close();
+ readerOne.close();
+ scannerTwo.close();
+ readerTwo.close();
+
+ // Let's close the first file with evict on close turned on
+ conf.setBoolean("hbase.rs.evictblocksonclose", true);
+ hsf = new StoreFile(this.fs, pathCowOff, true, conf,
+ StoreFile.BloomType.NONE, false);
+ reader = hsf.createReader();
+ reader.close();
+
+ // We should have 3 new evictions
+ assertEquals(startHit, cs.getHitCount());
+ assertEquals(startMiss, cs.getMissCount());
+ assertEquals(startEvicted + 3, cs.getEvictedCount());
+ startEvicted += 3;
+
+ // Let's close the second file with evict on close turned off
+ conf.setBoolean("hbase.rs.evictblocksonclose", false);
+ hsf = new StoreFile(this.fs, pathCowOn, true, conf,
+ StoreFile.BloomType.NONE, false);
+ reader = hsf.createReader();
+ reader.close();
+
+ // We expect no changes
+ assertEquals(startHit, cs.getHitCount());
+ assertEquals(startMiss, cs.getMissCount());
+ assertEquals(startEvicted, cs.getEvictedCount());
+ }
+
+ private StoreFile.Writer writeStoreFile(Configuration conf, Path path,
+ int numBlocks)
+ throws IOException {
+ // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs
+ int numKVs = 5 * numBlocks;
+ List<KeyValue> kvs = new ArrayList<KeyValue>(numKVs);
+ byte [] b = Bytes.toBytes("x");
+ int totalSize = 0;
+ for (int i=numKVs;i>0;i--) {
+ KeyValue kv = new KeyValue(b, b, b, i, b);
+ kvs.add(kv);
+ totalSize += kv.getLength();
+ }
+ int blockSize = totalSize / numBlocks;
+ StoreFile.Writer writer = new StoreFile.Writer(fs, path, blockSize,
+ HFile.DEFAULT_COMPRESSION_ALGORITHM,
+ conf, KeyValue.COMPARATOR, StoreFile.BloomType.NONE, 2000,
+ conf.getBoolean("hbase.rs.cacheblocksonwrite", false));
+ // We'll write N-1 KVs to ensure we don't write an extra block
+ kvs.remove(kvs.size()-1);
+ for (KeyValue kv : kvs) {
+ writer.append(kv);
+ }
+ writer.appendMetadata(0, false);
+ writer.close();
+ return writer;
+ }
}
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java?rev=1181517&r1=1181516&r2=1181517&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java Tue Oct 11 02:15:59 2011
@@ -32,6 +32,8 @@ import org.apache.hadoop.hbase.regionser
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -56,10 +58,17 @@ public class TestReplicationSource {
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniDFSCluster(1);
fs = TEST_UTIL.getDFSCluster().getFileSystem();
- oldLogDir = new Path(fs.getHomeDirectory(),
- HConstants.HREGION_OLDLOGDIR_NAME);
- logDir = new Path(fs.getHomeDirectory(),
- HConstants.HREGION_LOGDIR_NAME);
+
+ oldLogDir = TEST_UTIL.getTestDir(HConstants.HREGION_OLDLOGDIR_NAME);
+ fs.mkdirs(oldLogDir);
+ logDir = TEST_UTIL.getTestDir(HConstants.HREGION_LOGDIR_NAME);
+ fs.mkdirs(logDir);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ fs.delete(oldLogDir, true);
+ fs.delete(logDir, true);
}
/**