Posted to commits@hbase.apache.org by br...@apache.org on 2008/02/24 01:19:44 UTC
svn commit: r630550 [5/7] - in /hadoop/hbase/trunk: bin/ conf/
src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/
src/java/org/apache/hadoop/hbase/filter/
src/java/org/apache/hadoop/hbase/generated/regionserver/ src/java/org/apa...
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,967 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.BlockFSInputStream;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.onelab.filter.Filter;
+import org.onelab.filter.Key;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+
+/**
+ * An HStore data file. HStores usually have one or more of these files. They
+ * are produced by flushing the memcache to disk.
+ *
+ * <p>Each HStore maintains a number of data files. The filename is a
+ * mix of the parent dir, the region name, the column name, and a file
+ * identifier. The name may also be a reference to a store file located
+ * elsewhere. This class handles all of that path-building for you.
+ *
+ * <p>An HStoreFile usually tracks 4 things: its parent dir, the region
+ * identifier, the column family, and the file identifier. If you know those
+ * four things, you know how to obtain the right HStoreFile. HStoreFiles may
+ * also reference store files in another region, serving either the
+ * top half of the remote file or the bottom half. Such references are
+ * made at fast-split time.
+ *
+ * <p>Plain HStoreFiles are named for a randomly generated id as in:
+ * <code>1278437856009925445</code>. A file by this name is made in both the
+ * <code>mapfiles</code> and <code>info</code> subdirectories of an
+ * HStore column family directory. E.g. if the column family is 'anchor:', then
+ * under the region directory there is a subdirectory named 'anchor' within
+ * which is a 'mapfiles' and 'info' subdirectory. In each will be found a
+ * file named something like <code>1278437856009925445</code>, one to hold the
+ * data in 'mapfiles' and one under 'info' that holds the sequence id for this
+ * store file.
+ *
+ * <p>References to store files located over in some other region look like
+ * this:
+ * <code>1278437856009925445.hbaserepository,qAReLZD-OyQORZWq_vqR1k==,959247014679548184</code>:
+ * i.e. an id followed by the name of the referenced region. The data
+ * ('mapfiles') of HStoreFile references are empty. The accompanying
+ * <code>info</code> file contains the
+ * midkey, the id of the remote store we're referencing and whether we're
+ * to serve the top or bottom half of the remote store file. Note that a
+ * region is not splittable if it holds store file references (references
+ * are cleaned up by compactions).
+ *
+ * <p>When merging or splitting HRegions, we might want to modify one of the
+ * params for an HStoreFile (effectively moving it elsewhere).
+ */
+public class HStoreFile implements HConstants {
+ static final Log LOG = LogFactory.getLog(HStoreFile.class.getName());
+ static final byte INFO_SEQ_NUM = 0;
+ static final String HSTORE_DATFILE_DIR = "mapfiles";
+ static final String HSTORE_INFO_DIR = "info";
+ static final String HSTORE_FILTER_DIR = "filter";
+
+ /**
+ * For split HStoreFiles, specifies if the file covers the lower half or
+ * the upper half of the key range
+ */
+ public static enum Range {
+ /** HStoreFile contains upper half of key range */
+ top,
+ /** HStoreFile contains lower half of key range */
+ bottom
+ }
+
+ private final static Random rand = new Random();
+
+ private final Path basedir;
+ private final String encodedRegionName;
+ private final Text colFamily;
+ private final long fileId;
+ private final HBaseConfiguration conf;
+ private final FileSystem fs;
+ private final Reference reference;
+
+ /**
+ * Constructor that fully initializes the object
+ * @param conf Configuration object
+ * @param basedir qualified path that is parent of region directory
+ * @param encodedRegionName file name friendly name of the region
+ * @param colFamily name of the column family
+ * @param fileId file identifier
+ * @param ref Reference to another HStoreFile.
+ * @throws IOException
+ */
+ HStoreFile(HBaseConfiguration conf, FileSystem fs, Path basedir,
+ String encodedRegionName, Text colFamily, long fileId,
+ final Reference ref) throws IOException {
+ this.conf = conf;
+ this.fs = fs;
+ this.basedir = basedir;
+ this.encodedRegionName = encodedRegionName;
+ this.colFamily = new Text(colFamily);
+
+ long id = fileId;
+ if (id == -1) {
+ Path mapdir = HStoreFile.getMapDir(basedir, encodedRegionName, colFamily);
+ Path testpath = null;
+ do {
+ id = Math.abs(rand.nextLong());
+ testpath = new Path(mapdir, createHStoreFilename(id, null));
+ } while(fs.exists(testpath));
+ }
+ this.fileId = id;
+
+ // If a reference, construction does not write the pointer files. That is
+ // done by invocations of writeReferenceFiles(hsf, fs), which happen at
+ // fast-split time.
+ this.reference = ref;
+ }
+
+ /** @return true if this is a reference to a store file elsewhere */
+ boolean isReference() {
+ return reference != null;
+ }
+
+ Reference getReference() {
+ return reference;
+ }
+
+ String getEncodedRegionName() {
+ return encodedRegionName;
+ }
+
+ /** @return the column family */
+ Text getColFamily() {
+ return colFamily;
+ }
+
+ /** @return the file identifier */
+ long getFileId() {
+ return fileId;
+ }
+
+ // Build full filenames from those components
+
+ /** @return path for MapFile */
+ Path getMapFilePath() {
+ if (isReference()) {
+ return getMapFilePath(encodedRegionName, fileId,
+ reference.getEncodedRegionName());
+ }
+ return getMapFilePath(encodedRegionName, fileId, null);
+ }
+
+ private Path getMapFilePath(final Reference r) {
+ if (r == null) {
+ return getMapFilePath();
+ }
+ return getMapFilePath(r.getEncodedRegionName(), r.getFileId(), null);
+ }
+
+ private Path getMapFilePath(final String encodedName, final long fid,
+ final String ern) {
+ return new Path(HStoreFile.getMapDir(basedir, encodedName, colFamily),
+ createHStoreFilename(fid, ern));
+ }
+
+ /** @return path for info file */
+ Path getInfoFilePath() {
+ if (isReference()) {
+ return getInfoFilePath(encodedRegionName, fileId,
+ reference.getEncodedRegionName());
+
+ }
+ return getInfoFilePath(encodedRegionName, fileId, null);
+ }
+
+ private Path getInfoFilePath(final String encodedName, final long fid,
+ final String ern) {
+ return new Path(HStoreFile.getInfoDir(basedir, encodedName, colFamily),
+ createHStoreFilename(fid, ern));
+ }
+
+ // File handling
+
+ /*
+ * Split by making two new store files that reference the top and bottom
+ * halves of the original store file.
+ * @param dstA the file that will serve keys from the start of the source
+ * @param dstB the file that will serve keys from the midkey to the end
+ * @param fs file system
+ * @throws IOException
+ */
+ void splitStoreFile(final HStoreFile dstA, final HStoreFile dstB,
+ final FileSystem fs)
+ throws IOException {
+ dstA.writeReferenceFiles(fs);
+ dstB.writeReferenceFiles(fs);
+ }
+
+ void writeReferenceFiles(final FileSystem fs)
+ throws IOException {
+ createOrFail(fs, getMapFilePath());
+ writeSplitInfo(fs);
+ }
+
+ /*
+ * If a reference, write the remote store file id, the midkey, and whether
+ * we serve the top or the bottom half of the referent out to the info file.
+ * @param fs file system
+ * @throws IOException
+ */
+ private void writeSplitInfo(final FileSystem fs) throws IOException {
+ Path p = getInfoFilePath();
+ if (fs.exists(p)) {
+ throw new IOException("File already exists " + p.toString());
+ }
+ FSDataOutputStream out = fs.create(p);
+ try {
+ reference.write(out);
+ } finally {
+ out.close();
+ }
+ }
+
+ private void createOrFail(final FileSystem fs, final Path p)
+ throws IOException {
+ if (fs.exists(p)) {
+ throw new IOException("File already exists " + p.toString());
+ }
+ if (!fs.createNewFile(p)) {
+ throw new IOException("Failed create of " + p);
+ }
+ }
+
+ /**
+ * Merges the contents of the given source HStoreFiles into a single new one.
+ *
+ * @param srcFiles files to be merged
+ * @param fs file system
+ * @param conf configuration object
+ * @throws IOException
+ */
+ void mergeStoreFiles(List<HStoreFile> srcFiles, FileSystem fs,
+ @SuppressWarnings("hiding") Configuration conf)
+ throws IOException {
+ // Copy all the source MapFile tuples into this HSF's MapFile
+ MapFile.Writer out = new MapFile.Writer(conf, fs,
+ getMapFilePath().toString(),
+ HStoreKey.class, ImmutableBytesWritable.class);
+
+ try {
+ for(HStoreFile src: srcFiles) {
+ MapFile.Reader in = src.getReader(fs, null);
+ try {
+ HStoreKey readkey = new HStoreKey();
+ ImmutableBytesWritable readval = new ImmutableBytesWritable();
+ while(in.next(readkey, readval)) {
+ out.append(readkey, readval);
+ }
+
+ } finally {
+ in.close();
+ }
+ }
+ } finally {
+ out.close();
+ }
+ // Build a unified InfoFile from the source InfoFiles.
+
+ long unifiedSeqId = -1;
+ for(HStoreFile hsf: srcFiles) {
+ long curSeqId = hsf.loadInfo(fs);
+ if(curSeqId > unifiedSeqId) {
+ unifiedSeqId = curSeqId;
+ }
+ }
+ writeInfo(fs, unifiedSeqId);
+ }
+
+ /**
+ * Reads in an info file
+ *
+ * @param fs file system
+ * @return The sequence id contained in the info file
+ * @throws IOException
+ */
+ long loadInfo(FileSystem fs) throws IOException {
+ Path p = null;
+ if (isReference()) {
+ p = getInfoFilePath(reference.getEncodedRegionName(),
+ reference.getFileId(), null);
+ } else {
+ p = getInfoFilePath();
+ }
+ DataInputStream in = new DataInputStream(fs.open(p));
+ try {
+ byte flag = in.readByte();
+ if(flag == INFO_SEQ_NUM) {
+ return in.readLong();
+ }
+ throw new IOException("Cannot process log file: " + p);
+ } finally {
+ in.close();
+ }
+ }
+
+ /**
+ * Writes the sequence id to the info file.
+ *
+ * @param fs file system
+ * @param infonum sequence id
+ * @throws IOException
+ */
+ void writeInfo(FileSystem fs, long infonum) throws IOException {
+ Path p = getInfoFilePath();
+ FSDataOutputStream out = fs.create(p);
+ try {
+ out.writeByte(INFO_SEQ_NUM);
+ out.writeLong(infonum);
+ } finally {
+ out.close();
+ }
+ }
+
+ /**
+ * Delete store map files.
+ * @throws IOException
+ */
+ public void delete() throws IOException {
+ fs.delete(getMapFilePath());
+ fs.delete(getInfoFilePath());
+ }
+
+ /**
+ * Renames this store file's map file and info file to the corresponding
+ * paths of the passed <code>hsf</code>.
+ * @param fs
+ * @param hsf
+ * @return True if succeeded.
+ * @throws IOException
+ */
+ public boolean rename(final FileSystem fs, final HStoreFile hsf)
+ throws IOException {
+ Path src = getMapFilePath();
+ if (!fs.exists(src)) {
+ throw new FileNotFoundException(src.toString());
+ }
+ boolean success = fs.rename(src, hsf.getMapFilePath());
+ if (!success) {
+ LOG.warn("Failed rename of " + src + " to " + hsf.getMapFilePath());
+ } else {
+ src = getInfoFilePath();
+ if (!fs.exists(src)) {
+ throw new FileNotFoundException(src.toString());
+ }
+ success = fs.rename(src, hsf.getInfoFilePath());
+ if (!success) {
+ LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
+ }
+ }
+ return success;
+ }
+
+ /**
+ * Get reader for the store file map file.
+ * Client is responsible for closing file when done.
+ * @param fs
+ * @param bloomFilter If null, no filtering is done.
+ * @return MapFile.Reader
+ * @throws IOException
+ */
+ public synchronized MapFile.Reader getReader(final FileSystem fs,
+ final Filter bloomFilter)
+ throws IOException {
+
+ if (isReference()) {
+ return new HStoreFile.HalfMapFileReader(fs,
+ getMapFilePath(reference).toString(), conf,
+ reference.getFileRegion(), reference.getMidkey(), bloomFilter);
+ }
+ return new BloomFilterMapFile.Reader(fs, getMapFilePath().toString(),
+ conf, bloomFilter);
+ }
+
+ /**
+ * Get reader for the store file map file.
+ * Client is responsible for closing file when done.
+ * @param fs
+ * @param bloomFilter If null, no filtering is done.
+ * @param blockCacheEnabled If true, MapFile blocks should be cached.
+ * @return MapFile.Reader
+ * @throws IOException
+ */
+ public synchronized MapFile.Reader getReader(final FileSystem fs,
+ final Filter bloomFilter, final boolean blockCacheEnabled)
+ throws IOException {
+
+ if (isReference()) {
+ return new HStoreFile.HalfMapFileReader(fs,
+ getMapFilePath(reference).toString(), conf,
+ reference.getFileRegion(), reference.getMidkey(), bloomFilter,
+ blockCacheEnabled);
+ }
+ return new BloomFilterMapFile.Reader(fs, getMapFilePath().toString(),
+ conf, bloomFilter, blockCacheEnabled);
+ }
+
+ /**
+ * Get a store file writer.
+ * Client is responsible for closing file when done.
+ * @param fs
+ * @param compression Pass <code>SequenceFile.CompressionType.NONE</code>
+ * for none.
+ * @param bloomFilter If null, no filtering is done.
+ * @return MapFile.Writer
+ * @throws IOException
+ */
+ public MapFile.Writer getWriter(final FileSystem fs,
+ final SequenceFile.CompressionType compression,
+ final Filter bloomFilter)
+ throws IOException {
+ if (isReference()) {
+ throw new IOException("Illegal Access: Cannot get a writer on a" +
+ "HStoreFile reference");
+ }
+ return new BloomFilterMapFile.Writer(conf, fs,
+ getMapFilePath().toString(), HStoreKey.class,
+ ImmutableBytesWritable.class, compression, bloomFilter);
+ }
+
+ /**
+ * @return Length of the store map file. If a reference, the size is an
+ * approximation.
+ * @throws IOException
+ */
+ public long length() throws IOException {
+ Path p = new Path(getMapFilePath(reference), MapFile.DATA_FILE_NAME);
+ long l = p.getFileSystem(conf).getFileStatus(p).getLen();
+ return (isReference())? l / 2: l;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public String toString() {
+ return encodedRegionName + "/" + colFamily + "/" + fileId +
+ (isReference()? "/" + reference.toString(): "");
+ }
+
+ /**
+ * Custom bloom filter key maker.
+ * @param key
+ * @return Key made of bytes of row and column only.
+ * @throws IOException
+ */
+ static Key getBloomFilterKey(WritableComparable key)
+ throws IOException {
+ HStoreKey hsk = (HStoreKey)key;
+ byte [] bytes = null;
+ try {
+ bytes = (hsk.getRow().toString() + hsk.getColumn().toString()).
+ getBytes(UTF8_ENCODING);
+ } catch (UnsupportedEncodingException e) {
+ throw new IOException(e.toString());
+ }
+ return new Key(bytes);
+ }
+
+ static boolean isTopFileRegion(final Range r) {
+ return r.equals(Range.top);
+ }
+
+ private static String createHStoreFilename(final long fid,
+ final String encodedRegionName) {
+ return Long.toString(fid) +
+ ((encodedRegionName != null) ? "." + encodedRegionName : "");
+ }
+
+ public static Path getMapDir(Path dir, String encodedRegionName, Text colFamily) {
+ return new Path(dir, new Path(encodedRegionName,
+ new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
+ }
+
+ /** @return the info directory path */
+ public static Path getInfoDir(Path dir, String encodedRegionName, Text colFamily) {
+ return new Path(dir, new Path(encodedRegionName,
+ new Path(colFamily.toString(), HSTORE_INFO_DIR)));
+ }
+
+ /** @return the bloom filter directory path */
+ public static Path getFilterDir(Path dir, String encodedRegionName, Text colFamily) {
+ return new Path(dir, new Path(encodedRegionName,
+ new Path(colFamily.toString(), HSTORE_FILTER_DIR)));
+ }
+
+ /*
+ * Data structure to hold reference to a store file over in another region.
+ */
+ static class Reference implements Writable {
+ private String encodedRegionName;
+ private long fileid;
+ private Range region;
+ private HStoreKey midkey;
+
+ Reference(final String ern, final long fid, final HStoreKey m,
+ final Range fr) {
+ this.encodedRegionName = ern;
+ this.fileid = fid;
+ this.region = fr;
+ this.midkey = m;
+ }
+
+ Reference() {
+ this(null, -1, null, Range.bottom);
+ }
+
+ long getFileId() {
+ return fileid;
+ }
+
+ Range getFileRegion() {
+ return region;
+ }
+
+ HStoreKey getMidkey() {
+ return midkey;
+ }
+
+ String getEncodedRegionName() {
+ return encodedRegionName;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public String toString() {
+ return encodedRegionName + "/" + fileid + "/" + region;
+ }
+
+ // Make it serializable.
+
+ /** {@inheritDoc} */
+ public void write(DataOutput out) throws IOException {
+ out.writeUTF(encodedRegionName);
+ out.writeLong(fileid);
+ // Write true if we're doing top of the file.
+ out.writeBoolean(isTopFileRegion(region));
+ midkey.write(out);
+ }
+
+ /** {@inheritDoc} */
+ public void readFields(DataInput in) throws IOException {
+ encodedRegionName = in.readUTF();
+ fileid = in.readLong();
+ boolean tmp = in.readBoolean();
+ // If true, set region to top.
+ region = tmp? Range.top: Range.bottom;
+ midkey = new HStoreKey();
+ midkey.readFields(in);
+ }
+ }
+
+ /**
+ * Hbase customizations of MapFile.
+ */
+ static class HbaseMapFile extends MapFile {
+
+ /**
+ * A reader capable of reading and caching blocks of the data file.
+ */
+ static class HbaseReader extends MapFile.Reader {
+
+ private final boolean blockCacheEnabled;
+
+ /**
+ * @param fs
+ * @param dirName
+ * @param conf
+ * @throws IOException
+ */
+ public HbaseReader(FileSystem fs, String dirName, Configuration conf)
+ throws IOException {
+ this(fs, dirName, conf, false);
+ }
+
+ /**
+ * @param fs
+ * @param dirName
+ * @param conf
+ * @param blockCacheEnabled
+ * @throws IOException
+ */
+ public HbaseReader(FileSystem fs, String dirName, Configuration conf,
+ boolean blockCacheEnabled)
+ throws IOException {
+ super(fs, dirName, null, conf, false); // defer opening streams
+ this.blockCacheEnabled = blockCacheEnabled;
+ open(fs, dirName, null, conf);
+
+ // Force reading of the mapfile index by calling midKey.
+ // Reading the index will bring the index into memory over
+ // here on the client and then close the index file freeing
+ // up socket connection and resources in the datanode.
+ // Usually the index is loaded on first access of a MapFile.Reader;
+ // we force the issue for HStoreFile MapFiles because that first
+ // access may not happen for some time, and meanwhile we would be
+ // holding datanode resources. See HADOOP-2341.
+ midKey();
+ }
+
+ @Override
+ protected org.apache.hadoop.io.SequenceFile.Reader createDataFileReader(
+ FileSystem fs, Path dataFile, Configuration conf)
+ throws IOException {
+ if (!blockCacheEnabled) {
+ return super.createDataFileReader(fs, dataFile, conf);
+ }
+ LOG.info("Block Cache enabled");
+ final int blockSize = conf.getInt("hbase.hstore.blockCache.blockSize",
+ 64 * 1024);
+ return new SequenceFile.Reader(fs, dataFile, conf) {
+ @Override
+ protected FSDataInputStream openFile(FileSystem fs, Path file,
+ int bufferSize, long length) throws IOException {
+
+ return new FSDataInputStream(new BlockFSInputStream(
+ super.openFile(fs, file, bufferSize, length), length,
+ blockSize));
+ }
+ };
+ }
+ }
+
+ static class HbaseWriter extends MapFile.Writer {
+ /**
+ * @param conf
+ * @param fs
+ * @param dirName
+ * @param keyClass
+ * @param valClass
+ * @param compression
+ * @throws IOException
+ */
+ public HbaseWriter(Configuration conf, FileSystem fs, String dirName,
+ Class<Writable> keyClass, Class<Writable> valClass,
+ SequenceFile.CompressionType compression)
+ throws IOException {
+ super(conf, fs, dirName, keyClass, valClass, compression);
+ // Default for mapfiles is 128. Makes random reads faster if we
+ // have more keys indexed and we're not 'next'-ing around in the
+ // mapfile.
+ setIndexInterval(conf.getInt("hbase.index.interval", 128));
+ }
+ }
+ }
+
+ /**
+ * On write, all keys are added to a bloom filter. On read, keys are
+ * first tested against the bloom filter. Keys are HStoreKeys. If the
+ * passed bloom filter is null, invocations simply pass through to the parent.
+ */
+ static class BloomFilterMapFile extends HbaseMapFile {
+ static class Reader extends HbaseReader {
+ private final Filter bloomFilter;
+
+ /**
+ * @param fs
+ * @param dirName
+ * @param conf
+ * @param filter
+ * @throws IOException
+ */
+ public Reader(FileSystem fs, String dirName, Configuration conf,
+ final Filter filter)
+ throws IOException {
+ super(fs, dirName, conf);
+ bloomFilter = filter;
+ }
+
+ public Reader(FileSystem fs, String dirName, Configuration conf,
+ final Filter filter, final boolean blockCacheEnabled)
+ throws IOException {
+ super(fs, dirName, conf, blockCacheEnabled);
+ bloomFilter = filter;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public Writable get(WritableComparable key, Writable val)
+ throws IOException {
+ if (bloomFilter == null) {
+ return super.get(key, val);
+ }
+ if(bloomFilter.membershipTest(getBloomFilterKey(key))) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("bloom filter reported that key exists");
+ }
+ return super.get(key, val);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("bloom filter reported that key does not exist");
+ }
+ return null;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public WritableComparable getClosest(WritableComparable key,
+ Writable val) throws IOException {
+ if (bloomFilter == null) {
+ return super.getClosest(key, val);
+ }
+ // Note - the key being passed to us is always a HStoreKey
+ if(bloomFilter.membershipTest(getBloomFilterKey(key))) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("bloom filter reported that key exists");
+ }
+ return super.getClosest(key, val);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("bloom filter reported that key does not exist");
+ }
+ return null;
+ }
+ }
+
+ static class Writer extends HbaseWriter {
+ private final Filter bloomFilter;
+
+ /**
+ * @param conf
+ * @param fs
+ * @param dirName
+ * @param keyClass
+ * @param valClass
+ * @param compression
+ * @param filter
+ * @throws IOException
+ */
+ @SuppressWarnings("unchecked")
+ public Writer(Configuration conf, FileSystem fs, String dirName,
+ Class keyClass, Class valClass,
+ SequenceFile.CompressionType compression, final Filter filter)
+ throws IOException {
+ super(conf, fs, dirName, keyClass, valClass, compression);
+ bloomFilter = filter;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void append(WritableComparable key, Writable val)
+ throws IOException {
+ if (bloomFilter != null) {
+ bloomFilter.add(getBloomFilterKey(key));
+ }
+ super.append(key, val);
+ }
+ }
+ }
+
+ /**
+ * A facade for a {@link MapFile.Reader} that serves up either the top or
+ * bottom half of a MapFile (where 'bottom' is the first half of the file
+ * containing the keys that sort lowest and 'top' is the second half of the
+ * file with keys that sort greater than those of the bottom half).
+ * Subclasses BloomFilterMapFile.Reader in case the backing mapfile has an
+ * associated bloom filter.
+ *
+ * <p>This file is not splittable. Calls to {@link #midKey()} return null.
+ */
+ static class HalfMapFileReader extends BloomFilterMapFile.Reader {
+ private final boolean top;
+ private final WritableComparable midkey;
+ private boolean firstNextCall = true;
+
+ HalfMapFileReader(final FileSystem fs, final String dirName,
+ final Configuration conf, final Range r,
+ final WritableComparable midKey)
+ throws IOException {
+ this(fs, dirName, conf, r, midKey, null, false);
+ }
+
+ HalfMapFileReader(final FileSystem fs, final String dirName,
+ final Configuration conf, final Range r,
+ final WritableComparable midKey, final Filter filter)
+ throws IOException {
+ super(fs, dirName, conf, filter);
+ top = isTopFileRegion(r);
+ midkey = midKey;
+ }
+
+ HalfMapFileReader(final FileSystem fs, final String dirName,
+ final Configuration conf, final Range r,
+ final WritableComparable midKey, final Filter filter,
+ final boolean blockCacheEnabled)
+ throws IOException {
+ super(fs, dirName, conf, filter, blockCacheEnabled);
+ top = isTopFileRegion(r);
+ midkey = midKey;
+ }
+
+ @SuppressWarnings("unchecked")
+ private void checkKey(final WritableComparable key)
+ throws IOException {
+ if (top) {
+ if (key.compareTo(midkey) < 0) {
+ throw new IOException("Illegal Access: Key is less than midKey of " +
+ "backing mapfile");
+ }
+ } else if (key.compareTo(midkey) >= 0) {
+ throw new IOException("Illegal Access: Key is greater than or equal " +
+ "to midKey of backing mapfile");
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public synchronized void finalKey(WritableComparable key)
+ throws IOException {
+ if (top) {
+ super.finalKey(key);
+ } else {
+ reset();
+ Writable value = new ImmutableBytesWritable();
+ key = super.getClosest(midkey, value, true);
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public synchronized Writable get(WritableComparable key, Writable val)
+ throws IOException {
+ checkKey(key);
+ return super.get(key, val);
+ }
+
+ /** {@inheritDoc} */
+ @SuppressWarnings("unchecked")
+ @Override
+ public synchronized WritableComparable getClosest(WritableComparable key,
+ Writable val)
+ throws IOException {
+ WritableComparable closest = null;
+ if (top) {
+ // If top, the lowest possible key is midkey. Do not have to check
+ // what comes back from super getClosest. Will return exact match or
+ // greater.
+ closest = (key.compareTo(this.midkey) < 0)?
+ this.midkey: super.getClosest(key, val);
+ } else {
+ // We're serving bottom of the file.
+ if (key.compareTo(this.midkey) < 0) {
+ // Check key is within range for bottom.
+ closest = super.getClosest(key, val);
+ // midkey was made against largest store file at time of split. Smaller
+ // store files could have anything in them. Check return value is
+ // not beyond the midkey (getClosest returns exact match or next
+ // after).
+ if (closest != null && closest.compareTo(this.midkey) >= 0) {
+ // Don't let this value out.
+ closest = null;
+ }
+ }
+ // Else, key is >= midkey, so leave closest null.
+ }
+ return closest;
+ }
+
+ /** {@inheritDoc} */
+ @SuppressWarnings("unused")
+ @Override
+ public synchronized WritableComparable midKey() throws IOException {
+ // Returns null to indicate the file is not splittable.
+ return null;
+ }
+
+ /** {@inheritDoc} */
+ @SuppressWarnings("unchecked")
+ @Override
+ public synchronized boolean next(WritableComparable key, Writable val)
+ throws IOException {
+ if (firstNextCall) {
+ firstNextCall = false;
+ if (this.top) {
+ // Seek to midkey. Midkey may not exist in this file. That should be
+ // fine. Then we'll either be positioned at end or start of file.
+ WritableComparable nearest = getClosest(midkey, val);
+ // Now copy the mid key into the passed key.
+ if (nearest != null) {
+ Writables.copyWritable(nearest, key);
+ return true;
+ }
+ return false;
+ }
+ }
+ boolean result = super.next(key, val);
+ if (!top && key.compareTo(midkey) >= 0) {
+ result = false;
+ }
+ return result;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public synchronized void reset() throws IOException {
+ if (top) {
+ firstNextCall = true;
+ seek(midkey);
+ return;
+ }
+ super.reset();
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public synchronized boolean seek(WritableComparable key)
+ throws IOException {
+ checkKey(key);
+ return super.seek(key);
+ }
+ }
+}
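
The naming scheme laid out in the HStoreFile class comment is easier to
follow spelled out in code. Below is a minimal sketch of the paths and
names it describes; the class name PathLayoutSketch and the sample
region, family, and file id values are illustrative only and not part of
this commit.

    import org.apache.hadoop.fs.Path;

    public class PathLayoutSketch {
      public static void main(String[] args) {
        Path basedir = new Path("/hbase");        // parent of the region dir
        String encodedRegionName = "1028785192";  // made-up encoded name
        String family = "anchor";                 // 'anchor:' minus the colon
        long fileId = 1278437856009925445L;

        // Plain store file: data under 'mapfiles', sequence id under 'info'.
        Path mapfile = new Path(basedir,
            encodedRegionName + "/" + family + "/mapfiles/" + fileId);
        Path info = new Path(basedir,
            encodedRegionName + "/" + family + "/info/" + fileId);

        // A reference appends the name of the referenced region to the id;
        // its 'mapfiles' entry is empty and its 'info' file holds the midkey,
        // the remote file id, and the top/bottom flag.
        String reference = fileId + "." + "referencedRegionName";

        System.out.println(mapfile);   // /hbase/1028785192/anchor/mapfiles/1278...
        System.out.println(info);      // /hbase/1028785192/anchor/info/1278...
        System.out.println(reference); // 1278437856009925445.referencedRegionName
      }
    }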
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreKey.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreKey.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreKey.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,355 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.io.TextSequence;
+import org.apache.hadoop.hbase.InvalidColumnNameException;
+import org.apache.hadoop.io.*;
+import org.apache.hadoop.hbase.HConstants;
+
+import java.io.*;
+import java.nio.ByteBuffer;
+
+/**
+ * A Key for a stored row
+ */
+public class HStoreKey implements WritableComparable {
+ /**
+ * Colon character in UTF-8
+ */
+ public static final char COLUMN_FAMILY_DELIMITER = ':';
+
+ private Text row;
+ private Text column;
+ private long timestamp;
+
+
+ /** Default constructor used in conjunction with Writable interface */
+ public HStoreKey() {
+ this(new Text());
+ }
+
+ /**
+ * Create an HStoreKey specifying only the row
+ * The column defaults to the empty string and the time stamp defaults to
+ * Long.MAX_VALUE
+ *
+ * @param row - row key
+ */
+ public HStoreKey(Text row) {
+ this(row, Long.MAX_VALUE);
+ }
+
+ /**
+ * Create an HStoreKey specifying the row and timestamp
+ * The column name defaults to the empty string
+ *
+ * @param row row key
+ * @param timestamp timestamp value
+ */
+ public HStoreKey(Text row, long timestamp) {
+ this(row, new Text(), timestamp);
+ }
+
+ /**
+ * Create an HStoreKey specifying the row and column names
+ * The timestamp defaults to LATEST_TIMESTAMP
+ *
+ * @param row row key
+ * @param column column key
+ */
+ public HStoreKey(Text row, Text column) {
+ this(row, column, HConstants.LATEST_TIMESTAMP);
+ }
+
+ /**
+ * Create an HStoreKey specifying all the fields
+ *
+ * @param row row key
+ * @param column column key
+ * @param timestamp timestamp value
+ */
+ public HStoreKey(Text row, Text column, long timestamp) {
+ // Make copies by doing 'new Text(arg)'.
+ this.row = new Text(row);
+ this.column = new Text(column);
+ this.timestamp = timestamp;
+ }
+
+ /** @return Approximate size in bytes of this key. */
+ public long getSize() {
+ return this.row.getLength() + this.column.getLength() +
+ 8 /* a long is 8 bytes; Java has no sizeof */;
+ }
+
+ /**
+ * Constructs a new HStoreKey from another
+ *
+ * @param other the source key
+ */
+ public HStoreKey(HStoreKey other) {
+ this(other.row, other.column, other.timestamp);
+ }
+
+ /**
+ * Change the value of the row key
+ *
+ * @param newrow new row key value
+ */
+ public void setRow(Text newrow) {
+ this.row.set(newrow);
+ }
+
+ /**
+ * Change the value of the column key
+ *
+ * @param newcol new column key value
+ */
+ public void setColumn(Text newcol) {
+ this.column.set(newcol);
+ }
+
+ /**
+ * Change the value of the timestamp field
+ *
+ * @param timestamp new timestamp value
+ */
+ public void setVersion(long timestamp) {
+ this.timestamp = timestamp;
+ }
+
+ /**
+ * Set the value of this HStoreKey from the supplied key
+ *
+ * @param k key value to copy
+ */
+ public void set(HStoreKey k) {
+ this.row = k.getRow();
+ this.column = k.getColumn();
+ this.timestamp = k.getTimestamp();
+ }
+
+ /** @return value of row key */
+ public Text getRow() {
+ return row;
+ }
+
+ /** @return value of column key */
+ public Text getColumn() {
+ return column;
+ }
+
+ /** @return value of timestamp */
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ /**
+ * Compares the row and column of two keys
+ * @param other Key to compare against. Compares row and column.
+ * @return True if same row and column.
+ * @see #matchesWithoutColumn(HStoreKey)
+ * @see #matchesRowFamily(HStoreKey)
+ */
+ public boolean matchesRowCol(HStoreKey other) {
+ return this.row.compareTo(other.row) == 0
+ && this.column.compareTo(other.column) == 0;
+ }
+
+ /**
+ * Compares the row and timestamp of two keys
+ *
+ * @param other Key to compare against. Compares row and timestamp.
+ *
+ * @return True if same row and this key's timestamp is not older than
+ * <code>other</code>'s
+ * @see #matchesRowCol(HStoreKey)
+ * @see #matchesRowFamily(HStoreKey)
+ */
+ public boolean matchesWithoutColumn(HStoreKey other) {
+ return this.row.compareTo(other.row) == 0
+ && this.timestamp >= other.getTimestamp();
+ }
+
+ /**
+ * Compares the row and column family of two keys
+ *
+ * @param that Key to compare against. Compares row and column family
+ *
+ * @return true if same row and column family
+ * @throws InvalidColumnNameException
+ * @see #matchesRowCol(HStoreKey)
+ * @see #matchesWithoutColumn(HStoreKey)
+ */
+ public boolean matchesRowFamily(HStoreKey that)
+ throws InvalidColumnNameException {
+ return this.row.compareTo(that.row) == 0 &&
+ extractFamily(this.column).
+ compareTo(extractFamily(that.getColumn())) == 0;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public String toString() {
+ return row.toString() + "/" + column.toString() + "/" + timestamp;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public boolean equals(Object obj) {
+ return compareTo(obj) == 0;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int hashCode() {
+ int result = this.row.hashCode();
+ result ^= this.column.hashCode();
+ result ^= this.timestamp;
+ return result;
+ }
+
+ // Comparable
+
+ public int compareTo(Object o) {
+ HStoreKey other = (HStoreKey)o;
+ int result = this.row.compareTo(other.row);
+ if (result != 0) {
+ return result;
+ }
+ result = this.column.compareTo(other.column);
+ if (result != 0) {
+ return result;
+ }
+ // Below, newer timestamps sort ahead of older ones. This looks
+ // backwards but is intentional: this way, newer timestamps are found
+ // first when we iterate over a memcache, and newer versions are the
+ // first we trip over when reading from a store file.
+ if (this.timestamp < other.timestamp) {
+ result = 1;
+ } else if (this.timestamp > other.timestamp) {
+ result = -1;
+ }
+ return result;
+ }
+
+ // Writable
+
+ /** {@inheritDoc} */
+ public void write(DataOutput out) throws IOException {
+ row.write(out);
+ column.write(out);
+ out.writeLong(timestamp);
+ }
+
+ /** {@inheritDoc} */
+ public void readFields(DataInput in) throws IOException {
+ row.readFields(in);
+ column.readFields(in);
+ timestamp = in.readLong();
+ }
+
+ // Statics
+ // TODO: Move these utility methods elsewhere (To a Column class?).
+
+ /**
+ * Extracts the column family name from a column
+ * For example, returns 'info' if the specified column was 'info:server'
+ * @param col name of column
+ * @return column family as a TextSequence based on the passed
+ * <code>col</code>. If <code>col</code> is reused, make a new Text of
+ * the result by calling {@link TextSequence#toText()}.
+ * @throws InvalidColumnNameException
+ */
+ public static TextSequence extractFamily(final Text col)
+ throws InvalidColumnNameException {
+ return extractFamily(col, false);
+ }
+
+ /**
+ * Extracts the column family name from a column
+ * For example, returns 'info' if the specified column was 'info:server'
+ * @param col name of column
+ * @param withColon if true, include the trailing ':' in the result
+ * @return column family as a TextSequence based on the passed
+ * <code>col</code>. If <code>col</code> is reused, make a new Text of
+ * the result by calling {@link TextSequence#toText()}.
+ * @throws InvalidColumnNameException
+ */
+ public static TextSequence extractFamily(final Text col,
+ final boolean withColon)
+ throws InvalidColumnNameException {
+ int offset = getColonOffset(col);
+ // Include ':' in copy?
+ offset += (withColon)? 1: 0;
+ if (offset == col.getLength()) {
+ return new TextSequence(col);
+ }
+ return new TextSequence(col, 0, offset);
+ }
+
+ /**
+ * Extracts the column qualifier, the portion that follows the colon (':')
+ * family/qualifier separator.
+ * For example, returns 'server' if the specified column was 'info:server'
+ * @param col name of column
+ * @return column qualifier as a TextSequence based on the passed
+ * <code>col</code>. If <code>col</code> is reused, make a new Text of
+ * the result by calling {@link TextSequence#toText()}.
+ * @throws InvalidColumnNameException
+ */
+ public static TextSequence extractQualifier(final Text col)
+ throws InvalidColumnNameException {
+ int offset = getColonOffset(col);
+ if (offset + 1 == col.getLength()) {
+ return null;
+ }
+ return new TextSequence(col, offset + 1);
+ }
+
+ private static int getColonOffset(final Text col)
+ throws InvalidColumnNameException {
+ int offset = -1;
+ ByteBuffer bb = ByteBuffer.wrap(col.getBytes());
+ for (int lastPosition = bb.position(); bb.hasRemaining();
+ lastPosition = bb.position()) {
+ if (Text.bytesToCodePoint(bb) == COLUMN_FAMILY_DELIMITER) {
+ offset = lastPosition;
+ break;
+ }
+ }
+ if(offset < 0) {
+ throw new InvalidColumnNameException(col + " is missing the colon " +
+ "family/qualifier separator");
+ }
+ return offset;
+ }
+
+ /**
+ * Returns row and column bytes out of an HStoreKey.
+ * @param hsk Store key.
+ * @return byte array encoding of HStoreKey
+ * @throws UnsupportedEncodingException
+ */
+ public static byte[] getBytes(final HStoreKey hsk)
+ throws UnsupportedEncodingException {
+ StringBuilder s = new StringBuilder(hsk.getRow().toString());
+ s.append(hsk.getColumn().toString());
+ return s.toString().getBytes(HConstants.UTF8_ENCODING);
+ }
+}
\ No newline at end of file
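
The reverse timestamp ordering in HStoreKey.compareTo above is easy to
misread, so here is a minimal sketch of it in action. It assumes the
classes added in this commit are on the classpath; the row, column, and
timestamp values are made up.

    import org.apache.hadoop.hbase.regionserver.HStoreKey;
    import org.apache.hadoop.io.Text;

    public class KeyOrderSketch {
      public static void main(String[] args) {
        HStoreKey older = new HStoreKey(new Text("row1"),
            new Text("info:server"), 100L);
        HStoreKey newer = new HStoreKey(new Text("row1"),
            new Text("info:server"), 200L);

        // Row and column are equal, so ordering falls through to the
        // timestamp, where the larger (newer) value compares as smaller.
        System.out.println(newer.compareTo(older)); // -1: newer sorts first
        System.out.println(older.compareTo(newer)); // 1
      }
    }

Sorted this way, a scanner trips over the newest version of a cell first.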
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreSize.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreSize.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreSize.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreSize.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,33 @@
+
+package org.apache.hadoop.hbase.regionserver;
+
+/*
+ * Data structure to hold result of a look at store file sizes.
+ */
+public class HStoreSize {
+ final long aggregate;
+ final long largest;
+ boolean splitable;
+
+ HStoreSize(final long a, final long l, final boolean s) {
+ this.aggregate = a;
+ this.largest = l;
+ this.splitable = s;
+ }
+
+ public long getAggregate() {
+ return this.aggregate;
+ }
+
+ public long getLargest() {
+ return this.largest;
+ }
+
+ public boolean isSplitable() {
+ return this.splitable;
+ }
+
+ public void setSplitable(final boolean s) {
+ this.splitable = s;
+ }
+}
\ No newline at end of file
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/LogRollListener.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/LogRollListener.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/LogRollListener.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/LogRollListener.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Mechanism by which the HLog requests a log roll
+ */
+public interface LogRollListener {
+ /** Request that the log be rolled */
+ public void logRollRequested();
+}
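
A minimal sketch of what an implementation of this callback might look
like; the class name and the flag-based handling are illustrative and not
how the region server actually rolls its log.

    import org.apache.hadoop.hbase.regionserver.LogRollListener;

    public class LogRollSketch implements LogRollListener {
      private volatile boolean rollRequested = false;

      /** Invoked by the HLog when the current log file should be rolled. */
      public void logRollRequested() {
        rollRequested = true; // a worker thread would notice this and roll
      }

      public boolean isRollRequested() {
        return rollRequested;
      }
    }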
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/RegionUnavailableListener.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/RegionUnavailableListener.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/RegionUnavailableListener.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/RegionUnavailableListener.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.io.Text;
+
+/**
+ * Used as a callback mechanism so that an HRegion can notify the HRegionServer
+ * of the different stages of making an HRegion unavailable. Regions are made
+ * unavailable during region split operations.
+ */
+public interface RegionUnavailableListener {
+ /**
+ * <code>regionName</code> is closing.
+ * Listener should stop accepting new writes but can continue to service
+ * outstanding transactions.
+ * @param regionName
+ */
+ public void closing(final Text regionName);
+
+ /**
+ * <code>regionName</code> is closed and no longer available.
+ * Listener should clean up any references to <code>regionName</code>
+ * @param regionName
+ */
+ public void closed(final Text regionName);
+}
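
During a split, closing() fires first (new writes should stop while
outstanding transactions drain) and closed() fires once the region is
fully offline. Below is a minimal sketch of a listener observing the two
stages; the class name and messages are illustrative only.

    import org.apache.hadoop.hbase.regionserver.RegionUnavailableListener;
    import org.apache.hadoop.io.Text;

    public class SplitWatcherSketch implements RegionUnavailableListener {
      public void closing(final Text regionName) {
        // Stop routing new writes; outstanding transactions may finish.
        System.out.println("closing: " + regionName);
      }

      public void closed(final Text regionName) {
        // The region is offline; drop any cached references to it.
        System.out.println("closed: " + regionName);
      }
    }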
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java Sat Feb 23 16:19:34 2008
@@ -35,7 +35,7 @@
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.io.Text;
import org.mortbay.servlet.MultiPartResponse;
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java Sat Feb 23 16:19:34 2008
@@ -35,7 +35,7 @@
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java Sat Feb 23 16:19:34 2008
@@ -56,13 +56,14 @@
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HLog;
-import org.apache.hadoop.hbase.HRegion;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.HStore;
-import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.MasterNotRunningException;
+
+import org.apache.hadoop.hbase.regionserver.HLog;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
/**
* Perform a file system upgrade to convert older file layouts to that
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java Sat Feb 23 16:19:34 2008
@@ -28,6 +28,8 @@
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+
/** Abstract base class for merge tests */
public abstract class AbstractMergeTestBase extends HBaseTestCase {
static final Logger LOG =
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java Sat Feb 23 16:19:34 2008
@@ -46,6 +46,10 @@
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.regionserver.HRegionInterface;
+
/**
* Additional scanner tests.
* {@link TestScanner} does a custom setup/takedown not conducive
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java Sat Feb 23 16:19:34 2008
@@ -34,6 +34,8 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+
/**
* Abstract base class for test cases. Performs all static initialization
*/
@@ -157,7 +159,7 @@
protected HRegion openClosedRegion(final HRegion closedRegion)
throws IOException {
- return new HRegion(closedRegion.basedir, closedRegion.getLog(),
+ return new HRegion(closedRegion.getBaseDir(), closedRegion.getLog(),
closedRegion.getFilesystem(), closedRegion.getConf(),
closedRegion.getRegionInfo(), null, null);
}
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Sat Feb 23 16:19:34 2008
@@ -30,6 +30,8 @@
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HRegion;
/**
* This class creates a single process HBase cluster. One thread is created for
@@ -196,7 +198,7 @@
public void abortRegionServer(int serverNumber) {
HRegionServer server =
this.hbaseCluster.getRegionServers().get(serverNumber).getRegionServer();
- LOG.info("Aborting " + server.serverInfo.toString());
+ LOG.info("Aborting " + server.getServerInfo().toString());
server.abort();
}
@@ -262,10 +264,10 @@
* Call flushCache on all regions on all participating regionservers.
* @throws IOException
*/
- void flushcache() throws IOException {
+ public void flushcache() throws IOException {
for (LocalHBaseCluster.RegionServerThread t:
this.hbaseCluster.getRegionServers()) {
- for(HRegion r: t.getRegionServer().onlineRegions.values() ) {
+ for(HRegion r: t.getRegionServer().getOnlineRegions().values() ) {
r.flushcache();
}
}
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java Sat Feb 23 16:19:34 2008
@@ -33,6 +33,10 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
+
/**
* Utility class to build a table of multiple regions.
*/
@@ -99,7 +103,7 @@
continue;
}
LOG.info("Region location: " + hri);
- r = server.onlineRegions.get(hri.getRegionName());
+ r = server.getOnlineRegions().get(hri.getRegionName());
if (r != null) {
break;
}
@@ -335,7 +339,7 @@
LOG.info("Starting compaction");
for (LocalHBaseCluster.RegionServerThread thread:
cluster.getRegionThreads()) {
- SortedMap<Text, HRegion> regions = thread.getRegionServer().onlineRegions;
+ SortedMap<Text, HRegion> regions = thread.getRegionServer().getOnlineRegions();
// Retry if ConcurrentModification... alternative of sync'ing is not
// worth it for sake of unit test.
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java Sat Feb 23 16:19:34 2008
@@ -49,6 +49,8 @@
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
+
/**
* Script used evaluating HBase performance and scalability. Runs a HBase
* client that steps through one of a set of hardcoded tests or 'experiments'
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestCompare.java Sat Feb 23 16:19:34 2008
@@ -21,6 +21,7 @@
import org.apache.hadoop.io.Text;
import junit.framework.TestCase;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
/**
* Test comparing HBase objects.
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java Sat Feb 23 16:19:34 2008
@@ -27,6 +27,7 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
/**
* Test HBase Master and Region servers, client API
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestInfoServers.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestInfoServers.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestInfoServers.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestInfoServers.java Sat Feb 23 16:19:34 2008
@@ -63,7 +63,7 @@
assertHasExpectedContent(new URL("http://localhost:" + port +
"/index.html"), "Master");
port = miniHbase.getRegionThreads().get(0).getRegionServer().
- infoServer.getPort();
+ getInfoServer().getPort();
assertHasExpectedContent(new URL("http://localhost:" + port +
"/index.html"), "Region Server");
} finally {
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java Sat Feb 23 16:19:34 2008
@@ -30,6 +30,8 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
import org.apache.hadoop.hbase.io.BatchUpdate;
/** test the scanner API at all levels */
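
As background for what the scanner tests exercise at the client level, a
minimal scan in this era's API looks roughly like the sketch below. The
table name "mytable" and family "colfamily1:" are hypothetical; the
obtainScanner/next signatures follow the usage seen in these tests, but
treat details such as HConstants.EMPTY_START_ROW as assumptions:

    import java.util.SortedMap;
    import java.util.TreeMap;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HScannerInterface;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.regionserver.HStoreKey;

    public class ScanSketch {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        // "mytable" and "colfamily1:" are hypothetical names.
        HTable table = new HTable(conf, new Text("mytable"));
        HScannerInterface scanner = table.obtainScanner(
            new Text [] {new Text("colfamily1:")}, HConstants.EMPTY_START_ROW);
        try {
          HStoreKey key = new HStoreKey();
          SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
          while (scanner.next(key, results)) {
            // key.getRow() is the current row; results maps column -> value.
            results.clear();
          }
        } finally {
          scanner.close();
        }
      }
    }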
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java Sat Feb 23 16:19:34 2008
@@ -28,7 +28,7 @@
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
import org.apache.hadoop.hbase.HColumnDescriptor;
/**
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java Sat Feb 23 16:19:34 2008
@@ -33,7 +33,7 @@
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
/**
* Tests HTable
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java Sat Feb 23 16:19:34 2008
@@ -30,7 +30,7 @@
import junit.framework.TestCase;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HLogEdit;
+import org.apache.hadoop.hbase.regionserver.HLogEdit;
import org.apache.hadoop.io.Text;
/**
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java Sat Feb 23 16:19:34 2008
@@ -37,9 +37,9 @@
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java Sat Feb 23 16:19:34 2008
@@ -32,7 +32,7 @@
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.regionserver.HStoreKey;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,62 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.io.Text;
+
+/**
+ * A region server that will OOME.
+ * Every time {@link #batchUpdate(Text, BatchUpdate)} is called, we keep
+ * around a reference to the batch. Use this class to test OOME extremes.
+ * Needs to be started manually as in
+ * <code>${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.regionserver.OOMERegionServer start</code>.
+ */
+public class OOMERegionServer extends HRegionServer {
+ private List<BatchUpdate> retainer = new ArrayList<BatchUpdate>();
+
+ public OOMERegionServer(HBaseConfiguration conf) throws IOException {
+ super(conf);
+ }
+
+ public OOMERegionServer(HServerAddress address, HBaseConfiguration conf)
+ throws IOException {
+ super(address, conf);
+ }
+
+ public void batchUpdate(Text regionName, BatchUpdate b)
+ throws IOException {
+ super.batchUpdate(regionName, b);
+ for (int i = 0; i < 30; i++) {
+ // Add the batch update 30 times to bring on the OOME faster.
+ this.retainer.add(b);
+ }
+ }
+
+ public static void main(String[] args) {
+ HRegionServer.doMain(args, OOMERegionServer.class);
+ }
+}
\ No newline at end of file
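
A client loop that drives this server toward OOME can be a minimal sketch
like the following. The table and column names are hypothetical, and it
assumes the startUpdate/put/commit client API that the tests in this commit
exercise; each commit presumably reaches the server as a batchUpdate call:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;

    public class OOMEDriver {
      public static void main(String[] args) throws Exception {
        // "mytable" and "colfamily1:col" are hypothetical names.
        HTable table = new HTable(new HBaseConfiguration(), new Text("mytable"));
        Text column = new Text("colfamily1:col");
        byte [] value = new byte[64 * 1024]; // fat cells fill the retainer faster
        for (long i = 0; ; i++) {            // loop until the server OOMEs
          long lockid = table.startUpdate(new Text("row" + i));
          table.put(lockid, column, value);
          table.commit(lockid);              // each commit is one server update
        }
      }
    }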
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,198 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.Text;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+
+/**
+ * Test compactions
+ */
+public class TestCompaction extends HBaseTestCase {
+ static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
+ private HRegion r = null;
+ private static final String COLUMN_FAMILY = COLFAMILY_NAME1;
+ private final Text STARTROW;
+ private static final Text COLUMN_FAMILY_TEXT = new Text(COLUMN_FAMILY);
+ private static final Text COLUMN_FAMILY_TEXT_MINUS_COLON =
+ new Text(COLUMN_FAMILY.substring(0, COLUMN_FAMILY.length() - 1));
+ private static final int COMPACTION_THRESHOLD = MAXVERSIONS;
+
+ private MiniDFSCluster cluster;
+
+ /** constructor */
+ public TestCompaction() {
+ super();
+ STARTROW = new Text(START_KEY);
+
+ // Set cache flush size to 1MB
+ conf.setInt("hbase.hregion.memcache.flush.size", 1024*1024);
+ conf.setInt("hbase.hregion.memcache.block.multiplier", 2);
+ this.cluster = null;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void setUp() throws Exception {
+ this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+ // Make the hbase rootdir match the minidfs we just spun up
+ this.conf.set(HConstants.HBASE_DIR,
+ this.cluster.getFileSystem().getHomeDirectory().toString());
+ super.setUp();
+ HTableDescriptor htd = createTableDescriptor(getName());
+ this.r = createNewHRegion(htd, null, null);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void tearDown() throws Exception {
+ HLog hlog = r.getLog();
+ this.r.close();
+ hlog.closeAndDelete();
+ if (this.cluster != null) {
+ StaticTestEnvironment.shutdownDfs(cluster);
+ }
+ super.tearDown();
+ }
+
+ /**
+ * Run compaction while flushing the memcache.
+ * Assert deletes get cleaned up.
+ * @throws Exception
+ */
+ public void testCompaction() throws Exception {
+ createStoreFile(r);
+ assertFalse(r.compactIfNeeded());
+ for (int i = 0; i < COMPACTION_THRESHOLD; i++) {
+ createStoreFile(r);
+ }
+ // Add more content. Now there are about 5 versions of each column.
+ // Default is that only 3 (MAXVERSIONS) versions are allowed per column.
+ // Assert > 3 and then, after compaction, assert that only 3 versions
+ // are available.
+ addContent(new HRegionIncommon(r), COLUMN_FAMILY);
+ byte [][] bytes = this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
+ // Assert that we can get at least 5 versions (there should be at least 5 in there).
+ assertTrue(bytes.length >= 5);
+ // Try to run compaction concurrent with a thread flush just to see that
+ // we can.
+ final HRegion region = this.r;
+ Thread t1 = new Thread() {
+ @Override
+ public void run() {
+ try {
+ region.flushcache();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ };
+ Thread t2 = new Thread() {
+ @Override
+ public void run() {
+ try {
+ assertTrue(region.compactIfNeeded());
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ };
+ t1.setDaemon(true);
+ t1.start();
+ t2.setDaemon(true);
+ t2.start();
+ t1.join();
+ t2.join();
+ // Now assert that there are 4 versions of a record only: that's the
+ // 3 versions that should be in the compacted store plus the one more
+ // we added when we flushed. It could be only 3 if the flush happened
+ // before the compaction started, though we tried to have the threads
+ // run concurrently (on hudson this happens).
+ byte [] secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
+ // Increment the least significant character so we get to the next row.
+ secondRowBytes[START_KEY_BYTES.length - 1]++;
+ Text secondRow = new Text(secondRowBytes);
+ bytes = this.r.get(secondRow, COLUMN_FAMILY_TEXT, 100/*Too many*/);
+ LOG.info("Count of " + secondRow + ": " + bytes.length);
+ // The strict check for 4 versions is relaxed below because it fails on
+ // an hp+ubuntu single-processor w/ 1G and
+ // "Intel(R) Pentium(R) 4 CPU 3.20GHz", though it passes on all local
+ // machines and even on hudson. On said machine, the LOG line above
+ // reports 3 items in the row, so the test below should pass.
+ assertTrue(bytes.length == 3 || bytes.length == 4);
+
+ // Now add deletes to memcache and then flush it. That will put us over
+ // the compaction threshold of 3 store files. Compacting these store files
+ // should result in a compacted store file that has no references to the
+ // deleted row.
+ this.r.deleteAll(STARTROW, COLUMN_FAMILY_TEXT, System.currentTimeMillis());
+ // Now, before compacting, remove all instances of the first row so we
+ // can verify that it is removed as we compact.
+ // Assert all deleted.
+ assertNull(this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
+ this.r.flushcache();
+ assertNull(this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
+ // Add a bit of data and flush it so we are sure to have the compaction
+ // limit of store files. Usually by this time we will have, but if the
+ // compaction included the flush that ran 'concurrently', there may be
+ // just the compacted store and the flush above when we added deletes.
+ // Add more content to be certain.
+ createSmallerStoreFile(this.r);
+ assertTrue(this.r.compactIfNeeded());
+ // Assert that the first row is still deleted.
+ bytes = this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
+ assertNull(bytes);
+ // Assert the store files do not contain the first record's 'aaa' keys.
+ for (MapFile.Reader reader:
+ this.r.stores.get(COLUMN_FAMILY_TEXT_MINUS_COLON).getReaders()) {
+ reader.reset();
+ HStoreKey key = new HStoreKey();
+ ImmutableBytesWritable val = new ImmutableBytesWritable();
+ while(reader.next(key, val)) {
+ assertFalse(key.getRow().equals(STARTROW));
+ }
+ }
+ }
+
+ private void createStoreFile(final HRegion region) throws IOException {
+ HRegionIncommon loader = new HRegionIncommon(region);
+ addContent(loader, COLUMN_FAMILY);
+ loader.flushcache();
+ }
+
+ private void createSmallerStoreFile(final HRegion region) throws IOException {
+ HRegionIncommon loader = new HRegionIncommon(region);
+ addContent(loader, COLUMN_FAMILY,
+ ("bbb" + PUNCTUATION).getBytes(), null);
+ loader.flushcache();
+ }
+}
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteAll.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteAll.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteAll.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteAll.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,170 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.io.Text;
+import org.apache.commons.logging.*;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+
+/**
+ * Test the functionality of deleteAll.
+ */
+public class TestDeleteAll extends HBaseTestCase {
+ static final Log LOG = LogFactory.getLog(TestDeleteAll.class);
+ private MiniDFSCluster miniHdfs;
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ try {
+ this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.miniHdfs.getFileSystem().getHomeDirectory().toString());
+ } catch (Exception e) {
+ LOG.fatal("error starting MiniDFSCluster", e);
+ throw e;
+ }
+ }
+
+ /**
+ * Tests for HADOOP-1550.
+ * @throws Exception
+ */
+ public void testDeleteAll() throws Exception {
+ HRegion region = null;
+ HRegionIncommon region_incommon = null;
+ try {
+ HTableDescriptor htd = createTableDescriptor(getName());
+ region = createNewHRegion(htd, null, null);
+ region_incommon = new HRegionIncommon(region);
+
+ // test memcache
+ makeSureItWorks(region, region_incommon, false);
+ // test hstore
+ makeSureItWorks(region, region_incommon, true);
+
+ } finally {
+ if (region != null) {
+ try {
+ region.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ region.getLog().closeAndDelete();
+ }
+ }
+ }
+
+ private void makeSureItWorks(HRegion region, HRegionIncommon region_incommon,
+ boolean flush)
+ throws Exception {
+ // insert a few versions worth of data for a row
+ Text row = new Text("test_row");
+ long t0 = System.currentTimeMillis();
+ long t1 = t0 - 15000;
+ long t2 = t1 - 15000;
+
+ Text colA = new Text(COLUMNS[0].toString() + "a");
+ Text colB = new Text(COLUMNS[0].toString() + "b");
+ Text colC = new Text(COLUMNS[0].toString() + "c");
+ Text colD = new Text(COLUMNS[0].toString());
+
+ long lock = region_incommon.startUpdate(row);
+ region_incommon.put(lock, colA, cellData(0, flush).getBytes());
+ region_incommon.put(lock, colB, cellData(0, flush).getBytes());
+ region_incommon.put(lock, colC, cellData(0, flush).getBytes());
+ region_incommon.put(lock, colD, cellData(0, flush).getBytes());
+ region_incommon.commit(lock, t0);
+
+ lock = region_incommon.startUpdate(row);
+ region_incommon.put(lock, colA, cellData(1, flush).getBytes());
+ region_incommon.put(lock, colB, cellData(1, flush).getBytes());
+ region_incommon.put(lock, colC, cellData(1, flush).getBytes());
+ region_incommon.put(lock, colD, cellData(1, flush).getBytes());
+ region_incommon.commit(lock, t1);
+
+ lock = region_incommon.startUpdate(row);
+ region_incommon.put(lock, colA, cellData(2, flush).getBytes());
+ region_incommon.put(lock, colB, cellData(2, flush).getBytes());
+ region_incommon.put(lock, colC, cellData(2, flush).getBytes());
+ region_incommon.put(lock, colD, cellData(2, flush).getBytes());
+ region_incommon.commit(lock, t2);
+
+ if (flush) {region_incommon.flushcache();}
+
+ // Call deleteAll at a timestamp; make sure only cells newer than that
+ // timestamp are left behind.
+ region.deleteAll(row, t1);
+ if (flush) {region_incommon.flushcache();}
+ assertCellValueEquals(region, row, colA, t0, cellData(0, flush));
+ assertCellValueEquals(region, row, colA, t1, null);
+ assertCellValueEquals(region, row, colA, t2, null);
+ assertCellValueEquals(region, row, colD, t0, cellData(0, flush));
+ assertCellValueEquals(region, row, colD, t1, null);
+ assertCellValueEquals(region, row, colD, t2, null);
+
+ // Call deleteAll with LATEST_TIMESTAMP (i.e. no explicit timestamp);
+ // make sure nothing is left.
+ region.deleteAll(row, HConstants.LATEST_TIMESTAMP);
+ if (flush) {region_incommon.flushcache();}
+ assertCellValueEquals(region, row, colA, t0, null);
+ assertCellValueEquals(region, row, colA, t1, null);
+ assertCellValueEquals(region, row, colA, t2, null);
+ assertCellValueEquals(region, row, colD, t0, null);
+ assertCellValueEquals(region, row, colD, t1, null);
+ assertCellValueEquals(region, row, colD, t2, null);
+
+ }
+
+ private void assertCellValueEquals(final HRegion region, final Text row,
+ final Text column, final long timestamp, final String value)
+ throws IOException {
+ Map<Text, byte[]> result = region.getFull(row, timestamp);
+ byte[] cell_value = result.get(column);
+ if(value == null){
+ assertEquals(column.toString() + " at timestamp " + timestamp, null, cell_value);
+ } else {
+ if (cell_value == null) {
+ fail(column.toString() + " at timestamp " + timestamp +
+ "\" was expected to be \"" + value + " but was null");
+ }
+ assertEquals(column.toString() + " at timestamp "
+ + timestamp, value, new String(cell_value));
+ }
+ }
+
+ private String cellData(int tsNum, boolean flush){
+ return "t" + tsNum + " data" + (flush ? " - with flush" : "");
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ if (this.miniHdfs != null) {
+ StaticTestEnvironment.shutdownDfs(this.miniHdfs);
+ }
+ super.tearDown();
+ }
+}
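
To restate what this test pins down: region.deleteAll(row, t) removes every
cell in the row whose timestamp is <= t, whether the cell is still in the
memcache or already flushed to an HStore, and a call with
HConstants.LATEST_TIMESTAMP removes the whole row. A minimal sketch
restating the fixture above (the demo method shape is illustrative only):

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    public class DeleteAllSemantics {
      // Illustrative only: restates what TestDeleteAll asserts.
      static void demo(HRegion region, Text row, long t1) throws Exception {
        // Given cells written at t2 < t1 < t0 in this row:
        region.deleteAll(row, t1); // cells at t1 and t2 go; t0 cells survive
        region.deleteAll(row, HConstants.LATEST_TIMESTAMP); // everything goes
      }
    }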