Posted to commits@hbase.apache.org by jg...@apache.org on 2011/10/12 06:15:31 UTC

svn commit: r1182194 [1/2] - in /hbase/trunk: ./ src/main/java/org/apache/hadoop/hbase/io/ src/main/java/org/apache/hadoop/hbase/io/hfile/ src/main/java/org/apache/hadoop/hbase/io/hfile/slab/ src/main/java/org/apache/hadoop/hbase/mapreduce/ src/main/ja...

Author: jgray
Date: Wed Oct 12 04:15:30 2011
New Revision: 1182194

URL: http://svn.apache.org/viewvc?rev=1182194&view=rev
Log:
HBASE-4422  Move block cache parameters and references into single CacheConf class (jgray)

Added:
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
Modified:
    hbase/trunk/CHANGES.txt
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
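
For context, the shape of the change at call sites: the (BlockCache, inMemory, evictOnClose) argument triple collapses into a single CacheConfig object. A minimal, self-contained sketch of the new reader API against this revision (the HFile path argument is illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.io.hfile.CacheConfig;
  import org.apache.hadoop.hbase.io.hfile.HFile;

  public class CacheConfigExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      FileSystem fs = FileSystem.get(conf);
      Path path = new Path(args[0]); // an existing HFile

      // Before: HFile.createReader(fs, path, blockCache, inMemory, evictOnClose)
      // After:  one CacheConfig carries the cache reference and all
      //         cache-related switches read from the Configuration.
      CacheConfig cacheConf = new CacheConfig(conf);
      HFile.Reader reader = HFile.createReader(fs, path, cacheConf);
      reader.loadFileInfo();
      System.out.println(reader); // toString() now reports cacheConf=...
      reader.close();
    }
  }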

Modified: hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/trunk/CHANGES.txt?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/CHANGES.txt (original)
+++ hbase/trunk/CHANGES.txt Wed Oct 12 04:15:30 2011
@@ -12,6 +12,8 @@ Release 0.93.0 - Unreleased
                transaction log (dhruba via jgray)
    HBASE-4145  Provide metrics for hbase client (Ming Ma)
    HBASE-4465  Lazy-seek optimization for StoreFile scanners (mikhail/liyin)
+   HBASE-4422  Move block cache parameters and references into single
+               CacheConf class (jgray)
 
   BUG FIXES
    HBASE-4488  Store could miss rows during flush (Lars H via jgray)

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java Wed Oct 12 04:15:30 2011
@@ -27,7 +27,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -59,10 +59,10 @@ public class HalfStoreFileReader extends
    * @param r
    * @throws IOException
    */
-  public HalfStoreFileReader(final FileSystem fs, final Path p, final BlockCache c,
-    final Reference r)
+  public HalfStoreFileReader(final FileSystem fs, final Path p,
+      final CacheConfig cacheConf, final Reference r)
   throws IOException {
-    super(fs, p, c, false, false);
+    super(fs, p, cacheConf);
     // This is not actual midkey for this half-file; its just border
     // around which we split top and bottom.  Have to look in files to find
     // actual last and first keys for bottom and top halves.  Half-files don't
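
Call sites construct the half reader through the new signature; a hedged sketch, assuming fs, p, cacheConf, and the Reference r are in scope as in the hunk above:

  // Old: new HalfStoreFileReader(fs, p, blockCache, r)
  // New: the CacheConfig replaces the BlockCache plus the two booleans
  // that were previously forwarded to the superclass constructor.
  HalfStoreFileReader halfReader = new HalfStoreFileReader(fs, p, cacheConf, r);
  halfReader.loadFileInfo();
  HFileScanner scanner = halfReader.getScanner(false, false);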

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java Wed Oct 12 04:15:30 2011
@@ -77,25 +77,13 @@ public abstract class AbstractHFileReade
   /** Size of this file. */
   protected final long fileSize;
 
-  /** Block cache to use. */
-  protected final BlockCache blockCache;
+  /** Block cache configuration. */
+  protected final CacheConfig cacheConf;
 
   protected AtomicLong cacheHits = new AtomicLong();
   protected AtomicLong blockLoads = new AtomicLong();
   protected AtomicLong metaLoads = new AtomicLong();
 
-  /**
-   * Whether file is from in-memory store (comes from column family
-   * configuration).
-   */
-  protected boolean inMemory = false;
-
-  /**
-   * Whether blocks of file should be evicted from the block cache when the
-   * file is being closed
-   */
-  protected final boolean evictOnClose;
-
   /** Path of file */
   protected final Path path;
 
@@ -110,16 +98,13 @@ public abstract class AbstractHFileReade
   protected AbstractHFileReader(Path path, FixedFileTrailer trailer,
       final FSDataInputStream fsdis, final long fileSize,
       final boolean closeIStream,
-      final BlockCache blockCache, final boolean inMemory,
-      final boolean evictOnClose) {
+      final CacheConfig cacheConf) {
     this.trailer = trailer;
     this.compressAlgo = trailer.getCompressionCodec();
-    this.blockCache = blockCache;
+    this.cacheConf = cacheConf;
     this.fileSize = fileSize;
     this.istream = fsdis;
     this.closeIStream = closeIStream;
-    this.inMemory = inMemory;
-    this.evictOnClose = evictOnClose;
     this.path = path;
     this.name = path.getName();
     cfStatsPrefix = "cf." + parseCfNameFromPath(path.toString());
@@ -167,7 +152,7 @@ public abstract class AbstractHFileReade
     return "reader=" + path.toString() +
         (!isFileInfoLoaded()? "":
           ", compression=" + compressAlgo.getName() +
-          ", inMemory=" + inMemory +
+          ", cacheConf=" + cacheConf +
           ", firstKey=" + toStringFirstKey() +
           ", lastKey=" + toStringLastKey()) +
           ", avgKeyLen=" + avgKeyLen +

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java Wed Oct 12 04:15:30 2011
@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.Writable;
@@ -91,17 +90,8 @@ public abstract class AbstractHFileWrite
   /** May be null if we were passed a stream. */
   protected final Path path;
 
-  /** Whether to cache key/value data blocks on write */
-  protected final boolean cacheDataBlocksOnWrite;
-
-  /** Whether to cache non-root index blocks on write */
-  protected final boolean cacheIndexBlocksOnWrite;
-
-  /** Block cache to optionally fill on write. */
-  protected BlockCache blockCache;
-
-  /** Configuration used for block cache initialization */
-  private Configuration conf;
+  /** Cache configuration for caching data on write. */
+  protected final CacheConfig cacheConf;
 
   /**
    * Name for this object used when logging or in toString. Is either
@@ -109,7 +99,7 @@ public abstract class AbstractHFileWrite
    */
   protected final String name;
 
-  public AbstractHFileWriter(Configuration conf,
+  public AbstractHFileWriter(CacheConfig cacheConf,
       FSDataOutputStream outputStream, Path path, int blockSize,
       Compression.Algorithm compressAlgo, KeyComparator comparator) {
     this.outputStream = outputStream;
@@ -122,15 +112,7 @@ public abstract class AbstractHFileWrite
         : Bytes.BYTES_RAWCOMPARATOR;
 
     closeOutputStream = path != null;
-
-    cacheDataBlocksOnWrite = conf.getBoolean(HFile.CACHE_BLOCKS_ON_WRITE_KEY,
-        false);
-    cacheIndexBlocksOnWrite = HFileBlockIndex.shouldCacheOnWrite(conf);
-
-    this.conf = conf;
-
-    if (cacheDataBlocksOnWrite || cacheIndexBlocksOnWrite)
-      initBlockCache();
+    this.cacheConf = cacheConf;
   }
 
   /**
@@ -275,13 +257,4 @@ public abstract class AbstractHFileWrite
         fs.getDefaultReplication(), fs.getDefaultBlockSize(),
         null);
   }
-
-  /** Initializes the block cache to use for cache-on-write */
-  protected void initBlockCache() {
-    if (blockCache == null) {
-      blockCache = StoreFile.getBlockCache(conf);
-      conf = null;  // This is all we need configuration for.
-    }
-  }
-
 }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java Wed Oct 12 04:15:30 2011
@@ -83,15 +83,37 @@ public interface BlockCache {
    */
   public void shutdown();
 
+  /**
+   * Returns the total size of the block cache, in bytes.
+   * @return size of cache, in bytes
+   */
   public long size();
 
+  /**
+   * Returns the free size of the block cache, in bytes.
+   * @return free space in cache, in bytes
+   */
   public long getFreeSize();
 
+  /**
+   * Returns the occupied size of the block cache, in bytes.
+   * @return occupied space in cache, in bytes
+   */
   public long getCurrentSize();
 
+  /**
+   * Returns the number of evictions that have occurred.
+   * @return number of evictions
+   */
   public long getEvictedCount();
 
   /**
+   * Returns the number of blocks currently cached in the block cache.
+   * @return number of blocks in the cache
+   */
+  public long getBlockCount();
+
+  /**
    * Performs a BlockCache summary and returns a List of BlockCacheColumnFamilySummary objects.
    * This method could be fairly heavyweight in that it evaluates the entire HBase file-system
    * against what is in the RegionServer BlockCache.
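
The new getBlockCount() accessor rounds out the existing size and eviction methods; for instance, a hypothetical monitoring snippet (cacheConf as constructed from the CacheConfig class added below):

  // Illustrative metrics dump; works against any BlockCache implementation.
  BlockCache cache = cacheConf.getBlockCache();
  if (cache != null) {
    System.out.println("blocks=" + cache.getBlockCount()
        + " used=" + cache.getCurrentSize() + "/" + cache.size()
        + " free=" + cache.getFreeSize()
        + " evicted=" + cache.getEvictedCount());
  }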

Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java?rev=1182194&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java Wed Oct 12 04:15:30 2011
@@ -0,0 +1,326 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryUsage;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.DirectMemoryUtils;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Stores all of the cache objects and configuration for a single HFile.
+ */
+public class CacheConfig {
+  private static final Log LOG = LogFactory.getLog(CacheConfig.class.getName());
+
+  /**
+   * Configuration key for the size of the block cache, in bytes.
+   */
+  public static final String HFILE_BLOCK_CACHE_SIZE_KEY =
+    "hfile.block.cache.size";
+
+  /**
+   * Configuration key to cache data blocks on write. There are separate
+   * switches for bloom blocks and non-root index blocks.
+   */
+  public static final String CACHE_BLOCKS_ON_WRITE_KEY =
+      "hbase.rs.cacheblocksonwrite";
+
+  /**
+   * Configuration key to cache leaf and intermediate-level index blocks on
+   * write.
+   */
+  public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY =
+      "hfile.block.index.cacheonwrite";
+
+  /**
+   * Configuration key to cache compound bloom filter blocks on write.
+   */
+  public static final String CACHE_BLOOM_BLOCKS_ON_WRITE_KEY =
+      "hfile.block.bloom.cacheonwrite";
+
+  /**
+   * TODO: Implement this (jgray)
+   * Configuration key to cache data blocks in compressed format.
+   */
+  public static final String CACHE_DATA_BLOCKS_COMPRESSED_KEY =
+      "hbase.rs.blockcache.cachedatacompressed";
+
+  /**
+   * Configuration key to evict all blocks of a given file from the block cache
+   * when the file is closed.
+   */
+  public static final String EVICT_BLOCKS_ON_CLOSE_KEY =
+      "hbase.rs.evictblocksonclose";
+
+  // Defaults
+
+  public static final boolean DEFAULT_CACHE_DATA_ON_READ = true;
+  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
+  public static final boolean DEFAULT_IN_MEMORY = false;
+  public static final boolean DEFAULT_CACHE_INDEXES_ON_WRITE = false;
+  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
+  public static final boolean DEFAULT_EVICT_ON_CLOSE = false;
+  public static final boolean DEFAULT_COMPRESSED_CACHE = false;
+
+  /** Local reference to the block cache, null if completely disabled */
+  private final BlockCache blockCache;
+
+  /**
+   * Whether blocks should be cached on read (default is on if there is a
+   * cache but this can be turned off on a per-family or per-request basis)
+   */
+  private boolean cacheDataOnRead;
+
+  /** Whether blocks should be flagged as in-memory when being cached */
+  private final boolean inMemory;
+
+  /** Whether data blocks should be cached when new files are written */
+  private final boolean cacheDataOnWrite;
+
+  /** Whether index blocks should be cached when new files are written */
+  private final boolean cacheIndexesOnWrite;
+
+  /** Whether compound bloom filter blocks should be cached on write */
+  private final boolean cacheBloomsOnWrite;
+
+  /** Whether blocks of a file should be evicted when the file is closed */
+  private final boolean evictOnClose;
+
+  /** Whether data blocks should be stored in compressed form in the cache */
+  private final boolean cacheCompressed;
+
+  /**
+   * Create a cache configuration using the specified configuration object and
+   * family descriptor.
+   * @param conf hbase configuration
+   * @param family column family configuration
+   */
+  public CacheConfig(Configuration conf, HColumnDescriptor family) {
+    this(CacheConfig.instantiateBlockCache(conf),
+        family.isBlockCacheEnabled(), family.isInMemory(),
+        conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE),
+        conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
+            DEFAULT_CACHE_INDEXES_ON_WRITE),
+        conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
+            DEFAULT_CACHE_BLOOMS_ON_WRITE),
+        conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE),
+        conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_COMPRESSED_CACHE)
+     );
+  }
+
+  /**
+   * Create a cache configuration using the specified configuration object and
+   * defaults for family level settings.
+   * @param conf hbase configuration
+   */
+  public CacheConfig(Configuration conf) {
+    this(CacheConfig.instantiateBlockCache(conf),
+        DEFAULT_CACHE_DATA_ON_READ,
+        DEFAULT_IN_MEMORY, // This is a family-level setting so can't be set
+                           // strictly from conf
+        conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE),
+        conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
+            DEFAULT_CACHE_INDEXES_ON_WRITE),
+            conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
+                DEFAULT_CACHE_BLOOMS_ON_WRITE),
+        conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE),
+        conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY,
+            DEFAULT_COMPRESSED_CACHE)
+     );
+  }
+
+  /**
+   * Create a block cache configuration with the specified cache and
+   * configuration parameters.
+   * @param blockCache reference to block cache, null if completely disabled
+   * @param cacheDataOnRead whether data blocks should be cached on read
+   * @param inMemory whether blocks should be flagged as in-memory
+   * @param cacheDataOnWrite whether data blocks should be cached on write
+   * @param cacheIndexesOnWrite whether index blocks should be cached on write
+   * @param cacheBloomsOnWrite whether blooms should be cached on write
+   * @param evictOnClose whether blocks should be evicted when HFile is closed
+   * @param cacheCompressed whether to store blocks as compressed in the cache
+   */
+  CacheConfig(final BlockCache blockCache,
+      final boolean cacheDataOnRead, final boolean inMemory,
+      final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite,
+      final boolean cacheBloomsOnWrite, final boolean evictOnClose,
+      final boolean cacheCompressed) {
+    this.blockCache = blockCache;
+    this.cacheDataOnRead = cacheDataOnRead;
+    this.inMemory = inMemory;
+    this.cacheDataOnWrite = cacheDataOnWrite;
+    this.cacheIndexesOnWrite = cacheIndexesOnWrite;
+    this.cacheBloomsOnWrite = cacheBloomsOnWrite;
+    this.evictOnClose = evictOnClose;
+    this.cacheCompressed = cacheCompressed;
+  }
+
+  /**
+   * Constructs a cache configuration copied from the specified configuration.
+   * @param cacheConf
+   */
+  public CacheConfig(CacheConfig cacheConf) {
+    this(cacheConf.blockCache, cacheConf.cacheDataOnRead, cacheConf.inMemory,
+        cacheConf.cacheDataOnWrite, cacheConf.cacheIndexesOnWrite,
+        cacheConf.cacheBloomsOnWrite, cacheConf.evictOnClose,
+        cacheConf.cacheCompressed);
+  }
+
+  /**
+   * Checks whether the block cache is enabled.
+   */
+  public boolean isBlockCacheEnabled() {
+    return this.blockCache != null;
+  }
+
+  /**
+   * Returns the block cache.
+   * @return the block cache, or null if caching is completely disabled
+   */
+  public BlockCache getBlockCache() {
+    return this.blockCache;
+  }
+
+  /**
+   * Returns whether the blocks of this HFile should be cached on read or not.
+   * @return true if blocks should be cached on read, false if not
+   */
+  public boolean shouldCacheDataOnRead() {
+    return isBlockCacheEnabled() && cacheDataOnRead;
+  }
+
+  /**
+   * @return true if blocks in this file should be flagged as in-memory
+   */
+  public boolean isInMemory() {
+    return isBlockCacheEnabled() && this.inMemory;
+  }
+
+  /**
+   * @return true if data blocks should be written to the cache when an HFile is
+   *         written, false if not
+   */
+  public boolean shouldCacheDataOnWrite() {
+    return isBlockCacheEnabled() && this.cacheDataOnWrite;
+  }
+
+  /**
+   * @return true if index blocks should be written to the cache when an HFile
+   *         is written, false if not
+   */
+  public boolean shouldCacheIndexesOnWrite() {
+    return isBlockCacheEnabled() && this.cacheIndexesOnWrite;
+  }
+
+  /**
+   * @return true if bloom blocks should be written to the cache when an HFile
+   *         is written, false if not
+   */
+  public boolean shouldCacheBloomsOnWrite() {
+    return isBlockCacheEnabled() && this.cacheBloomsOnWrite;
+  }
+
+  /**
+   * @return true if blocks should be evicted from the cache when an HFile
+   *         reader is closed, false if not
+   */
+  public boolean shouldEvictOnClose() {
+    return isBlockCacheEnabled() && this.evictOnClose;
+  }
+
+  /**
+   * @return true if blocks should be compressed in the cache, false if not
+   */
+  public boolean shouldCacheCompressed() {
+    return isBlockCacheEnabled() && this.cacheCompressed;
+  }
+
+  @Override
+  public String toString() {
+    if (!isBlockCacheEnabled()) {
+      return "CacheConfig:disabled";
+    }
+    return "CacheConfig:enabled " +
+      "[cacheDataOnRead=" + shouldCacheDataOnRead() + "] " +
+      "[cacheDataOnWrite=" + shouldCacheDataOnWrite() + "] " +
+      "[cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() + "] " +
+      "[cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + "] " +
+      "[cacheEvictOnClose=" + shouldEvictOnClose() + "] " +
+      "[cacheCompressed=" + shouldCacheCompressed() + "]";
+  }
+
+  // Static block cache reference and methods
+
+  /**
+   * Static reference to the block cache, or null if no caching should be used
+   * at all.
+   */
+  private static BlockCache globalBlockCache;
+
+  /** Boolean whether we have disabled the block cache entirely. */
+  private static boolean blockCacheDisabled = false;
+
+  /**
+   * Returns the block cache or <code>null</code> in case none should be used.
+   *
+   * @param conf  The current configuration.
+   * @return The block cache or <code>null</code>.
+   */
+  private static synchronized BlockCache instantiateBlockCache(
+      Configuration conf) {
+    if (globalBlockCache != null) return globalBlockCache;
+    if (blockCacheDisabled) return null;
+
+    float cachePercentage = conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, 0.2f);
+    if (cachePercentage == 0L) {
+      blockCacheDisabled = true;
+      return null;
+    }
+    if (cachePercentage > 1.0) {
+      throw new IllegalArgumentException(HFILE_BLOCK_CACHE_SIZE_KEY +
+        " must be between 0.0 and 1.0, not > 1.0");
+    }
+
+    // Calculate the amount of heap to give the block cache.
+    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+    long cacheSize = (long)(mu.getMax() * cachePercentage);
+    int blockSize = conf.getInt("hbase.offheapcache.minblocksize",
+        HFile.DEFAULT_BLOCKSIZE);
+    long offHeapCacheSize =
+      (long) (conf.getFloat("hbase.offheapcache.percentage", (float) 0.95) *
+          DirectMemoryUtils.getDirectMemorySize());
+    LOG.info("Allocating LruBlockCache with maximum size " +
+      StringUtils.humanReadableInt(cacheSize));
+    if (offHeapCacheSize <= 0) {
+      globalBlockCache = new LruBlockCache(cacheSize,
+          StoreFile.DEFAULT_BLOCKSIZE_SMALL);
+    } else {
+      globalBlockCache = new DoubleBlockCache(cacheSize, offHeapCacheSize,
+          StoreFile.DEFAULT_BLOCKSIZE_SMALL, blockSize, conf);
+    }
+    return globalBlockCache;
+  }
+}
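
A short usage sketch of the new class (the family name and settings here are illustrative, not part of the commit):

  Configuration conf = HBaseConfiguration.create();
  HColumnDescriptor family = new HColumnDescriptor("cf");
  family.setInMemory(true); // family-level flag picked up by CacheConfig

  // Family-aware construction: blockcache/in-memory come from the
  // HColumnDescriptor, the on-write and evict switches from the conf.
  CacheConfig cacheConf = new CacheConfig(conf, family);

  // Every should*() accessor folds in isBlockCacheEnabled(), so callers
  // need no separate null check on the cache reference.
  System.out.println(cacheConf.isInMemory());         // true while a cache exists
  System.out.println(cacheConf.shouldEvictOnClose()); // false by default
  System.out.println(cacheConf); // "CacheConfig:enabled [cacheDataOnRead=true] ..."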

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java Wed Oct 12 04:15:30 2011
@@ -166,4 +166,9 @@ public class DoubleBlockCache implements
     return onHeapCache.getBlockCacheColumnFamilySummaries(conf);
   }
 
+  @Override
+  public long getBlockCount() {
+    return onHeapCache.getBlockCount() + offHeapCache.getBlockCount();
+  }
+
 }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Wed Oct 12 04:15:30 2011
@@ -198,8 +198,12 @@ public class HFile {
    */
   public static abstract class WriterFactory {
     protected Configuration conf;
+    protected CacheConfig cacheConf;
 
-    WriterFactory(Configuration conf) { this.conf = conf; }
+    WriterFactory(Configuration conf, CacheConfig cacheConf) {
+      this.conf = conf;
+      this.cacheConf = cacheConf;
+    }
 
     public abstract Writer createWriter(FileSystem fs, Path path)
         throws IOException;
@@ -236,33 +240,29 @@ public class HFile {
    * can also be {@link HFileWriterV1#WRITER_FACTORY_V1} in testing.
    */
   public static final WriterFactory getWriterFactory(Configuration conf) {
+    return HFile.getWriterFactory(conf, new CacheConfig(conf));
+  }
+
+  /**
+   * Returns the factory to be used to create {@link HFile} writers. Should
+   * always be {@link HFileWriterV2#WRITER_FACTORY_V2} in production, but
+   * can also be {@link HFileWriterV1#WRITER_FACTORY_V1} in testing.
+   */
+  public static final WriterFactory getWriterFactory(Configuration conf,
+      CacheConfig cacheConf) {
     int version = getFormatVersion(conf);
     LOG.debug("Using HFile format version " + version);
     switch (version) {
     case 1:
-      return new HFileWriterV1.WriterFactoryV1(conf);
+      return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
     case 2:
-      return new HFileWriterV2.WriterFactoryV2(conf);
+      return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
     default:
       throw new IllegalArgumentException("Cannot create writer for HFile " +
           "format version " + version);
     }
   }
 
-  /**
-   * Configuration key to evict all blocks of a given file from the block cache
-   * when the file is closed.
-   */
-  public static final String EVICT_BLOCKS_ON_CLOSE_KEY =
-      "hbase.rs.evictblocksonclose";
-
-  /**
-   * Configuration key to cache data blocks on write. There are separate
-   * switches for Bloom blocks and non-root index blocks.
-   */
-  public static final String CACHE_BLOCKS_ON_WRITE_KEY =
-      "hbase.rs.cacheblocksonwrite";
-
   /** An abstraction used by the block index */
   public interface CachingBlockReader {
     HFileBlock readBlock(long offset, long onDiskBlockSize,
@@ -325,35 +325,32 @@ public class HFile {
   }
 
   private static Reader pickReaderVersion(Path path, FSDataInputStream fsdis,
-      long size, boolean closeIStream, BlockCache blockCache,
-      boolean inMemory, boolean evictOnClose) throws IOException {
+      long size, boolean closeIStream, CacheConfig cacheConf)
+  throws IOException {
     FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, size);
     switch (trailer.getVersion()) {
     case 1:
       return new HFileReaderV1(path, trailer, fsdis, size, closeIStream,
-          blockCache, inMemory, evictOnClose);
+          cacheConf);
     case 2:
       return new HFileReaderV2(path, trailer, fsdis, size, closeIStream,
-          blockCache, inMemory, evictOnClose);
+          cacheConf);
     default:
       throw new IOException("Cannot instantiate reader for HFile version " +
           trailer.getVersion());
     }
   }
 
-  public static Reader createReader(
-      FileSystem fs, Path path, BlockCache blockCache, boolean inMemory,
-      boolean evictOnClose) throws IOException {
+  public static Reader createReader(FileSystem fs, Path path,
+      CacheConfig cacheConf) throws IOException {
     return pickReaderVersion(path, fs.open(path),
-        fs.getFileStatus(path).getLen(), true, blockCache, inMemory,
-        evictOnClose);
+        fs.getFileStatus(path).getLen(), true, cacheConf);
   }
 
   public static Reader createReader(Path path, FSDataInputStream fsdis,
-      long size, BlockCache blockache, boolean inMemory, boolean evictOnClose)
+      long size, CacheConfig cacheConf)
       throws IOException {
-    return pickReaderVersion(path, fsdis, size, false, blockache, inMemory,
-        evictOnClose);
+    return pickReaderVersion(path, fsdis, size, false, cacheConf);
   }
 
   /*
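
Writer creation follows the same pattern; a minimal sketch, assuming fs and conf are set up as in the reader example above (Bytes is org.apache.hadoop.hbase.util.Bytes and the output path is scratch space):

  // The new two-argument lookup takes an explicit CacheConfig; the
  // one-argument overload now just builds a CacheConfig from the conf.
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .createWriter(fs, new Path("/tmp/example.hfile"));
  writer.append(Bytes.toBytes("key1"), Bytes.toBytes("value1"));
  writer.close();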

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java Wed Oct 12 04:15:30 2011
@@ -185,7 +185,7 @@ public class HFilePrettyPrinter {
       System.err.println("ERROR, file doesnt exist: " + file);
     }
 
-    HFile.Reader reader = HFile.createReader(fs, file, null, false, false);
+    HFile.Reader reader = HFile.createReader(fs, file, new CacheConfig(conf));
 
     Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
 

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java Wed Oct 12 04:15:30 2011
@@ -54,18 +54,14 @@ public class HFileReaderV1 extends Abstr
    * @param fsdis input stream.  Caller is responsible for closing the passed
    * stream.
    * @param size Length of the stream.
-   * @param blockCache block cache. Pass null if none.
-   * @param inMemory whether blocks should be marked as in-memory in cache
-   * @param evictOnClose whether blocks in cache should be evicted on close
+   * @param cacheConf cache references and configuration
    * @throws IOException
    */
   public HFileReaderV1(Path path, FixedFileTrailer trailer,
       final FSDataInputStream fsdis, final long size,
       final boolean closeIStream,
-      final BlockCache blockCache, final boolean inMemory,
-      final boolean evictOnClose) {
-    super(path, trailer, fsdis, size, closeIStream, blockCache, inMemory,
-        evictOnClose);
+      final CacheConfig cacheConf) {
+    super(path, trailer, fsdis, size, closeIStream, cacheConf);
 
     trailer.expectVersion(1);
     fsBlockReader = new HFileBlock.FSReaderV1(fsdis, compressAlgo, fileSize);
@@ -221,9 +217,10 @@ public class HFileReaderV1 extends Abstr
     synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
       metaLoads.incrementAndGet();
       // Check cache for block.  If found return.
-      if (blockCache != null) {
-        HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
-            true);
+      if (cacheConf.isBlockCacheEnabled()) {
+        HFileBlock cachedBlock =
+          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
+              cacheConf.shouldCacheDataOnRead());
         if (cachedBlock != null) {
           cacheHits.incrementAndGet();
           return cachedBlock.getBufferWithoutHeader();
@@ -240,8 +237,9 @@ public class HFileReaderV1 extends Abstr
       HFile.readOps.incrementAndGet();
 
       // Cache the block
-      if (cacheBlock && blockCache != null) {
-        blockCache.cacheBlock(cacheKey, hfileBlock, inMemory);
+      if (cacheConf.shouldCacheDataOnRead() && cacheBlock) {
+        cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
+            cacheConf.isInMemory());
       }
 
       return hfileBlock.getBufferWithoutHeader();
@@ -279,9 +277,10 @@ public class HFileReaderV1 extends Abstr
       blockLoads.incrementAndGet();
 
       // Check cache for block.  If found return.
-      if (blockCache != null) {
-        HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
-            true);
+      if (cacheConf.isBlockCacheEnabled()) {
+        HFileBlock cachedBlock =
+          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
+              cacheConf.shouldCacheDataOnRead());
         if (cachedBlock != null) {
           cacheHits.incrementAndGet();
           return cachedBlock.getBufferWithoutHeader();
@@ -312,8 +311,9 @@ public class HFileReaderV1 extends Abstr
       HFile.readOps.incrementAndGet();
 
       // Cache the block
-      if (cacheBlock && blockCache != null) {
-        blockCache.cacheBlock(cacheKey, hfileBlock, inMemory);
+      if (cacheConf.shouldCacheDataOnRead() && cacheBlock) {
+        cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
+            cacheConf.isInMemory());
       }
 
       return buf;
@@ -348,10 +348,10 @@ public class HFileReaderV1 extends Abstr
 
   @Override
   public void close() throws IOException {
-    if (evictOnClose && this.blockCache != null) {
+    if (cacheConf.shouldEvictOnClose()) {
       int numEvicted = 0;
       for (int i = 0; i < dataBlockIndexReader.getRootBlockCount(); i++) {
-        if (blockCache.evictBlock(HFile.getBlockCacheKey(name,
+        if (cacheConf.getBlockCache().evictBlock(HFile.getBlockCacheKey(name,
             dataBlockIndexReader.getRootBlockOffset(i))))
           numEvicted++;
       }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java Wed Oct 12 04:15:30 2011
@@ -65,20 +65,20 @@ public class HFileReaderV2 extends Abstr
    * Opens a HFile. You must load the index before you can use it by calling
    * {@link #loadFileInfo()}.
    *
+   * @param path Path to HFile.
+   * @param trailer File trailer.
    * @param fsdis input stream. Caller is responsible for closing the passed
    *          stream.
    * @param size Length of the stream.
-   * @param blockCache block cache. Pass null if none.
-   * @param inMemory whether blocks should be marked as in-memory in cache
-   * @param evictOnClose whether blocks in cache should be evicted on close
+   * @param closeIStream Whether to close the stream.
+   * @param cacheConf Cache configuration.
    * @throws IOException
    */
   public HFileReaderV2(Path path, FixedFileTrailer trailer,
       final FSDataInputStream fsdis, final long size,
-      final boolean closeIStream, final BlockCache blockCache,
-      final boolean inMemory, final boolean evictOnClose) throws IOException {
-    super(path, trailer, fsdis, size, closeIStream, blockCache, inMemory,
-        evictOnClose);
+      final boolean closeIStream, final CacheConfig cacheConf)
+  throws IOException {
+    super(path, trailer, fsdis, size, closeIStream, cacheConf);
 
     trailer.expectVersion(2);
     fsBlockReader = new HFileBlock.FSReaderV2(fsdis, compressAlgo,
@@ -174,9 +174,10 @@ public class HFileReaderV2 extends Abstr
       long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block);
       String cacheKey = HFile.getBlockCacheKey(name, metaBlockOffset);
 
-      if (blockCache != null) {
-        HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
-            true);
+      cacheBlock &= cacheConf.shouldCacheDataOnRead();
+      if (cacheConf.isBlockCacheEnabled()) {
+        HFileBlock cachedBlock =
+          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
         if (cachedBlock != null) {
           // Return a distinct 'shallow copy' of the block,
           // so pos does not get messed by the scanner
@@ -193,8 +194,9 @@ public class HFileReaderV2 extends Abstr
       HFile.readOps.incrementAndGet();
 
       // Cache the block
-      if (cacheBlock && blockCache != null) {
-        blockCache.cacheBlock(cacheKey, metaBlock, inMemory);
+      if (cacheBlock) {
+        cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock,
+            cacheConf.isInMemory());
       }
 
       return metaBlock.getBufferWithoutHeader();
@@ -237,9 +239,10 @@ public class HFileReaderV2 extends Abstr
       blockLoads.incrementAndGet();
 
       // Check cache for block. If found return.
-      if (blockCache != null) {
-        HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
-            true);
+      cacheBlock &= cacheConf.shouldCacheDataOnRead();
+      if (cacheConf.isBlockCacheEnabled()) {
+        HFileBlock cachedBlock =
+          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
         if (cachedBlock != null) {
           cacheHits.incrementAndGet();
 
@@ -257,8 +260,9 @@ public class HFileReaderV2 extends Abstr
       HFile.readOps.incrementAndGet();
 
       // Cache the block
-      if (cacheBlock && blockCache != null) {
-        blockCache.cacheBlock(cacheKey, dataBlock, inMemory);
+      if (cacheBlock) {
+        cacheConf.getBlockCache().cacheBlock(cacheKey, dataBlock,
+            cacheConf.isInMemory());
       }
 
       return dataBlock;
@@ -289,8 +293,8 @@ public class HFileReaderV2 extends Abstr
 
   @Override
   public void close() throws IOException {
-    if (evictOnClose && blockCache != null) {
-      int numEvicted = blockCache.evictBlocksByPrefix(name
+    if (cacheConf.shouldEvictOnClose()) {
+      int numEvicted = cacheConf.getBlockCache().evictBlocksByPrefix(name
           + HFile.CACHE_KEY_SEPARATOR);
       LOG.debug("On close of file " + name + " evicted " + numEvicted
           + " block(s)");
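
Distilled, the read path both reader versions now share looks roughly like this (a paraphrase of the hunks above; readBlockFromDisk stands in for the version-specific FSReader call and is hypothetical):

  HFileBlock readCached(String cacheKey, boolean cacheBlock) throws IOException {
    // Per-request flag ANDed with the per-file setting from CacheConfig.
    cacheBlock &= cacheConf.shouldCacheDataOnRead();
    if (cacheConf.isBlockCacheEnabled()) {
      HFileBlock cached =
          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
      if (cached != null) {
        return cached; // cache hit
      }
    }
    HFileBlock block = readBlockFromDisk(cacheKey); // hypothetical helper
    if (cacheBlock) {
      cacheConf.getBlockCache().cacheBlock(cacheKey, block,
          cacheConf.isInMemory());
    }
    return block;
  }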

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java Wed Oct 12 04:15:30 2011
@@ -79,18 +79,20 @@ public class HFileWriterV1 extends Abstr
 
   static class WriterFactoryV1 extends HFile.WriterFactory {
 
-    WriterFactoryV1(Configuration conf) { super(conf); }
+    WriterFactoryV1(Configuration conf, CacheConfig cacheConf) {
+      super(conf, cacheConf);
+    }
 
     @Override
     public Writer createWriter(FileSystem fs, Path path) throws IOException {
-      return new HFileWriterV1(conf, fs, path);
+      return new HFileWriterV1(conf, cacheConf, fs, path);
     }
 
     @Override
     public Writer createWriter(FileSystem fs, Path path, int blockSize,
         Compression.Algorithm compressAlgo, final KeyComparator comparator)
         throws IOException {
-      return new HFileWriterV1(conf, fs, path, blockSize,
+      return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
           compressAlgo, comparator);
     }
 
@@ -98,7 +100,7 @@ public class HFileWriterV1 extends Abstr
     public Writer createWriter(FileSystem fs, Path path, int blockSize,
         String compressAlgoName,
         final KeyComparator comparator) throws IOException {
-      return new HFileWriterV1(conf, fs, path, blockSize,
+      return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
           compressAlgoName, comparator);
     }
 
@@ -106,21 +108,23 @@ public class HFileWriterV1 extends Abstr
     public Writer createWriter(final FSDataOutputStream ostream,
         final int blockSize, final String compress,
         final KeyComparator comparator) throws IOException {
-      return new HFileWriterV1(conf, ostream, blockSize, compress, comparator);
+      return new HFileWriterV1(cacheConf, ostream, blockSize, compress,
+          comparator);
     }
 
     @Override
     public Writer createWriter(final FSDataOutputStream ostream,
         final int blockSize, final Compression.Algorithm compress,
         final KeyComparator c) throws IOException {
-      return new HFileWriterV1(conf, ostream, blockSize, compress, c);
+      return new HFileWriterV1(cacheConf, ostream, blockSize, compress, c);
     }
   }
 
   /** Constructor that uses all defaults for compression and block size. */
-  public HFileWriterV1(Configuration conf, FileSystem fs, Path path)
+  public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
+      FileSystem fs, Path path)
       throws IOException {
-    this(conf, fs, path, HFile.DEFAULT_BLOCKSIZE,
+    this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
         HFile.DEFAULT_COMPRESSION_ALGORITHM,
         null);
   }
@@ -129,37 +133,37 @@ public class HFileWriterV1 extends Abstr
    * Constructor that takes a path, creates and closes the output stream. Takes
    * compression algorithm name as string.
    */
-  public HFileWriterV1(Configuration conf, FileSystem fs, Path path,
-      int blockSize, String compressAlgoName,
+  public HFileWriterV1(Configuration conf, CacheConfig cacheConf, FileSystem fs,
+      Path path, int blockSize, String compressAlgoName,
       final KeyComparator comparator) throws IOException {
-    this(conf, fs, path, blockSize,
+    this(conf, cacheConf, fs, path, blockSize,
         compressionByName(compressAlgoName), comparator);
   }
 
   /** Constructor that takes a path, creates and closes the output stream. */
-  public HFileWriterV1(Configuration conf, FileSystem fs, Path path,
-      int blockSize, Compression.Algorithm compress,
+  public HFileWriterV1(Configuration conf, CacheConfig cacheConf, FileSystem fs,
+      Path path, int blockSize, Compression.Algorithm compress,
       final KeyComparator comparator) throws IOException {
-    super(conf, createOutputStream(conf, fs, path), path,
+    super(cacheConf, createOutputStream(conf, fs, path), path,
         blockSize, compress, comparator);
   }
 
   /** Constructor that takes a stream. */
-  public HFileWriterV1(Configuration conf,
+  public HFileWriterV1(CacheConfig cacheConf,
       final FSDataOutputStream outputStream, final int blockSize,
       final String compressAlgoName, final KeyComparator comparator)
       throws IOException {
-    this(conf, outputStream, blockSize,
+    this(cacheConf, outputStream, blockSize,
         Compression.getCompressionAlgorithmByName(compressAlgoName),
         comparator);
   }
 
   /** Constructor that takes a stream. */
-  public HFileWriterV1(Configuration conf,
+  public HFileWriterV1(CacheConfig cacheConf,
       final FSDataOutputStream outputStream, final int blockSize,
       final Compression.Algorithm compress, final KeyComparator comparator)
       throws IOException {
-    super(conf, outputStream, null, blockSize, compress, comparator);
+    super(cacheConf, outputStream, null, blockSize, compress, comparator);
   }
 
   /**
@@ -194,10 +198,11 @@ public class HFileWriterV1 extends Abstr
     HFile.writeTimeNano.addAndGet(System.nanoTime() - startTimeNs);
     HFile.writeOps.incrementAndGet();
 
-    if (cacheDataBlocksOnWrite) {
+    if (cacheConf.shouldCacheDataOnWrite()) {
       baosDos.flush();
       byte[] bytes = baos.toByteArray();
-      blockCache.cacheBlock(HFile.getBlockCacheKey(name, blockBegin),
+      cacheConf.getBlockCache().cacheBlock(
+          HFile.getBlockCacheKey(name, blockBegin),
           new HFileBlock(BlockType.DATA,
               (int) (outputStream.getPos() - blockBegin), bytes.length, -1,
               ByteBuffer.wrap(bytes, 0, bytes.length), true, blockBegin));
@@ -217,7 +222,7 @@ public class HFileWriterV1 extends Abstr
     this.out = getCompressingStream();
     BlockType.DATA.write(out);
     firstKeyInBlock = null;
-    if (cacheDataBlocksOnWrite) {
+    if (cacheConf.shouldCacheDataOnWrite()) {
       this.baos = new ByteArrayOutputStream();
       this.baosDos = new DataOutputStream(baos);
       baosDos.write(HFileBlock.DUMMY_HEADER);
@@ -361,7 +366,7 @@ public class HFileWriterV1 extends Abstr
     this.lastKeyLength = klength;
     this.entryCount++;
     // If we are pre-caching blocks on write, fill byte array stream
-    if (cacheDataBlocksOnWrite) {
+    if (cacheConf.shouldCacheDataOnWrite()) {
       this.baosDos.writeInt(klength);
       this.baosDos.writeInt(vlength);
       this.baosDos.write(key, koffset, klength);
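
All of the writer-side behavior above is driven by the keys defined in CacheConfig; a sketch of enabling every on-write switch (assumes hfile.block.cache.size is non-zero so a cache is actually instantiated):

  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);       // data
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true); // index
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true); // bloom
  conf.setBoolean(CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY, true);

  CacheConfig cacheConf = new CacheConfig(conf);
  assert cacheConf.shouldCacheDataOnWrite();
  assert cacheConf.shouldCacheIndexesOnWrite();
  assert cacheConf.shouldCacheBloomsOnWrite();
  assert cacheConf.shouldEvictOnClose();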

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java Wed Oct 12 04:15:30 2011
@@ -65,19 +65,21 @@ public class HFileWriterV2 extends Abstr
 
   static class WriterFactoryV2 extends HFile.WriterFactory {
 
-    WriterFactoryV2(Configuration conf) { super(conf); }
+    WriterFactoryV2(Configuration conf, CacheConfig cacheConf) {
+      super(conf, cacheConf);
+    }
 
     @Override
     public Writer createWriter(FileSystem fs, Path path)
         throws IOException {
-      return new HFileWriterV2(conf, fs, path);
+      return new HFileWriterV2(conf, cacheConf, fs, path);
     }
 
     @Override
     public Writer createWriter(FileSystem fs, Path path, int blockSize,
         Compression.Algorithm compress,
         final KeyComparator comparator) throws IOException {
-      return new HFileWriterV2(conf, fs, path, blockSize,
+      return new HFileWriterV2(conf, cacheConf, fs, path, blockSize,
           compress, comparator);
     }
 
@@ -85,7 +87,7 @@ public class HFileWriterV2 extends Abstr
     public Writer createWriter(FileSystem fs, Path path, int blockSize,
         String compress, final KeyComparator comparator)
         throws IOException {
-      return new HFileWriterV2(conf, fs, path, blockSize,
+      return new HFileWriterV2(conf, cacheConf, fs, path, blockSize,
           compress, comparator);
     }
 
@@ -93,21 +95,24 @@ public class HFileWriterV2 extends Abstr
     public Writer createWriter(final FSDataOutputStream ostream,
         final int blockSize, final String compress,
         final KeyComparator comparator) throws IOException {
-      return new HFileWriterV2(conf, ostream, blockSize, compress, comparator);
+      return new HFileWriterV2(conf, cacheConf, ostream, blockSize, compress,
+          comparator);
     }
 
     @Override
     public Writer createWriter(final FSDataOutputStream ostream,
         final int blockSize, final Compression.Algorithm compress,
         final KeyComparator c) throws IOException {
-      return new HFileWriterV2(conf, ostream, blockSize, compress, c);
+      return new HFileWriterV2(conf, cacheConf, ostream, blockSize, compress,
+          c);
     }
   }
 
   /** Constructor that uses all defaults for compression and block size. */
-  public HFileWriterV2(Configuration conf, FileSystem fs, Path path)
+  public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
+      FileSystem fs, Path path)
       throws IOException {
-    this(conf, fs, path, HFile.DEFAULT_BLOCKSIZE,
+    this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
         HFile.DEFAULT_COMPRESSION_ALGORITHM, null);
   }
 
@@ -115,38 +120,38 @@ public class HFileWriterV2 extends Abstr
    * Constructor that takes a path, creates and closes the output stream. Takes
    * compression algorithm name as string.
    */
-  public HFileWriterV2(Configuration conf, FileSystem fs, Path path,
-      int blockSize, String compressAlgoName,
+  public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs,
+      Path path, int blockSize, String compressAlgoName,
       final KeyComparator comparator) throws IOException {
-    this(conf, fs, path, blockSize,
+    this(conf, cacheConf, fs, path, blockSize,
         compressionByName(compressAlgoName), comparator);
   }
 
   /** Constructor that takes a path, creates and closes the output stream. */
-  public HFileWriterV2(Configuration conf, FileSystem fs, Path path,
-      int blockSize, Compression.Algorithm compressAlgo,
+  public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs,
+      Path path, int blockSize, Compression.Algorithm compressAlgo,
       final KeyComparator comparator) throws IOException {
-    super(conf, createOutputStream(conf, fs, path), path,
+    super(cacheConf, createOutputStream(conf, fs, path), path,
         blockSize, compressAlgo, comparator);
     finishInit(conf);
   }
 
   /** Constructor that takes a stream. */
-  public HFileWriterV2(final Configuration conf,
+  public HFileWriterV2(final Configuration conf, final CacheConfig cacheConf,
       final FSDataOutputStream outputStream, final int blockSize,
       final String compressAlgoName, final KeyComparator comparator)
       throws IOException {
-    this(conf, outputStream, blockSize,
+    this(conf, cacheConf, outputStream, blockSize,
         Compression.getCompressionAlgorithmByName(compressAlgoName),
         comparator);
   }
 
   /** Constructor that takes a stream. */
-  public HFileWriterV2(final Configuration conf,
+  public HFileWriterV2(final Configuration conf, final CacheConfig cacheConf,
       final FSDataOutputStream outputStream, final int blockSize,
       final Compression.Algorithm compress, final KeyComparator comparator)
       throws IOException {
-    super(conf, outputStream, null, blockSize, compress, comparator);
+    super(cacheConf, outputStream, null, blockSize, compress, comparator);
     finishInit(conf);
   }
 
@@ -159,9 +164,10 @@ public class HFileWriterV2 extends Abstr
     fsBlockWriter = new HFileBlock.Writer(compressAlgo);
 
     // Data block index writer
+    boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
     dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
-        cacheIndexBlocksOnWrite ? blockCache : null,
-        cacheIndexBlocksOnWrite ? name : null);
+        cacheIndexesOnWrite ? cacheConf.getBlockCache(): null,
+        cacheIndexesOnWrite ? name : null);
     dataBlockIndexWriter.setMaxChunkSize(
         HFileBlockIndex.getMaxChunkSize(conf));
     inlineBlockWriters.add(dataBlockIndexWriter);
@@ -208,8 +214,9 @@ public class HFileWriterV2 extends Abstr
     HFile.writeTimeNano.addAndGet(System.nanoTime() - startTimeNs);
     HFile.writeOps.incrementAndGet();
 
-    if (cacheDataBlocksOnWrite) {
-      blockCache.cacheBlock(HFile.getBlockCacheKey(name, lastDataBlockOffset),
+    if (cacheConf.shouldCacheDataOnWrite()) {
+      cacheConf.getBlockCache().cacheBlock(
+          HFile.getBlockCacheKey(name, lastDataBlockOffset),
           fsBlockWriter.getBlockForCaching());
     }
   }
@@ -228,7 +235,8 @@ public class HFileWriterV2 extends Abstr
 
         if (cacheThisBlock) {
           // Cache this block on write.
-          blockCache.cacheBlock(HFile.getBlockCacheKey(name, offset),
+          cacheConf.getBlockCache().cacheBlock(
+              HFile.getBlockCacheKey(name, offset),
               fsBlockWriter.getBlockForCaching());
         }
       }
@@ -242,7 +250,8 @@ public class HFileWriterV2 extends Abstr
    */
   private void newBlock() throws IOException {
     // This is where the next block begins.
-    fsBlockWriter.startWriting(BlockType.DATA, cacheDataBlocksOnWrite);
+    fsBlockWriter.startWriting(BlockType.DATA,
+        cacheConf.shouldCacheDataOnWrite());
     firstKeyInBlock = null;
   }
 
@@ -370,7 +379,7 @@ public class HFileWriterV2 extends Abstr
         long offset = outputStream.getPos();
         // write the metadata content
         DataOutputStream dos = fsBlockWriter.startWriting(BlockType.META,
-            cacheDataBlocksOnWrite);
+            cacheConf.shouldCacheDataOnWrite());
         metaData.get(i).write(dos);
 
         fsBlockWriter.writeHeaderAndData(outputStream);
@@ -424,8 +433,6 @@ public class HFileWriterV2 extends Abstr
   @Override
   public void addInlineBlockWriter(InlineBlockWriter ibw) {
     inlineBlockWriters.add(ibw);
-    if (blockCache == null && ibw.cacheOnWrite())
-      initBlockCache();
   }
 
   @Override

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Wed Oct 12 04:15:30 2011
@@ -40,7 +40,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -525,6 +524,11 @@ public class LruBlockCache implements Bl
     return this.elements.get();
   }
 
+  @Override
+  public long getBlockCount() {
+    return this.elements.get();
+  }
+
   /**
    * Get the number of eviction runs that have occurred
    */
@@ -624,7 +628,7 @@ public class LruBlockCache implements Bl
   public CacheStats getStats() {
     return this.stats;
   }
-  
+
   public final static long CACHE_FIXED_OVERHEAD = ClassSize.align(
       (3 * Bytes.SIZEOF_LONG) + (8 * ClassSize.REFERENCE) +
       (5 * Bytes.SIZEOF_FLOAT) + Bytes.SIZEOF_BOOLEAN
@@ -645,18 +649,18 @@ public class LruBlockCache implements Bl
 
   @Override
   public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException {
-   
+
     Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(
         FileSystem.get(conf),
         FSUtils.getRootDir(conf));
-        
-    // quirky, but it's a compound key and this is a shortcut taken instead of 
+
+    // quirky, but it's a compound key and this is a shortcut taken instead of
     // creating a class that would represent only a key.
-    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs = 
+    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
       new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();
 
     final String pattern = "\\" + HFile.CACHE_KEY_SEPARATOR;
-    
+
     for (CachedBlock cb : map.values()) {
       // split name and get the first part (e.g., "8351478435190657655_0")
       // see HFile.getBlockCacheKey for structure of block cache key.
@@ -665,7 +669,7 @@ public class LruBlockCache implements Bl
         String sf = s[0];
         Path path = sfMap.get(sf);
         if ( path != null) {
-          BlockCacheColumnFamilySummary lookup = 
+          BlockCacheColumnFamilySummary lookup =
             BlockCacheColumnFamilySummary.createFromStoreFilePath(path);
           BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
           if (bcse == null) {
@@ -677,12 +681,12 @@ public class LruBlockCache implements Bl
         }
       }
     }
-    List<BlockCacheColumnFamilySummary> list = 
+    List<BlockCacheColumnFamilySummary> list =
         new ArrayList<BlockCacheColumnFamilySummary>(bcs.values());
-    Collections.sort( list );  
+    Collections.sort( list );
     return list;
   }
-    
+
   // Simple calculators of sizes given factors and maxSize
 
   private long acceptableSize() {

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java Wed Oct 12 04:15:30 2011
@@ -128,5 +128,11 @@ public class SimpleBlockCache implements
     throw new UnsupportedOperationException();
   }
 
+  @Override
+  public long getBlockCount() {
+    // TODO: implement this if we ever actually use this block cache
+    return 0;
+  }
+
 }
 

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java Wed Oct 12 04:15:30 2011
@@ -269,6 +269,11 @@ public class SingleSizeCache implements 
     return this.stats;
   }
 
+  @Override
+  public long getBlockCount() {
+    return numBlocks - backingStore.getBlocksRemaining();
+  }
+
   /* Since it's off-heap, it doesn't matter if it's in memory or not */
   @Override
   public void cacheBlock(String blockName, Cacheable buf, boolean inMemory) {

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java Wed Oct 12 04:15:30 2011
@@ -375,6 +375,15 @@ public class SlabCache implements SlabIt
     return 0; // this cache, by default, allocates all its space.
   }
 
+  @Override
+  public long getBlockCount() {
+    long count = 0;
+    for (SingleSizeCache cache : backingStore.values()) {
+      count += cache.getBlockCount();
+    }
+    return count;
+  }
+
   public long getCurrentSize() {
     return size;
   }

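Note: each cache now answers the new BlockCache.getBlockCount() contract; LruBlockCache reports its element count, SingleSizeCache derives occupied slots from its backing store, and SlabCache sums across its per-size children. A caller-side sketch using only accessors visible in this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class BlockCountSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        BlockCache cache = new CacheConfig(conf).getBlockCache();
        if (cache != null) {
          // Uniform across implementations: LRU, slab, or the
          // DoubleBlockCache combination.
          System.out.println("cached blocks = " + cache.getBlockCount());
          System.out.println("free bytes    = " + cache.getFreeSize());
        }
      }
    }
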
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java Wed Oct 12 04:15:30 2011
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -169,7 +170,8 @@ public class HFileOutputFormat extends F
         Path familydir = new Path(outputdir, Bytes.toString(family));
         String compression = compressionMap.get(family);
         compression = compression == null ? defaultCompression : compression;
-        wl.writer = HFile.getWriterFactory(conf).createWriter(fs,
+        wl.writer =
+          HFile.getWriterFactory(conf).createWriter(fs,
           StoreFile.getUniqueFile(fs, familydir), blocksize,
           compression, KeyValue.KEY_COMPARATOR);
         this.writers.put(family, wl);

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Wed Oct 12 04:15:30 2011
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.client.Se
 import org.apache.hadoop.hbase.io.HalfStoreFileReader;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.Reference.Range;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
@@ -288,7 +289,8 @@ public class LoadIncrementalHFiles exten
   throws IOException {
     final Path hfilePath = item.hfilePath;
     final FileSystem fs = hfilePath.getFileSystem(getConf());
-    HFile.Reader hfr = HFile.createReader(fs, hfilePath, null, false, false);
+    HFile.Reader hfr = HFile.createReader(fs, hfilePath,
+        new CacheConfig(getConf()));
     final byte[] first, last;
     try {
       hfr.loadFileInfo();
@@ -378,10 +380,12 @@ public class LoadIncrementalHFiles exten
       HColumnDescriptor familyDescriptor)
   throws IOException {
     FileSystem fs = inFile.getFileSystem(conf);
+    CacheConfig cacheConf = new CacheConfig(conf);
     HalfStoreFileReader halfReader = null;
     StoreFile.Writer halfWriter = null;
     try {
-      halfReader = new HalfStoreFileReader(fs, inFile, null, reference);
+      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf,
+          reference);
       Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
 
       int blocksize = familyDescriptor.getBlocksize();
@@ -389,8 +393,8 @@ public class LoadIncrementalHFiles exten
       BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
 
       halfWriter = new StoreFile.Writer(
-          fs, outFile, blocksize, compression, conf, KeyValue.COMPARATOR,
-          bloomFilterType, 0);
+          fs, outFile, blocksize, compression, conf, cacheConf,
+          KeyValue.COMPARATOR, bloomFilterType, 0);
       HFileScanner scanner = halfReader.getScanner(false, false);
       scanner.seekTo();
       do {
@@ -490,7 +494,8 @@ public class LoadIncrementalHFiles exten
       for (Path hfile : hfiles) {
         if (hfile.getName().startsWith("_")) continue;
         
-        HFile.Reader reader = HFile.createReader(fs, hfile, null, false, false);
+        HFile.Reader reader = HFile.createReader(fs, hfile,
+            new CacheConfig(getConf()));
         final byte[] first, last;
         try {
           reader.loadFileInfo();

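Note: both reader call sites in LoadIncrementalHFiles now build a CacheConfig from the job configuration instead of passing (null, false, false). A sketch of the pattern, hedged: the path is an illustrative assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;

    public class BoundaryReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Path hfilePath = new Path("/tmp/example.hfile");
        FileSystem fs = hfilePath.getFileSystem(conf);
        // Readers take a CacheConfig instead of the old
        // (blockCache, inMemory, evictOnClose) flags.
        HFile.Reader reader =
            HFile.createReader(fs, hfilePath, new CacheConfig(conf));
        try {
          reader.loadFileInfo();
          byte[] firstRow = reader.getFirstRowKey();
        } finally {
          reader.close();
        }
      }
    }
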
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Wed Oct 12 04:15:30 2011
@@ -85,6 +85,7 @@ import org.apache.hadoop.hbase.filter.Co
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -4098,7 +4099,8 @@ public class HRegion implements HeapSize
       processTable(fs, tableDir, log, c, majorCompact);
      } finally {
        log.close();
-       BlockCache bc = StoreFile.getBlockCache(c);
+       // TODO: is this still right?
+       BlockCache bc = new CacheConfig(c).getBlockCache();
        if (bc != null) bc.shutdown();
      }
   }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Wed Oct 12 04:15:30 2011
@@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.ClockOutO
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
@@ -76,6 +75,7 @@ import org.apache.hadoop.hbase.TableDesc
 import org.apache.hadoop.hbase.UnknownRowLockException;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.YouAreDeadException;
+import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.RootLocationEditor;
@@ -97,10 +97,11 @@ import org.apache.hadoop.hbase.coprocess
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
@@ -291,6 +292,9 @@ public class HRegionServer implements HR
 
   private final RegionServerAccounting regionServerAccounting;
 
+  // Cache configuration and block cache reference
+  private final CacheConfig cacheConfig;
+
   /**
    * The server name the Master sees us as.  Its made from the hostname the
    * master passes us, port, and server startcode. Gets set after registration
@@ -386,6 +390,7 @@ public class HRegionServer implements HR
     User.login(this.conf, "hbase.regionserver.keytab.file",
       "hbase.regionserver.kerberos.principal", this.isa.getHostName());
     regionServerAccounting = new RegionServerAccounting();
+    cacheConfig = new CacheConfig(conf);
   }
 
   /**
@@ -687,9 +692,8 @@ public class HRegionServer implements HR
       }
     }
     // Send cache a shutdown.
-    BlockCache c = StoreFile.getBlockCache(this.conf);
-    if (c != null) {
-      c.shutdown();
+    if (cacheConfig.isBlockCacheEnabled()) {
+      cacheConfig.getBlockCache().shutdown();
     }
 
     // Send interrupts to wake up threads if sleeping so they notice shutdown.
@@ -1277,7 +1281,7 @@ public class HRegionServer implements HR
     this.metrics.readRequestsCount.set(readRequestsCount);
     this.metrics.writeRequestsCount.set(writeRequestsCount);
 
-    BlockCache blockCache = StoreFile.getBlockCache(conf);
+    BlockCache blockCache = cacheConfig.getBlockCache();
     if (blockCache != null) {
       this.metrics.blockCacheCount.set(blockCache.size());
       this.metrics.blockCacheFree.set(blockCache.getFreeSize());
@@ -3230,7 +3234,7 @@ public class HRegionServer implements HR
 
   @Override
   public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries() throws IOException {
-    BlockCache c = StoreFile.getBlockCache(this.conf);
+    BlockCache c = new CacheConfig(this.conf).getBlockCache();
     return c.getBlockCacheColumnFamilySummaries(this.conf);
   }
 

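Note: the region server builds a single CacheConfig at startup and routes both metrics and shutdown through it, while getBlockCacheColumnFamilySummaries() constructs a fresh instance; that only works if CacheConfig hands back the same process-wide BlockCache for a given configuration, an assumption this sketch makes explicit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class CacheLifecycleSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Built once at startup, as HRegionServer now does.
        CacheConfig cacheConfig = new CacheConfig(conf);
        // ... serve requests; metrics read cacheConfig.getBlockCache() ...
        if (cacheConfig.isBlockCacheEnabled()) {
          BlockCache bc = cacheConfig.getBlockCache();
          bc.shutdown(); // stop the cache's background work on server stop
        }
      }
    }
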
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Wed Oct 12 04:15:30 2011
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -52,8 +53,8 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.CollectionBackedScanner;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Preconditions;
@@ -93,6 +94,7 @@ public class Store implements HeapSize {
   private final HColumnDescriptor family;
   final FileSystem fs;
   final Configuration conf;
+  final CacheConfig cacheConf;
   // ttl in milliseconds.
   protected long ttl;
   protected int minVersions;
@@ -115,7 +117,6 @@ public class Store implements HeapSize {
   private final Object flushLock = new Object();
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private final String storeNameStr;
-  private final boolean inMemory;
   private CompactionProgress progress;
   private final int compactionKVMax;
 
@@ -195,8 +196,8 @@ public class Store implements HeapSize {
       conf.getInt("hbase.hstore.compaction.min",
         /*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3)));
 
-    // Check if this is in-memory store
-    this.inMemory = family.isInMemory();
+    // Setting up cache configuration for this family
+    this.cacheConf = new CacheConfig(conf, family);
     this.blockingStoreFileCount =
       conf.getInt("hbase.hstore.blockingStoreFiles", 7);
 
@@ -270,8 +271,8 @@ public class Store implements HeapSize {
       }
       StoreFile curfile = null;
       try {
-        curfile = new StoreFile(fs, p, blockcache, this.conf,
-            this.family.getBloomFilterType(), this.inMemory);
+        curfile = new StoreFile(fs, p, this.conf, this.cacheConf,
+            this.family.getBloomFilterType());
         curfile.createReader();
       } catch (IOException ioe) {
         LOG.warn("Failed open of " + p + "; presumption is that file was " +
@@ -335,7 +336,7 @@ public class Store implements HeapSize {
       LOG.info("Validating hfile at " + srcPath + " for inclusion in "
           + "store " + this + " region " + this.region);
       reader = HFile.createReader(srcPath.getFileSystem(conf),
-          srcPath, null, false, false);
+          srcPath, cacheConf);
       reader.loadFileInfo();
 
       byte[] firstKey = reader.getFirstRowKey();
@@ -375,8 +376,8 @@ public class Store implements HeapSize {
     LOG.info("Renaming bulk load file " + srcPath + " to " + dstPath);
     StoreFile.rename(fs, srcPath, dstPath);
 
-    StoreFile sf = new StoreFile(fs, dstPath, blockcache,
-        this.conf, this.family.getBloomFilterType(), this.inMemory);
+    StoreFile sf = new StoreFile(fs, dstPath, this.conf, this.cacheConf,
+        this.family.getBloomFilterType());
     sf.createReader();
 
     LOG.info("Moved hfile " + srcPath + " into store directory " +
@@ -530,8 +531,8 @@ public class Store implements HeapSize {
     }
 
     status.setStatus("Flushing " + this + ": reopening flushed file");
-    StoreFile sf = new StoreFile(this.fs, dstPath, blockcache,
-      this.conf, this.family.getBloomFilterType(), this.inMemory);
+    StoreFile sf = new StoreFile(this.fs, dstPath, this.conf, this.cacheConf,
+        this.family.getBloomFilterType());
     StoreFile.Reader r = sf.createReader();
     this.storeSize += r.length();
     this.totalUncompressedBytes += r.getTotalUncompressedBytes();
@@ -562,7 +563,7 @@ public class Store implements HeapSize {
     Compression.Algorithm compression)
   throws IOException {
     return StoreFile.createWriter(this.fs, region.getTmpDir(), this.blocksize,
-        compression, this.comparator, this.conf,
+        compression, this.comparator, this.conf, this.cacheConf,
         this.family.getBloomFilterType(), maxKeyCount);
   }
 
@@ -1227,8 +1228,8 @@ public class Store implements HeapSize {
         LOG.error("Failed move of compacted file " + compactedFile.getPath(), e);
         return null;
       }
-      result = new StoreFile(this.fs, p, blockcache, this.conf,
-          this.family.getBloomFilterType(), this.inMemory);
+      result = new StoreFile(this.fs, p, this.conf, this.cacheConf,
+          this.family.getBloomFilterType());
       result.createReader();
     }
     this.lock.writeLock().lock();
@@ -1790,9 +1791,9 @@ public class Store implements HeapSize {
   }
 
   public static final long FIXED_OVERHEAD = ClassSize.align(
-      ClassSize.OBJECT + (16 * ClassSize.REFERENCE) +
+      ClassSize.OBJECT + (17 * ClassSize.REFERENCE) +
       (7 * Bytes.SIZEOF_LONG) + (1 * Bytes.SIZEOF_DOUBLE) +
-      (6 * Bytes.SIZEOF_INT) + (3 * Bytes.SIZEOF_BOOLEAN));
+      (6 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN));
 
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
       ClassSize.OBJECT + ClassSize.REENTRANT_LOCK +

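Note: Store now derives a family-aware CacheConfig in its constructor; the old inMemory boolean disappears because CacheConfig(conf, family) is expected to absorb HColumnDescriptor settings such as isInMemory(). A package-internal sketch of the new StoreFile handling (the StoreFile constructor is package-private; fs, path, and family are assumed):

    package org.apache.hadoop.hbase.regionserver;

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    class StoreFileOpenSketch {
      static StoreFile open(FileSystem fs, Path path, Configuration conf,
          HColumnDescriptor family) throws IOException {
        // Family-level settings now travel inside one object instead of
        // separate blockcache/inMemory booleans.
        CacheConfig cacheConf = new CacheConfig(conf, family);
        StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
            family.getBloomFilterType());
        sf.createReader();
        return sf;
      }
    }
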
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1182194&r1=1182193&r2=1182194&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Wed Oct 12 04:15:30 2011
@@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.regionse
 import java.io.DataInput;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryUsage;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Collection;
@@ -49,22 +47,19 @@ import org.apache.hadoop.hbase.client.Sc
 import org.apache.hadoop.hbase.io.HalfStoreFileReader;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
-import org.apache.hadoop.hbase.io.hfile.DoubleBlockCache;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.HFileWriterV1;
-import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.BloomFilterWriter;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.DirectMemoryUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Function;
 import com.google.common.collect.ImmutableList;
@@ -140,11 +135,8 @@ public class StoreFile {
   // If this StoreFile references another, this is the other files path.
   private Path referencePath;
 
-  // Should the block cache be used or not.
-  private boolean blockcache;
-
-  // Is this from an in-memory store
-  private boolean inMemory;
+  // Block cache configuration and reference.
+  private final CacheConfig cacheConf;
 
   // HDFS blocks distribution information
   private HDFSBlocksDistribution hdfsBlocksDistribution;
@@ -200,6 +192,7 @@ public class StoreFile {
    * @param p  The path of the file.
    * @param blockcache  <code>true</code> if the block cache is enabled.
    * @param conf  The current configuration.
+   * @param cacheConf  The cache configuration and block cache reference.
    * @param cfBloomType The bloom type to use for this store file as specified
    *          by column family configuration. This may or may not be the same
    *          as the Bloom filter type actually present in the HFile, because
@@ -209,16 +202,14 @@ public class StoreFile {
    */
   StoreFile(final FileSystem fs,
             final Path p,
-            final boolean blockcache,
             final Configuration conf,
-            final BloomType cfBloomType,
-            final boolean inMemory)
+            final CacheConfig cacheConf,
+            final BloomType cfBloomType)
       throws IOException {
     this.conf = conf;
     this.fs = fs;
     this.path = p;
-    this.blockcache = blockcache;
-    this.inMemory = inMemory;
+    this.cacheConf = cacheConf;
     if (isReference(p)) {
       this.reference = Reference.read(fs, p);
       this.referencePath = getReferredToFile(this.path);
@@ -366,49 +357,6 @@ public class StoreFile {
   }
 
   /**
-   * Returns the block cache or <code>null</code> in case none should be used.
-   *
-   * @param conf  The current configuration.
-   * @return The block cache or <code>null</code>.
-   */
-  public static synchronized BlockCache getBlockCache(Configuration conf) {
-    if (hfileBlockCache != null) return hfileBlockCache;
-
-    float cachePercentage = conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, 0.2f);
-    // There should be a better way to optimize this. But oh well.
-    if (cachePercentage == 0L) return null;
-    if (cachePercentage > 1.0) {
-      throw new IllegalArgumentException(HFILE_BLOCK_CACHE_SIZE_KEY +
-        " must be between 0.0 and 1.0, not > 1.0");
-    }
-
-    // Calculate the amount of heap to give the heap.
-    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
-    long cacheSize = (long)(mu.getMax() * cachePercentage);
-    int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HFile.DEFAULT_BLOCKSIZE);
-    long offHeapCacheSize = (long) (conf.getFloat("hbase.offheapcache.percentage", (float) 0.95) * DirectMemoryUtils.getDirectMemorySize());
-    boolean enableOffHeapCache = conf.getBoolean("hbase.offheapcache.enable", false);
-    LOG.info("Allocating LruBlockCache with maximum size " +
-      StringUtils.humanReadableInt(cacheSize));
-    if(offHeapCacheSize <= 0 || !enableOffHeapCache) {
-      hfileBlockCache = new LruBlockCache(cacheSize, DEFAULT_BLOCKSIZE_SMALL);
-    } else {
-        LOG.info("Allocating OffHeapCache with maximum size " +
-          StringUtils.humanReadableInt(offHeapCacheSize));
-      hfileBlockCache = new DoubleBlockCache(cacheSize, offHeapCacheSize, DEFAULT_BLOCKSIZE_SMALL, blockSize, conf);
-    }
-    return hfileBlockCache;
-  }
-
-  /**
-   * @return the blockcache
-   */
-  public BlockCache getBlockCache() {
-    return blockcache ? getBlockCache(conf) : null;
-  }
-
-
-  /**
    * @return the cached value of HDFS blocks distribution. The cached value is
    * calculated when store file is opened.
    */
@@ -497,11 +445,9 @@ public class StoreFile {
     }
     if (isReference()) {
       this.reader = new HalfStoreFileReader(this.fs, this.referencePath,
-          getBlockCache(), this.reference);
+          this.cacheConf, this.reference);
     } else {
-      this.reader = new Reader(this.fs, this.path, getBlockCache(),
-          this.inMemory,
-          this.conf.getBoolean(HFile.EVICT_BLOCKS_ON_CLOSE_KEY, true));
+      this.reader = new Reader(this.fs, this.path, this.cacheConf);
     }
 
     computeHDFSBlockDistribution();
@@ -664,9 +610,10 @@ public class StoreFile {
    * @throws IOException
    */
   public static Writer createWriter(final FileSystem fs, final Path dir,
-      final int blocksize, Configuration conf) throws IOException {
-    return createWriter(fs, dir, blocksize, null, null, conf, BloomType.NONE,
-        0);
+      final int blocksize, Configuration conf, CacheConfig cacheConf)
+  throws IOException {
+    return createWriter(fs, dir, blocksize, null, null, conf, cacheConf,
+        BloomType.NONE, 0);
   }
 
   /**
@@ -679,6 +626,7 @@ public class StoreFile {
    * @param algorithm Pass null to get default.
    * @param c Pass null to get default.
    * @param conf HBase system configuration. used with bloom filters
+   * @param cacheConf Cache configuration and reference.
    * @param bloomType column family setting for bloom filters
    * @param maxKeyCount estimated maximum number of keys we expect to add
    * @return HFile.Writer
@@ -690,6 +638,7 @@ public class StoreFile {
                                               final Compression.Algorithm algorithm,
                                               final KeyValue.KVComparator c,
                                               final Configuration conf,
+                                              final CacheConfig cacheConf,
                                               BloomType bloomType,
                                               long maxKeyCount)
       throws IOException {
@@ -704,7 +653,8 @@ public class StoreFile {
 
     return new Writer(fs, path, blocksize,
         algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
-        conf, c == null ? KeyValue.COMPARATOR: c, bloomType, maxKeyCount);
+        conf, cacheConf, c == null ? KeyValue.COMPARATOR: c, bloomType,
+        maxKeyCount);
   }
 
   /**
@@ -826,6 +776,7 @@ public class StoreFile {
      */
     public Writer(FileSystem fs, Path path, int blocksize,
         Compression.Algorithm compress, final Configuration conf,
+        CacheConfig cacheConf,
         final KVComparator comparator, BloomType bloomType, long maxKeys)
         throws IOException {
       writer = HFile.getWriterFactory(conf).createWriter(
@@ -834,7 +785,7 @@ public class StoreFile {
 
       this.kvComparator = comparator;
 
-      bloomFilterWriter = BloomFilterFactory.createBloomAtWrite(conf,
+      bloomFilterWriter = BloomFilterFactory.createBloomAtWrite(conf, cacheConf,
           bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
       if (bloomFilterWriter != null) {
         this.bloomType = bloomType;
@@ -1033,10 +984,9 @@ public class StoreFile {
     protected long sequenceID = -1;
     private byte[] lastBloomKey;
 
-    public Reader(FileSystem fs, Path path, BlockCache blockCache,
-        boolean inMemory, boolean evictOnClose)
+    public Reader(FileSystem fs, Path path, CacheConfig cacheConf)
         throws IOException {
-      reader = HFile.createReader(fs, path, blockCache, inMemory, evictOnClose);
+      reader = HFile.createReader(fs, path, cacheConf);
       bloomFilterType = BloomType.NONE;
     }
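
Note: the StoreFile.createWriter overloads gain a CacheConfig parameter, threaded through to the underlying HFile writer and, in the full overload, to BloomFilterFactory.createBloomAtWrite. A sketch of the convenience overload shown above, hedged: the directory and block size are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class CreateWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        CacheConfig cacheConf = new CacheConfig(conf);
        // Defaults (compression, comparator, bloom) are filled in by the
        // overload; only the cache configuration is new in this signature.
        StoreFile.Writer w = StoreFile.createWriter(fs,
            new Path("/tmp/store"), 64 * 1024, conf, cacheConf);
        w.close();
      }
    }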