Posted to commits@hbase.apache.org by st...@apache.org on 2009/07/03 23:29:22 UTC

svn commit: r791021 - in /hadoop/hbase/trunk: ./ src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/h...

Author: stack
Date: Fri Jul  3 21:29:21 2009
New Revision: 791021

URL: http://svn.apache.org/viewvc?rev=791021&view=rev
Log:
HBASE-1218  Implement in-memory column

Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
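
This change moves the in-memory attribute from the table level down to the
column-family level: Store reads the flag off its HColumnDescriptor and
threads it through StoreFile into HFile.Reader, which forwards it to the
block cache so that blocks from in-memory families land in the LRU cache's
dedicated memory-priority bucket. As a minimal sketch, assuming
HColumnDescriptor.setInMemory is the family-level counterpart of the
table-level setter removed below (table and family names are hypothetical):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    // Hedged sketch: create a table whose family is flagged in-memory,
    // so its blocks get memory priority in the block cache.
    HBaseConfiguration conf = new HBaseConfiguration();
    HTableDescriptor desc = new HTableDescriptor("mytable");
    HColumnDescriptor family = new HColumnDescriptor("myfamily");
    family.setInMemory(true);
    desc.addFamily(family);
    new HBaseAdmin(conf).createTable(desc);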

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Jul  3 21:29:21 2009
@@ -437,6 +437,7 @@
                (Jon Gray via Stack)
    HBASE-1607  Redo MemStore heap sizing to be accurate, testable, and more
                like new LruBlockCache (Jon Gray via Stack)
+   HBASE-1218  Implement in-memory column (Jon Gray via Stack)
 
   OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue

Modified: hadoop/hbase/trunk/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java (original)
+++ hadoop/hbase/trunk/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java Fri Jul  3 21:29:21 2009
@@ -48,7 +48,6 @@
 @XmlType(propOrder = {"name","columns"})
 public class TableSchemaModel implements Serializable, IProtobufWrapper {
   private static final long serialVersionUID = 1L;
-  private static final QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
   private static final QName IS_META = new QName(HTableDescriptor.IS_META);
   private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
   private static final QName READONLY = new QName(HTableDescriptor.READONLY);
@@ -177,15 +176,6 @@
   // confuse JAXB
 
   /**
-   * @return true if IN_MEMORY attribute exists and is true
-   */
-  public boolean __getInMemory() {
-    Object o = attrs.get(IN_MEMORY);
-    return o != null ? 
-      Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_IN_MEMORY;
-  }
-
-  /**
   * @return true if IS_META attribute exists and is true
    */
   public boolean __getIsMeta() {
@@ -211,13 +201,6 @@
   }
 
   /**
-   * @param value desired value of IN_MEMORY attribute
-   */
-  public void __setInMemory(boolean value) {
-    attrs.put(IN_MEMORY, Boolean.toString(value));
-  }
-
-  /**
    * @param value desired value of IS_META attribute
    */
   public void __setIsMeta(boolean value) {
@@ -273,10 +256,6 @@
       }
       builder.addColumns(familyBuilder);
     }
-    if (attrs.containsKey(IN_MEMORY)) {
-      builder.setInMemory(
-        Boolean.valueOf(attrs.get(IN_MEMORY).toString()));
-    }
     if (attrs.containsKey(READONLY)) {
       builder.setReadOnly(
         Boolean.valueOf(attrs.get(READONLY).toString()));
@@ -293,9 +272,6 @@
     for (TableSchema.Attribute attr: builder.getAttrsList()) {
       this.addAttribute(attr.getName(), attr.getValue());
     }
-    if (builder.hasInMemory()) {
-      this.addAttribute(HConstants.IN_MEMORY, builder.getInMemory());
-    }
     if (builder.hasReadOnly()) {
       this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
     }

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Fri Jul  3 21:29:21 2009
@@ -90,8 +90,6 @@
   private static final ImmutableBytesWritable TRUE =
     new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
 
-  public static final boolean DEFAULT_IN_MEMORY = false;
-
   public static final boolean DEFAULT_READONLY = false;
 
   public static final int DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*64;
@@ -353,25 +351,6 @@
   }
 
   /**
-   * @return true if all columns in the table should be kept in the 
-   * HRegionServer cache only
-   */
-  public boolean isInMemory() {
-    String value = getValue(HConstants.IN_MEMORY);
-    if (value != null)
-      return Boolean.valueOf(value).booleanValue();
-    return DEFAULT_IN_MEMORY;
-  }
-
-  /**
-   * @param inMemory True if all of the columns in the table should be kept in
-   * the HRegionServer cache only.
-   */
-  public void setInMemory(boolean inMemory) {
-    setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
-  }
-
-  /**
    * @return true if all columns in the table should be read only
    */
   public boolean isReadOnly() {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java Fri Jul  3 21:29:21 2009
@@ -74,15 +74,7 @@
   public HColumnDescriptor removeFamily(final byte [] column) {
     throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
-
-  /**
-   * @see org.apache.hadoop.hbase.HTableDescriptor#setInMemory(boolean)
-   */
-  @Override
-  public void setInMemory(boolean inMemory) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
-
+  
   /**
    * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean)
    */

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java Fri Jul  3 21:29:21 2009
@@ -63,7 +63,7 @@
   public HalfHFileReader(final FileSystem fs, final Path p, final BlockCache c,
     final Reference r)
   throws IOException {
-    super(fs, p, c);
+    super(fs, p, c, false);
     // This is not actual midkey for this half-file; its just border
     // around which we split top and bottom.  Have to look in files to find
     // actual last and first keys for bottom and top halves.  Half-files don't

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Fri Jul  3 21:29:21 2009
@@ -657,6 +657,9 @@
     private final BlockCache cache;
     public int cacheHits = 0;
     public int blockLoads = 0;
+    
+    // Whether file is from in-memory store
+    private boolean inMemory = false;
 
     // Name for this object used when logging or in toString.  Is either
     // the result of a toString on the stream or else is toString of passed
@@ -668,7 +671,7 @@
      */
     @SuppressWarnings("unused")
     private Reader() throws IOException {
-      this(null, null, null);
+      this(null, null, null, false);
     }
 
     /** 
@@ -680,9 +683,9 @@
      * @param cache block cache. Pass null if none.
      * @throws IOException
      */
-    public Reader(FileSystem fs, Path path, BlockCache cache)
+    public Reader(FileSystem fs, Path path, BlockCache cache, boolean inMemory)
     throws IOException {
-      this(fs.open(path), fs.getFileStatus(path).getLen(), cache);
+      this(fs.open(path), fs.getFileStatus(path).getLen(), cache, inMemory);
       this.closeIStream = true;
       this.name = path.toString();
     }
@@ -698,12 +701,13 @@
      * @throws IOException
      */
     public Reader(final FSDataInputStream fsdis, final long size,
-        final BlockCache cache) {
+        final BlockCache cache, final boolean inMemory) {
       this.cache = cache;
       this.fileSize = size;
       this.istream = fsdis;
       this.closeIStream = false;
       this.name = this.istream.toString();
+      this.inMemory = inMemory;
     }
 
     @Override
@@ -711,6 +715,7 @@
       return "reader=" + this.name +
           (!isFileInfoLoaded()? "":
             ", compression=" + this.compressAlgo.getName() +
+            ", inMemory=" + this.inMemory +
             ", firstKey=" + toStringFirstKey() +
             ", lastKey=" + toStringLastKey()) +
             ", avgKeyLen=" + this.avgKeyLen +
@@ -730,7 +735,11 @@
     public long length() {
       return this.fileSize;
     }
-
+    
+    public boolean inMemory() {
+      return this.inMemory;
+    }
+       
     /**
      * Read in the index and file info.
      * @return A map of fileinfo data.
@@ -933,7 +942,7 @@
      */
     void cacheBlock(String blockName, ByteBuffer buf) {
       if (cache != null) {
-        cache.cacheBlock(blockName, buf.duplicate());
+        cache.cacheBlock(blockName, buf.duplicate(), inMemory);
       }
     }
 
@@ -1259,7 +1268,7 @@
    */
   public static class CompactionReader extends Reader {
     public CompactionReader(Reader reader) {
-      super(reader.istream, reader.fileSize, reader.cache);
+      super(reader.istream, reader.fileSize, reader.cache, reader.inMemory);
       super.blockIndex = reader.blockIndex;
       super.trailer = reader.trailer;
       super.lastkey = reader.lastkey;
@@ -1625,7 +1634,7 @@
       return;
     }
 
-    HFile.Reader reader = new HFile.Reader(fs, path, null);
+    HFile.Reader reader = new HFile.Reader(fs, path, null, false);
     Map<byte[],byte[]> fileInfo = reader.loadFileInfo();
 
     // scan thru and count the # of unique rows.

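Every HFile.Reader call site now passes a fourth argument saying whether the
file belongs to an in-memory family; the reader stores the flag and forwards
it on every cacheBlock call. A minimal sketch of the new call shape, assuming
fs, path, and blockCache are already set up:

    // The trailing boolean is the new inMemory flag added by this change.
    HFile.Reader reader = new HFile.Reader(fs, path, blockCache, true);
    reader.loadFileInfo();                 // read index and file info first
    HFileScanner scanner = reader.getScanner();
    if (scanner.seekTo()) {                // position at the first key
      // iterate key/values...
    }
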
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Fri Jul  3 21:29:21 2009
@@ -372,8 +372,16 @@
         remainingBuckets--;
       }
       
+      float singleMB = ((float)bucketSingle.totalSize())/((float)(1024*1024));
+      float multiMB = ((float)bucketMulti.totalSize())/((float)(1024*1024));
+      float memoryMB = ((float)bucketMemory.totalSize())/((float)(1024*1024));
+      
       LOG.debug("Block cache LRU eviction completed. " + 
-          "Freed " + bytesFreed + " bytes");
+          "Freed " + bytesFreed + " bytes.  " +
+          "Priority Sizes: " +
+          "Single=" + singleMB + "MB (" + bucketSingle.totalSize() + "), " +
+          "Multi=" + multiMB + "MB (" + bucketMulti.totalSize() + ")," +
+          "Memory=" + memoryMB + "MB (" + bucketMemory.totalSize() + ")");
       
     } finally {
       stats.evict();
@@ -424,6 +432,10 @@
       return totalSize - bucketSize;
     }
     
+    public long totalSize() {
+      return totalSize;
+    }
+    
     public int compareTo(BlockBucket that) {
       if(this.overflow() == that.overflow()) return 0;
       return this.overflow() > that.overflow() ? 1 : -1;

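The eviction log now breaks the cache down by its three priority buckets
(single-access, multi-access, and in-memory), and BlockBucket exposes
totalSize() so those numbers can be reported. The bucket names suggest a
scheme along these lines; this is an illustrative sketch only, not the
actual LruBlockCache code:

    // Illustrative only: how a priority-aware LRU might classify blocks.
    enum BlockPriority { SINGLE, MULTI, MEMORY }

    static BlockPriority priorityFor(boolean inMemory, int accessCount) {
      if (inMemory) return BlockPriority.MEMORY;  // in-memory family blocks
      return accessCount > 1
        ? BlockPriority.MULTI     // block has been re-accessed
        : BlockPriority.SINGLE;   // first access
    }
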
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java Fri Jul  3 21:29:21 2009
@@ -110,6 +110,7 @@
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   final byte [] storeName;
   private final String storeNameStr;
+  private final boolean inMemory;
 
   /*
    * Sorted Map of readers keyed by maximum edit sequence id (Most recent should
@@ -190,7 +191,10 @@
     // MIN_COMMITS_FOR_COMPACTION map files
     this.compactionThreshold =
       conf.getInt("hbase.hstore.compactionThreshold", 3);
-
+    
+    // Check if this is in-memory store
+    this.inMemory = family.isInMemory();
+    
     // By default we split region if a file > DEFAULT_MAX_FILE_SIZE.
     long maxFileSize = info.getTableDesc().getMaxFileSize();
     if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) {
@@ -366,7 +370,7 @@
       }
       StoreFile curfile = null;
       try {
-        curfile = new StoreFile(fs, p, blockcache, this.conf);
+        curfile = new StoreFile(fs, p, blockcache, this.conf, this.inMemory);
       } catch (IOException ioe) {
         LOG.warn("Failed open of " + p + "; presumption is that file was " +
           "corrupted at flush and lost edits picked up by commit log replay. " +
@@ -523,7 +527,7 @@
       }
     }
     StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache, 
-      this.conf);
+      this.conf, this.inMemory);
     Reader r = sf.getReader();
     this.storeSize += r.length();
     if(LOG.isDebugEnabled()) {
@@ -922,7 +926,7 @@
       return;
     }
     StoreFile finalCompactedFile = new StoreFile(this.fs, p, blockcache, 
-      this.conf);
+      this.conf, this.inMemory);
     this.lock.writeLock().lock();
     try {
       try {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Fri Jul  3 21:29:21 2009
@@ -76,6 +76,8 @@
   private Path referencePath;
   // Should the block cache be used or not.
   private boolean blockcache;
+  // Is this from an in-memory store
+  private boolean inMemory;
   
   // Keys for metadata stored in backing HFile.
   private static final byte [] MAX_SEQ_ID_KEY = Bytes.toBytes("MAX_SEQ_ID_KEY");
@@ -113,12 +115,13 @@
    * @throws IOException When opening the reader fails.
    */
   StoreFile(final FileSystem fs, final Path p, final boolean blockcache, 
-      final HBaseConfiguration conf) 
+      final HBaseConfiguration conf, final boolean inMemory) 
   throws IOException {
     this.conf = conf;
     this.fs = fs;
     this.path = p;
     this.blockcache = blockcache;
+    this.inMemory = inMemory;
     if (isReference(p)) {
       this.reference = Reference.read(fs, p);
       this.referencePath = getReferredToFile(this.path);
@@ -263,7 +266,8 @@
       this.reader = new HalfHFileReader(this.fs, this.referencePath, 
           getBlockCache(), this.reference);
     } else {
-      this.reader = new Reader(this.fs, this.path, getBlockCache());
+      this.reader = new Reader(this.fs, this.path, getBlockCache(),
+          this.inMemory);
     }
     // Load up indices and fileinfo.
     Map<byte [], byte []> map = this.reader.loadFileInfo();

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java Fri Jul  3 21:29:21 2009
@@ -226,7 +226,7 @@
     
     @Override
     void setUp() throws Exception {
-      reader = new HFile.Reader(this.fs, this.mf, null);
+      reader = new HFile.Reader(this.fs, this.mf, null, false);
       this.reader.loadFileInfo();
     }
     

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java Fri Jul  3 21:29:21 2009
@@ -66,7 +66,7 @@
     long start = System.currentTimeMillis();
     SimpleBlockCache cache = new SimpleBlockCache();
     //LruBlockCache cache = new LruBlockCache();
-    Reader reader = new HFile.Reader(lfs, path, cache);
+    Reader reader = new HFile.Reader(lfs, path, cache, false);
     reader.loadFileInfo();
     System.out.println(reader.trailer);
     long end = System.currentTimeMillis();

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java Fri Jul  3 21:29:21 2009
@@ -122,10 +122,9 @@
     fout.close();
     FSDataInputStream fin = fs.open(ncTFile);
     Reader reader = new Reader(fs.open(ncTFile),
-      fs.getFileStatus(ncTFile).getLen(), null);
+      fs.getFileStatus(ncTFile).getLen(), null, false);
     // Load up the index.
     reader.loadFileInfo();
-    LOG.info(reader);
     HFileScanner scanner = reader.getScanner();
     // Align scanner at start of the file.
     scanner.seekTo();
@@ -186,7 +185,7 @@
     fout.close();
     FSDataInputStream fin = fs.open(mFile);
     Reader reader = new Reader(fs.open(mFile), this.fs.getFileStatus(mFile)
-        .getLen(), null);
+        .getLen(), null, false);
     reader.loadFileInfo();
     // No data -- this should return false.
     assertFalse(reader.getScanner().seekTo());
@@ -210,7 +209,7 @@
     writer.append("foo".getBytes(), "value".getBytes());
     writer.close();
     fout.close();
-    Reader reader = new Reader(fs, mFile, null);
+    Reader reader = new Reader(fs, mFile, null, false);
     reader.loadFileInfo();
     assertNull(reader.getMetaBlock("non-existant"));
   }
@@ -270,4 +269,4 @@
     }
   }
   
-}
\ No newline at end of file
+}

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java Fri Jul  3 21:29:21 2009
@@ -236,9 +236,8 @@
 
     if ("HFile".equals(fileType)){
         HFile.Reader reader = new HFile.Reader(fs.open(path),
-          fs.getFileStatus(path).getLen(), null);
+          fs.getFileStatus(path).getLen(), null, false);
         reader.loadFileInfo();
-        System.out.println(reader);
         switch (method) {
         
           case 0:
@@ -381,4 +380,4 @@
             " the same method several times and flood cache every time and average it to get a" +
             " better number.");
   }
-}
\ No newline at end of file
+}

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java Fri Jul  3 21:29:21 2009
@@ -155,9 +155,8 @@
     long totalBytes = 0;
     FSDataInputStream fsdis = fs.open(path);
     Reader reader =
-      new Reader(fsdis, fs.getFileStatus(path).getLen(), null);
+      new Reader(fsdis, fs.getFileStatus(path).getLen(), null, false);
     reader.loadFileInfo();
-    System.out.println(reader);
     KeySampler kSampler =
         new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
             keyLenGen);

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java Fri Jul  3 21:29:21 2009
@@ -49,7 +49,7 @@
   }
   public void testSeekBefore() throws Exception {
     Path p = makeNewFile();
-    HFile.Reader reader = new HFile.Reader(fs, p, null);
+    HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     HFileScanner scanner = reader.getScanner();
     assertEquals(false, scanner.seekBefore(Bytes.toBytes("a")));
@@ -82,7 +82,7 @@
   
   public void testSeekTo() throws Exception {
     Path p = makeNewFile();
-    HFile.Reader reader = new HFile.Reader(fs, p, null);
+    HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     assertEquals(2, reader.blockIndex.count);
     HFileScanner scanner = reader.getScanner();
@@ -102,7 +102,7 @@
   
   public void testBlockContainingKey() throws Exception {
     Path p = makeNewFile();
-    HFile.Reader reader = new HFile.Reader(fs, p, null);
+    HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     System.out.println(reader.blockIndex.toString());
     // falls before the start of the file.

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=791021&r1=791020&r2=791021&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Fri Jul  3 21:29:21 2009
@@ -74,7 +74,7 @@
       new Path(new Path(this.testDir, "regionname"), "familyname"),
       2 * 1024, null, null);
     writeStoreFile(writer);
-    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf));
+    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf, false));
   }
   
   /*
@@ -113,7 +113,7 @@
     HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null,
       null);
     writeStoreFile(writer);
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf);
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf, false);
     HFile.Reader reader = hsf.getReader();
     // Split on a row, not in middle of row.  Midkey returned by reader
     // may be in middle of row.  Create new one with empty column and
@@ -124,7 +124,7 @@
     byte [] finalKey = hsk.getRow();
     // Make a reference
     Path refPath = StoreFile.split(fs, dir, hsf, reader.midkey(), Range.top);
-    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf);
+    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf, false);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
     HFileScanner s = refHsf.getReader().getScanner();
@@ -158,8 +158,8 @@
     Path bottomPath = StoreFile.split(this.fs, bottomDir,
       f, midkey, Range.bottom);
     // Make readers on top and bottom.
-    HFile.Reader top = new StoreFile(this.fs, topPath, true, conf).getReader();
-    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
+    HFile.Reader top = new StoreFile(this.fs, topPath, true, conf, false).getReader();
+    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf, false).getReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + Bytes.toString(midkey));
     byte [] midkeyBytes = new HStoreKey(midkey).getBytes();
@@ -212,8 +212,8 @@
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, true, conf).getReader();
-      bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
+      top = new StoreFile(this.fs, topPath, true, conf, false).getReader();
+      bottom = new StoreFile(this.fs, bottomPath, true, conf, false).getReader();
       bottomScanner = bottom.getScanner();
       int count = 0;
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -256,8 +256,8 @@
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, true, conf).getReader();
-      bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
+      top = new StoreFile(this.fs, topPath, true, conf, false).getReader();
+      bottom = new StoreFile(this.fs, bottomPath, true, conf, false).getReader();
       first = true;
       bottomScanner = bottom.getScanner();
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||