Posted to commits@hbase.apache.org by ap...@apache.org on 2009/07/07 09:10:59 UTC

svn commit: r791727 - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/hadoop/hbase/io/hfile/ src/java/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/client/ src/test/org/apach...

Author: apurtell
Date: Tue Jul  7 07:10:59 2009
New Revision: 791727

URL: http://svn.apache.org/viewvc?rev=791727&view=rev
Log:
HBASE-1615, HBASE-1616
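
In short, per the CHANGES.txt entries below: HBASE-1615 fixes a bug that HBASE-1597 introduced when compacting after a split, and HBASE-1616 adds a unit test for compacting referenced StoreFiles. The patch removes the HFile.CompactionReader subclass in favor of a cacheBlocks flag on HFile.Reader.getScanner(). A minimal before/after sketch of a compaction call site, reconstructed from the diff below (illustrative only; variable names are mine):

    // Before this commit: a dedicated Reader subclass suppressed block caching.
    //   HFile.CompactionReader r = storeFile.getCompactionReader();
    //   HFileScanner scanner = r.getScanner();

    // After: any caller can opt out of block caching per scanner.
    HFile.Reader reader = storeFile.getReader();
    HFileScanner scanner = reader.getScanner(false); // false: do not cache read blocks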

Modified:
    hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt?rev=791727&r1=791726&r2=791727&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt Tue Jul  7 07:10:59 2009
@@ -242,6 +242,9 @@
    HBASE-1595  hadoop-default.xml and zoo.cfg in hbase jar
    HBASE-1602  HRegionServer won't go down since we added in new LruBlockCache
    HBASE-1608  TestCachedBlockQueue failing on some jvms (Jon Gray via Stack)
+   HBASE-1615  HBASE-1597 introduced a bug when compacting after a split
+               (Jon Gray via Stack)
+   HBASE-1616  Unit test of compacting referenced StoreFiles (Jon Gray via Stack)
 
   IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java?rev=791727&r1=791726&r2=791727&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java Tue Jul  7 07:10:59 2009
@@ -80,7 +80,12 @@
 
   @Override
   public HFileScanner getScanner() {
-    final HFileScanner s = super.getScanner();
+    return this.getScanner(true);
+  }
+  
+  @Override
+  public HFileScanner getScanner(boolean cacheBlocks) {
+    final HFileScanner s = super.getScanner(cacheBlocks);
     return new HFileScanner() {
       final HFileScanner delegate = s;
       public boolean atEnd = false;
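
The override pair above matters: HalfHFileReader wraps the delegate scanner to confine reads to one half of a split file, so it must intercept the new flagged form as well. Losing that wrapper, as the removed HFile.CompactionReader evidently did when it copied a reader's fields into a plain Reader, hands compactions a scanner over the whole file. A one-line illustration (halfReader is an assumed, already-opened HalfHFileReader):

    // Both forms return the boundary-aware wrapper; the flag only controls
    // block caching, never the half-file semantics.
    HFileScanner scanner = halfReader.getScanner(false);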

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=791727&r1=791726&r2=791727&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Tue Jul  7 07:10:59 2009
@@ -671,7 +671,7 @@
      */
     @SuppressWarnings("unused")
     private Reader() throws IOException {
-      this(null, null, null, false);
+      this(null, -1, null, false);
     }
 
     /** 
@@ -706,7 +706,7 @@
       this.fileSize = size;
       this.istream = fsdis;
       this.closeIStream = false;
-      this.name = this.istream.toString();
+      this.name = this.istream == null? "": this.istream.toString();
       this.inMemory = inMemory;
     }
 
@@ -817,8 +817,20 @@
      * @return Scanner on this file.
      */
     public HFileScanner getScanner() {
-      return new Scanner(this);
+      return new Scanner(this, true);
     }
+
+    /**
+     * Create a Scanner on this file.  No seeks or reads are done on creation.
+     * Call {@link HFileScanner#seekTo(byte[])} to position and start the read.
+     * There is nothing to clean up in a Scanner. Letting go of your references
+     * to the scanner is sufficient.
+     * @return Scanner on this file.
+     */
+    public HFileScanner getScanner(boolean cacheBlocks) {
+      return new Scanner(this, cacheBlocks);
+    }
+
     /**
      * @param key Key to search.
      * @return Block number of the block containing the key or -1 if not in this
@@ -873,7 +885,7 @@
      * @return Block wrapped in a ByteBuffer.
      * @throws IOException
      */
-    ByteBuffer readBlock(int block) throws IOException {
+    ByteBuffer readBlock(int block, boolean cacheBlock) throws IOException {
       if (blockIndex == null) {
         throw new IOException("Block index not loaded");
       }
@@ -926,25 +938,13 @@
         buf.rewind();
 
         // Cache the block
-        cacheBlock(name + block, buf.duplicate());
+        if(cacheBlock && cache != null) {
+          cache.cacheBlock(name + block, buf.duplicate(), inMemory);
+        }
 
         return buf;
       }
     }
-    
-    /**
-     * Cache this block if there is a block cache available.<p>
-     * 
-     * Makes a copy of the ByteBuffer, not the one we are sending back, so the 
-     * position does not get messed up.
-     * @param blockName
-     * @param buf
-     */
-    void cacheBlock(String blockName, ByteBuffer buf) {
-      if (cache != null) {
-        cache.cacheBlock(blockName, buf.duplicate(), inMemory);
-      }
-    }
 
     /*
      * Decompress <code>compressedSize</code> bytes off the backing
@@ -1041,6 +1041,8 @@
       private final Reader reader;
       private ByteBuffer block;
       private int currBlock;
+      
+      private boolean cacheBlocks = false;
 
       private int currKeyLen = 0;
       private int currValueLen = 0;
@@ -1051,6 +1053,11 @@
         this.reader = r;
       }
       
+      public Scanner(Reader r, boolean cacheBlocks) {
+        this.reader = r;
+        this.cacheBlocks = cacheBlocks;
+      }
+      
       public KeyValue getKeyValue() {
         if(this.block == null) {
           return null;
@@ -1099,7 +1106,7 @@
             block = null;
             return false;
           }
-          block = reader.readBlock(currBlock);
+          block = reader.readBlock(currBlock, cacheBlocks);
           currKeyLen = block.getInt();
           currValueLen = block.getInt();
           blockFetches++;
@@ -1230,7 +1237,7 @@
           currValueLen = block.getInt();
         }
         currBlock = 0;
-        block = reader.readBlock(currBlock);
+        block = reader.readBlock(currBlock, cacheBlocks);
         currKeyLen = block.getInt();
         currValueLen = block.getInt();
         blockFetches++;
@@ -1239,12 +1246,12 @@
       
       private void loadBlock(int bloc) throws IOException {
         if (block == null) {
-          block = reader.readBlock(bloc);
+          block = reader.readBlock(bloc, cacheBlocks);
           currBlock = bloc;
           blockFetches++;
         } else {
           if (bloc != currBlock) {
-            block = reader.readBlock(bloc);
+            block = reader.readBlock(bloc, cacheBlocks);
             currBlock = bloc;
             blockFetches++;
           } else {
@@ -1260,35 +1267,6 @@
     }
   }
   
-
-  /**
-   * HFile Reader that does not cache blocks that were not already cached.<p>
-   * 
-   * Used for compactions.
-   */
-  public static class CompactionReader extends Reader {
-    public CompactionReader(Reader reader) {
-      super(reader.istream, reader.fileSize, reader.cache, reader.inMemory);
-      super.blockIndex = reader.blockIndex;
-      super.trailer = reader.trailer;
-      super.lastkey = reader.lastkey;
-      super.avgKeyLen = reader.avgKeyLen;
-      super.avgValueLen = reader.avgValueLen;
-      super.comparator = reader.comparator;
-      super.metaIndex = reader.metaIndex;
-      super.fileInfoLoaded = reader.fileInfoLoaded;
-      super.compressAlgo = reader.compressAlgo;
-    }
-    
-    /**
-     * Do not cache this block when doing a compaction.
-     */
-    @Override
-    void cacheBlock(String blockName, ByteBuffer buf) {
-      return;
-    }
-  }
-  
   /*
    * The RFile has a fixed trailer which contains offsets to other variable
    * parts of the file.  Also includes basic metadata on this file.
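
A short usage sketch of the new overload (illustrative, not code from the commit; assumes an HFile.Reader already opened on some file). Passing false reads the file without populating the block cache, which is what the compaction path in Store.java below now does; the per-scanner flag replaces the CompactionReader subclass removed above, which had to copy every field of an existing Reader just to override cacheBlock():

    static void scanWithoutCaching(HFile.Reader reader) throws IOException {
      // false: blocks read by this scanner are not added to the block cache
      HFileScanner scanner = reader.getScanner(false);
      if (scanner.seekTo()) {          // position at the first key
        do {
          KeyValue kv = scanner.getKeyValue();
          // ... process each KeyValue ...
        } while (scanner.next());
      }
    }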

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=791727&r1=791726&r2=791727&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java Tue Jul  7 07:10:59 2009
@@ -54,7 +54,6 @@
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.io.hfile.HFile.CompactionReader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -681,12 +680,12 @@
           LOG.warn("Path is null for " + file);
           return null;
         }
-        CompactionReader r = file.getCompactionReader();
+        Reader r = file.getReader();
         if (r == null) {
           LOG.warn("StoreFile " + file + " has a null Reader");
           continue;
         }
-        long len = file.getCompactionReader().length();
+        long len = file.getReader().length();
         fileSizes[i] = len;
         totalSize += len;
       }
@@ -845,12 +844,13 @@
     // init:
     for (int i = 0; i < filesToCompact.size(); ++i) {
       // TODO open a new HFile.Reader w/o block cache.
-      CompactionReader r = filesToCompact.get(i).getCompactionReader();
+      Reader r = filesToCompact.get(i).getReader();
       if (r == null) {
         LOG.warn("StoreFile " + filesToCompact.get(i) + " has a null Reader");
         continue;
       }
-      scanners[i] = new StoreFileScanner(r.getScanner());
+      // Open the scanner with block caching disabled so compaction reads do not fill the block cache
+      scanners[i] = new StoreFileScanner(r.getScanner(false));
     }
 
     if (majorCompaction) {
@@ -960,7 +960,7 @@
       // 4. Compute new store size
       this.storeSize = 0L;
       for (StoreFile hsf : this.storefiles.values()) {
-        Reader r = hsf.getCompactionReader();
+        Reader r = hsf.getReader();
         if (r == null) {
           LOG.warn("StoreFile " + hsf + " has a null Reader");
           continue;

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=791727&r1=791726&r2=791727&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Tue Jul  7 07:10:59 2009
@@ -310,14 +310,6 @@
   }
 
   /**
-   * Gets a special Reader for use during compactions.  Will not cache blocks.
-   * @return Current reader.  Must call open first else returns null.
-   */
-  public HFile.CompactionReader getCompactionReader() {
-    return new HFile.CompactionReader(this.reader);
-  }
-
-  /**
    * @throws IOException
    */
   public synchronized void close() throws IOException {

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java?rev=791727&r1=791726&r2=791727&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java Tue Jul  7 07:10:59 2009
@@ -48,6 +48,7 @@
    * @throws Exception 
    * @throws IOException
    */
+  @SuppressWarnings("unused") 
   public void testForceSplit() throws Exception {
     // create the test table
     HTableDescriptor htd = new HTableDescriptor(tableName);
@@ -56,6 +57,7 @@
     admin.createTable(htd);
     HTable table = new HTable(conf, tableName);
     byte[] k = new byte[3];
+    int rowCount = 0;
     for (byte b1 = 'a'; b1 < 'z'; b1++) {
       for (byte b2 = 'a'; b2 < 'z'; b2++) {
         for (byte b3 = 'a'; b3 < 'z'; b3++) {
@@ -66,6 +68,7 @@
           byte [][] famAndQf = KeyValue.parseColumn(columnName);
           put.add(famAndQf[0], famAndQf[1], k);
           table.put(put);
+          rowCount++;
         }
       }
     }
@@ -75,6 +78,16 @@
     System.out.println("Initial regions (" + m.size() + "): " + m);
     assertTrue(m.size() == 1);
 
+    // Verify row count
+    Scan scan = new Scan();
+    ResultScanner scanner = table.getScanner(scan);
+    int rows = 0;
+    for(Result result : scanner) {
+      rows++;
+    }
+    scanner.close();
+    assertEquals(rowCount, rows);
+    
     // tell the master to split the table
     admin.split(Bytes.toString(tableName));
 
@@ -86,5 +99,20 @@
     System.out.println("Regions after split (" + m.size() + "): " + m);
     // should have two regions now
     assertTrue(m.size() == 2);
+    
+    // Verify row count
+    scan = new Scan();
+    scanner = table.getScanner(scan);
+    rows = 0;
+    for(Result result : scanner) {
+      rows++;
+      if(rows > rowCount) {
+        scanner.close();
+        assertTrue("Have already scanned more rows than expected (" + 
+            rowCount + ")", false);
+      }
+    }
+    scanner.close();
+    assertEquals(rowCount, rows);
   }
 }
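
The row counts before and after the split are the regression guard for HBASE-1615; the second loop also bails out early rather than scanning forever if the daughter regions return more rows than were written. A small refactor sketch that the two scan loops could share (hypothetical helper, not in the commit):

    // Hypothetical helper: count rows with a full table scan.
    private static int countRows(HTable table) throws IOException {
      ResultScanner scanner = table.getScanner(new Scan());
      try {
        int rows = 0;
        for (Result result : scanner) {
          rows++;
        }
        return rows;
      } finally {
        scanner.close();
      }
    }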

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=791727&r1=791726&r2=791727&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Tue Jul  7 07:10:59 2009
@@ -169,7 +169,7 @@
     boolean containsStartRow = false;
     for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles().
         values()) {
-      HFileScanner scanner = f.getCompactionReader().getScanner();
+      HFileScanner scanner = f.getReader().getScanner(false);
       scanner.seekTo();
       do {
         byte [] row = scanner.getKeyValue().getRow();