Posted to commits@hbase.apache.org by li...@apache.org on 2013/03/21 19:54:09 UTC

svn commit: r1459474 - in /hbase/branches/0.89-fb/src: main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java main/java/org/apache/hadoop/hbase/regionserver/Store.java test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java

Author: liyin
Date: Thu Mar 21 18:54:08 2013
New Revision: 1459474

URL: http://svn.apache.org/r1459474
Log:
[HBASE-8113] Bring back the seek+read functionality to Pluto.

Author: manukranthk

Summary: It was observed that seek+read gives better performance than pread when performing relatively large scans (scans that cannot be cached). Hence this reverts [HBASE-7408] "Removes the pread/seek+read option and defaults it to pread" (D661385). Currently this feature is intended to be used only with ClientSideScan (D708638); it can potentially be made configurable in the future. (A sketch contrasting the two read paths follows the commit metadata below.)

Test Plan: Unit test on MR

Reviewers: liyintang, rshroff

Reviewed By: liyintang

CC: hbase-eng@

Differential Revision: https://phabricator.fb.com/D736962

Task ID: 2179482
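
For context, here is a minimal sketch contrasting the two read styles on a Hadoop FSDataInputStream. It is illustrative only and not taken from the patch: pread passes an explicit file offset and leaves the stream position untouched, so concurrent readers need no locking, while seek+read moves the shared stream position and therefore must be synchronized, but it lets the filesystem read ahead sequentially, which is what helps large scans.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.io.IOUtils;

public class ReadStyleSketch {
  // Positional read: the offset is explicit and the stream position is untouched.
  static void pread(FSDataInputStream in, long fileOffset,
      byte[] dest, int destOffset, int size) throws IOException {
    int read = in.read(fileOffset, dest, destOffset, size);
    if (read < size) {
      // Mirrors the patch, which treats a short positional read as an error.
      throw new IOException("Short positional read: " + read + " of " + size);
    }
  }

  // Seek + read: mutates the shared stream position, so callers synchronize
  // on the stream, as the HFileBlock change below does.
  static void seekAndRead(FSDataInputStream in, long fileOffset,
      byte[] dest, int destOffset, int size) throws IOException {
    synchronized (in) {
      in.seek(fileOffset);
      IOUtils.readFully(in, dest, destOffset, size);
    }
  }
}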

Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java?rev=1459474&r1=1459473&r2=1459474&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java Thu Mar 21 18:54:08 2013
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.ipc.HBase
 import org.apache.hadoop.hbase.ipc.ProfilingData;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.MemStore;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -1095,33 +1096,57 @@ public class HFileBlock extends SchemaCo
 
       long t0, t1;
       long timeToRead;
-      // Positional read. Better for random reads.
-      int extraSize = peekIntoNextBlock ? HEADER_SIZE : 0;
-
-      t0 = EnvironmentEdgeManager.currentTimeMillis();
-      numPositionalRead.incrementAndGet();
-      int sizeToRead = size + extraSize;
-      int sizeRead = 0;
-      if (sizeToRead > 0) {
-        sizeRead = istream.read(fileOffset, dest, destOffset, sizeToRead);
-        if (pData != null) {
-          t1 = EnvironmentEdgeManager.currentTimeMillis();
-          timeToRead = t1 - t0;
-          pData.addToHist(ProfilingData.HFILE_BLOCK_P_READ_TIME_MS, timeToRead);
-        }
-        if (size == 0 && sizeRead == -1) {
-          // a degenerate case of a zero-size block and no next block header.
-          sizeRead = 0;
+      if (Store.isPread) {
+        // Positional read. Better for random reads.
+        int extraSize = peekIntoNextBlock ? HEADER_SIZE : 0;
+
+        t0 = EnvironmentEdgeManager.currentTimeMillis();
+        numPositionalRead.incrementAndGet();
+        int sizeToRead = size + extraSize;
+        int sizeRead = 0;
+        if (sizeToRead > 0) {
+          sizeRead = istream.read(fileOffset, dest, destOffset, sizeToRead);
+          if (pData != null) {
+            t1 = EnvironmentEdgeManager.currentTimeMillis();
+            timeToRead = t1 - t0;
+            pData.addToHist(ProfilingData.HFILE_BLOCK_P_READ_TIME_MS, timeToRead);
+          }
+          if (size == 0 && sizeRead == -1) {
+            // a degenerate case of a zero-size block and no next block header.
+            sizeRead = 0;
+          }
+          if (sizeRead < size) {
+            throw new IOException("Positional read of " + sizeToRead + " bytes " +
+                "failed at offset " + fileOffset + " (returned " + sizeRead + ")");
+          }
         }
-        if (sizeRead < size) {
-          throw new IOException("Positional read of " + sizeToRead + " bytes " +
-              "failed at offset " + fileOffset + " (returned " + sizeRead + ")");
+
+        if (sizeRead == size || sizeRead < sizeToRead) {
+          // Could not read the next block's header, or did not try.
+          return -1;
         }
-      }
+      } else {
+        // Seek + read. Better for scanning.
+        synchronized (istream) {
+          numSeekRead.incrementAndGet();
+          istream.seek(fileOffset);
+          long realOffset = istream.getPos();
+          if (realOffset != fileOffset) {
+            throw new IOException("Tried to seek to " + fileOffset + " to "
+                + "read " + size + " bytes, but pos=" + realOffset
+                + " after seek");
+          }
+
+          if (!peekIntoNextBlock) {
+            IOUtils.readFully(istream, dest, destOffset, size);
+            return -1;
+          }
 
-      if (sizeRead == size || sizeRead < sizeToRead) {
-        // Could not read the next block's header, or did not try.
-        return -1;
+          // Try to read the next block header.
+          if (!readWithExtra(istream, dest, destOffset, size, HEADER_SIZE)){
+            return -1;
+          }
+        }
       }
       assert peekIntoNextBlock;
       return Bytes.toInt(dest, destOffset + size + BlockType.MAGIC_LENGTH) +
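
In the seek+read branch above, readWithExtra pulls in the block body and, opportunistically, the next block's header (HEADER_SIZE extra bytes) so the next on-disk block size can be decoded without another I/O. The helper below is a from-scratch sketch of that contract, not a copy of HFileBlock's readWithExtra, whose details may differ:

import java.io.IOException;
import java.io.InputStream;

public class ReadWithExtraSketch {
  /**
   * Reads at least necessaryLen bytes into buf, and up to necessaryLen + extraLen.
   * Returns true only if the extra bytes (the peeked next-block header) were
   * fully read as well; returns false if EOF arrived after the required bytes.
   */
  static boolean readAtLeast(InputStream in, byte[] buf, int off,
      int necessaryLen, int extraLen) throws IOException {
    int total = 0;
    int want = necessaryLen + extraLen;
    while (total < want) {
      int n = in.read(buf, off + total, want - total);
      if (n < 0) {
        if (total < necessaryLen) {
          throw new IOException("Premature EOF: read " + total + " of "
              + necessaryLen + " required bytes");
        }
        return false; // block read fully, but no complete extra header
      }
      total += n;
    }
    return true;
  }
}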

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1459474&r1=1459473&r2=1459474&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Thu Mar 21 18:54:08 2013
@@ -1886,6 +1886,13 @@ public class Store extends SchemaConfigu
       ClassSize.CONCURRENT_SKIPLISTMAP +
       ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ClassSize.OBJECT);
 
+  /*
+   * Set to
+   * true  : for using pread
+   * false : for using seek+read
+   */
+  public static boolean isPread = true;
+
   @Override
   public long heapSize() {
     return DEEP_OVERHEAD + this.memstore.heapSize();
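
Note that isPread is a JVM-wide static flag rather than a per-scan setting, so a caller that wants the seek+read path (for example, a large ClientSideScan-style MapReduce job) has to flip it for the whole process. A hypothetical usage pattern, where runLargeScan() is just a placeholder for the actual scan work:

// Hypothetical: switch this JVM to seek+read for a long sequential scan,
// then restore the previous setting so other readers go back to pread.
boolean previous = Store.isPread;
Store.isPread = false;   // false selects the seek+read path in HFileBlock
try {
  runLargeScan();        // placeholder, not an HBase API
} finally {
  Store.isPread = previous;
}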

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java?rev=1459474&r1=1459473&r2=1459474&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java Thu Mar 21 18:54:08 2013
@@ -67,6 +67,7 @@ public class TestFSErrorsExposed {
    */
   @Test
   public void testHFileScannerThrowsErrors() throws IOException {
+    Store.isPread = true; // Forcing usage of pread.
     Path hfilePath = new Path(new Path(
         util.getTestDir("internalScannerExposesErrors"),
         "regionname"), "familyname");
@@ -114,6 +115,7 @@ public class TestFSErrorsExposed {
    */
   @Test
   public void testStoreFileScannerThrowsErrors() throws IOException {
+    Store.isPread = true; // Forcing usage of pread.
     Path hfilePath = new Path(new Path(
         util.getTestDir("internalScannerExposesErrors"),
         "regionname"), "familyname");