Posted to hdfs-commits@hadoop.apache.org by ki...@apache.org on 2013/10/01 00:33:01 UTC

svn commit: r1527808 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java

Author: kihwal
Date: Mon Sep 30 22:33:01 2013
New Revision: 1527808

URL: http://svn.apache.org/r1527808
Log:
svn merge -c 1527807. merging from trunk to branch-2 to fix HDFS-4517.

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1527808&r1=1527807&r2=1527808&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Sep 30 22:33:01 2013
@@ -59,6 +59,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5260. Merge zero-copy memory-mapped HDFS client reads to trunk and
     branch-2. (cnauroth)
 
+    HDFS-4517. Cover class RemoteBlockReader with unit tests. (Vadim Bondarev
+    and Dennis Y via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-5239.  Allow FSNamesystem lock fairness to be configurable (daryn)

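For context, HDFS-4517 hinges on a quirk of the legacy client read path: the
old RemoteBlockReader (selected with dfs.client.use.legacy.blockreader, as in
the test below) has no ByteBuffer read support, so ByteBuffer reads on a
client stream surface an UnsupportedOperationException. A minimal sketch of
that behavior, assuming a reachable HDFS and an existing file at an
illustrative path:

    import java.nio.ByteBuffer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class LegacyReaderByteBufferProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Force the legacy RemoteBlockReader instead of RemoteBlockReader2.
        conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
        FileSystem fs = FileSystem.get(conf);
        // Path is illustrative; any existing file works.
        FSDataInputStream in = fs.open(new Path("/tmp/probe.dat"));
        try {
          in.read(ByteBuffer.allocateDirect(1024));
          System.out.println("ByteBuffer read succeeded");
        } catch (UnsupportedOperationException e) {
          // Expected with the legacy reader, and what the new test asserts.
          System.out.println("ByteBuffer read unsupported");
        } finally {
          in.close();
          fs.close();
        }
      }
    }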
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1527808&r1=1527807&r2=1527808&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Mon Sep 30 22:33:01 2013
@@ -578,4 +578,64 @@ public class TestShortCircuitLocalRead {
     System.out.println("Iteration " + iteration + " took " + (end - start));
     fs.delete(file1, false);
   }
+
+  @Test
+  public void testReadWithRemoteBlockReader() throws IOException,
+      InterruptedException {
+    doTestShortCircuitReadWithRemoteBlockReader(true, 3 * blockSize + 100,
+        getCurrentUser(), 0, false);
+  }
+
+  /**
+   * Test that file data can be read by reading the blocks
+   * through a RemoteBlockReader.
+   * @throws IOException
+   */
+  public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum,
+      int size, String shortCircuitUser, int readOffset,
+      boolean shortCircuitFails) throws IOException, InterruptedException {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+             .format(true).build();
+    FileSystem fs = cluster.getFileSystem();
+    // check that / exists
+    Path path = new Path("/");
+    URI uri = cluster.getURI();
+    assertTrue("/ should be a directory", fs.getFileStatus(path)
+                .isDirectory() == true);
+
+    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
+    Path file1 = new Path("filelocal.dat");
+    FSDataOutputStream stm = createFile(fs, file1, 1);
+
+    stm.write(fileData);
+    stm.close();
+    try {
+      checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, conf, shortCircuitFails);
+      // The legacy RemoteBlockReader does not support read(ByteBuffer buf)
+      assertTrue("RemoteBlockReader unexpectedly supported read(ByteBuffer buf)",
+          checkUnsupportedMethod(fs, file1, fileData, readOffset));
+    } catch (IOException e) {
+      throw new IOException(
+          "doTestShortCircuitReadWithRemoteBlockReader failed", e);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  private boolean checkUnsupportedMethod(FileSystem fs, Path file,
+      byte[] expected, int readOffset) throws IOException {
+    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
+    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
+    IOUtils.skipFully(stm, readOffset);
+    try {
+      // The legacy reader is expected to reject ByteBuffer reads.
+      stm.read(actual);
+    } catch (UnsupportedOperationException unex) {
+      return true;
+    } finally {
+      stm.close();
+    }
+    return false;
+  }
+
 }
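
To exercise the updated test class outside a full Maven build, a sketch using
JUnit 4's programmatic runner (the wrapper class name is illustrative; the
test class itself ships in the hadoop-hdfs test sources):

    import org.junit.runner.JUnitCore;
    import org.junit.runner.Result;
    import org.junit.runner.notification.Failure;

    public class RunShortCircuitTests {
      public static void main(String[] args) {
        // Runs every test in the class, including the new
        // testReadWithRemoteBlockReader added by this commit.
        Result result = JUnitCore.runClasses(
            org.apache.hadoop.hdfs.TestShortCircuitLocalRead.class);
        for (Failure f : result.getFailures()) {
          System.out.println(f.toString());
        }
        System.out.println("Tests run: " + result.getRunCount()
            + ", failures: " + result.getFailureCount());
      }
    }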