Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/03/02 18:15:18 UTC

[02/50] [abbrv] hadoop git commit: HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt() (cmccabe)

HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt() (cmccabe)
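
In short: getBlockAt() previously combined a block lookup with an optional
side effect on the stream's read position, selected by a boolean flag. After
this change it is a pure lookup, and the one caller that needed the side
effect (blockSeekTo) performs the position update itself. The following is a
minimal, self-contained sketch of that pattern, not the real class: the
block-map lookup here is a stand-in (the real code walks locatedBlocks under
infoLock), but the field and method names mirror the diff below.

    import java.io.IOException;

    class BlockLookupSketch {
      static final class LocatedBlock {
        private final long startOffset;
        private final long blockSize;
        LocatedBlock(long startOffset, long blockSize) {
          this.startOffset = startOffset;
          this.blockSize = blockSize;
        }
        long getStartOffset() { return startOffset; }
        long getBlockSize()   { return blockSize; }
      }

      private long pos;
      private long blockEnd;
      private LocatedBlock currentLocatedBlock;

      // After the change: getBlockAt() is a read-only lookup and no longer
      // mutates any stream state. The fixed block size and arithmetic lookup
      // are illustrative stand-ins for the real locatedBlocks search.
      private LocatedBlock getBlockAt(long offset) throws IOException {
        if (offset < 0) {
          throw new IOException("offset < 0, offset=" + offset);
        }
        long blockSize = 128L * 1024 * 1024;
        return new LocatedBlock((offset / blockSize) * blockSize, blockSize);
      }

      // The one caller that used to pass updatePosition=true now performs the
      // position update explicitly, right after the lookup, as in the diff.
      void blockSeekTo(long target) throws IOException {
        LocatedBlock targetBlock = getBlockAt(target);
        pos = target;
        blockEnd = targetBlock.getStartOffset()
            + targetBlock.getBlockSize() - 1;
        currentLocatedBlock = targetBlock;
      }
    }
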


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11a1c72c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11a1c72c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11a1c72c

Branch: refs/heads/HDFS-7285
Commit: 11a1c72ceb010b8048db79417ad65646047f9111
Parents: 8d184d1
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Feb 25 13:29:31 2015 -0800
Committer: Zhe Zhang <zh...@cloudera.com>
Committed: Mon Mar 2 09:13:50 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 33 ++++++++------------
 2 files changed, 16 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11a1c72c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ca10da..e0f9267 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -668,6 +668,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7740. Test truncate with DataNodes restarting. (yliu)
 
+    HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt()
+    (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11a1c72c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index c408524..cf8015f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -434,12 +434,10 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
    * Fetch it from the namenode if not cached.
    * 
    * @param offset block corresponding to this offset in file is returned
-   * @param updatePosition whether to update current position
    * @return located block
    * @throws IOException
    */
-  private LocatedBlock getBlockAt(long offset,
-      boolean updatePosition) throws IOException {
+  private LocatedBlock getBlockAt(long offset) throws IOException {
     synchronized(infoLock) {
       assert (locatedBlocks != null) : "locatedBlocks is null";
 
@@ -449,7 +447,6 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       if (offset < 0 || offset >= getFileLength()) {
         throw new IOException("offset < 0 || offset >= getFileLength(), offset="
             + offset
-            + ", updatePosition=" + updatePosition
             + ", locatedBlocks=" + locatedBlocks);
       }
       else if (offset >= locatedBlocks.getFileLength()) {
@@ -470,17 +467,6 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
         }
         blk = locatedBlocks.get(targetBlockIdx);
       }
-
-      // update current position
-      if (updatePosition) {
-        // synchronized not strictly needed, since we only get here
-        // from synchronized caller methods
-        synchronized(this) {
-          pos = offset;
-          blockEnd = blk.getStartOffset() + blk.getBlockSize() - 1;
-          currentLocatedBlock = blk;
-        }
-      }
       return blk;
     }
   }
@@ -604,7 +590,14 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       //
       // Compute desired block
       //
-      LocatedBlock targetBlock = getBlockAt(target, true);
+      LocatedBlock targetBlock = getBlockAt(target);
+
+      // update current position
+      this.pos = target;
+      this.blockEnd = targetBlock.getStartOffset() +
+            targetBlock.getBlockSize() - 1;
+      this.currentLocatedBlock = targetBlock;
+
       assert (target==pos) : "Wrong postion " + pos + " expect " + target;
       long offsetIntoBlock = target - targetBlock.getStartOffset();
 
@@ -979,7 +972,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
         }
         deadNodes.clear(); //2nd option is to remove only nodes[blockId]
         openInfo();
-        block = getBlockAt(block.getStartOffset(), false);
+        block = getBlockAt(block.getStartOffset());
         failures++;
         continue;
       }
@@ -1056,7 +1049,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       byte[] buf, int offset,
       Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
       throws IOException {
-    block = getBlockAt(block.getStartOffset(), false);
+    block = getBlockAt(block.getStartOffset());
     while (true) {
       DNAddrPair addressPair = chooseDataNode(block, null);
       try {
@@ -1108,7 +1101,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       // start of the loop.
       CachingStrategy curCachingStrategy;
       boolean allowShortCircuitLocalReads;
-      block = getBlockAt(block.getStartOffset(), false);
+      block = getBlockAt(block.getStartOffset());
       synchronized(infoLock) {
         curCachingStrategy = cachingStrategy;
         allowShortCircuitLocalReads = !shortCircuitForbidden();
@@ -1208,7 +1201,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
     ByteBuffer bb = null;
     int len = (int) (end - start + 1);
     int hedgedReadId = 0;
-    block = getBlockAt(block.getStartOffset(), false);
+    block = getBlockAt(block.getStartOffset());
     while (true) {
       // see HDFS-6591, this metric is used to verify/catch unnecessary loops
       hedgedReadOpsLoopNumForTesting++;
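
Design note, as visible in the diff: the four call sites that passed
updatePosition=false are untouched apart from the shorter signature, while
the single updatePosition=true call site now mutates pos, blockEnd, and
currentLocatedBlock inline. That leaves getBlockAt() as a read-only lookup
under infoLock, and the assert immediately after the hoisted update
(target == pos) documents the invariant the removed flag used to guarantee.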