Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/09/18 20:04:08 UTC

hadoop git commit: HDFS-8550. Erasure Coding: Fix FindBugs Multithreaded correctness Warning. Contributed by Rakesh R.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 82a88b92b -> c45709520


HDFS-8550. Erasure Coding: Fix FindBugs Multithreaded correctness Warning. Contributed by Rakesh R.

Change-Id: Ic248999a7f8e5e740d49c9b10abcf16f66dd0f98


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4570952
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4570952
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4570952

Branch: refs/heads/HDFS-7285
Commit: c457095206e5093c577b0124ad6fc7eef6f77b0a
Parents: 82a88b9
Author: Zhe Zhang <zh...@cloudera.com>
Authored: Fri Sep 18 11:04:06 2015 -0700
Committer: Zhe Zhang <zh...@cloudera.com>
Committed: Fri Sep 18 11:04:06 2015 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt              |  3 +++
 .../org/apache/hadoop/hdfs/DFSStripedInputStream.java | 14 +++++++-------
 .../hdfs/server/blockmanagement/BlockManager.java     |  1 +
 .../org/apache/hadoop/hdfs/util/StripedBlockUtil.java |  2 +-
 4 files changed, 12 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4570952/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 8ff696b..468cc56 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -435,3 +435,6 @@
 
     HDFS-9086. Rename dfs.datanode.stripedread.threshold.millis to 
     dfs.datanode.stripedread.timeout.millis. (wang via zhz)
+
+    HDFS-8550. Erasure Coding: Fix FindBugs Multithreaded correctness Warning.
+    (Rakesh R via zhz)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4570952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 2ad63b8..b7c22c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -112,7 +112,6 @@ public class DFSStripedInputStream extends DFSInputStream {
      * offsets for all the block readers so that we can skip data if necessary.
      */
     long blockReaderOffset;
-    LocatedBlock targetBlock;
     /**
      * We use this field to indicate whether we should use this reader. In case
      * we hit any issue with this reader, we set this field to true and avoid
@@ -120,10 +119,8 @@ public class DFSStripedInputStream extends DFSInputStream {
      */
     boolean shouldSkip = false;
 
-    BlockReaderInfo(BlockReader reader, LocatedBlock targetBlock,
-        DatanodeInfo dn, long offset) {
+    BlockReaderInfo(BlockReader reader, DatanodeInfo dn, long offset) {
       this.reader = reader;
-      this.targetBlock = targetBlock;
       this.datanode = dn;
       this.blockReaderOffset = offset;
     }
@@ -649,8 +646,8 @@ public class DFSStripedInputStream extends DFSInputStream {
           }
         }
         if (reader != null) {
-          readerInfos[chunkIndex] = new BlockReaderInfo(reader, block,
-              dnInfo.info, alignedStripe.getOffsetInBlock());
+          readerInfos[chunkIndex] = new BlockReaderInfo(reader, dnInfo.info,
+              alignedStripe.getOffsetInBlock());
           return true;
         }
       }
@@ -826,7 +823,10 @@ public class DFSStripedInputStream extends DFSInputStream {
     void prepareDecodeInputs() {
       if (decodeInputs == null) {
         decodeInputs = new ByteBuffer[dataBlkNum + parityBlkNum];
-        ByteBuffer cur = curStripeBuf.duplicate();
+        final ByteBuffer cur;
+        synchronized (DFSStripedInputStream.this) {
+          cur = curStripeBuf.duplicate();
+        }
         StripedBlockUtil.VerticalRange range = alignedStripe.range;
         for (int i = 0; i < dataBlkNum; i++) {
           cur.limit(cur.capacity());

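The synchronized block added to prepareDecodeInputs() above is the heart of the FindBugs fix: curStripeBuf is otherwise touched while holding the DFSStripedInputStream lock, so the duplicate() call that snapshots its position and limit has to take the same lock. A minimal standalone sketch of that pattern (illustrative class and field names, not the actual HDFS code) looks like this:

    import java.nio.ByteBuffer;

    // Sketch of the pattern only: a buffer whose owner mutates it under the
    // instance lock, so any reader must hold that lock while duplicating it.
    public class SharedBufferSketch {
      private final ByteBuffer stripeBuf = ByteBuffer.allocate(64 * 1024);

      // Writer side: refill the buffer while holding the instance lock.
      public synchronized void refill(byte[] data) {
        stripeBuf.clear();
        stripeBuf.put(data, 0, Math.min(data.length, stripeBuf.remaining()));
        stripeBuf.flip();
      }

      // Reader side: duplicate() shares the contents but copies position and
      // limit, so the snapshot is taken under the same lock the writer uses,
      // mirroring the synchronized block added in prepareDecodeInputs().
      public ByteBuffer snapshotForDecode() {
        final ByteBuffer cur;
        synchronized (this) {
          cur = stripeBuf.duplicate();
        }
        return cur;
      }
    }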
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4570952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3c1c461..1211169 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -971,6 +971,7 @@ public class BlockManager implements BlockStatsMXBean {
     if (isBlockTokenEnabled()) {
       // Use cached UGI if serving RPC calls.
       if (b.isStriped()) {
+        Preconditions.checkState(b instanceof LocatedStripedBlock);
         LocatedStripedBlock sb = (LocatedStripedBlock) b;
         int[] indices = sb.getBlockIndices();
         Token<BlockTokenIdentifier>[] blockTokens = new Token[indices.length];

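The BlockManager hunk makes the downcast explicit: checkState() records the invariant that a striped located block really is a LocatedStripedBlock, which is the usual way to quiet FindBugs' unconfirmed-cast style warnings. A rough sketch of the same guard-before-cast pattern, using Guava's Preconditions with made-up block types rather than the real HDFS classes (assumes Guava on the classpath):

    import com.google.common.base.Preconditions;

    // Guard-before-cast sketch with hypothetical types, not HDFS code.
    public class GuardedCastSketch {
      static class Block { }
      static class StripedBlock extends Block {
        int[] blockIndices() { return new int[] {0, 1, 2}; }
      }

      static int[] indicesOf(Block b, boolean isStriped) {
        if (isStriped) {
          // State the invariant before the downcast instead of casting blindly.
          Preconditions.checkState(b instanceof StripedBlock);
          StripedBlock sb = (StripedBlock) b;
          return sb.blockIndices();
        }
        return new int[0];
      }

      public static void main(String[] args) {
        System.out.println(indicesOf(new StripedBlock(), true).length); // prints 3
      }
    }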
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4570952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 897b092..5af3585 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -372,7 +372,7 @@ public class StripedBlockUtil {
 
     // Step 4: calculate each chunk's position in destination buffer. Since the
     // whole read range is within a single stripe, the logic is simpler here.
-    int bufOffset = (int) (rangeStartInBlockGroup % (cellSize * dataBlkNum));
+    int bufOffset = (int) (rangeStartInBlockGroup % ((long) cellSize * dataBlkNum));
     for (StripingCell cell : cells) {
       long cellStart = cell.idxInInternalBlk * cellSize + cell.offset;
       long cellEnd = cellStart + cell.size - 1;