Posted to common-commits@hadoop.apache.org by wh...@apache.org on 2016/01/05 20:52:02 UTC

[02/50] [abbrv] hadoop git commit: Change INodeFile#getBlockReplication() to BlockInfoContiguous#getReplication().

Change INodeFile#getBlockReplication() to BlockInfoContiguous#getReplication().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bace6f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bace6f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bace6f3

Branch: refs/heads/feature-HDFS-8286
Commit: 2bace6f36b6032e62c479db57a4d9a15859297eb
Parents: 7a82dbd
Author: Haohui Mai <wh...@apache.org>
Authored: Mon May 4 14:06:24 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Fri Jun 12 13:56:55 2015 -0700

----------------------------------------------------------------------
 .../blockmanagement/BlockInfoContiguous.java    | 17 ++++--
 .../server/blockmanagement/BlockManager.java    | 56 ++++++++++----------
 .../blockmanagement/DecommissionManager.java    | 13 ++---
 .../hdfs/server/namenode/FSDirAttrOp.java       | 35 ++++--------
 .../hdfs/server/namenode/FSDirConcatOp.java     | 22 ++++----
 .../hdfs/server/namenode/FSEditLogLoader.java   | 12 +++--
 .../hdfs/server/namenode/FSNamesystem.java      |  4 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  3 +-
 .../hdfs/server/namenode/NamenodeFsck.java      |  3 +-
 .../blockmanagement/TestBlockManager.java       |  1 -
 .../blockmanagement/TestReplicationPolicy.java  |  2 -
 11 files changed, 84 insertions(+), 84 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 769046b..7a78708 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -53,6 +53,8 @@ public class BlockInfoContiguous extends Block
    */
   private Object[] triplets;
 
+  private short replication;
+
   /**
    * Construct an entry for blocksmap
    * @param replication the block's replication factor
@@ -60,12 +62,14 @@ public class BlockInfoContiguous extends Block
   public BlockInfoContiguous(short replication) {
     this.triplets = new Object[3*replication];
     this.bc = null;
+    this.replication = replication;
   }
   
   public BlockInfoContiguous(Block blk, short replication) {
     super(blk);
     this.triplets = new Object[3*replication];
     this.bc = null;
+    this.replication = replication;
   }
 
   /**
@@ -74,11 +78,18 @@ public class BlockInfoContiguous extends Block
    * @param from BlockInfo to copy from.
    */
   protected BlockInfoContiguous(BlockInfoContiguous from) {
-    super(from);
-    this.triplets = new Object[from.triplets.length];
+    this(from, from.getReplication());
     this.bc = from.bc;
   }
 
+  public void setReplication(short replication) {
+    this.replication = replication;
+  }
+
+  public short getReplication() {
+    return replication;
+  }
+
   public BlockCollection getBlockCollection() {
     return bc;
   }
@@ -362,7 +373,7 @@ public class BlockInfoContiguous extends Block
     if(isComplete()) {
       BlockInfoContiguousUnderConstruction ucBlock =
           new BlockInfoContiguousUnderConstruction(this,
-          getBlockCollection().getPreferredBlockReplication(), s, targets);
+          getReplication(), s, targets);
       ucBlock.setBlockCollection(getBlockCollection());
       return ucBlock;
     }
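
The hunks above are the heart of the change: the expected replication factor now lives on the block itself, so callers no longer reach through the owning BlockCollection (getPreferredBlockReplication()) to find it. A minimal standalone sketch of that shape, using simplified stand-in names rather than the real Hadoop types:

    // Stand-in for BlockInfoContiguous: the block record carries its own
    // expected replication factor instead of asking the owning file.
    class BlockRecord {
      private final long blockId;
      private short replication; // expected number of replicas, per block

      BlockRecord(long blockId, short replication) {
        this.blockId = blockId;
        this.replication = replication;
      }

      // Mirrors the new copy constructor: the copy inherits the source
      // block's replication factor via the delegating constructor.
      BlockRecord(BlockRecord from) {
        this(from.blockId, from.replication);
      }

      short getReplication() { return replication; }

      void setReplication(short replication) { this.replication = replication; }
    }

One consequence visible in the last hunk: converting a complete block to an under-construction copy can pass getReplication() directly, so the conversion no longer depends on the block still being attached to a file.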

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 44868aa..2e9b5b66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1109,8 +1109,7 @@ public class BlockManager {
       addToInvalidates(b.corrupted, node);
       return;
     } 
-    short expectedReplicas =
-        b.corrupted.getBlockCollection().getPreferredBlockReplication();
+    short expectedReplicas = b.corrupted.getReplication();
 
     // Add replica to the data-node if it is not already there
     if (storageInfo != null) {
@@ -1277,15 +1276,16 @@ public class BlockManager {
           for (Block block : blocksToReplicate.get(priority)) {
             // block should belong to a file
             bc = blocksMap.getBlockCollection(block);
+            BlockInfoContiguous bi = getStoredBlock(block);
             // abandoned block or block reopened for append
-            if (bc == null
+            if (bc == null || bi == null
                 || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) {
               // remove from neededReplications
               neededReplications.remove(block, priority);
               continue;
             }
 
-            requiredReplication = bc.getPreferredBlockReplication();
+            requiredReplication = bi.getReplication();
 
             // get a source data-node
             containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1359,6 +1359,8 @@ public class BlockManager {
 
         synchronized (neededReplications) {
           Block block = rw.block;
+          BlockInfoContiguous bi = getStoredBlock(block);
+          assert bi != null;
           int priority = rw.priority;
           // Recheck since global lock was released
           // block should belong to a file
@@ -1369,7 +1371,7 @@ public class BlockManager {
             rw.targets = null;
             continue;
           }
-          requiredReplication = bc.getPreferredBlockReplication();
+          requiredReplication = bi.getReplication();
 
           // do not schedule more if enough replicas is already pending
           NumberReplicas numReplicas = countNodes(block);
@@ -2534,15 +2536,17 @@ public class BlockManager {
     }
 
     // handle underReplication/overReplication
-    short fileReplication = bc.getPreferredBlockReplication();
-    if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
+    short expectedReplication = storedBlock.getReplication();
+    if (!isNeededReplication(storedBlock, expectedReplication,
+                             numCurrentReplica)) {
       neededReplications.remove(storedBlock, numCurrentReplica,
-          num.decommissionedAndDecommissioning(), fileReplication);
+          num.decommissionedAndDecommissioning(), expectedReplication);
     } else {
       updateNeededReplications(storedBlock, curReplicaDelta, 0);
     }
-    if (numCurrentReplica > fileReplication) {
-      processOverReplicatedBlock(storedBlock, fileReplication, node, delNodeHint);
+    if (numCurrentReplica > expectedReplication) {
+      processOverReplicatedBlock(storedBlock, expectedReplication, node,
+                                 delNodeHint);
     }
     // If the file replication has reached desired value
     // we can remove any corrupt replicas the block may have
@@ -2553,7 +2557,7 @@ public class BlockManager {
           storedBlock + "blockMap has " + numCorruptNodes + 
           " but corrupt replicas map has " + corruptReplicasCount);
     }
-    if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication))
+    if ((corruptReplicasCount > 0) && (numLiveReplicas >= expectedReplication))
       invalidateCorruptReplicas(storedBlock);
     return storedBlock;
   }
@@ -2764,8 +2768,7 @@ public class BlockManager {
       return MisReplicationResult.UNDER_CONSTRUCTION;
     }
     // calculate current replication
-    short expectedReplication =
-        block.getBlockCollection().getPreferredBlockReplication();
+    short expectedReplication = block.getReplication();
     NumberReplicas num = countNodes(block);
     int numCurrentReplica = num.liveReplicas();
     // add to under-replicated queue if need to be
@@ -2796,23 +2799,19 @@ public class BlockManager {
   
   /** Set replication for the blocks. */
   public void setReplication(final short oldRepl, final short newRepl,
-      final String src, final Block... blocks) {
+      final String src, final Block b) {
     if (newRepl == oldRepl) {
       return;
     }
 
     // update needReplication priority queues
-    for(Block b : blocks) {
-      updateNeededReplications(b, 0, newRepl-oldRepl);
-    }
-      
+    updateNeededReplications(b, 0, newRepl-oldRepl);
+
     if (oldRepl > newRepl) {
       // old replication > the new one; need to remove copies
       LOG.info("Decreasing replication from " + oldRepl + " to " + newRepl
           + " for " + src);
-      for(Block b : blocks) {
-        processOverReplicatedBlock(b, newRepl, null, null);
-      }
+      processOverReplicatedBlock(b, newRepl, null, null);
     } else { // replication factor is increased
       LOG.info("Increasing replication from " + oldRepl + " to " + newRepl
           + " for " + src);
@@ -3262,12 +3261,11 @@ public class BlockManager {
     if (!namesystem.isPopulatingReplQueues()) {
       return;
     }
-    final Iterator<? extends Block> it = srcNode.getBlockIterator();
+    final Iterator<BlockInfoContiguous> it = srcNode.getBlockIterator();
     int numOverReplicated = 0;
     while(it.hasNext()) {
-      final Block block = it.next();
-      BlockCollection bc = blocksMap.getBlockCollection(block);
-      short expectedReplication = bc.getPreferredBlockReplication();
+      final BlockInfoContiguous block = it.next();
+      short expectedReplication = block.getReplication();
       NumberReplicas num = countNodes(block);
       int numCurrentReplica = num.liveReplicas();
       if (numCurrentReplica > expectedReplication) {
@@ -3381,8 +3379,8 @@ public class BlockManager {
    * process it as an over replicated block.
    */
   public void checkReplication(BlockCollection bc) {
-    final short expected = bc.getPreferredBlockReplication();
-    for (Block block : bc.getBlocks()) {
+    for (BlockInfoContiguous block : bc.getBlocks()) {
+      final short expected = block.getReplication();
       final NumberReplicas n = countNodes(block);
       if (isNeededReplication(block, expected, n.liveReplicas())) { 
         neededReplications.add(block, n.liveReplicas(),
@@ -3419,8 +3417,8 @@ public class BlockManager {
    *         otherwise, return the replication factor of the block.
    */
   private int getReplication(Block block) {
-    final BlockCollection bc = blocksMap.getBlockCollection(block);
-    return bc == null? 0: bc.getPreferredBlockReplication();
+    BlockInfoContiguous bi = blocksMap.getStoredBlock(block);
+    return bi == null ? 0 : bi.getReplication();
   }
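
Across BlockManager the pattern is the same: resolve the stored BlockInfoContiguous first and read the factor from it, treating a missing entry (an abandoned block, or one reopened for append) as needing no replication. A sketch of that null-guarded lookup, reusing the BlockRecord stand-in from above; BlocksMapSketch is a hypothetical simplification of the real blocksMap, not its API:

    import java.util.HashMap;
    import java.util.Map;

    class BlocksMapSketch {
      private final Map<Long, BlockRecord> stored = new HashMap<>();

      void add(long blockId, BlockRecord block) {
        stored.put(blockId, block);
      }

      BlockRecord getStoredBlock(long blockId) {
        return stored.get(blockId); // null for unknown/abandoned blocks
      }

      // Mirrors the rewritten BlockManager#getReplication(Block): an
      // unknown block contributes 0 expected replicas, so it simply
      // falls out of the replication queues.
      int getReplication(long blockId) {
        BlockRecord bi = getStoredBlock(blockId);
        return bi == null ? 0 : bi.getReplication();
      }
    }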
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 5f7366e..48fb39c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -251,7 +251,7 @@ public class DecommissionManager {
   private boolean isSufficientlyReplicated(BlockInfoContiguous block, 
       BlockCollection bc,
       NumberReplicas numberReplicas) {
-    final int numExpected = bc.getPreferredBlockReplication();
+    final int numExpected = block.getReplication();
     final int numLive = numberReplicas.liveReplicas();
     if (!blockManager.isNeededReplication(block, numExpected, numLive)) {
       // Block doesn't need replication. Skip.
@@ -284,11 +284,12 @@ public class DecommissionManager {
     return false;
   }
 
-  private static void logBlockReplicationInfo(Block block, BlockCollection bc,
+  private static void logBlockReplicationInfo(
+      BlockInfoContiguous block, BlockCollection bc,
       DatanodeDescriptor srcNode, NumberReplicas num,
       Iterable<DatanodeStorageInfo> storages) {
     int curReplicas = num.liveReplicas();
-    int curExpectedReplicas = bc.getPreferredBlockReplication();
+    int curExpectedReplicas = block.getReplication();
     StringBuilder nodeList = new StringBuilder();
     for (DatanodeStorageInfo storage : storages) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
@@ -564,8 +565,8 @@ public class DecommissionManager {
 
         // Schedule under-replicated blocks for replication if not already
         // pending
-        if (blockManager.isNeededReplication(block,
-            bc.getPreferredBlockReplication(), liveReplicas)) {
+        if (blockManager.isNeededReplication(block, block.getReplication(),
+            liveReplicas)) {
           if (!blockManager.neededReplications.contains(block) &&
               blockManager.pendingReplications.getNumReplicas(block) == 0 &&
               namesystem.isPopulatingReplQueues()) {
@@ -573,7 +574,7 @@ public class DecommissionManager {
             blockManager.neededReplications.add(block,
                 curReplicas,
                 num.decommissionedAndDecommissioning(),
-                bc.getPreferredBlockReplication());
+                block.getReplication());
           }
         }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index a24c81f..ef62e05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.util.EnumCounters;
@@ -147,13 +148,11 @@ public class FSDirAttrOp {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
       }
 
-      final short[] blockRepls = new short[2]; // 0: old, 1: new
-      final Block[] blocks = unprotectedSetReplication(fsd, src, replication,
-                                                       blockRepls);
+      final Block[] blocks = unprotectedSetReplication(fsd, bm, src,
+                                                       replication);
       isFile = blocks != null;
       if (isFile) {
         fsd.getEditLog().logSetReplication(src, replication);
-        bm.setReplication(blockRepls[0], blockRepls[1], src, blocks);
       }
     } finally {
       fsd.writeUnlock();
@@ -376,7 +375,7 @@ public class FSDirAttrOp {
   }
 
   static Block[] unprotectedSetReplication(
-      FSDirectory fsd, String src, short replication, short[] blockRepls)
+      FSDirectory fsd, BlockManager bm, String src, short replication)
       throws QuotaExceededException, UnresolvedLinkException,
              SnapshotAccessControlException {
     assert fsd.hasWriteLock();
@@ -387,29 +386,17 @@ public class FSDirAttrOp {
       return null;
     }
     INodeFile file = inode.asFile();
-    final short oldBR = file.getPreferredBlockReplication();
-
-    // before setFileReplication, check for increasing block replication.
-    // if replication > oldBR, then newBR == replication.
-    // if replication < oldBR, we don't know newBR yet.
-    if (replication > oldBR) {
-      long dsDelta = file.storagespaceConsumed(null).getStorageSpace() / oldBR;
-      fsd.updateCount(iip, 0L, dsDelta, oldBR, replication, true);
-    }
-
     file.setFileReplication(replication, iip.getLatestSnapshotId());
 
-    final short newBR = file.getPreferredBlockReplication();
-    // check newBR < oldBR case.
-    if (newBR < oldBR) {
-      long dsDelta = file.storagespaceConsumed(null).getStorageSpace() / newBR;
-      fsd.updateCount(iip, 0L, dsDelta, oldBR, newBR, true);
+    for (BlockInfoContiguous block : file.getBlocks()) {
+      final short oldBR = block.getReplication();
+      if (oldBR == replication) {
+        continue;
+      }
+      fsd.updateCount(iip, 0L, block.getNumBytes(), oldBR, replication, true);
+      bm.setReplication(oldBR, replication, src, block);
     }
 
-    if (blockRepls != null) {
-      blockRepls[0] = oldBR;
-      blockRepls[1] = newBR;
-    }
     return file.getBlocks();
   }
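
unprotectedSetReplication now walks the file's blocks and adjusts quota per block, replacing the old two-phase oldBR/newBR adjustment on the whole file. The arithmetic it applies per block, as a standalone sketch (array inputs are a simplification; the real code reads each BlockInfoContiguous and calls fsd.updateCount):

    class SetReplicationSketch {
      // blockBytes[i] and oldRepl[i] describe block i of the file.
      // Returns the total storage-space quota delta in bytes.
      static long storageSpaceDelta(long[] blockBytes, short[] oldRepl,
                                    short newRepl) {
        long delta = 0;
        for (int i = 0; i < blockBytes.length; i++) {
          if (oldRepl[i] == newRepl) {
            continue; // unchanged blocks contribute nothing
          }
          delta += blockBytes[i] * (newRepl - oldRepl[i]);
        }
        return delta;
      }
    }

For example, dropping three 128 MB blocks from replication 3 to 2 yields 3 * 128 MB * (2 - 3) = -384 MB of storage space.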
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 3f22f51..2a71471 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -167,25 +168,28 @@ class FSDirConcatOp {
   private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
       INodeFile target, INodeFile[] srcList) {
     QuotaCounts deltas = new QuotaCounts.Builder().build();
-    final short targetRepl = target.getPreferredBlockReplication();
+    final short targetRepl = target.getFileReplication();
     for (INodeFile src : srcList) {
-      short srcRepl = src.getPreferredBlockReplication();
-      long fileSize = src.computeFileSize();
-      if (targetRepl != srcRepl) {
-        deltas.addStorageSpace(fileSize * (targetRepl - srcRepl));
-        BlockStoragePolicy bsp =
-            fsd.getBlockStoragePolicySuite().getPolicy(src.getStoragePolicyID());
+      BlockStoragePolicy bsp =
+          fsd.getBlockStoragePolicySuite().getPolicy(src.getStoragePolicyID());
+      for (BlockInfoContiguous b : src.getBlocks()) {
+        short srcRepl = b.getReplication();
+        if (targetRepl == srcRepl) {
+          continue;
+        }
+
+        deltas.addStorageSpace(b.getNumBytes() * (targetRepl - srcRepl));
         if (bsp != null) {
           List<StorageType> srcTypeChosen = bsp.chooseStorageTypes(srcRepl);
           for (StorageType t : srcTypeChosen) {
             if (t.supportTypeQuota()) {
-              deltas.addTypeSpace(t, -fileSize);
+              deltas.addTypeSpace(t, -b.getNumBytes());
             }
           }
           List<StorageType> targetTypeChosen = bsp.chooseStorageTypes(targetRepl);
           for (StorageType t : targetTypeChosen) {
             if (t.supportTypeQuota()) {
-              deltas.addTypeSpace(t, fileSize);
+              deltas.addTypeSpace(t, b.getNumBytes());
             }
           }
         }
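
computeQuotaDeltas likewise moves from one file-sized adjustment to a per-block loop, and the storage-type quotas shift with it: each differing block's bytes are subtracted from the types chosen at the source replication and added at the target replication. A sketch of that inner bookkeeping with hypothetical stand-ins (StoreKind for StorageType, the type lists standing in for bsp.chooseStorageTypes results):

    import java.util.EnumMap;
    import java.util.List;
    import java.util.Map;

    class ConcatDeltaSketch {
      enum StoreKind { DISK, SSD }

      // Moves numBytes of type quota from the source's chosen types to the
      // target's chosen types, mirroring the loop in computeQuotaDeltas.
      static Map<StoreKind, Long> typeDeltas(long numBytes,
          List<StoreKind> srcTypes, List<StoreKind> targetTypes) {
        Map<StoreKind, Long> deltas = new EnumMap<>(StoreKind.class);
        for (StoreKind t : srcTypes) {
          deltas.merge(t, -numBytes, Long::sum); // leaving the source layout
        }
        for (StoreKind t : targetTypes) {
          deltas.merge(t, numBytes, Long::sum);  // entering the target layout
        }
        return deltas;
      }
    }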

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 476ff36..950b5ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -511,10 +512,11 @@ public class FSEditLogLoader {
     }
     case OP_SET_REPLICATION: {
       SetReplicationOp setReplicationOp = (SetReplicationOp)op;
-      short replication = fsNamesys.getBlockManager().adjustReplication(
-          setReplicationOp.replication);
-      FSDirAttrOp.unprotectedSetReplication(fsDir, renameReservedPathsOnUpgrade(
-          setReplicationOp.path, logVersion), replication, null);
+      BlockManager bm = fsNamesys.getBlockManager();
+      short replication = bm.adjustReplication(setReplicationOp.replication);
+      FSDirAttrOp.unprotectedSetReplication(fsDir, bm,
+                                            renameReservedPathsOnUpgrade(
+          setReplicationOp.path, logVersion), replication);
       break;
     }
     case OP_CONCAT_DELETE: {
@@ -969,7 +971,7 @@ public class FSEditLogLoader {
     }
     // add the new block
     BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
-          newBlock, file.getPreferredBlockReplication());
+          newBlock, file.getFileReplication());
     fsNamesys.getBlockManager().addBlockCollection(newBI, file);
     file.addBlock(newBI);
     fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index af89d02..dbf2d63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2007,7 +2007,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       // Add new truncateBlock into blocksMap and
       // use oldBlock as a source for copy-on-truncate recovery
       truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock,
-          file.getPreferredBlockReplication());
+          oldBlock.getReplication());
       truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
       truncatedBlockUC.setTruncateBlock(oldBlock);
       file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
@@ -2510,7 +2510,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final BlockInfoContiguous lastBlock = file.getLastBlock();
     if (lastBlock != null) {
       final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
-      final short repl = file.getPreferredBlockReplication();
+      final short repl = lastBlock.getReplication();
       delta.addStorageSpace(diff * repl);
       final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
           .getPolicy(file.getStoragePolicyID());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index a6ff6fb..b97ea90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -731,6 +731,7 @@ public class INodeFile extends INodeWithAdditionalFields
     for (BlockInfoContiguous b : blocks) {
       long blockSize = b.isComplete() ? b.getNumBytes() :
           getPreferredBlockSize();
+      final short replication = b.getReplication();
       counts.addStorageSpace(blockSize * replication);
       if (bsp != null) {
         List<StorageType> types = bsp.chooseStorageTypes(replication);
@@ -849,7 +850,7 @@ public class INodeFile extends INodeWithAdditionalFields
         truncatedBytes -= bi.getNumBytes();
       }
 
-      delta.addStorageSpace(-truncatedBytes * getPreferredBlockReplication());
+      delta.addStorageSpace(-truncatedBytes * bi.getReplication());
       if (bsps != null) {
         List<StorageType> types = bsps.chooseStorageTypes(
             getPreferredBlockReplication());
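
The INodeFile hunks complete the accounting side: storage space is summed block by block, each block weighted by its own replication, with blocks still under construction charged at the preferred block size (the isComplete() branch above). A standalone sketch of that sum, with simplified inputs:

    class StorageAccountingSketch {
      // complete[i] says whether block i is finalized; open blocks are
      // charged at the preferred block size, as in the diff above.
      static long storageSpace(long[] blockBytes, boolean[] complete,
                               short[] repl, long preferredBlockSize) {
        long total = 0;
        for (int i = 0; i < blockBytes.length; i++) {
          long len = complete[i] ? blockBytes[i] : preferredBlockSize;
          total += len * repl[i];
        }
        return total;
      }
    }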

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 0de2637..be88e10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -254,8 +254,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       NumberReplicas numberReplicas= bm.countNodes(block);
       out.println("Block Id: " + blockId);
       out.println("Block belongs to: "+iNode.getFullPathName());
-      out.println("No. of Expected Replica: " +
-          bc.getPreferredBlockReplication());
+      out.println("No. of Expected Replica: " + blockInfo.getReplication());
       out.println("No. of live Replica: " + numberReplicas.liveReplicas());
       out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
       out.println("No. of stale Replica: " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 58210c1..ff604e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -433,7 +433,6 @@ public class TestBlockManager {
   
   private BlockInfoContiguous addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
     BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn((short)3).when(bc).getPreferredBlockReplication();
     BlockInfoContiguous blockInfo = blockOnNodes(blockId, nodes);
 
     bm.blocksMap.addBlockCollection(blockInfo, bc);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 3226578..d3df32c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1179,7 +1179,6 @@ public class TestReplicationPolicy {
     // queue.
     BlockInfoContiguousUnderConstruction info = new BlockInfoContiguousUnderConstruction(block1, (short) 1);
     BlockCollection bc = mock(BlockCollection.class);
-    when(bc.getPreferredBlockReplication()).thenReturn((short)1);
     bm.addBlockCollection(info, bc);
 
     // Adding this block will increase its current replication, and that will
@@ -1223,7 +1222,6 @@ public class TestReplicationPolicy {
     final BlockCollection mbc = mock(BlockCollection.class);
     when(mbc.getLastBlock()).thenReturn(info);
     when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
-    when(mbc.getPreferredBlockReplication()).thenReturn((short)1);
     when(mbc.isUnderConstruction()).thenReturn(true);
     ContentSummary cs = mock(ContentSummary.class);
     when(cs.getLength()).thenReturn((long)1);
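
The removed Mockito stubs in both tests follow from the same change: expected replication no longer flows through the mocked BlockCollection, so there is nothing left to stub. A tiny round-trip check on the BlockRecord stand-in from earlier shows the property the tests now get for free:

    public class ReplicationFieldDemo {
      public static void main(String[] args) {
        BlockRecord b = new BlockRecord(42L, (short) 1);
        if (b.getReplication() != 1) throw new AssertionError("expected 1");
        b.setReplication((short) 3); // the factor travels with the block
        if (b.getReplication() != 3) throw new AssertionError("expected 3");
        System.out.println("per-block replication round-trips");
      }
    }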