Posted to common-commits@hadoop.apache.org by in...@apache.org on 2016/10/03 22:59:07 UTC

[54/57] [abbrv] hadoop git commit: HDFS-10940. Reduce performance penalty of block caching when not used. Contributed by Daryn Sharp.

HDFS-10940. Reduce performance penalty of block caching when not used. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74420843
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74420843
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74420843

Branch: refs/heads/HDFS-10467
Commit: 744208431f7365bf054e6b773b86af2583001e1d
Parents: 9002062
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon Oct 3 11:27:23 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Mon Oct 3 11:27:23 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockManager.java  | 10 +++++++++-
 .../hadoop/hdfs/server/namenode/CacheManager.java  | 12 +++++++++++-
 .../server/namenode/FSDirStatAndListingOp.java     | 17 +----------------
 .../hdfs/server/namenode/TestCacheDirectives.java  | 10 ++++++++++
 4 files changed, 31 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
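
What the patch does, in brief: previously, assembling block locations made the NameNode loop over every located block and probe the CacheManager for cached replicas even when block caching was not in use at all. The change funnels that work through a new bulk CacheManager.setCachedLocations(LocatedBlocks) call, invoked once from BlockManager, which bails out immediately when cachedBlocks is empty; the per-block loops in FSDirStatAndListingOp are removed. Below is a minimal, self-contained Java sketch of the pattern under simplified assumptions; the class name, the Map-based cachedBlocks field, and the Long block ids are illustrative stand-ins, not the real HDFS types:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class CacheManagerSketch {
      // Stand-in for the CacheManager's map of cached blocks; it stays empty
      // on clusters that never add cache directives.
      private final Map<Long, String> cachedBlocks = new HashMap<>();

      // Bulk entry point: one emptiness check per call instead of one map
      // lookup per located block.
      public void setCachedLocations(List<Long> locatedBlockIds) {
        if (cachedBlocks.isEmpty()) {
          return;  // fast path when caching is unused: no hashing, no lookups
        }
        for (long blockId : locatedBlockIds) {
          setCachedLocations(blockId);
        }
      }

      // Per-block lookup, kept private so callers must go through the bulk check.
      private void setCachedLocations(long blockId) {
        String cachedState = cachedBlocks.get(blockId);
        if (cachedState != null) {
          // attach the cached replica locations to the located block (elided)
        }
      }
    }

The key point is that the emptiness check runs once per getBlockLocations call rather than once per block, so a cluster that never uses cache directives pays essentially nothing for the feature.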


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 886984a..9b426bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -103,6 +103,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.FoldedTreeSet;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
 
@@ -1145,9 +1146,16 @@ public class BlockManager implements BlockStatsMXBean {
             fileSizeExcludeBlocksUnderConstruction, mode);
         isComplete = true;
       }
-      return new LocatedBlocks(fileSizeExcludeBlocksUnderConstruction,
+      LocatedBlocks locations = new LocatedBlocks(
+          fileSizeExcludeBlocksUnderConstruction,
           isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo,
           ecPolicy);
+      // Set caching information for the located blocks.
+      CacheManager cm = namesystem.getCacheManager();
+      if (cm != null) {
+        cm.setCachedLocations(locations);
+      }
+      return locations;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 366dd9b..24bf751 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -902,7 +903,16 @@ public final class CacheManager {
     return new BatchedListEntries<CachePoolEntry>(results, false);
   }
 
-  public void setCachedLocations(LocatedBlock block) {
+  public void setCachedLocations(LocatedBlocks locations) {
+    // don't attempt lookups if there are no cached blocks
+    if (cachedBlocks.size() > 0) {
+      for (LocatedBlock lb : locations.getLocatedBlocks()) {
+        setCachedLocations(lb);
+      }
+    }
+  }
+
+  private void setCachedLocations(LocatedBlock block) {
     CachedBlock cachedBlock =
         new CachedBlock(block.getBlock().getBlockId(),
             (short)0, false);
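
A note on the API shape in this hunk: the existing per-block setCachedLocations(LocatedBlock) becomes private, and the new LocatedBlocks overload is the only public entry point, so every caller is funneled through the single cachedBlocks.size() check and the empty-cache fast path cannot be bypassed.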

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 4876fb1..f56d83d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -156,7 +155,6 @@ class FSDirStatAndListingOp {
         "Negative offset is not supported. File: " + src);
     Preconditions.checkArgument(length >= 0,
         "Negative length is not supported. File: " + src);
-    CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
     fsd.readLock();
     try {
@@ -190,11 +188,6 @@ class FSDirStatAndListingOp {
           inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
           length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
 
-      // Set caching information for the located blocks.
-      for (LocatedBlock lb : blocks.getLocatedBlocks()) {
-        cm.setCachedLocations(lb);
-      }
-
       final long now = now();
       boolean updateAccessTime = fsd.isAccessTimeSupported()
           && !iip.isSnapshot()
@@ -461,7 +454,7 @@ class FSDirStatAndListingOp {
         node.asDirectory().getChildrenNum(snapshot) : 0;
 
     INodeAttributes nodeAttrs = fsd.getAttributes(iip);
-    HdfsFileStatus status = createFileStatus(
+    return createFileStatus(
         size,
         node.isDirectory(),
         replication,
@@ -479,14 +472,6 @@ class FSDirStatAndListingOp {
         storagePolicy,
         ecPolicy,
         loc);
-    // Set caching information for the located blocks.
-    if (loc != null) {
-      CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
-      for (LocatedBlock lb: loc.getLocatedBlocks()) {
-        cacheManager.setCachedLocations(lb);
-      }
-    }
-    return status;
   }
 
   private static HdfsFileStatus createFileStatus(long length, boolean isdir,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index efb5cf8..658e4ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -89,6 +90,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import com.google.common.base.Supplier;
 
@@ -1531,4 +1533,12 @@ public class TestCacheDirectives {
       DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
     }
   }
+
+  @Test
+  public void testNoLookupsWhenNotUsed() throws Exception {
+    CacheManager cm = cluster.getNamesystem().getCacheManager();
+    LocatedBlocks locations = Mockito.mock(LocatedBlocks.class);
+    cm.setCachedLocations(locations);
+    Mockito.verifyZeroInteractions(locations);
+  }
 }
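
About the new test: it calls setCachedLocations with a freshly mocked LocatedBlocks and relies on the cluster having no cache directives at that point, so cachedBlocks is empty and the bulk method should return before ever touching its argument; Mockito.verifyZeroInteractions(locations) asserts exactly that, i.e. no per-block iteration or lookups occurred.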

