Posted to common-commits@hadoop.apache.org by xg...@apache.org on 2016/12/05 18:47:31 UTC

[13/29] hadoop git commit: HDFS-5517. Lower the default maximum number of blocks per file. Contributed by Aaron T. Myers and Andrew Wang.

HDFS-5517. Lower the default maximum number of blocks per file. Contributed by Aaron T. Myers and Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7226a71b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7226a71b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7226a71b

Branch: refs/heads/YARN-5734
Commit: 7226a71b1f684f562bd88ee121f1dd7aa8b73816
Parents: 69fb70c
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Nov 30 15:58:31 2016 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Wed Nov 30 15:58:31 2016 -0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  2 +-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml      |  2 +-
 .../hdfs/server/datanode/TestDirectoryScanner.java       | 11 +++++++++--
 .../server/namenode/metrics/TestNameNodeMetrics.java     |  2 +-
 4 files changed, 12 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
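
In short: the default for dfs.namenode.fs-limits.max-blocks-per-file drops from 1,048,576 to 10,000 blocks. Operators who genuinely need files with more blocks can raise the limit again. A minimal sketch of a programmatic override, using the standard Hadoop Configuration API (only the key constant comes from this commit; the surrounding setup is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    // Restore the pre-HDFS-5517 default for a NameNode started with this conf.
    // 1024 * 1024 is simply the old default value, not a recommendation.
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY, 1024 * 1024);

On a real cluster the same override would normally live in hdfs-site.xml on the NameNode; the programmatic form is the equivalent used in tests.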


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d7d3c9d..df21857 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -399,7 +399,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_MIN_BLOCK_SIZE_KEY = "dfs.namenode.fs-limits.min-block-size";
   public static final long    DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
   public static final String  DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = "dfs.namenode.fs-limits.max-blocks-per-file";
-  public static final long    DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 1024*1024;
+  public static final long    DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 10*1000;
   public static final String  DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY = "dfs.namenode.fs-limits.max-xattrs-per-inode";
   public static final int     DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size";
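
The new default sits alongside the related fs-limits keys such as min-block-size above. Consumers resolve the effective value with the usual key/default pair; a sketch of that pattern (Configuration.getLong() is a standard Hadoop API, but the actual NameNode call site is not part of this diff):

    // Resolve the effective per-file block cap, falling back to the new
    // 10,000 default when the key is unset in the loaded configuration.
    long maxBlocksPerFile = conf.getLong(
        DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT);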

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 671c98c..086f667 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -372,7 +372,7 @@
 
 <property>
     <name>dfs.namenode.fs-limits.max-blocks-per-file</name>
-    <value>1048576</value>
+    <value>10000</value>
     <description>Maximum number of blocks per file, enforced by the Namenode on
         write. This prevents the creation of extremely large files which can
         degrade performance.</description>
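
For scale, assuming the stock dfs.blocksize of 128 MiB (134,217,728 bytes):

    old cap: 1,048,576 blocks x 128 MiB = 2^20 x 2^27 bytes = 2^47 bytes = 128 TiB
    new cap:    10,000 blocks x 128 MiB ~= 1.34 x 10^12 bytes ~= 1.22 TiB

A 10,000-block file is still over a tebibyte, so the tightened default mainly refuses the pathological block counts that the description warns about: every block costs NameNode memory and block-report work.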

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index f08b579..d7c8383 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -590,8 +590,15 @@ public class TestDirectoryScanner {
           100);
       DataNode dataNode = cluster.getDataNodes().get(0);
 
-      createFile(GenericTestUtils.getMethodName(),
-          BLOCK_LENGTH * blocks, false);
+      final int maxBlocksPerFile = (int) DFSConfigKeys
+          .DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT;
+      int numBlocksToCreate = blocks;
+      while (numBlocksToCreate > 0) {
+        final int toCreate = Math.min(maxBlocksPerFile, numBlocksToCreate);
+        createFile(GenericTestUtils.getMethodName() + numBlocksToCreate,
+            BLOCK_LENGTH * toCreate, false);
+        numBlocksToCreate -= toCreate;
+      }
 
       float ratio = 0.0f;
       int retries = maxRetries;
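
The test previously created one file holding all of the requested blocks; with the cap now at 10,000 by default, a single createFile() call could ask for more blocks than one file may hold. The loop above therefore spreads the same total across several files, capping each batch at DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT and suffixing the method name with the remaining count so the paths stay distinct. A hedged sketch of the failure mode the loop avoids (createFile stands in for the test's private helper, maxBlocksPerFile is assumed resolved from the configuration, and the exception type reflects HDFS's general write-failure contract rather than anything this diff shows):

    // Asking one file to grow past the per-file cap makes the NameNode
    // refuse the next block allocation, which surfaces as an IOException.
    try {
      createFile("too-many-blocks", BLOCK_LENGTH * (maxBlocksPerFile + 1L), false);
    } catch (IOException e) {
      // Expected once the file tries to allocate block maxBlocksPerFile + 1.
    }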

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 0ad6130..b9d25b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -571,7 +571,7 @@ public class TestNameNodeMetrics {
     Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");
 
     //Perform create file operation
-    createFile(file1_Path, 1024 * 1024,(short)2);
+    createFile(file1_Path, 1024, (short) 2);
 
     // Perform read file operation on earlier created file
     readFile(fs, file1_Path);
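
The metrics under test only need a file to read back, not a large one. Shrinking the created file from 1 MiB to 1 KiB keeps the test comfortably inside the tightened fs-limits defaults (presumably the test's small configured block size would otherwise push a 1 MiB file past the 10,000-block cap) without changing what the read-path metrics assert.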

