You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by yj...@apache.org on 2015/03/20 17:17:46 UTC
hadoop git commit: HDFS-7835. Make initial sleeptime in
locateFollowingBlock configurable for DFSClient. Contributed by Zhihai Xu.
Repository: hadoop
Updated Branches:
refs/heads/trunk 43dde502b -> 15612313f
HDFS-7835. Make initial sleeptime in locateFollowingBlock configurable for DFSClient. Contributed by Zhihai Xu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15612313
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15612313
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15612313
Branch: refs/heads/trunk
Commit: 15612313f578a5115f8d03885e9b0c8c376ed56e
Parents: 43dde50
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Fri Mar 20 08:59:44 2015 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Fri Mar 20 09:12:42 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/hdfs/DFSClient.java | 11 ++++++++++
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 +++
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 10 +++++----
.../src/main/resources/hdfs-default.xml | 7 +++++++
.../hadoop/hdfs/TestDFSClientRetries.java | 22 ++++++++++++++++++++
6 files changed, 52 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15612313/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d6fc88b..52fbeff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -323,6 +323,9 @@ Release 2.8.0 - UNRELEASED
HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
+ HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for
+ DFSClient. (Zhihai Xu via Yongjun Zhang)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15612313/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 3c8fd31..3236771 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -24,6 +24,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAUL
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
@@ -305,6 +307,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
final int nCachedConnRetry;
final int nBlockWriteRetry;
final int nBlockWriteLocateFollowingRetry;
+ final int blockWriteLocateFollowingInitialDelayMs;
final long defaultBlockSize;
final long prefetchSize;
final short defaultReplication;
@@ -416,6 +419,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
nBlockWriteLocateFollowingRetry = conf.getInt(
DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
+ blockWriteLocateFollowingInitialDelayMs = conf.getInt(
+ DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
+ DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT);
uMask = FsPermission.getUMask(conf);
connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
@@ -566,6 +572,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
return dataChecksum;
}
+
+ @VisibleForTesting
+ public int getBlockWriteLocateFollowingInitialDelayMs() {
+ return blockWriteLocateFollowingInitialDelayMs;
+ }
}
public Conf getConf() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15612313/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1e864bd..9ecf242 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -399,6 +399,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
// Much code in hdfs is not yet updated to use these keys.
public static final String DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
public static final int DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
+ // the initial delay (unit is ms) for locateFollowingBlock, the delay time will increase exponentially(double) for each retry.
+ public static final String DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY = "dfs.client.block.write.locateFollowingBlock.initial.delay.ms";
+ public static final int DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT = 400;
public static final String DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY = "dfs.client.block.write.retries";
public static final int DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT = 3;
public static final String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15612313/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 3fabfec..c3df897 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -1433,7 +1433,8 @@ public class DFSOutputStream extends FSOutputSummer
private LocatedBlock locateFollowingBlock(long start,
DatanodeInfo[] excludedNodes) throws IOException {
int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
- long sleeptime = 400;
+ long sleeptime = dfsClient.getConf().
+ blockWriteLocateFollowingInitialDelayMs;
while (true) {
long localstart = Time.now();
while (true) {
@@ -2253,7 +2254,8 @@ public class DFSOutputStream extends FSOutputSummer
// be called during unit tests
private void completeFile(ExtendedBlock last) throws IOException {
long localstart = Time.now();
- long localTimeout = 400;
+ long sleeptime = dfsClient.getConf().
+ blockWriteLocateFollowingInitialDelayMs;
boolean fileComplete = false;
int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
while (!fileComplete) {
@@ -2276,8 +2278,8 @@ public class DFSOutputStream extends FSOutputSummer
+ " does not have enough number of replicas.");
}
retries--;
- Thread.sleep(localTimeout);
- localTimeout *= 2;
+ Thread.sleep(sleeptime);
+ sleeptime *= 2;
if (Time.now() - localstart > 5000) {
DFSClient.LOG.info("Could not complete " + src + " retrying...");
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15612313/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 736c96a..092d5aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2314,4 +2314,11 @@
<description>Whether pin blocks on favored DataNode.</description>
</property>
+<property>
+ <name>dfs.client.block.write.locateFollowingBlock.initial.delay.ms</name>
+ <value>400</value>
+ <description>The initial delay (unit is ms) for locateFollowingBlock,
+ the delay time will increase exponentially(double) for each retry.
+ </description>
+</property>
</configuration>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15612313/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 9c9111c..73b9fbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -1131,4 +1131,26 @@ public class TestDFSClientRetries {
assertEquals("MultipleLinearRandomRetry" + expected, r.toString());
}
}
+
+ @Test
+ public void testDFSClientConfigurationLocateFollowingBlockInitialDelay()
+ throws Exception {
+ // test if DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY
+ // is not configured, verify DFSClient uses the default value 400.
+ Configuration dfsConf = new HdfsConfiguration();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(dfsConf).build();
+ cluster.waitActive();
+ NamenodeProtocols nn = cluster.getNameNodeRpc();
+ DFSClient client = new DFSClient(null, nn, dfsConf, null);
+ assertEquals(client.getConf().
+ getBlockWriteLocateFollowingInitialDelayMs(), 400);
+
+ // change DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
+ // verify DFSClient uses the configured value 1000.
+ dfsConf.setInt(DFSConfigKeys.
+ DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY, 1000);
+ client = new DFSClient(null, nn, dfsConf, null);
+ assertEquals(client.getConf().
+ getBlockWriteLocateFollowingInitialDelayMs(), 1000);
+ }
}