You are viewing a plain-text version of this content; the canonical (hyperlinked) version was not preserved in this conversion.
Posted to common-commits@hadoop.apache.org by wh...@apache.org on 2015/09/24 09:31:54 UTC
hadoop git commit: HDFS-9131. Move config keys used by hdfs-client to
HdfsClientConfigKeys. Contributed by Mingliang Liu.
Repository: hadoop
Updated Branches:
refs/heads/branch-2 7e4bd11b5 -> d148b6a23
HDFS-9131. Move config keys used by hdfs-client to HdfsClientConfigKeys. Contributed by Mingliang Liu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d148b6a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d148b6a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d148b6a2
Branch: refs/heads/branch-2
Commit: d148b6a23493210a1b18a85193d4085b8e54a487
Parents: 7e4bd11
Author: Haohui Mai <wh...@apache.org>
Authored: Thu Sep 24 00:30:01 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Thu Sep 24 00:30:17 2015 -0700
----------------------------------------------------------------------
.../hdfs/client/HdfsClientConfigKeys.java | 11 +++++++
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../java/org/apache/hadoop/hdfs/DFSClient.java | 24 +++++++++-------
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 30 ++++++++++++--------
.../hadoop/hdfs/DistributedFileSystem.java | 8 +++---
.../apache/hadoop/hdfs/TestFileCreation.java | 2 +-
.../org/apache/hadoop/hdfs/TestLocalDFS.java | 7 +++--
.../namenode/TestNameNodeRetryCacheMetrics.java | 4 +--
.../ha/TestLossyRetryInvocationHandler.java | 8 +++---
.../org/apache/hadoop/tracing/TestTracing.java | 4 +--
.../TestTracingShortCircuitLocalRead.java | 2 +-
11 files changed, 63 insertions(+), 40 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 253bd4f..1a90555 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -147,6 +147,17 @@ public interface HdfsClientConfigKeys {
String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
PREFIX + "replica.accessor.builder.classes";
+ // The number of NN response dropped by client proactively in each RPC call.
+ // For testing NN retry cache, we can set this property with positive value.
+ String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
+ "dfs.client.test.drop.namenode.response.number";
+ int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
+ String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
+ // HDFS client HTrace configuration.
+ String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
+ String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
+ String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
+
/** dfs.client.retry configuration properties */
interface Retry {
String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5999233..f4526ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -605,6 +605,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9130. Use GenericTestUtils#setLogLevel to the logging level.
(Mingliang Liu via wheat9)
+ HDFS-9131 Move config keys used by hdfs-client to HdfsClientConfigKeys.
+ (Mingliang Liu via wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index f6f159f..f1e8905 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -99,6 +99,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -290,19 +291,20 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
/**
* Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
- * If HA is enabled and a positive value is set for
- * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
- * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
- * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
- * must be null.
+ * If HA is enabled and a positive value is set for
+ * {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY}
+ * in the configuration, the DFSClient will use
+ * {@link LossyRetryInvocationHandler} as its RetryInvocationHandler.
+ * Otherwise one of nameNodeUri or rpcNamenode must be null.
*/
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
Configuration conf, FileSystem.Statistics stats)
throws IOException {
- SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
+ SpanReceiverHost.get(conf, HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
traceSampler = new SamplerBuilder(TraceUtils.
- wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
+ wrapHadoopConf(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
+ .build();
// Copy only the required DFSClient configuration
this.dfsClientConf = new DfsClientConf(conf);
this.conf = conf;
@@ -318,13 +320,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
ThreadLocalRandom.current().nextInt() + "_" +
Thread.currentThread().getId();
int numResponseToDrop = conf.getInt(
- DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
- DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
+ HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
+ HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
ProxyAndInfo<ClientProtocol> proxyInfo = null;
AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
if (numResponseToDrop > 0) {
// This case is used for testing.
- LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
+ LOG.warn(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
+ " is set to " + numResponseToDrop
+ ", this hacked client will proactively drop responses");
proxyInfo = NameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
@@ -350,7 +352,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
String localInterfaces[] =
- conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
+ conf.getTrimmedStrings(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
LOG.debug("Using local interfaces [" +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f1e66b3..40ca2c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -49,8 +49,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
public static final int DFS_BYTES_PER_CHECKSUM_DEFAULT =
HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
- public static final String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
- public static final String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
+ @Deprecated
+ public static final String DFS_USER_HOME_DIR_PREFIX_KEY =
+ HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY;
+ @Deprecated
+ public static final String DFS_USER_HOME_DIR_PREFIX_DEFAULT =
+ HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
public static final String DFS_CHECKSUM_TYPE_KEY = HdfsClientConfigKeys
.DFS_CHECKSUM_TYPE_KEY;
public static final String DFS_CHECKSUM_TYPE_DEFAULT =
@@ -65,9 +69,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
// HDFS HTrace configuration is controlled by dfs.htrace.spanreceiver.classes,
// etc.
public static final String DFS_SERVER_HTRACE_PREFIX = "dfs.htrace.";
-
- // HDFS client HTrace configuration.
- public static final String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
+ @Deprecated
+ public static final String DFS_CLIENT_HTRACE_PREFIX =
+ HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX;
// HA related configuration
public static final String DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";
@@ -1121,9 +1125,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
@Deprecated
public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT
= HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT;
-
- public static final String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
-
+ @Deprecated
+ public static final String DFS_CLIENT_LOCAL_INTERFACES =
+ HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES;
@Deprecated
public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC =
@@ -1132,10 +1136,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT;
- // The number of NN response dropped by client proactively in each RPC call.
- // For testing NN retry cache, we can set this property with positive value.
- public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
- public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
+ @Deprecated
+ public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
+ HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY;
+ @Deprecated
+ public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT =
+ HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT;
@Deprecated
public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 09181ce..cf09c64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -111,7 +111,7 @@ public class DistributedFileSystem extends FileSystem {
private Path workingDir;
private URI uri;
private String homeDirPrefix =
- DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
+ HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
DFSClient dfs;
private boolean verifyChecksum = true;
@@ -147,9 +147,9 @@ public class DistributedFileSystem extends FileSystem {
throw new IOException("Incomplete HDFS URI, no host: "+ uri);
}
homeDirPrefix = conf.get(
- DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
- DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
-
+ HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
+ HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
+
this.dfs = new DFSClient(uri, conf, statistics);
this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
this.workingDir = getHomeDirectory();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 85d079c..e59963b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -216,7 +216,7 @@ public class TestFileCreation {
throws IOException {
Configuration conf = new HdfsConfiguration();
if (netIf != null) {
- conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
+ conf.set(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
}
conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
if (useDnHostname) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
index 5385046..9fbb417 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
@@ -27,6 +27,7 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.Test;
/**
@@ -91,8 +92,8 @@ public class TestLocalDFS {
// test home directory
Path home =
fileSys.makeQualified(
- new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
- + "/" + getUserName(fileSys)));
+ new Path(HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
+ + "/" + getUserName(fileSys)));
Path fsHome = fileSys.getHomeDirectory();
assertEquals(home, fsHome);
@@ -110,7 +111,7 @@ public class TestLocalDFS {
final String[] homeBases = new String[] {"/home", "/home/user"};
Configuration conf = new HdfsConfiguration();
for (final String homeBase : homeBases) {
- conf.set(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, homeBase);
+ conf.set(HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, homeBase);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys = cluster.getFileSystem();
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
index e5d059e..9715810 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -60,7 +60,7 @@ public class TestNameNodeRetryCacheMetrics {
public void setup() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
- conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
+ conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
.build();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
index 9434392..8cdd445 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
@@ -18,15 +18,15 @@
package org.apache.hadoop.hdfs.server.namenode.ha;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.Test;
/**
* This test makes sure that when
- * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} is set,
+ * {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} is set,
* DFSClient instances can still be created within NN/DN (e.g., the fs instance
* used by the trash emptier thread in NN)
*/
@@ -39,8 +39,8 @@ public class TestLossyRetryInvocationHandler {
// enable both trash emptier and dropping response
conf.setLong("fs.trash.interval", 360);
- conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
-
+ conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
+
try {
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index c3d2c73..5b365ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -22,9 +22,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.htrace.Sampler;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
@@ -189,7 +189,7 @@ public class TestTracing {
public static void setup() throws IOException {
conf = new Configuration();
conf.setLong("dfs.blocksize", 100 * 1024);
- conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
+ conf.set(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
SetSpanReceiver.class.getName());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d148b6a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java
index 0804a05..a34748d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java
@@ -64,7 +64,7 @@ public class TestTracingShortCircuitLocalRead {
public void testShortCircuitTraceHooks() throws IOException {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
conf = new Configuration();
- conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
+ conf.set(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
SetSpanReceiver.class.getName());
conf.setLong("dfs.blocksize", 100 * 1024);