You are viewing a plain text version of this content; the canonical (formatted) version is available in the original mailing-list archive.
Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2018/02/24 22:26:54 UTC
[1/2] hadoop git commit: HDFS-12865. RequestHedgingProxyProvider
should handle case when none of the proxies are available. Contributed by
Mukul Kumar Singh.
Repository: hadoop
Updated Branches:
refs/heads/branch-3.1 57095b68d -> c5602ea12
refs/heads/trunk 1e84e46f1 -> c30a26abc
HDFS-12865. RequestHedgingProxyProvider should handle case when none of the proxies are available. Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c30a26ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c30a26ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c30a26ab
Branch: refs/heads/trunk
Commit: c30a26abc54df669a77e0219fd9b48a47c179a99
Parents: 1e84e46
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sat Feb 24 14:25:56 2018 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sat Feb 24 14:25:56 2018 -0800
----------------------------------------------------------------------
.../ha/RequestHedgingProxyProvider.java | 6 +++
.../ha/TestRequestHedgingProxyProvider.java | 45 ++++++++++++++++++++
2 files changed, 51 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c30a26ab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 010e9e5..7b9cd64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
+import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@@ -87,6 +88,11 @@ public class RequestHedgingProxyProvider<T> extends
// Optimization : if only 2 proxies are configured and one had failed
// over, then we dont need to create a threadpool etc.
targetProxies.remove(toIgnore);
+ if (targetProxies.size() == 0) {
+ LOG.trace("No valid proxies left");
+ throw new RemoteException(IOException.class.getName(),
+ "No valid proxies left. All NameNode proxies have failed over.");
+ }
if (targetProxies.size() == 1) {
ProxyInfo<T> proxyInfo = targetProxies.values().iterator().next();
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c30a26ab/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 65fbbf8..8d6b02d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -28,6 +28,7 @@ import java.util.Iterator;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -290,6 +291,50 @@ public class TestRequestHedgingProxyProvider {
}
@Test
+ public void testSingleProxyFailover() throws Exception {
+ String singleNS = "mycluster-" + Time.monotonicNow();
+ URI singleNNUri = new URI("hdfs://" + singleNS);
+ Configuration singleConf = new Configuration();
+ singleConf.set(HdfsClientConfigKeys.DFS_NAMESERVICES, singleNS);
+ singleConf.set(HdfsClientConfigKeys.
+ DFS_HA_NAMENODES_KEY_PREFIX + "." + singleNS, "nn1");
+
+ singleConf.set(HdfsClientConfigKeys.
+ DFS_NAMENODE_RPC_ADDRESS_KEY + "." + singleNS + ".nn1",
+ RandomStringUtils.randomAlphabetic(8) + ".foo.bar:9820");
+ ClientProtocol active = Mockito.mock(ClientProtocol.class);
+ Mockito
+ .when(active.getBlockLocations(Matchers.anyString(),
+ Matchers.anyLong(), Matchers.anyLong()))
+ .thenThrow(new RemoteException("java.io.FileNotFoundException",
+ "File does not exist!"));
+
+ RequestHedgingProxyProvider<ClientProtocol> provider =
+ new RequestHedgingProxyProvider<>(singleConf, singleNNUri,
+ ClientProtocol.class, createFactory(active));
+ try {
+ provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
+ Assert.fail("Should fail since the active namenode throws"
+ + " FileNotFoundException!");
+ } catch (RemoteException ex) {
+ Exception rEx = ex.unwrapRemoteException();
+ Assert.assertTrue(rEx instanceof FileNotFoundException);
+ }
+ //Perform failover now, there will be no active proxies now
+ provider.performFailover(active);
+ try {
+ provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
+ Assert.fail("Should fail since the active namenode throws"
+ + " FileNotFoundException!");
+ } catch (RemoteException ex) {
+ Exception rEx = ex.unwrapRemoteException();
+ Assert.assertTrue(rEx instanceof IOException);
+ Assert.assertTrue(rEx.getMessage().equals("No valid proxies left."
+ + " All NameNode proxies have failed over."));
+ }
+ }
+
+ @Test
public void testPerformFailoverWith3Proxies() throws Exception {
conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
"nn1,nn2,nn3");
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[2/2] hadoop git commit: HDFS-12865. RequestHedgingProxyProvider
should handle case when none of the proxies are available. Contributed by
Mukul Kumar Singh.
Posted by ar...@apache.org.
HDFS-12865. RequestHedgingProxyProvider should handle case when none of the proxies are available. Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5602ea1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5602ea1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5602ea1
Branch: refs/heads/branch-3.1
Commit: c5602ea124986a1d5b133464881f174f48256c7a
Parents: 57095b6
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sat Feb 24 14:25:56 2018 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sat Feb 24 14:26:20 2018 -0800
----------------------------------------------------------------------
.../ha/RequestHedgingProxyProvider.java | 6 +++
.../ha/TestRequestHedgingProxyProvider.java | 45 ++++++++++++++++++++
2 files changed, 51 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5602ea1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 010e9e5..7b9cd64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
+import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@@ -87,6 +88,11 @@ public class RequestHedgingProxyProvider<T> extends
// Optimization : if only 2 proxies are configured and one had failed
// over, then we dont need to create a threadpool etc.
targetProxies.remove(toIgnore);
+ if (targetProxies.size() == 0) {
+ LOG.trace("No valid proxies left");
+ throw new RemoteException(IOException.class.getName(),
+ "No valid proxies left. All NameNode proxies have failed over.");
+ }
if (targetProxies.size() == 1) {
ProxyInfo<T> proxyInfo = targetProxies.values().iterator().next();
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5602ea1/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 65fbbf8..8d6b02d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -28,6 +28,7 @@ import java.util.Iterator;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -290,6 +291,50 @@ public class TestRequestHedgingProxyProvider {
}
@Test
+ public void testSingleProxyFailover() throws Exception {
+ String singleNS = "mycluster-" + Time.monotonicNow();
+ URI singleNNUri = new URI("hdfs://" + singleNS);
+ Configuration singleConf = new Configuration();
+ singleConf.set(HdfsClientConfigKeys.DFS_NAMESERVICES, singleNS);
+ singleConf.set(HdfsClientConfigKeys.
+ DFS_HA_NAMENODES_KEY_PREFIX + "." + singleNS, "nn1");
+
+ singleConf.set(HdfsClientConfigKeys.
+ DFS_NAMENODE_RPC_ADDRESS_KEY + "." + singleNS + ".nn1",
+ RandomStringUtils.randomAlphabetic(8) + ".foo.bar:9820");
+ ClientProtocol active = Mockito.mock(ClientProtocol.class);
+ Mockito
+ .when(active.getBlockLocations(Matchers.anyString(),
+ Matchers.anyLong(), Matchers.anyLong()))
+ .thenThrow(new RemoteException("java.io.FileNotFoundException",
+ "File does not exist!"));
+
+ RequestHedgingProxyProvider<ClientProtocol> provider =
+ new RequestHedgingProxyProvider<>(singleConf, singleNNUri,
+ ClientProtocol.class, createFactory(active));
+ try {
+ provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
+ Assert.fail("Should fail since the active namenode throws"
+ + " FileNotFoundException!");
+ } catch (RemoteException ex) {
+ Exception rEx = ex.unwrapRemoteException();
+ Assert.assertTrue(rEx instanceof FileNotFoundException);
+ }
+ //Perform failover now, there will be no active proxies now
+ provider.performFailover(active);
+ try {
+ provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
+ Assert.fail("Should fail since the active namenode throws"
+ + " FileNotFoundException!");
+ } catch (RemoteException ex) {
+ Exception rEx = ex.unwrapRemoteException();
+ Assert.assertTrue(rEx instanceof IOException);
+ Assert.assertTrue(rEx.getMessage().equals("No valid proxies left."
+ + " All NameNode proxies have failed over."));
+ }
+ }
+
+ @Test
public void testPerformFailoverWith3Proxies() throws Exception {
conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
"nn1,nn2,nn3");
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org