Posted to common-commits@hadoop.apache.org by sh...@apache.org on 2018/05/02 19:21:36 UTC

[01/50] [abbrv] hadoop git commit: YARN-7810. Update TestDockerContainerRuntime to test with current user credential. (contributed by Shane Kumpf) [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/YARN-8200 7ed9ee718 -> 58bcb90e7 (forced update)


YARN-7810.  Update TestDockerContainerRuntime to test with current user credential.
            (contributed by Shane Kumpf)

            Cherry-picked from: 59828be1978ec942dda38774a1d9f741efa96f71


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/724bffdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/724bffdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/724bffdb

Branch: refs/heads/YARN-8200
Commit: 724bffdb89cd62cc90a1f49c7c5e40998dc1cc0f
Parents: 940d3e6
Author: Eric Yang <ey...@apache.org>
Authored: Wed Apr 11 20:04:24 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Wed Apr 11 20:06:33 2018 -0400

----------------------------------------------------------------------
 .../runtime/TestDockerContainerRuntime.java     | 178 ++++++++++---------
 1 file changed, 96 insertions(+), 82 deletions(-)
----------------------------------------------------------------------
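The patch replaces the hard-coded "run_as_user" with the credentials of whoever runs the test: setup now shells out to id(1) for the running user's uid, primary gid, and supplementary groups, and the run assertions gain a group-add line and expect user=<uid:gid>. A minimal, dependency-free sketch of that lookup pattern follows, using plain ProcessBuilder rather than Hadoop's Shell.ShellCommandExecutor and assuming a Unix id binary on the PATH (as the test itself does); the idOutput helper is hypothetical.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

public class CurrentUserIds {

  // Runs "id <flag> <user>" and returns its single line of output, trimmed.
  private static String idOutput(String flag, String user)
      throws IOException, InterruptedException {
    Process p = new ProcessBuilder("id", flag, user).start();
    try (BufferedReader r = new BufferedReader(
        new InputStreamReader(p.getInputStream()))) {
      String out = r.readLine();
      p.waitFor();
      return out == null ? "" : out.trim();
    }
  }

  public static void main(String[] args) throws Exception {
    String runAsUser = System.getProperty("user.name");
    String uid = idOutput("-u", runAsUser);                  // numeric uid
    String gid = idOutput("-g", runAsUser);                  // primary gid
    String[] groups = idOutput("-G", runAsUser).split(" ");  // all group ids
    String uidGidPair = uid + ":" + gid;
    System.out.println("user=" + uidGidPair);                // e.g. user=1000:1000
    System.out.println("group-add=" + String.join(",", groups));
  }
}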


http://git-wip-us.apache.org/repos/asf/hadoop/blob/724bffdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index aef94a7..ab38ea2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -81,7 +81,8 @@ public class TestDockerContainerRuntime {
   private HashMap<String, String> env;
   private String image;
   private String uidGidPair;
-  private String runAsUser;
+  private String runAsUser = System.getProperty("user.name");
+  private String[] groups = {};
   private String user;
   private String appId;
   private String containerIdStr = containerId;
@@ -130,8 +131,37 @@ public class TestDockerContainerRuntime {
     when(context.getEnvironment()).thenReturn(env);
     when(container.getUser()).thenReturn(submittingUser);
 
-    uidGidPair = "";
-    runAsUser = "run_as_user";
+    // Get the running user's uid and gid for remap
+    String uid = "";
+    String gid = "";
+    Shell.ShellCommandExecutor shexec1 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-u", runAsUser});
+    Shell.ShellCommandExecutor shexec2 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-g", runAsUser});
+    Shell.ShellCommandExecutor shexec3 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-G", runAsUser});
+    try {
+      shexec1.execute();
+      // get rid of newline at the end
+      uid = shexec1.getOutput().replaceAll("\n$", "");
+    } catch (Exception e) {
+      LOG.info("Could not run id -u command: " + e);
+    }
+    try {
+      shexec2.execute();
+      // get rid of newline at the end
+      gid = shexec2.getOutput().replaceAll("\n$", "");
+    } catch (Exception e) {
+      LOG.info("Could not run id -g command: " + e);
+    }
+    try {
+      shexec3.execute();
+      groups = shexec3.getOutput().replace("\n", " ").split(" ");
+    } catch (Exception e) {
+      LOG.info("Could not run id -G command: " + e);
+    }
+    uidGidPair = uid + ":" + gid;
+
     user = "user";
     appId = "app_id";
     containerIdStr = containerId;
@@ -301,7 +331,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
             (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 13;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -311,6 +341,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -326,7 +358,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
   }
@@ -337,13 +369,6 @@ public class TestDockerContainerRuntime {
       IOException {
     conf.setBoolean(YarnConfiguration.NM_DOCKER_ENABLE_USER_REMAPPING,
         true);
-    Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(
-        new String[]{"whoami"});
-    shexec.execute();
-    // get rid of newline at the end
-    runAsUser = shexec.getOutput().replaceAll("\n$", "");
-    builder.setExecutionAttribute(RUN_AS_USER, runAsUser);
-
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf);
@@ -353,37 +378,6 @@ public class TestDockerContainerRuntime {
     List<String> args = op.getArguments();
     String dockerCommandFile = args.get(11);
 
-    String uid = "";
-    String gid = "";
-    String[] groups = {};
-    Shell.ShellCommandExecutor shexec1 = new Shell.ShellCommandExecutor(
-        new String[]{"id", "-u", runAsUser});
-    Shell.ShellCommandExecutor shexec2 = new Shell.ShellCommandExecutor(
-        new String[]{"id", "-g", runAsUser});
-    Shell.ShellCommandExecutor shexec3 = new Shell.ShellCommandExecutor(
-        new String[]{"id", "-G", runAsUser});
-    try {
-      shexec1.execute();
-      // get rid of newline at the end
-      uid = shexec1.getOutput().replaceAll("\n$", "");
-    } catch (Exception e) {
-      LOG.info("Could not run id -u command: " + e);
-    }
-    try {
-      shexec2.execute();
-      // get rid of newline at the end
-      gid = shexec2.getOutput().replaceAll("\n$", "");
-    } catch (Exception e) {
-      LOG.info("Could not run id -g command: " + e);
-    }
-    try {
-      shexec3.execute();
-      groups = shexec3.getOutput().replace("\n", " ").split(" ");
-    } catch (Exception e) {
-      LOG.info("Could not run id -G command: " + e);
-    }
-    uidGidPair = uid + ":" + gid;
-
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
@@ -505,7 +499,7 @@ public class TestDockerContainerRuntime {
     //This is the expected docker invocation for this case
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
-    int expected = 13;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -515,6 +509,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=test.hostname",
         dockerCommands.get(counter++));
     Assert
@@ -532,7 +528,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
   }
@@ -571,7 +567,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 13;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -581,6 +577,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -596,7 +594,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
 
@@ -624,6 +622,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -640,7 +640,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
 
@@ -677,7 +677,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 13;
+    int expected = 14;
     Assert.assertEquals(expected, dockerCommands.size());
 
     String command = dockerCommands.get(0);
@@ -786,7 +786,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 15;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -796,6 +796,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -812,7 +814,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
   }
@@ -903,33 +905,39 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    Assert.assertEquals(14, dockerCommands.size());
-    Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
+    int expected = 15;
+    int counter = 0;
+    Assert.assertEquals(expected, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]",
+        dockerCommands.get(counter++));
     Assert.assertEquals("  cap-add=SYS_CHROOT,NET_BIND_SERVICE",
-        dockerCommands.get(1));
-    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(2));
-    Assert.assertEquals("  detach=true", dockerCommands.get(3));
-    Assert.assertEquals("  docker-command=run", dockerCommands.get(4));
-    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(5));
-    Assert.assertEquals("  image=busybox:latest", dockerCommands.get(6));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
+    Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
+    Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
+    Assert.assertEquals("  image=busybox:latest",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
-        dockerCommands.get(7));
-    Assert.assertEquals("  name=container_id", dockerCommands.get(8));
-    Assert.assertEquals("  net=host", dockerCommands.get(9));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
+    Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals(
         "  ro-mounts=/test_local_dir/test_resource_file:test_mount",
-        dockerCommands.get(10));
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
             + "/test_filecache_dir:/test_filecache_dir,"
             + "/test_container_work_dir:/test_container_work_dir,"
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
-        dockerCommands.get(11));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(12));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(13));
+        dockerCommands.get(counter++));
   }
 
   @Test
@@ -973,34 +981,40 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    Assert.assertEquals(14, dockerCommands.size());
-    Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
+    int expected = 15;
+    int counter = 0;
+    Assert.assertEquals(expected, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]",
+        dockerCommands.get(counter++));
     Assert.assertEquals("  cap-add=SYS_CHROOT,NET_BIND_SERVICE",
-        dockerCommands.get(1));
-    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(2));
-    Assert.assertEquals("  detach=true", dockerCommands.get(3));
-    Assert.assertEquals("  docker-command=run", dockerCommands.get(4));
-    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(5));
-    Assert.assertEquals("  image=busybox:latest", dockerCommands.get(6));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
+    Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
+    Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
+    Assert.assertEquals("  image=busybox:latest",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
-        dockerCommands.get(7));
-    Assert.assertEquals("  name=container_id", dockerCommands.get(8));
-    Assert.assertEquals("  net=host", dockerCommands.get(9));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
+    Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals(
         "  ro-mounts=/test_local_dir/test_resource_file:test_mount1,"
             + "/test_local_dir/test_resource_file:test_mount2",
-        dockerCommands.get(10));
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
             + "/test_filecache_dir:/test_filecache_dir,"
             + "/test_container_work_dir:/test_container_work_dir,"
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
-        dockerCommands.get(11));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(12));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(13));
+        dockerCommands.get(counter++));
 
   }
 
@@ -1020,7 +1034,7 @@ public class TestDockerContainerRuntime {
     PrivilegedOperation op = capturePrivilegedOperation();
     Assert.assertEquals(op.getOperationType(),
         PrivilegedOperation.OperationType.SIGNAL_CONTAINER);
-    Assert.assertEquals("run_as_user", op.getArguments().get(0));
+    Assert.assertEquals(runAsUser, op.getArguments().get(0));
     Assert.assertEquals("user", op.getArguments().get(1));
     Assert.assertEquals("2", op.getArguments().get(2));
     Assert.assertEquals("1234", op.getArguments().get(3));




[39/50] [abbrv] hadoop git commit: YARN-8205. Application State is not updated to ATS if AM launching is delayed. Contributed by Rohith Sharma K S.

Posted by sh...@apache.org.
YARN-8205. Application State is not updated to ATS if AM launching is delayed. Contributed by Rohith Sharma K S.

(cherry picked from commit 1634de0fc1430d86b7688d16259a81462fba482f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab2b4290
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab2b4290
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab2b4290

Branch: refs/heads/YARN-8200
Commit: ab2b4290a040a04ed111914410c203e0a43109b9
Parents: 87c9fff
Author: Sunil G <su...@apache.org>
Authored: Fri Apr 27 10:26:57 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Fri Apr 27 11:08:27 2018 +0530

----------------------------------------------------------------------
 .../server/resourcemanager/metrics/TimelineServiceV1Publisher.java | 2 ++
 .../server/resourcemanager/metrics/TimelineServiceV2Publisher.java | 2 ++
 .../server/resourcemanager/metrics/TestSystemMetricsPublisher.java | 1 +
 .../resourcemanager/metrics/TestSystemMetricsPublisherForV2.java   | 1 +
 4 files changed, 6 insertions(+)
----------------------------------------------------------------------
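Both publishers now record the application's state alongside the other entity info, which is why the mocked RMApp in each test gains a stubbed getState() call. A hedged Mockito sketch of that pattern is below; the nested types are local stand-ins for the real YARN classes, the "state" key stands in for ApplicationMetricsConstants.STATE_EVENT_INFO, and the RMServerUtils.createApplicationState() conversion is elided.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.HashMap;
import java.util.Map;

public class StatePublisherMockSketch {

  // Stand-ins for the real YARN types (assumptions, not the actual API).
  enum RMAppState { SUBMITTED, RUNNING, FINISHED }
  interface RMApp { RMAppState getState(); }

  public static void main(String[] args) {
    RMApp app = mock(RMApp.class);
    // The publishers now read getState() while building entity info, so a
    // mocked app must stub it or the new code path trips over a null state.
    when(app.getState()).thenReturn(RMAppState.SUBMITTED);

    Map<String, Object> entityInfo = new HashMap<>();
    entityInfo.put("state", app.getState().toString());
    System.out.println(entityInfo); // {state=SUBMITTED}
  }
}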


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab2b4290/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
index e1fe512..bb35255 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
@@ -108,6 +108,8 @@ public class TimelineServiceV1Publisher extends AbstractSystemMetricsPublisher {
         app.getApplicationSubmissionContext().getAMContainerSpec();
     entityInfo.put(ApplicationMetricsConstants.AM_CONTAINER_LAUNCH_COMMAND,
         amContainerSpec.getCommands());
+    entityInfo.put(ApplicationMetricsConstants.STATE_EVENT_INFO,
+        RMServerUtils.createApplicationState(app.getState()).toString());
 
     entity.setOtherInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab2b4290/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 1b73f7c..8acabda 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -139,6 +139,8 @@ public class TimelineServiceV2Publisher extends AbstractSystemMetricsPublisher {
         app.getApplicationSubmissionContext().getAMContainerSpec();
     entityInfo.put(ApplicationMetricsConstants.AM_CONTAINER_LAUNCH_COMMAND,
         amContainerSpec.getCommands());
+    entityInfo.put(ApplicationMetricsConstants.STATE_EVENT_INFO,
+        RMServerUtils.createApplicationState(app.getState()).toString());
 
     entity.setInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab2b4290/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index 30ad2e0..5321916 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -531,6 +531,7 @@ public class TestSystemMetricsPublisher {
     when(app.getApplicationPriority()).thenReturn(Priority.newInstance(10));
     when(app.getCallerContext())
         .thenReturn(new CallerContext.Builder("context").build());
+    when(app.getState()).thenReturn(RMAppState.SUBMITTED);
     return app;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab2b4290/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index 593f422..ad71f6e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -382,6 +382,7 @@ public class TestSystemMetricsPublisherForV2 {
 
     when(app.getApplicationSubmissionContext())
         .thenReturn(appSubmissionContext);
+    when(app.getState()).thenReturn(RMAppState.SUBMITTED);
     return app;
   }
 




[46/50] [abbrv] hadoop git commit: HDFS-13509. Bug fix for breakHardlinks() of ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows. Contributed by Xiao Liang.

Posted by sh...@apache.org.
HDFS-13509. Bug fix for breakHardlinks() of ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0c788aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0c788aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0c788aa

Branch: refs/heads/YARN-8200
Commit: c0c788aafc892373b23ae0c7905d913fd788c3a6
Parents: c844181
Author: Inigo Goiri <in...@apache.org>
Authored: Sat Apr 28 09:07:56 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Sat Apr 28 09:07:56 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/ReplicaInfo.java       | 21 ++++---
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 61 +++++++++++++-------
 2 files changed, 53 insertions(+), 29 deletions(-)
----------------------------------------------------------------------
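The fix pulls the size check inside an outer try so that any failure while copying logs an error and deletes the temporary file before the exception propagates, and the test changes give each MiniDFSCluster its own randomized base directory so reruns on Windows do not collide on still-locked files. A rough sketch of the copy-validate-swap pattern with plain java.nio follows; the FileIoProvider indirection is dropped and breakHardlink here is a hypothetical free-standing method.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class BreakHardlinkSketch {

  // Copy the file aside, verify the copy, then swap it into place so the
  // path no longer shares an inode with any hardlinked sibling.
  static void breakHardlink(File file) throws IOException {
    File tmp = new File(file.getParent(), file.getName() + ".unlink.tmp");
    try {
      Files.copy(file.toPath(), tmp.toPath(),
          StandardCopyOption.REPLACE_EXISTING);
      if (file.length() != tmp.length()) {
        throw new IOException("Copy of file " + file + " size " + file.length()
            + " into file " + tmp + " resulted in a size of " + tmp.length());
      }
      Files.move(tmp.toPath(), file.toPath(),
          StandardCopyOption.REPLACE_EXISTING);
    } catch (IOException e) {
      // Mirrors the patch: surface the failure, then clean up the temp file.
      System.err.println("Cannot breakHardlink for file " + file + ": " + e);
      if (!tmp.delete()) {
        System.err.println("Failed to delete temporary file " + tmp);
      }
      throw e;
    }
  }
}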


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0c788aa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 9817f97..f3f6db1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -240,19 +240,22 @@ abstract public class ReplicaInfo extends Block
     final FileIoProvider fileIoProvider = getFileIoProvider();
     final File tmpFile = DatanodeUtil.createFileWithExistsCheck(
         getVolume(), b, DatanodeUtil.getUnlinkTmpFile(file), fileIoProvider);
-    try (FileInputStream in = fileIoProvider.getFileInputStream(
+    try {
+      try (FileInputStream in = fileIoProvider.getFileInputStream(
         getVolume(), file)) {
-      try (FileOutputStream out = fileIoProvider.getFileOutputStream(
-          getVolume(), tmpFile)) {
-        IOUtils.copyBytes(in, out, 16 * 1024);
-      }
-      if (file.length() != tmpFile.length()) {
-        throw new IOException("Copy of file " + file + " size " + file.length()+
-            " into file " + tmpFile +
-            " resulted in a size of " + tmpFile.length());
+        try (FileOutputStream out = fileIoProvider.getFileOutputStream(
+            getVolume(), tmpFile)) {
+          IOUtils.copyBytes(in, out, 16 * 1024);
+        }
+        if (file.length() != tmpFile.length()) {
+          throw new IOException("Copy of file " + file + " size "
+              + file.length() + " into file " + tmpFile
+              + " resulted in a size of " + tmpFile.length());
+        }
       }
       fileIoProvider.replaceFile(getVolume(), tmpFile, file);
     } catch (IOException e) {
+      DataNode.LOG.error("Cannot breakHardlinks for file " + file, e);
       if (!fileIoProvider.delete(getVolume(), tmpFile)) {
         DataNode.LOG.info("detachFile failed to delete temporary file " +
             tmpFile);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0c788aa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index fbf09fb..59cc31a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -120,7 +121,9 @@ public class TestFileAppend{
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
                                                    cluster.getNameNodePort());
@@ -186,7 +189,9 @@ public class TestFileAppend{
   public void testSimpleFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();
     try {
 
@@ -239,7 +244,9 @@ public class TestFileAppend{
   public void testComplexFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();
     try {
 
@@ -286,7 +293,9 @@ public class TestFileAppend{
   @Test(expected = FileNotFoundException.class)
   public void testFileNotFound() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     try {
       Path file1 = new Path("/nonexistingfile.dat");
@@ -301,7 +310,9 @@ public class TestFileAppend{
   @Test
   public void testAppendTwice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final FileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -340,7 +351,9 @@ public class TestFileAppend{
   @Test
   public void testAppend2Twice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final DistributedFileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -386,8 +399,9 @@ public class TestFileAppend{
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY,
         false);
 
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        builderBaseDir).numDataNodes(4).build();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       final Path p = new Path("/testMultipleAppend/foo");
@@ -439,8 +453,9 @@ public class TestFileAppend{
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();
 
@@ -479,8 +494,9 @@ public class TestFileAppend{
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();
 
@@ -526,8 +542,9 @@ public class TestFileAppend{
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     try {
       fs = cluster.getFileSystem();
@@ -541,7 +558,7 @@ public class TestFileAppend{
       String dnAddress = dnProp.datanode.getXferAddress().toString();
       if (dnAddress.startsWith("/")) {
         dnAddress = dnAddress.substring(1);
-}
+      }
 
       // append again to bump genstamps
       for (int i = 0; i < 2; i++) {
@@ -579,8 +596,9 @@ public class TestFileAppend{
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     final String hello = "hello\n";
     try {
@@ -651,8 +669,9 @@ public class TestFileAppend{
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     try {
       DistributedFileSystem fs = cluster.getFileSystem();
       Path fileName = new Path("/appendCorruptBlock");
@@ -677,7 +696,9 @@ public class TestFileAppend{
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);




[06/50] [abbrv] hadoop git commit: YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb logger. Contributed by Jason Lowe.

Posted by sh...@apache.org.
YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb logger. Contributed by Jason Lowe.

(cherry picked from commit 6bb128dfb893cf0e4aa2d3ecc65440668a1fc8d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0dfb18d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0dfb18d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0dfb18d

Branch: refs/heads/YARN-8200
Commit: b0dfb18daa8b983aa53b136a4989c9668b25e88d
Parents: 20472bd
Author: Eric E Payne <er...@oath.com>
Authored: Thu Apr 12 16:04:23 2018 +0000
Committer: Eric E Payne <er...@oath.com>
Committed: Thu Apr 12 16:38:50 2018 +0000

----------------------------------------------------------------------
 .../v2/hs/HistoryServerLeveldbStateStoreService.java    | 10 ----------
 .../java/org/apache/hadoop/mapred/ShuffleHandler.java   | 11 -----------
 .../recovery/NMLeveldbStateStoreService.java            | 12 ------------
 .../resourcemanager/recovery/LeveldbRMStateStore.java   | 12 ------------
 4 files changed, 45 deletions(-)
----------------------------------------------------------------------
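All four state stores stop installing a custom org.iq80.leveldb.Logger: native leveldb threads can invoke that Java callback while the JVM is already exiting, which is what crashed with SIGSEGV. A sketch of the resulting open path, assuming the same leveldbjni calls the surrounding code uses:

import java.io.File;
import java.io.IOException;

import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

public class OpenStateStoreSketch {

  static DB openStateStore(File dbDir) throws IOException {
    Options options = new Options();
    options.createIfMissing(false);
    // Intentionally no options.logger(...): a Java logger registered here can
    // be called back from native code during JVM shutdown and segfault.
    return JniDBFactory.factory.open(dbDir, options);
  }
}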


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0dfb18d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
index 16366b1..b951525 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
@@ -75,7 +75,6 @@ public class HistoryServerLeveldbStateStoreService extends
     Path storeRoot = createStorageDir(getConfig());
     Options options = new Options();
     options.createIfMissing(false);
-    options.logger(new LeveldbLogger());
     LOG.info("Using state database at " + storeRoot + " for recovery");
     File dbfile = new File(storeRoot.toString());
     try {
@@ -367,13 +366,4 @@ public class HistoryServerLeveldbStateStoreService extends
             + getCurrentVersion() + ", but loading version " + loadedVersion);
     }
   }
-
-  private static class LeveldbLogger implements Logger {
-    private static final Log LOG = LogFactory.getLog(LeveldbLogger.class);
-
-    @Override
-    public void log(String message) {
-      LOG.info(message);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0dfb18d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 47eb8b1..0bab750 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -95,7 +95,6 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -628,7 +627,6 @@ public class ShuffleHandler extends AuxiliaryService {
   private void startStore(Path recoveryRoot) throws IOException {
     Options options = new Options();
     options.createIfMissing(false);
-    options.logger(new LevelDBLogger());
     Path dbPath = new Path(recoveryRoot, STATE_DB_NAME);
     LOG.info("Using state database at " + dbPath + " for recovery");
     File dbfile = new File(dbPath.toString());
@@ -774,15 +772,6 @@ public class ShuffleHandler extends AuxiliaryService {
     }
   }
 
-  private static class LevelDBLogger implements Logger {
-    private static final Log LOG = LogFactory.getLog(LevelDBLogger.class);
-
-    @Override
-    public void log(String message) {
-      LOG.info(message);
-    }
-  }
-
   static class TimeoutHandler extends IdleStateAwareChannelHandler {
 
     private boolean enabledTimeout;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0dfb18d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index c59a84c..9549b1f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -1393,7 +1393,6 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     Path storeRoot = createStorageDir(conf);
     Options options = new Options();
     options.createIfMissing(false);
-    options.logger(new LeveldbLogger());
     LOG.info("Using state database at " + storeRoot + " for recovery");
     File dbfile = new File(storeRoot.toString());
     try {
@@ -1458,17 +1457,6 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     }
   }
 
-  private static class LeveldbLogger implements org.iq80.leveldb.Logger {
-    private static final org.slf4j.Logger LOG =
-        LoggerFactory.getLogger(LeveldbLogger.class);
-
-    @Override
-    public void log(String message) {
-      LOG.info(message);
-    }
-  }
-
-
   Version loadVersion() throws IOException {
     byte[] data = db.get(bytes(DB_SCHEMA_VERSION_KEY));
     // if version is not stored previously, treat it as CURRENT_VERSION_INFO.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0dfb18d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
index a53083f..36a8dfa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
@@ -64,12 +64,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.AM
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl;
 import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.fusesource.leveldbjni.JniDBFactory;
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.iq80.leveldb.WriteBatch;
 
@@ -165,7 +163,6 @@ public class LeveldbRMStateStore extends RMStateStore {
     Path storeRoot = createStorageDir();
     Options options = new Options();
     options.createIfMissing(false);
-    options.logger(new LeveldbLogger());
     LOG.info("Using state database at " + storeRoot + " for recovery");
     File dbfile = new File(storeRoot.toString());
     try {
@@ -872,13 +869,4 @@ public class LeveldbRMStateStore extends RMStateStore {
       LOG.info("Full compaction cycle completed in " + duration + " msec");
     }
   }
-
-  private static class LeveldbLogger implements Logger {
-    private static final Log LOG = LogFactory.getLog(LeveldbLogger.class);
-
-    @Override
-    public void log(String message) {
-      LOG.info(message);
-    }
-  }
 }




[25/50] [abbrv] hadoop git commit: HDFS-13453. RBF: getMountPointDates should fetch latest subdir time/date when parent dir is not present but /parent/child dirs are present in mount table. Contributed by Dibyendu Karmakar.

Posted by sh...@apache.org.
HDFS-13453. RBF: getMountPointDates should fetch latest subdir time/date when parent dir is not present but /parent/child dirs are present in mount table. Contributed by Dibyendu Karmakar.

(cherry picked from commit 1134af9ad1daf683204df8f95a8f03d7baaa74d4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3c17359
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3c17359
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3c17359

Branch: refs/heads/YARN-8200
Commit: a3c17359513f775e5768ad9c57c33f3e2fdc6bdd
Parents: 2444d70
Author: Inigo Goiri <in...@apache.org>
Authored: Thu Apr 19 14:56:36 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Thu Apr 19 14:58:02 2018 -0700

----------------------------------------------------------------------
 .../federation/router/RouterRpcServer.java      | 55 ++++++++++++++++----
 .../federation/router/TestRouterMountTable.java | 21 +++++++-
 2 files changed, 65 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
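The new getModifiedTime() falls back to the newest modification time among a path's sub-entries when the path itself has no mount table entry, so /parent still reports a sensible date when only /parent/child is mounted. The core of that fallback, sketched over a hypothetical entry type:

import java.util.Arrays;
import java.util.List;

public class LatestModTimeSketch {

  // Minimal stand-in for a mount table entry.
  static class Entry {
    final long dateModified;
    Entry(long dateModified) { this.dateModified = dateModified; }
  }

  // When 'exact' is null the path is not mounted directly, so report the
  // newest date among the entries mounted beneath it.
  static long modifiedTime(Entry exact, List<Entry> subEntries) {
    if (exact != null) {
      return exact.dateModified;
    }
    long latest = 0L;
    for (Entry e : subEntries) {
      latest = Math.max(latest, e.dateModified);
    }
    return latest;
  }

  public static void main(String[] args) {
    List<Entry> subdirs = Arrays.asList(new Entry(1000L), new Entry(2000L));
    System.out.println(modifiedTime(null, subdirs)); // prints 2000
  }
}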


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3c17359/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index d626699..8a0cf27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2197,18 +2197,11 @@ public class RouterRpcServer extends AbstractService
   private Map<String, Long> getMountPointDates(String path) {
     Map<String, Long> ret = new TreeMap<>();
     if (subclusterResolver instanceof MountTableResolver) {
-      MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
-      String srcPath;
       try {
         final List<String> children = subclusterResolver.getMountPoints(path);
         for (String child : children) {
-          if (path.equals(Path.SEPARATOR)) {
-            srcPath = Path.SEPARATOR + child;
-          } else {
-            srcPath = path + Path.SEPARATOR + child;
-          }
-          MountTable entry = mountTable.getMountPoint(srcPath);
-          ret.put(child, entry.getDateModified());
+          Long modTime = getModifiedTime(ret, path, child);
+          ret.put(child, modTime);
         }
       } catch (IOException e) {
         LOG.error("Cannot get mount point", e);
@@ -2218,6 +2211,50 @@ public class RouterRpcServer extends AbstractService
   }
 
   /**
+   * Get modified time for child. If the child is present in mount table it
+   * will return the modified time. If the child is not present but subdirs of
+   * this child are present then it will return latest modified subdir's time
+   * as modified time of the requested child.
+   * @param ret contains children and modified times.
+   * @param mountTable.
+   * @param path Name of the path to start checking dates from.
+   * @param child child of the requested path.
+   * @return modified time.
+   */
+  private long getModifiedTime(Map<String, Long> ret, String path,
+      String child) {
+    MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
+    String srcPath;
+    if (path.equals(Path.SEPARATOR)) {
+      srcPath = Path.SEPARATOR + child;
+    } else {
+      srcPath = path + Path.SEPARATOR + child;
+    }
+    Long modTime = 0L;
+    try {
+      // Get mount table entry for the srcPath
+      MountTable entry = mountTable.getMountPoint(srcPath);
+      // if srcPath is not in mount table but its subdirs are in mount
+      // table we will display latest modified subdir date/time.
+      if (entry == null) {
+        List<MountTable> entries = mountTable.getMounts(srcPath);
+        for (MountTable eachEntry : entries) {
+          // Get the latest date
+          if (ret.get(child) == null ||
+              ret.get(child) < eachEntry.getDateModified()) {
+            modTime = eachEntry.getDateModified();
+          }
+        }
+      } else {
+        modTime = entry.getDateModified();
+      }
+    } catch (IOException e) {
+      LOG.error("Cannot get mount point", e);
+    }
+    return modTime;
+  }
+
+  /**
    * Create a new file status for a mount point.
    *
    * @param name Name of the mount point.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3c17359/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index b33b998..4d8ffe1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
@@ -167,6 +168,12 @@ public class TestRouterMountTable {
     addEntry = MountTable.newInstance(
         "/testdir/subdir", Collections.singletonMap("ns0", "/testdir/subdir"));
     assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance(
+        "/testdir3/subdir1", Collections.singletonMap("ns0", "/testdir3"));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance(
+        "/testA/testB/testC/testD", Collections.singletonMap("ns0", "/test"));
+    assertTrue(addMountTable(addEntry));
 
     // Create test dir in NN
     final FileSystem nnFs = nnContext.getFileSystem();
@@ -174,8 +181,18 @@ public class TestRouterMountTable {
 
     Map<String, Long> pathModTime = new TreeMap<>();
     for (String mount : mountTable.getMountPoints("/")) {
-      pathModTime.put(mount, mountTable.getMountPoint("/"+mount)
-          .getDateModified());
+      if (mountTable.getMountPoint("/"+mount) != null) {
+        pathModTime.put(mount, mountTable.getMountPoint("/"+mount)
+            .getDateModified());
+      } else {
+        List<MountTable> entries = mountTable.getMounts("/"+mount);
+        for (MountTable entry : entries) {
+          if (pathModTime.get(mount) == null ||
+              pathModTime.get(mount) < entry.getDateModified()) {
+            pathModTime.put(mount, entry.getDateModified());
+          }
+        }
+      }
     }
     FileStatus[] iterator = nnFs.listStatus(new Path("/"));
     for (FileStatus file : iterator) {
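A minimal sketch of the lookup this patch adds, assuming an initialized
MountTableResolver and the RBF types used in the diffs above: resolve the
exact mount entry first, and fall back to the newest sub-entry when the
exact path is not mounted.

    // Sketch only; imports and resolver setup as in RouterRpcServer above.
    long resolveModTime(MountTableResolver resolver, String srcPath)
        throws IOException {
      MountTable entry = resolver.getMountPoint(srcPath);
      if (entry != null) {
        return entry.getDateModified();   // exact mount entry wins
      }
      long latest = 0L;
      for (MountTable sub : resolver.getMounts(srcPath)) {
        // keep the newest date among the sub-entries
        latest = Math.max(latest, sub.getDateModified());
      }
      return latest;
    }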




[19/50] [abbrv] hadoop git commit: HDFS-13466. RBF: Add more router-related information to the UI.

Posted by sh...@apache.org.
HDFS-13466. RBF: Add more router-related information to the UI.

(cherry picked from commit e4313e7e4725261b652dd7202048ff2373b05e28)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d720daa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d720daa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d720daa

Branch: refs/heads/YARN-8200
Commit: 8d720daa494c38bd48a278abdc082099a76719ad
Parents: 7a2edb5
Author: Wei Yan <we...@apache.org>
Authored: Tue Apr 17 15:01:07 2018 -0700
Committer: Wei Yan <we...@apache.org>
Committed: Tue Apr 17 15:06:48 2018 -0700

----------------------------------------------------------------------
 .../src/main/webapps/router/federationhealth.html  | 17 +++++++++++++++--
 .../src/main/webapps/router/federationhealth.js    |  5 ++++-
 2 files changed, 19 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
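The page changes below read their new summary data from the Router's /jmx
servlet. As a rough, self-contained illustration (the host and port are
placeholders for your Router's HTTP address), the same beans can be fetched
directly:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class RouterJmxProbe {
      public static void main(String[] args) throws Exception {
        // One of the JMX queries wired up in federationhealth.js below.
        URL url = new URL(
            "http://router-host:50071/jmx?qry=java.lang:type=Memory");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line);  // raw JSON for the Memory MXBean
          }
        }
      }
    }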


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d720daa/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index a7da094..f1cf482 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -90,6 +90,21 @@
 
 <div class="page-header"><h1>Summary</h1></div>
 {#federation}
+<p>
+  Security is {#routerstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/routerstat}.</p>
+<p>{#router}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/router}</p>
+
+<p>
+  {NumFiles|fmt_number} files and directories, {NumBlocks|fmt_number} blocks.
+</p>
+
+{#mem.HeapMemoryUsage}
+<p>Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Heap Memory. Max Heap Memory is {@eq key=max value="-1" type="number"}&lt;unbounded&gt;{:else}{max|fmt_bytes}{/eq}.</p>
+{/mem.HeapMemoryUsage}
+
+{#mem.NonHeapMemoryUsage}
+<p>Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Committed Non Heap Memory. Max Non Heap Memory is {@eq key=max value="-1" type="number"}&lt;unbounded&gt;{:else}{max|fmt_bytes}{/eq}.</p>
+{/mem.NonHeapMemoryUsage}
 <table class="table table-bordered table-striped">
   <tr><th>Total capacity</th><td>{TotalCapacity|fmt_bytes}</td></tr>
   <tr><th>Used capacity</th><td>{UsedCapacity|fmt_bytes}</td></tr>
@@ -103,8 +118,6 @@
   <tr><th><a href="#tab-datanode">Live Nodes</a></th><td>{NumLiveNodes} (Decommissioned: {NumDecomLiveNodes})</td></tr>
   <tr><th><a href="#tab-datanode">Dead Nodes</a></th><td>{NumDeadNodes} (Decommissioned: {NumDecomDeadNodes})</td></tr>
   <tr><th><a href="#tab-datanode">Decommissioning Nodes</a></th><td>{NumDecommissioningNodes}</td></tr>
-  <tr><th>Files</th><td>{NumFiles}</td></tr>
-  <tr><th>Blocks</th><td>{NumBlocks}</td></tr>
   <tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{NumOfBlocksUnderReplicated}</td></tr>
   <tr><th>Number of Blocks Pending Deletion</th><td>{NumOfBlocksPendingDeletion}</td></tr>
 </table>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d720daa/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
index ef0a2a4..a0b0128 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
@@ -33,7 +33,10 @@
 
   function load_overview() {
     var BEANS = [
-      {"name": "federation",      "url": "/jmx?qry=Hadoop:service=Router,name=FederationState"}
+      {"name": "federation",  "url": "/jmx?qry=Hadoop:service=Router,name=FederationState"},
+      {"name": "routerstat",  "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
+      {"name": "router",      "url": "/jmx?qrt=Hadoop:service=NameNode,name=NameNodeInfo"},
+      {"name": "mem",         "url": "/jmx?qry=java.lang:type=Memory"}
     ];
 
     var HELPERS = {




[47/50] [abbrv] hadoop git commit: HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

Posted by sh...@apache.org.
HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb3414a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb3414a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb3414a2

Branch: refs/heads/YARN-8200
Commit: cb3414a27944b5878bfd8134a086276e454b3db0
Parents: c0c788a
Author: Inigo Goiri <in...@apache.org>
Authored: Mon Apr 30 15:24:21 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon Apr 30 15:24:21 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  36 +--
 .../fsdataset/impl/ReservedSpaceCalculator.java | 227 +++++++++++++++++++
 .../src/main/resources/hdfs-default.xml         |  28 +++
 .../fsdataset/impl/TestFsVolumeList.java        |  59 ++++-
 .../impl/TestReservedSpaceCalculator.java       | 171 ++++++++++++++
 6 files changed, 516 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ec50448..c128a8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -540,8 +541,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = "dfs.datanode.dns.nameserver";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+      "dfs.datanode.du.reserved.calculator";
+  public static final Class<? extends ReservedSpaceCalculator>
+      DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+          ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = "dfs.datanode.du.reserved";
   public static final long    DFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+      "dfs.datanode.du.reserved.pct";
+  public static final int     DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
   public static final int     DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 71d93ae..4c8accf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.codehaus.jackson.annotate.JsonProperty;
@@ -99,7 +98,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
       = new ConcurrentHashMap<String, BlockPoolSlice>();
   private final File currentDir;    // <StorageDirectory>/current
   private final DF usage;
-  private final long reserved;
+  private final ReservedSpaceCalculator reserved;
   private CloseableReferenceCount reference = new CloseableReferenceCount();
 
   // Disk space reserved for blocks (RBW or Re-replicating) open for write.
@@ -123,24 +122,33 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
       Configuration conf, StorageType storageType) throws IOException {
+    // Outside of tests, usage is passed as null here and created below.
+    this(dataset, storageID, currentDir, conf, storageType, null);
+  }
+
+  FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
+      Configuration conf, StorageType storageType, DF usage)
+      throws IOException {
     this.dataset = dataset;
     this.storageID = storageID;
-    this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
-        + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
     this.reservedForReplicas = new AtomicLong(0L);
     this.currentDir = currentDir;
     File parent = currentDir.getParentFile();
-    this.usage = new DF(parent, conf);
     this.storageType = storageType;
     this.configuredCapacity = -1;
+
+    if (usage == null) {
+      usage = new DF(parent, conf);
+    }
+    this.usage = usage;
     // dataset.datanode may be null in some tests.
     this.fileIoProvider = dataset.datanode != null ?
         dataset.datanode.getFileIoProvider() :
         new FileIoProvider(conf, dataset.datanode);
     cacheExecutor = initializeCacheExecutor(parent);
     this.metrics = DataNodeVolumeMetrics.create(conf, parent.getAbsolutePath());
+    this.reserved = new ReservedSpaceCalculator.Builder(conf)
+        .setUsage(usage).setStorageType(storageType).build();
   }
 
   protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
@@ -370,7 +378,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   @VisibleForTesting
   public long getCapacity() {
     if (configuredCapacity < 0) {
-      long remaining = usage.getCapacity() - reserved;
+      long remaining = usage.getCapacity() - getReserved();
       return remaining > 0 ? remaining : 0;
     }
 
@@ -410,8 +418,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private long getRemainingReserved() throws IOException {
     long actualNonDfsUsed = getActualNonDfsUsed();
-    if (actualNonDfsUsed < reserved) {
-      return reserved - actualNonDfsUsed;
+    long actualReserved = getReserved();
+    if (actualNonDfsUsed < actualReserved) {
+      return actualReserved - actualNonDfsUsed;
     }
     return 0L;
   }
@@ -424,10 +433,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
    */
   public long getNonDfsUsed() throws IOException {
     long actualNonDfsUsed = getActualNonDfsUsed();
-    if (actualNonDfsUsed < reserved) {
+    long actualReserved = getReserved();
+    if (actualNonDfsUsed < actualReserved) {
       return 0L;
     }
-    return actualNonDfsUsed - reserved;
+    return actualNonDfsUsed - actualReserved;
   }
 
   @VisibleForTesting
@@ -446,7 +456,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   long getReserved(){
-    return reserved;
+    return reserved.getReserved();
   }
 
   BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java
new file mode 100644
index 0000000..5523cfd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.StringUtils;
+
+import java.lang.reflect.Constructor;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY;
+
+/**
+ * Used for calculating file system space reserved for non-HDFS data.
+ */
+public abstract class ReservedSpaceCalculator {
+
+  /**
+   * Used for creating instances of ReservedSpaceCalculator.
+   */
+  public static class Builder {
+
+    private final Configuration conf;
+
+    private DF usage;
+    private StorageType storageType;
+
+    public Builder(Configuration conf) {
+      this.conf = conf;
+    }
+
+    public Builder setUsage(DF newUsage) {
+      this.usage = newUsage;
+      return this;
+    }
+
+    public Builder setStorageType(
+        StorageType newStorageType) {
+      this.storageType = newStorageType;
+      return this;
+    }
+
+    ReservedSpaceCalculator build() {
+      try {
+        Class<? extends ReservedSpaceCalculator> clazz = conf.getClass(
+            DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+            DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT,
+            ReservedSpaceCalculator.class);
+
+        Constructor<? extends ReservedSpaceCalculator> constructor =
+            clazz.getConstructor(Configuration.class, DF.class,
+                StorageType.class);
+
+        return constructor.newInstance(conf, usage, storageType);
+      } catch (Exception e) {
+        throw new IllegalStateException(
+            "Error instantiating ReservedSpaceCalculator", e);
+      }
+    }
+  }
+
+  private final DF usage;
+  private final Configuration conf;
+  private final StorageType storageType;
+
+  ReservedSpaceCalculator(Configuration conf, DF usage,
+      StorageType storageType) {
+    this.usage = usage;
+    this.conf = conf;
+    this.storageType = storageType;
+  }
+
+  DF getUsage() {
+    return usage;
+  }
+
+  long getReservedFromConf(String key, long defaultValue) {
+    return conf.getLong(key + "." + StringUtils.toLowerCase(
+        storageType.toString()), conf.getLong(key, defaultValue));
+  }
+
+  /**
+   * Return the capacity of the file system space reserved for non-HDFS.
+   *
+   * @return the number of bytes reserved for non-HDFS.
+   */
+  abstract long getReserved();
+
+
+  /**
+   * Based on absolute number of reserved bytes.
+   */
+  public static class ReservedSpaceCalculatorAbsolute extends
+      ReservedSpaceCalculator {
+
+    private final long reservedBytes;
+
+    public ReservedSpaceCalculatorAbsolute(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
+          DFS_DATANODE_DU_RESERVED_DEFAULT);
+    }
+
+    @Override
+    long getReserved() {
+      return reservedBytes;
+    }
+  }
+
+  /**
+   * Based on percentage of total capacity in the storage.
+   */
+  public static class ReservedSpaceCalculatorPercentage extends
+      ReservedSpaceCalculator {
+
+    private final long reservedPct;
+
+    public ReservedSpaceCalculatorPercentage(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedPct = getReservedFromConf(
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
+    }
+
+    @Override
+    long getReserved() {
+      return getPercentage(getUsage().getCapacity(), reservedPct);
+    }
+  }
+
+  /**
+   * Calculates absolute and percentage based reserved space and
+   * picks the one that will yield more reserved space.
+   */
+  public static class ReservedSpaceCalculatorConservative extends
+      ReservedSpaceCalculator {
+
+    private final long reservedBytes;
+    private final long reservedPct;
+
+    public ReservedSpaceCalculatorConservative(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
+          DFS_DATANODE_DU_RESERVED_DEFAULT);
+      this.reservedPct = getReservedFromConf(
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
+    }
+
+    long getReservedBytes() {
+      return reservedBytes;
+    }
+
+    long getReservedPct() {
+      return reservedPct;
+    }
+
+    @Override
+    long getReserved() {
+      return Math.max(getReservedBytes(),
+          getPercentage(getUsage().getCapacity(), getReservedPct()));
+    }
+  }
+
+  /**
+   * Calculates absolute and percentage based reserved space and
+   * picks the one that will yield less reserved space.
+   */
+  public static class ReservedSpaceCalculatorAggressive extends
+      ReservedSpaceCalculator {
+
+    private final long reservedBytes;
+    private final long reservedPct;
+
+    public ReservedSpaceCalculatorAggressive(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
+          DFS_DATANODE_DU_RESERVED_DEFAULT);
+      this.reservedPct = getReservedFromConf(
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
+    }
+
+    long getReservedBytes() {
+      return reservedBytes;
+    }
+
+    long getReservedPct() {
+      return reservedPct;
+    }
+
+    @Override
+    long getReserved() {
+      return Math.min(getReservedBytes(),
+          getPercentage(getUsage().getCapacity(), getReservedPct()));
+    }
+  }
+
+  private static long getPercentage(long total, long percentage) {
+    return (total * percentage) / 100;
+  }
+}
\ No newline at end of file
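Because the Builder above instantiates whatever class
dfs.datanode.du.reserved.calculator names through a
(Configuration, DF, StorageType) constructor, the policy is pluggable. A
hypothetical custom calculator might look like the sketch below; note that
getReserved() is package-private, so a subclass would have to live in the
same package.

    package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.DF;
    import org.apache.hadoop.fs.StorageType;

    // Hypothetical example, not part of the patch: reserve a fixed
    // 1 GiB floor plus 5% of the volume's capacity.
    public class ReservedSpaceCalculatorCustom extends ReservedSpaceCalculator {

      public ReservedSpaceCalculatorCustom(Configuration conf, DF usage,
          StorageType storageType) {
        super(conf, usage, storageType);
      }

      @Override
      long getReserved() {
        long floor = 1024L * 1024 * 1024;           // always keep 1 GiB free
        long share = getUsage().getCapacity() / 20; // plus 5% of capacity
        return floor + share;
      }
    }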

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index ba98257..edd4c5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -327,6 +327,20 @@
 </property>
 
 <property>
+  <name>dfs.datanode.du.reserved.calculator</name>
+  <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator$ReservedSpaceCalculatorAbsolute</value>
+  <description>Determines the class of ReservedSpaceCalculator to be used for
+    calculating disk space reserved for non-HDFS data. The default calculator is
+    ReservedSpaceCalculatorAbsolute, which uses dfs.datanode.du.reserved
+    for a static reserved number of bytes. ReservedSpaceCalculatorPercentage
+    uses dfs.datanode.du.reserved.pct to calculate the reserved number
+    of bytes based on the size of the storage. ReservedSpaceCalculatorConservative
+    and ReservedSpaceCalculatorAggressive combine the two: Conservative takes the
+    maximum of the two values, Aggressive the minimum. For more details see ReservedSpaceCalculator.
+  </description>
+</property>
+
+<property>
   <name>dfs.datanode.du.reserved</name>
   <value>0</value>
   <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
@@ -339,6 +353,20 @@
 </property>
 
 <property>
+  <name>dfs.datanode.du.reserved.pct</name>
+  <value>0</value>
+  <description>Reserved space as a percentage. See dfs.datanode.du.reserved.calculator
+    for when this takes effect. The actual number of reserved bytes is calculated
+    from the total capacity of the data directory in question. Storage-type-specific
+    reservation is also supported. The property can be followed by a corresponding
+    storage type ([ssd]/[disk]/[archive]/[ram_disk]) for clusters with heterogeneous
+    storage. For example, the reserved percentage for RAM_DISK storage can be
+    configured using the property 'dfs.datanode.du.reserved.pct.ram_disk'. If a
+    storage-type-specific reservation is not configured, dfs.datanode.du.reserved.pct is used.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.name.dir</name>
   <value>file://${hadoop.tmp.dir}/dfs/name</value>
   <description>Determines where on the local filesystem the DFS name node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index f511dc6..147b2cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,15 +39,18 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class TestFsVolumeList {
 
-  private final Configuration conf = new Configuration();
+  private Configuration conf;
   private VolumeChoosingPolicy<FsVolumeImpl> blockChooser =
       new RoundRobinVolumeChoosingPolicy<>();
   private FsDatasetImpl dataset = null;
@@ -61,6 +65,7 @@ public class TestFsVolumeList {
     blockScannerConf.setInt(DFSConfigKeys.
         DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
     blockScanner = new BlockScanner(null, blockScannerConf);
+    conf = new Configuration();
   }
 
   @Test(timeout=30000)
@@ -185,4 +190,56 @@ public class TestFsVolumeList {
         actualNonDfsUsage - duReserved;
     assertEquals(expectedNonDfsUsage, spyVolume.getNonDfsUsed());
   }
+
+  @Test
+  public void testDfsReservedPercentageForDifferentStorageTypes()
+      throws IOException {
+    conf.setClass(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage.class,
+        ReservedSpaceCalculator.class);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 15);
+
+    File volDir = new File(baseDir, "volume-0");
+    volDir.mkdirs();
+
+    DF usage = mock(DF.class);
+    when(usage.getCapacity()).thenReturn(4000L);
+    when(usage.getAvailable()).thenReturn(1000L);
+
+    // when storage type reserved is not configured, should consider
+    // dfs.datanode.du.reserved.pct
+    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.RAM_DISK, usage);
+
+    assertEquals(600, volume.getReserved());
+    assertEquals(3400, volume.getCapacity());
+    assertEquals(400, volume.getAvailable());
+
+    // when storage type reserved is configured.
+    conf.setLong(
+        DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + "."
+            + StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 10);
+    conf.setLong(
+        DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + "."
+            + StringUtils.toLowerCase(StorageType.SSD.toString()), 50);
+    FsVolumeImpl volume1 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.RAM_DISK, usage);
+    assertEquals(400, volume1.getReserved());
+    assertEquals(3600, volume1.getCapacity());
+    assertEquals(600, volume1.getAvailable());
+
+    FsVolumeImpl volume2 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.SSD, usage);
+    assertEquals(2000, volume2.getReserved());
+    assertEquals(2000, volume2.getCapacity());
+    assertEquals(0, volume2.getAvailable());
+
+    FsVolumeImpl volume3 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.DISK, usage);
+    assertEquals(600, volume3.getReserved());
+
+    FsVolumeImpl volume4 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.ARCHIVE, usage);
+    assertEquals(600, volume4.getReserved());
+  }
 }
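As getReservedFromConf() in the new class shows, a storage-type-specific key
(the base key plus a lowercase storage-type suffix) takes precedence over the
base key. A minimal sketch of that lookup order:

    // Sketch of the override order used by getReservedFromConf().
    Configuration conf = new Configuration();
    conf.setLong("dfs.datanode.du.reserved.pct", 15);           // global value
    conf.setLong("dfs.datanode.du.reserved.pct.ram_disk", 10);  // RAM_DISK only
    // RAM_DISK volumes resolve to 10; DISK, SSD and ARCHIVE fall back to 15.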

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
new file mode 100644
index 0000000..e04a239
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.StorageType;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAggressive;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorConservative;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit testing for different types of ReservedSpace calculators.
+ */
+public class TestReservedSpaceCalculator {
+
+  private Configuration conf;
+  private DF usage;
+  private ReservedSpaceCalculator reserved;
+
+  @Before
+  public void setUp() {
+    conf = new Configuration();
+    usage = Mockito.mock(DF.class);
+  }
+
+  @Test
+  public void testReservedSpaceAbsolute() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorAbsolute.class,
+        ReservedSpaceCalculator.class);
+
+    // Test both using global configuration
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY, 900);
+
+    checkReserved(StorageType.DISK, 10000, 900);
+    checkReserved(StorageType.SSD, 10000, 900);
+    checkReserved(StorageType.ARCHIVE, 10000, 900);
+  }
+
+  @Test
+  public void testReservedSpaceAbsolutePerStorageType() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorAbsolute.class,
+        ReservedSpaceCalculator.class);
+
+    // Test DISK
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".disk", 500);
+    checkReserved(StorageType.DISK, 2300, 500);
+
+    // Test SSD
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".ssd", 750);
+    checkReserved(StorageType.SSD, 1550, 750);
+  }
+
+  @Test
+  public void testReservedSpacePercentage() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorPercentage.class,
+        ReservedSpaceCalculator.class);
+
+    // Test both using global configuration
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 10);
+    checkReserved(StorageType.DISK, 10000, 1000);
+    checkReserved(StorageType.SSD, 10000, 1000);
+    checkReserved(StorageType.ARCHIVE, 10000, 1000);
+
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 50);
+    checkReserved(StorageType.DISK, 4000, 2000);
+    checkReserved(StorageType.SSD, 4000, 2000);
+    checkReserved(StorageType.ARCHIVE, 4000, 2000);
+  }
+
+  @Test
+  public void testReservedSpacePercentagePerStorageType() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorPercentage.class,
+        ReservedSpaceCalculator.class);
+
+    // Test DISK
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".disk", 20);
+    checkReserved(StorageType.DISK, 1600, 320);
+
+    // Test SSD
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ssd", 50);
+    checkReserved(StorageType.SSD, 8001, 4000);
+  }
+
+  @Test
+  public void testReservedSpaceConservativePerStorageType() {
+    // This policy should take the maximum of the two
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorConservative.class,
+        ReservedSpaceCalculator.class);
+
+    // Test DISK + taking the reserved bytes over percentage,
+    // as that gives more reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".disk", 800);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".disk", 20);
+    checkReserved(StorageType.DISK, 1600, 800);
+
+    // Test ARCHIVE + taking reserved space based on the percentage,
+    // as that gives more reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".archive", 1300);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".archive", 50);
+    checkReserved(StorageType.ARCHIVE, 6200, 3100);
+  }
+
+  @Test
+  public void testReservedSpaceAggressivePerStorageType() {
+    // This policy should take the minimum of the two
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorAggressive.class,
+        ReservedSpaceCalculator.class);
+
+    // Test RAM_DISK + taking the reserved bytes over percentage,
+    // as that gives less reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".ram_disk", 100);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ram_disk", 10);
+    checkReserved(StorageType.RAM_DISK, 1600, 100);
+
+    // Test ARCHIVE + taking reserved space based on the percentage,
+    // as that gives less reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".archive", 20000);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".archive", 5);
+    checkReserved(StorageType.ARCHIVE, 100000, 5000);
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void testInvalidCalculator() {
+    conf.set(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY, "INVALIDTYPE");
+    reserved = new ReservedSpaceCalculator.Builder(conf)
+        .setUsage(usage)
+        .setStorageType(StorageType.DISK)
+        .build();
+  }
+
+  private void checkReserved(StorageType storageType,
+      long totalCapacity, long reservedExpected) {
+    when(usage.getCapacity()).thenReturn(totalCapacity);
+
+    reserved = new ReservedSpaceCalculator.Builder(conf).setUsage(usage)
+        .setStorageType(storageType).build();
+    assertEquals(reservedExpected, reserved.getReserved());
+  }
+}
\ No newline at end of file
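A worked example of the four policies for a single volume, using the same
style of numbers as the tests above (capacity = 10000,
dfs.datanode.du.reserved = 900, dfs.datanode.du.reserved.pct = 10):

    // Reserved space under each calculator for the numbers above:
    // ReservedSpaceCalculatorAbsolute     -> 900
    // ReservedSpaceCalculatorPercentage   -> 1000  (10000 * 10 / 100)
    // ReservedSpaceCalculatorConservative -> max(900, 1000) = 1000
    // ReservedSpaceCalculatorAggressive   -> min(900, 1000) = 900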




[45/50] [abbrv] hadoop git commit: HDFS-13508. RBF: Normalize paths (automatically) when adding, updating, removing or listing mount table entries. Contributed by Ekanth S.

Posted by sh...@apache.org.
HDFS-13508. RBF: Normalize paths (automatically) when adding, updating, removing or listing mount table entries. Contributed by Ekanth S.

(cherry picked from commit 484440602c5b69fbd8106010603c61ae051056dd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8441811
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8441811
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8441811

Branch: refs/heads/YARN-8200
Commit: c8441811fdee2379f50abe218e2afe8133786497
Parents: ae32e21
Author: Inigo Goiri <in...@apache.org>
Authored: Fri Apr 27 16:28:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri Apr 27 16:30:07 2018 -0700

----------------------------------------------------------------------
 .../hdfs/tools/federation/RouterAdmin.java      |  16 +++
 .../federation/router/TestRouterAdminCLI.java   | 117 ++++++++++++++++++-
 2 files changed, 130 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
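The normalization below simply round-trips the string through
org.apache.hadoop.fs.Path, which strips trailing slashes and collapses
duplicate separators. A quick sketch of the effect:

    // Effect of normalizeFileSystemPath() in the diff below:
    String a = new Path("/testdir/").toString();  // -> "/testdir"
    String b = new Path("/a//b/").toString();     // -> "/a/b"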


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8441811/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 17707dc..b0a2062 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -322,6 +323,7 @@ public class RouterAdmin extends Configured implements Tool {
   public boolean addMount(String mount, String[] nss, String dest,
       boolean readonly, DestinationOrder order, ACLEntity aclInfo)
       throws IOException {
+    mount = normalizeFileSystemPath(mount);
     // Get the existing entry
     MountTableManager mountTable = client.getMountTableManager();
     GetMountTableEntriesRequest getRequest =
@@ -473,6 +475,7 @@ public class RouterAdmin extends Configured implements Tool {
   public boolean updateMount(String mount, String[] nss, String dest,
       boolean readonly, DestinationOrder order, ACLEntity aclInfo)
       throws IOException {
+    mount = normalizeFileSystemPath(mount);
     MountTableManager mountTable = client.getMountTableManager();
 
     // Create a new entry
@@ -519,6 +522,7 @@ public class RouterAdmin extends Configured implements Tool {
    * @throws IOException If it cannot be removed.
    */
   public boolean removeMount(String path) throws IOException {
+    path = normalizeFileSystemPath(path);
     MountTableManager mountTable = client.getMountTableManager();
     RemoveMountTableEntryRequest request =
         RemoveMountTableEntryRequest.newInstance(path);
@@ -538,6 +542,7 @@ public class RouterAdmin extends Configured implements Tool {
    * @throws IOException If it cannot be listed.
    */
   public void listMounts(String path) throws IOException {
+    path = normalizeFileSystemPath(path);
     MountTableManager mountTable = client.getMountTableManager();
     GetMountTableEntriesRequest request =
         GetMountTableEntriesRequest.newInstance(path);
@@ -798,6 +803,17 @@ public class RouterAdmin extends Configured implements Tool {
   }
 
   /**
+   * Normalize a path for the filesystem.
+   *
+   * @param path Path to normalize.
+   * @return Normalized path.
+   */
+  private static String normalizeFileSystemPath(final String path) {
+    Path normalizedPath = new Path(path);
+    return normalizedPath.toString();
+  }
+
+  /**
    * Inner class that stores ACL info of mount table.
    */
   static class ACLEntity {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8441811/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 4e84c33..2537c19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -159,6 +159,45 @@ public class TestRouterAdminCLI {
   }
 
   @Test
+  public void testAddMountTableNotNormalized() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-addmounttable-notnormalized";
+    String srcWithSlash = src + "/";
+    String dest = "/addmounttable-notnormalized";
+    String[] argv = new String[] {"-add", srcWithSlash, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(src);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    MountTable mountTable = getResponse.getEntries().get(0);
+
+    List<RemoteLocation> destinations = mountTable.getDestinations();
+    assertEquals(1, destinations.size());
+
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, destinations.get(0).getNameserviceId());
+    assertEquals(dest, destinations.get(0).getDest());
+    assertFalse(mountTable.isReadOnly());
+
+    // test mount table update behavior
+    dest = dest + "-new";
+    argv = new String[] {"-add", srcWithSlash, nsId, dest, "-readonly"};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    mountTable = getResponse.getEntries().get(0);
+    assertEquals(2, mountTable.getDestinations().size());
+    assertEquals(nsId, mountTable.getDestinations().get(1).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(1).getDest());
+    assertTrue(mountTable.isReadOnly());
+  }
+
+  @Test
   public void testAddOrderMountTable() throws Exception {
     testAddOrderMountTable(DestinationOrder.HASH);
     testAddOrderMountTable(DestinationOrder.LOCAL);
@@ -192,6 +231,7 @@ public class TestRouterAdminCLI {
   public void testListMountTable() throws Exception {
     String nsId = "ns0";
     String src = "/test-lsmounttable";
+    String srcWithSlash = src + "/";
     String dest = "/lsmounttable";
     String[] argv = new String[] {"-add", src, nsId, dest};
     assertEquals(0, ToolRunner.run(admin, argv));
@@ -203,6 +243,11 @@ public class TestRouterAdminCLI {
     assertEquals(0, ToolRunner.run(admin, argv));
     assertTrue(out.toString().contains(src));
 
+    // Test with not-normalized src input
+    argv = new String[] {"-ls", srcWithSlash};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    assertTrue(out.toString().contains(src));
+
     out.reset();
     GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
         .newInstance("/");
@@ -256,6 +301,33 @@ public class TestRouterAdminCLI {
   }
 
   @Test
+  public void testRemoveMountTableNotNormalized() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-rmmounttable-notnormalized";
+    String srcWithSlash = src + "/";
+    String dest = "/rmmounttable-notnormalized";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(src);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    // ensure mount table added successfully
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+
+    argv = new String[] {"-rm", srcWithSlash};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    assertEquals(0, getResponse.getEntries().size());
+  }
+
+  @Test
   public void testMountTableDefaultACL() throws Exception {
     String[] argv = new String[] {"-add", "/testpath0", "ns0", "/testdir0"};
     assertEquals(0, ToolRunner.run(admin, argv));
@@ -552,12 +624,12 @@ public class TestRouterAdminCLI {
   }
 
   @Test
-  public void testUpdateNameserviceDestinationForExistingMountTable() throws
+  public void testUpdateDestinationForExistingMountTable() throws
   Exception {
     // Add a mount table firstly
     String nsId = "ns0";
-    String src = "/test-updateNameserviceDestinationForExistingMountTable";
-    String dest = "/UpdateNameserviceDestinationForExistingMountTable";
+    String src = "/test-updateDestinationForExistingMountTable";
+    String dest = "/UpdateDestinationForExistingMountTable";
     String[] argv = new String[] {"-add", src, nsId, dest};
     assertEquals(0, ToolRunner.run(admin, argv));
 
@@ -590,6 +662,45 @@ public class TestRouterAdminCLI {
   }
 
   @Test
+  public void testUpdateDestinationForExistingMountTableNotNormalized() throws
+      Exception {
+    // Add a mount table firstly
+    String nsId = "ns0";
+    String src = "/test-updateDestinationForExistingMountTableNotNormalized";
+    String srcWithSlash = src + "/";
+    String dest = "/UpdateDestinationForExistingMountTableNotNormalized";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest =
+        GetMountTableEntriesRequest.newInstance(src);
+    GetMountTableEntriesResponse getResponse =
+        client.getMountTableManager().getMountTableEntries(getRequest);
+    // Ensure mount table added successfully
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(0).getDest());
+
+    // Update the destination
+    String newNsId = "ns1";
+    String newDest = "/newDestination";
+    argv = new String[] {"-update", srcWithSlash, newNsId, newDest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    // Ensure the destination updated successfully
+    mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(newNsId,
+        mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(newDest, mountTable.getDestinations().get(0).getDest());
+  }
+
+  @Test
   public void testUpdateReadonlyUserGroupPermissionMountable()
       throws Exception {
     // Add a mount table




[43/50] [abbrv] hadoop git commit: YARN-8221. RMWebServices also need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.

Posted by sh...@apache.org.
YARN-8221. RMWebServices also need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.

(cherry picked from commit ef3ecc308dbea41c6a88bd4d16739c7bbc10cdda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/250ea479
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/250ea479
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/250ea479

Branch: refs/heads/YARN-8200
Commit: 250ea47985cd3fcc8d3b4a053e24daffeec8493b
Parents: b47275f
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Fri Apr 27 22:58:10 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Fri Apr 27 23:00:19 2018 +0530

----------------------------------------------------------------------
 .../server/resourcemanager/webapp/RMWebServices.java   | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/250ea479/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 5266581..44af1ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -228,6 +228,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
 
   @VisibleForTesting
   boolean isCentralizedNodeLabelConfiguration = true;
+  private boolean displayPerUserApps = false;
 
   public final static String DELEGATION_TOKEN_HEADER =
       "Hadoop-YARN-RM-Delegation-Token";
@@ -240,6 +241,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     this.conf = conf;
     isCentralizedNodeLabelConfiguration =
         YarnConfiguration.isCentralizedNodeLabelConfiguration(conf);
+    this.displayPerUserApps = conf.getBoolean(
+        YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER,
+        YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER);
   }
 
   RMWebServices(ResourceManager rm, Configuration conf,
@@ -600,7 +604,14 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       DeSelectFields deSelectFields = new DeSelectFields();
       deSelectFields.initFields(unselectedFields);
 
-      AppInfo app = new AppInfo(rm, rmapp, hasAccess(rmapp, hsr),
+      boolean allowAccess = hasAccess(rmapp, hsr);
+      // When the RM is configured to display apps per user, skip apps that
+      // this caller does not have access to view.
+      if (displayPerUserApps && !allowAccess) {
+        continue;
+      }
+
+      AppInfo app = new AppInfo(rm, rmapp, allowAccess,
           WebAppUtils.getHttpSchemePrefix(conf), deSelectFields);
       allApps.add(app);
     }




[20/50] [abbrv] hadoop git commit: HDFS-13462. Add BIND_HOST configuration for JournalNode's HTTP and RPC Servers. Contributed by Lukas Majercak.

Posted by sh...@apache.org.
HDFS-13462. Add BIND_HOST configuration for JournalNode's HTTP and RPC Servers. Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4b10786
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4b10786
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4b10786

Branch: refs/heads/YARN-8200
Commit: b4b10786c342b3a79344f4d5c6e06c78b60be619
Parents: 8d720da
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Apr 17 15:18:01 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Apr 17 15:18:01 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../hdfs/qjournal/server/JournalNode.java       |  59 +++++-
 .../qjournal/server/JournalNodeHttpServer.java  |  65 ++++--
 .../qjournal/server/JournalNodeRpcServer.java   |  31 ++-
 .../src/main/resources/hdfs-default.xml         |  33 +++
 .../TestJournalNodeRespectsBindHostKeys.java    | 200 +++++++++++++++++++
 6 files changed, 364 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
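A sketch of the new keys in use: setting a wildcard bind host lets the
JournalNode listen on all interfaces while the advertised addresses stay
specific (the values here are illustrative).

    // Bind the JournalNode servers to all local interfaces.
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.journalnode.rpc-bind-host", "0.0.0.0");
    conf.set("dfs.journalnode.http-bind-host", "0.0.0.0");
    conf.set("dfs.journalnode.https-bind-host", "0.0.0.0");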


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b10786/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c9fef06..ec50448 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -843,15 +843,19 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_JOURNALNODE_EDITS_DIR_DEFAULT = "/tmp/hadoop/dfs/journalnode/";
   public static final String  DFS_JOURNALNODE_RPC_ADDRESS_KEY = "dfs.journalnode.rpc-address";
   public static final int     DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485;
+  public static final String  DFS_JOURNALNODE_RPC_BIND_HOST_KEY = "dfs.journalnode.rpc-bind-host";
   public static final String  DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_RPC_PORT_DEFAULT;
-    
+
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_KEY = "dfs.journalnode.http-address";
   public static final int     DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
+  public static final String  DFS_JOURNALNODE_HTTP_BIND_HOST_KEY = "dfs.journalnode.http-bind-host";
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTP_PORT_DEFAULT;
   public static final String  DFS_JOURNALNODE_HTTPS_ADDRESS_KEY = "dfs.journalnode.https-address";
   public static final int     DFS_JOURNALNODE_HTTPS_PORT_DEFAULT = 8481;
+  public static final String  DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY = "dfs.journalnode.https-bind-host";
   public static final String  DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTPS_PORT_DEFAULT;
 
+
   public static final String  DFS_JOURNALNODE_KEYTAB_FILE_KEY = "dfs.journalnode.keytab.file";
   public static final String  DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY = "dfs.journalnode.kerberos.principal";
   public static final String  DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY = "dfs.journalnode.kerberos.internal.spnego.principal";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b10786/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 3eb3477..563c580 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.qjournal.server;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_BIND_HOST_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.File;
@@ -44,6 +45,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.DiskChecker;
@@ -159,7 +161,8 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
 
       registerJNMXBean();
 
-      httpServer = new JournalNodeHttpServer(conf, this);
+      httpServer = new JournalNodeHttpServer(conf, this,
+          getHttpServerBindAddress(conf));
       httpServer.start();
 
       httpServerURI = httpServer.getServerURI().toString();
@@ -184,11 +187,6 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   public InetSocketAddress getBoundIpcAddress() {
     return rpcServer.getAddress();
   }
-  
-  @Deprecated
-  public InetSocketAddress getBoundHttpAddress() {
-    return httpServer.getAddress();
-  }
 
   public String getHttpServerURI() {
     return httpServerURI;
@@ -306,7 +304,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   private void registerJNMXBean() {
     journalNodeInfoBeanName = MBeans.register("JournalNode", "JournalNodeInfo", this);
   }
-  
+
   private class ErrorReporter implements StorageErrorReporter {
     @Override
     public void reportErrorOnFile(File f) {
@@ -356,4 +354,51 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   public Long getJournalCTime(String journalId) throws IOException {
     return getOrCreateJournal(journalId).getJournalCTime();
   }
+
+  public static InetSocketAddress getHttpAddress(Configuration conf) {
+    String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);
+    return NetUtils.createSocketAddr(addr,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY);
+  }
+
+  protected InetSocketAddress getHttpServerBindAddress(Configuration conf) {
+    InetSocketAddress bindAddress = getHttpAddress(conf);
+
+    // If DFS_JOURNALNODE_HTTP_BIND_HOST_KEY exists then it overrides the
+    // host name portion of DFS_JOURNALNODE_HTTP_ADDRESS_KEY.
+    final String bindHost = conf.getTrimmed(DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
+    if (bindHost != null && !bindHost.isEmpty()) {
+      bindAddress = new InetSocketAddress(bindHost, bindAddress.getPort());
+    }
+
+    return bindAddress;
+  }
+
+  @VisibleForTesting
+  public JournalNodeRpcServer getRpcServer() {
+    return rpcServer;
+  }
+
+  /**
+   * @return the actual JournalNode HTTP/HTTPS address.
+   */
+  public InetSocketAddress getBoundHttpAddress() {
+    return httpServer.getAddress();
+  }
+
+  /**
+   * @return JournalNode HTTP address
+   */
+  public InetSocketAddress getHttpAddress() {
+    return httpServer.getHttpAddress();
+  }
+
+  /**
+   * @return JournalNode HTTPS address
+   */
+  public InetSocketAddress getHttpsAddress() {
+    return httpServer.getHttpsAddress();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b10786/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
index 3adb93a..1d29c1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 
@@ -41,21 +42,37 @@ public class JournalNodeHttpServer {
   private HttpServer2 httpServer;
   private final JournalNode localJournalNode;
 
+  private InetSocketAddress httpAddress;
+  private InetSocketAddress httpsAddress;
+  private final InetSocketAddress bindAddress;
+
   private final Configuration conf;
 
-  JournalNodeHttpServer(Configuration conf, JournalNode jn) {
+  JournalNodeHttpServer(Configuration conf, JournalNode jn,
+      InetSocketAddress bindAddress) {
     this.conf = conf;
     this.localJournalNode = jn;
+    this.bindAddress = bindAddress;
   }
 
   void start() throws IOException {
-    final InetSocketAddress httpAddr = getAddress(conf);
+    final InetSocketAddress httpAddr = bindAddress;
 
     final String httpsAddrString = conf.get(
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
+    if (httpsAddr != null) {
+      // If DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY exists then it overrides the
+      // host name portion of DFS_JOURNALNODE_HTTPS_ADDRESS_KEY.
+      final String bindHost =
+          conf.getTrimmed(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
+      if (bindHost != null && !bindHost.isEmpty()) {
+        httpsAddr = new InetSocketAddress(bindHost, httpsAddr.getPort());
+      }
+    }
+
     HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "journal",
         DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
@@ -67,6 +84,20 @@ public class JournalNodeHttpServer {
     httpServer.addInternalServlet("getJournal", "/getJournal",
         GetJournalEditServlet.class, true);
     httpServer.start();
+
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+    int connIdx = 0;
+    if (policy.isHttpEnabled()) {
+      httpAddress = httpServer.getConnectorAddress(connIdx++);
+      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpAddress));
+    }
+
+    if (policy.isHttpsEnabled()) {
+      httpsAddress = httpServer.getConnectorAddress(connIdx);
+      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpsAddress));
+    }
   }
 
   void stop() throws IOException {
@@ -78,15 +109,27 @@ public class JournalNodeHttpServer {
       }
     }
   }
+
+  /**
+   * Return the actual HTTP/HTTPS address bound to by the running server.
+   */
+  public InetSocketAddress getAddress() {
+    assert httpAddress != null || httpsAddress != null;
+    return httpAddress != null ? httpAddress : httpsAddress;
+  }
   
   /**
   * Return the actual HTTP address bound to by the running server.
    */
-  @Deprecated
-  public InetSocketAddress getAddress() {
-    InetSocketAddress addr = httpServer.getConnectorAddress(0);
-    assert addr.getPort() != 0;
-    return addr;
+  public InetSocketAddress getHttpAddress() {
+    return httpAddress;
+  }
+
+  /**
+   * Return the actual HTTPS address bound to by the running server.
+   */
+  public InetSocketAddress getHttpsAddress() {
+    return httpsAddress;
   }
 
   /**
@@ -101,14 +144,6 @@ public class JournalNodeHttpServer {
         + NetUtils.getHostPortString(addr));
   }
 
-  private static InetSocketAddress getAddress(Configuration conf) {
-    String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
-        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);
-    return NetUtils.createSocketAddr(addr,
-        DFSConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT,
-        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY);
-  }
-
   public static Journal getJournalFromContext(ServletContext context, String jid)
       throws IOException {
     JournalNode jn = (JournalNode)context.getAttribute(JN_ATTRIBUTE_KEY);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b10786/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
index 42e6a4d..541f55b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URL;
 
+import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -49,10 +50,14 @@ import org.apache.hadoop.net.NetUtils;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_RPC_BIND_HOST_KEY;
+
 @InterfaceAudience.Private
 @VisibleForTesting
 public class JournalNodeRpcServer implements QJournalProtocol {
 
+  private static final Log LOG = JournalNode.LOG;
+
   private static final int HANDLER_COUNT = 5;
   private final JournalNode jn;
   private Server server;
@@ -68,6 +73,12 @@ public class JournalNodeRpcServer implements QJournalProtocol {
         true);
     
     InetSocketAddress addr = getAddress(confCopy);
+    String bindHost = conf.getTrimmed(DFS_JOURNALNODE_RPC_BIND_HOST_KEY, null);
+    if (bindHost == null) {
+      bindHost = addr.getHostName();
+    }
+    LOG.info("RPC server is binding to " + bindHost + ":" + addr.getPort());
+
     RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
         ProtobufRpcEngine.class);
     QJournalProtocolServerSideTranslatorPB translator =
@@ -76,13 +87,13 @@ public class JournalNodeRpcServer implements QJournalProtocol {
         .newReflectiveBlockingService(translator);
     
     this.server = new RPC.Builder(confCopy)
-      .setProtocol(QJournalProtocolPB.class)
-      .setInstance(service)
-      .setBindAddress(addr.getHostName())
-      .setPort(addr.getPort())
-      .setNumHandlers(HANDLER_COUNT)
-      .setVerbose(false)
-      .build();
+        .setProtocol(QJournalProtocolPB.class)
+        .setInstance(service)
+        .setBindAddress(bindHost)
+        .setPort(addr.getPort())
+        .setNumHandlers(HANDLER_COUNT)
+        .setVerbose(false)
+        .build();
 
     // set service-level authorization security policy
     if (confCopy.getBoolean(
@@ -248,4 +259,10 @@ public class JournalNodeRpcServer implements QJournalProtocol {
       throws IOException {
     jn.discardSegments(journalId, startTxId);
   }
+
+  /** Allow access to the RPC server for testing. */
+  @VisibleForTesting
+  Server getRpcServer() {
+    return server;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b10786/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 66f9718..ba98257 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2115,6 +2115,17 @@
 </property>
 
 <property>
+  <name>dfs.journalnode.rpc-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the RPC server will bind to. If this optional address is
+    set, it overrides only the hostname portion of dfs.journalnode.rpc-address.
+    This is useful for making the JournalNode listen on all interfaces by
+    setting it to 0.0.0.0.
+  </description>
+</property>
+
+<property>
   <name>dfs.journalnode.http-address</name>
   <value>0.0.0.0:8480</value>
   <description>
@@ -2124,6 +2135,17 @@
 </property>
 
 <property>
+  <name>dfs.journalnode.http-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the HTTP server will bind to. If this optional address
+    is set, it overrides only the hostname portion of
+    dfs.journalnode.http-address. This is useful for making the JournalNode
+    HTTP server listen on all interfaces by setting it to 0.0.0.0.
+  </description>
+</property>
+
+<property>
   <name>dfs.journalnode.https-address</name>
   <value>0.0.0.0:8481</value>
   <description>
@@ -2133,6 +2155,17 @@
 </property>
 
 <property>
+  <name>dfs.journalnode.https-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the HTTPS server will bind to. If this optional address
+    is set, it overrides only the hostname portion of
+    dfs.journalnode.https-address. This is useful for making the JournalNode
+    HTTPS server listen on all interfaces by setting it to 0.0.0.0.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.audit.loggers</name>
   <value>default</value>
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b10786/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
new file mode 100644
index 0000000..79f3598
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_RPC_BIND_HOST_KEY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsNot.not;
+
+import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
+/**
+ * This test checks that the JournalNode respects the following keys.
+ *
+ *  - DFS_JOURNALNODE_RPC_BIND_HOST_KEY
+ *  - DFS_JOURNALNODE_HTTP_BIND_HOST_KEY
+ *  - DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY
+ */
+public class TestJournalNodeRespectsBindHostKeys {
+
+  public static final Log LOG = LogFactory.getLog(
+      TestJournalNodeRespectsBindHostKeys.class);
+  private static final String WILDCARD_ADDRESS = "0.0.0.0";
+  private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0";
+  private static final int NUM_JN = 1;
+
+  private HdfsConfiguration conf;
+  private MiniJournalCluster jCluster;
+  private JournalNode jn;
+
+  @Before
+  public void setUp() {
+    conf = new HdfsConfiguration();
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    if (jCluster != null) {
+      jCluster.shutdown();
+      jCluster = null;
+    }
+  }
+
+  private static String getRpcServerAddress(JournalNode jn) {
+    JournalNodeRpcServer rpcServer = jn.getRpcServer();
+    return rpcServer.getRpcServer().getListenerAddress().getAddress().
+        toString();
+  }
+
+  @Test (timeout=300000)
+  public void testRpcBindHostKey() throws IOException {
+    LOG.info("Testing without " + DFS_JOURNALNODE_RPC_BIND_HOST_KEY);
+
+    // The JournalNode should not bind the wildcard address by default.
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    String address = getRpcServerAddress(jn);
+    assertThat("Bind address not expected to be wildcard by default.",
+        address, not("/" + WILDCARD_ADDRESS));
+
+    LOG.info("Testing with " + DFS_JOURNALNODE_RPC_BIND_HOST_KEY);
+
+    // Tell the JournalNode to bind the wildcard address.
+    conf.set(DFS_JOURNALNODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
+
+    // Verify that the JournalNode binds the wildcard address now.
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    address = getRpcServerAddress(jn);
+    assertThat("Bind address " + address + " is not wildcard.",
+        address, is("/" + WILDCARD_ADDRESS));
+  }
+
+  @Test(timeout=300000)
+  public void testHttpBindHostKey() throws IOException {
+    LOG.info("Testing without " + DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
+
+    // The JournalNode should not bind the wildcard address by default.
+    conf.set(DFS_JOURNALNODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    String address = jn.getHttpAddress().toString();
+    assertFalse("HTTP Bind address not expected to be wildcard by default.",
+        address.startsWith(WILDCARD_ADDRESS));
+
+    LOG.info("Testing with " + DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
+
+    // Tell the JournalNode to bind the wildcard address.
+    conf.set(DFS_JOURNALNODE_HTTP_BIND_HOST_KEY, WILDCARD_ADDRESS);
+
+    // Verify that the JournalNode binds the wildcard address now.
+    conf.set(DFS_JOURNALNODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    address = jn.getHttpAddress().toString();
+    assertTrue("HTTP Bind address " + address + " is not wildcard.",
+        address.startsWith(WILDCARD_ADDRESS));
+  }
+
+  private static final String BASEDIR = System.getProperty("test.build.dir",
+      "target/test-dir") + "/" +
+      TestJournalNodeRespectsBindHostKeys.class.getSimpleName();
+
+  private static void setupSsl() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+    conf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    assertTrue(base.mkdirs());
+    final String keystoresDir = new File(BASEDIR).getAbsolutePath();
+    final String sslConfDir = KeyStoreTestUtil.getClasspathDir(
+        TestJournalNodeRespectsBindHostKeys.class);
+
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+  }
+
+  /**
+   * The HTTPS test is different since we need to set up SSL configuration.
+   * The JournalNode also binds the wildcard address for the HTTPS port by
+   * default, so we must pick a different host/port combination.
+   * @throws Exception
+   */
+  @Test (timeout=300000)
+  public void testHttpsBindHostKey() throws Exception {
+    LOG.info("Testing behavior without " + DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
+
+    setupSsl();
+
+    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+
+    // The JournalNode should not bind the wildcard address by default.
+    conf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    String address = jn.getHttpsAddress().toString();
+    assertFalse("HTTP Bind address not expected to be wildcard by default.",
+        address.startsWith(WILDCARD_ADDRESS));
+
+    LOG.info("Testing behavior with " + DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
+
+    // Tell the JournalNode to bind the wildcard address.
+    conf.set(DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY, WILDCARD_ADDRESS);
+
+    // Verify that the JournalNode binds the wildcard address now.
+    conf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    address = jn.getHttpsAddress().toString();
+    assertTrue("HTTP Bind address " + address + " is not wildcard.",
+        address.startsWith(WILDCARD_ADDRESS));
+  }
+}
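
For reference, a minimal sketch of how a deployment might enable the new
bind-host keys (an illustrative snippet, not part of this commit; 0.0.0.0 is
simply the usual wildcard choice):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    // Bind the JournalNode RPC, HTTP and HTTPS servers to all interfaces.
    // The advertised addresses (dfs.journalnode.rpc-address and friends)
    // stay as configured; the bind-host keys override only the bind host.
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_BIND_HOST_KEY, "0.0.0.0");
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_BIND_HOST_KEY, "0.0.0.0");
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY, "0.0.0.0");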




[30/50] [abbrv] hadoop git commit: HDFS-13408. MiniDFSCluster to support being built on randomized base directory. Contributed by Xiao Liang

Posted by sh...@apache.org.
HDFS-13408. MiniDFSCluster to support being built on randomized base directory. Contributed by Xiao Liang

(cherry picked from commit f411de6a79a0a87f03c09366cfe7a7d0726ed932)
(cherry picked from commit cf272c5179a9cb4b524016c0fca7c69c9eaa92f1)
(cherry picked from commit 956ab12ede390e1eea0a66752e0e6711f47b4b94)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99e82e2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99e82e2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99e82e2c

Branch: refs/heads/YARN-8200
Commit: 99e82e2c2cf554fa5582da2ec9615fd8b698eecc
Parents: a975250
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Apr 23 11:13:18 2018 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Mon Apr 23 11:15:28 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 22 +++++++++++++++++++-
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  | 16 +++++++-------
 2 files changed, 28 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e82e2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 887c635..a643077 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -198,8 +198,28 @@ public class MiniDFSCluster implements AutoCloseable {
       this.conf = conf;
       this.storagesPerDatanode =
           FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+      if (null == conf.get(HDFS_MINIDFS_BASEDIR)) {
+        conf.set(HDFS_MINIDFS_BASEDIR,
+            new File(getBaseDirectory()).getAbsolutePath());
+      }
     }
-    
+
+    public Builder(Configuration conf, File basedir) {
+      this.conf = conf;
+      this.storagesPerDatanode =
+          FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+      if (null == basedir) {
+        throw new IllegalArgumentException(
+            "MiniDFSCluster base directory cannot be null");
+      }
+      String cdir = conf.get(HDFS_MINIDFS_BASEDIR);
+      if (cdir != null) {
+        throw new IllegalArgumentException(
+            "MiniDFSCluster base directory already defined (" + cdir + ")");
+      }
+      conf.set(HDFS_MINIDFS_BASEDIR, basedir.getAbsolutePath());
+    }
+
     /**
      * Default: 0
      */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e82e2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index e1346e9..296ede3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -200,9 +200,8 @@ public class TestMiniDFSCluster {
   public void testIsClusterUpAfterShutdown() throws Throwable {
     Configuration conf = new HdfsConfiguration();
     File testDataCluster4 = new File(testDataPath, CLUSTER_4);
-    String c4Path = testDataCluster4.getAbsolutePath();
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
-    MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
+    MiniDFSCluster cluster4 =
+        new MiniDFSCluster.Builder(conf, testDataCluster4).build();
     try {
       DistributedFileSystem dfs = cluster4.getFileSystem();
       dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
@@ -221,12 +220,11 @@ public class TestMiniDFSCluster {
     Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
     File testDataCluster5 = new File(testDataPath, CLUSTER_5);
-    String c5Path = testDataCluster5.getAbsolutePath();
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
-    try (MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(1)
-        .checkDataNodeHostConfig(true)
-        .build()) {
+    try (MiniDFSCluster cluster5 =
+        new MiniDFSCluster.Builder(conf, testDataCluster5)
+          .numDataNodes(1)
+          .checkDataNodeHostConfig(true)
+          .build()) {
       assertEquals("DataNode hostname config not respected", "MYHOST",
           cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
     }
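
A minimal usage sketch of the new two-argument Builder (hypothetical test
code; it assumes GenericTestUtils.getRandomizedTempPath() from the Hadoop
test utilities, as used elsewhere on this branch):

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.test.GenericTestUtils;

    // Build the cluster on a per-run randomized base directory so that
    // parallel test executions do not collide on the default base dir.
    Configuration conf = new HdfsConfiguration();
    File base = new File(GenericTestUtils.getRandomizedTempPath());
    try (MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf, base).build()) {
      cluster.waitActive();
      // ... exercise cluster.getFileSystem() here ...
    }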




[27/50] [abbrv] hadoop git commit: YARN-7786. NullPointerException while launching ApplicationMaster. Contributed by lujie

Posted by sh...@apache.org.
YARN-7786. NullPointerException while launching ApplicationMaster. Contributed by lujie


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06f3f453
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06f3f453
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06f3f453

Branch: refs/heads/YARN-8200
Commit: 06f3f453c22dfc77c1444ab58606bfb30282f671
Parents: 733248c
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Apr 20 13:31:19 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri Apr 20 13:31:19 2018 -0500

----------------------------------------------------------------------
 .../resourcemanager/amlauncher/AMLauncher.java  | 21 +++--
 .../TestApplicationMasterLauncher.java          | 82 ++++++++++++++++----
 2 files changed, 84 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06f3f453/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index e0754a0..19f1d81 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -105,7 +105,7 @@ public class AMLauncher implements Runnable {
     connect();
     ContainerId masterContainerID = masterContainer.getId();
     ApplicationSubmissionContext applicationContext =
-      application.getSubmissionContext();
+        application.getSubmissionContext();
     LOG.info("Setting up container " + masterContainer
         + " for AM " + application.getAppAttemptId());  
     ContainerLaunchContext launchContext =
@@ -189,6 +189,10 @@ public class AMLauncher implements Runnable {
     ContainerLaunchContext container = 
         applicationMasterContext.getAMContainerSpec();
 
+    if (container == null) {
+      throw new IOException(containerID +
+          " has been cleaned up before it was launched");
+    }
     // Finalize the container
     setupTokens(container, containerID);
     // set the flow context optionally for timeline service v.2
@@ -308,11 +312,7 @@ public class AMLauncher implements Runnable {
         handler.handle(new RMAppAttemptEvent(application.getAppAttemptId(),
             RMAppAttemptEventType.LAUNCHED));
       } catch(Exception ie) {
-        String message = "Error launching " + application.getAppAttemptId()
-            + ". Got exception: " + StringUtils.stringifyException(ie);
-        LOG.info(message);
-        handler.handle(new RMAppAttemptEvent(application
-            .getAppAttemptId(), RMAppAttemptEventType.LAUNCH_FAILED, message));
+        onAMLaunchFailed(masterContainer.getId(), ie);
       }
       break;
     case CLEANUP:
@@ -347,4 +347,13 @@ public class AMLauncher implements Runnable {
       throw (IOException) t;
     }
   }
+
+  @SuppressWarnings("unchecked")
+  protected void onAMLaunchFailed(ContainerId containerId, Exception ie) {
+    String message = "Error launching " + application.getAppAttemptId()
+            + ". Got exception: " + StringUtils.stringifyException(ie);
+    LOG.info(message);
+    handler.handle(new RMAppAttemptEvent(application
+           .getAppAttemptId(), RMAppAttemptEventType.LAUNCH_FAILED, message));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06f3f453/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
index 1603c2d..d834474 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
@@ -24,12 +24,14 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -73,6 +75,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.utils.AMRMClientUtils;
@@ -83,6 +86,9 @@ import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Test;
 
+import com.google.common.base.Supplier;
+
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -206,7 +212,7 @@ public class TestApplicationMasterLauncher {
   public void testAMLaunchAndCleanup() throws Exception {
     Logger rootLogger = LogManager.getRootLogger();
     rootLogger.setLevel(Level.DEBUG);
-    MyContainerManagerImpl containerManager = new MyContainerManagerImpl();
+    final MyContainerManagerImpl containerManager = new MyContainerManagerImpl();
     MockRMWithCustomAMLauncher rm = new MockRMWithCustomAMLauncher(
         containerManager);
     rm.start();
@@ -217,10 +223,14 @@ public class TestApplicationMasterLauncher {
     // kick the scheduling
     nm1.nodeHeartbeat(true);
 
-    int waitCount = 0;
-    while (containerManager.launched == false && waitCount++ < 20) {
-      LOG.info("Waiting for AM Launch to happen..");
-      Thread.sleep(1000);
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override public Boolean get() {
+          return containerManager.launched;
+        }
+      }, 100, 200 * 100);
+    } catch (TimeoutException e) {
+      fail("timed out while waiting for AM Launch to happen.");
     }
     Assert.assertTrue(containerManager.launched);
 
@@ -234,7 +244,7 @@ public class TestApplicationMasterLauncher {
         .getMasterContainer().getId()
         .toString(), containerManager.containerIdAtContainerManager);
     Assert.assertEquals(nm1.getNodeId().toString(),
-      containerManager.nmHostAtContainerManager);
+        containerManager.nmHostAtContainerManager);
     Assert.assertEquals(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS,
         containerManager.maxAppAttempts);
 
@@ -247,10 +257,14 @@ public class TestApplicationMasterLauncher {
     nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
     rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FINISHED);
 
-    waitCount = 0;
-    while (containerManager.cleanedup == false && waitCount++ < 20) {
-      LOG.info("Waiting for AM Cleanup to happen..");
-      Thread.sleep(1000);
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override public Boolean get() {
+          return containerManager.cleanedup;
+        }
+      }, 100, 200 * 100);
+    } catch (TimeoutException e) {
+      fail("timed out while waiting for AM cleanup to happen.");
     }
     Assert.assertTrue(containerManager.cleanedup);
 
@@ -259,6 +273,48 @@ public class TestApplicationMasterLauncher {
   }
 
   @Test
+  public void testAMCleanupBeforeLaunch() throws Exception {
+    MockRM rm = new MockRM();
+    rm.start();
+    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 5120);
+    RMApp app = rm.submitApp(2000);
+    // kick the scheduling
+    nm1.nodeHeartbeat(true);
+    final RMAppAttempt attempt = app.getCurrentAppAttempt();
+
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override public Boolean get() {
+          return attempt.getMasterContainer() != null;
+        }
+      }, 10, 200 * 100);
+    } catch (TimeoutException e) {
+      fail("timed out while waiting for AM Launch to happen.");
+    }
+
+    //send kill before launch
+    rm.killApp(app.getApplicationId());
+    rm.waitForState(app.getApplicationId(), RMAppState.KILLED);
+    //Launch after kill
+    AMLauncher launcher = new AMLauncher(rm.getRMContext(),
+            attempt, AMLauncherEventType.LAUNCH, rm.getConfig()) {
+        @Override
+        public void onAMLaunchFailed(ContainerId containerId, Exception e) {
+          Assert.assertFalse("NullPointerException happens "
+                 + " while launching " + containerId,
+                   e instanceof NullPointerException);
+        }
+        @Override
+        protected ContainerManagementProtocol getContainerMgrProxy(
+            ContainerId containerId) {
+          return new MyContainerManagerImpl();
+        }
+    };
+    launcher.run();
+    rm.stop();
+  }
+
+  @Test
   public void testRetriesOnFailures() throws Exception {
     final ContainerManagementProtocol mockProxy =
         mock(ContainerManagementProtocol.class);
@@ -304,7 +360,7 @@ public class TestApplicationMasterLauncher {
     rm.drainEvents();
 
     MockRM.waitForState(app.getCurrentAppAttempt(),
-      RMAppAttemptState.LAUNCHED, 500);
+        RMAppAttemptState.LAUNCHED, 500);
   }
 
   @SuppressWarnings("unused")
@@ -336,9 +392,9 @@ public class TestApplicationMasterLauncher {
 
     AllocateResponse amrs = null;
     try {
-        amrs = am.allocate(new ArrayList<ResourceRequest>(),
+      amrs = am.allocate(new ArrayList<ResourceRequest>(),
           new ArrayList<ContainerId>());
-        Assert.fail();
+      Assert.fail();
     } catch (ApplicationMasterNotRegisteredException e) {
     }
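
The test changes above replace hand-rolled sleep loops with
GenericTestUtils.waitFor. As a standalone sketch of that idiom (the
AtomicBoolean flag is illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;
    import com.google.common.base.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;

    // Poll the condition every 100 ms for at most 20 s; waitFor throws
    // TimeoutException if the condition never becomes true.
    final AtomicBoolean launched = new AtomicBoolean(false);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return launched.get();
      }
    }, 100, 20 * 1000);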
 




[44/50] [abbrv] hadoop git commit: HDFS-13434. RBF: Fix dead links in RBF document. Contributed by Chetna Chaudhari.

Posted by sh...@apache.org.
HDFS-13434. RBF: Fix dead links in RBF document. Contributed by Chetna Chaudhari.

(cherry picked from commit f469628bba350ba79bc6a0d38f9dc1cb5eb65c77)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae32e214
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae32e214
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae32e214

Branch: refs/heads/YARN-8200
Commit: ae32e2140cb81f8b4d402b8684a5244fe93b4ec6
Parents: 250ea47
Author: Inigo Goiri <in...@apache.org>
Authored: Fri Apr 27 15:13:47 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri Apr 27 15:16:46 2018 -0700

----------------------------------------------------------------------
 .../src/site/markdown/HDFSRouterFederation.md       | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae32e214/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index 43e89ed..70c6226 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -21,7 +21,7 @@ Introduction
 ------------
 
 NameNodes have scalability limits because of the metadata overhead comprised of inodes (files and directories) and file blocks, the number of Datanode heartbeats, and the number of HDFS RPC client requests.
-The common solution is to split the filesystem into smaller subclusters [HDFS Federation](./Federation.html) and provide a federated view [ViewFs](./ViewFs.html).
+The common solution is to split the filesystem into smaller subclusters [HDFS Federation](../hadoop-hdfs/Federation.html) and provide a federated view [ViewFs](../hadoop-hdfs/ViewFs.html).
 The problem is how to maintain the split of the subclusters (e.g., namespace partition), which forces users to connect to multiple subclusters and manage the allocation of folders/files to them.
 
 
@@ -37,8 +37,8 @@ This layer must be scalable, highly available, and fault tolerant.
 
 This federation layer comprises multiple components.
 The _Router_ component has the same interface as a NameNode and forwards client requests to the correct subcluster, based on ground-truth information from a State Store.
-The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](./ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters.
-This approach has the same architecture as [YARN federation](../hadoop-yarn/Federation.html).
+The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](../hadoop-hdfs/ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters.
+This approach has the same architecture as [YARN federation](../../hadoop-yarn/hadoop-yarn-site/Federation.html).
 
 ![Router-based Federation Sequence Diagram | width=800](./images/routerfederation.png)
 
@@ -140,7 +140,7 @@ Examples users may encounter include the following.
 ### Quota management
 Federation supports and controls global quota at mount table level.
 For performance reasons, the Router caches the quota usage and updates it periodically. These quota usage values
-will be used for quota-verification during each WRITE RPC call invoked in RouterRPCSever. See [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html)
+will be used for quota-verification during each WRITE RPC call invoked in RouterRPCServer. See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html)
 for the quota detail.
 
 ### State Store
@@ -163,7 +163,7 @@ The Routers discard the entries older than a certain threshold (e.g., ten Router
 
 * **Mount Table**:
 This table hosts the mapping between folders and subclusters.
-It is similar to the mount table in [ViewFs](.ViewFs.html) where it specifies the federated folder, the destination subcluster and the path in that folder.
+It is similar to the mount table in [ViewFs](../hadoop-hdfs/ViewFs.html) where it specifies the federated folder, the destination subcluster and the path in that folder.
 
 
 ### Security
@@ -175,7 +175,7 @@ Deployment
 
 By default, the Router is ready to take requests and monitor the NameNode in the local machine.
 It needs to know the State Store endpoint by setting `dfs.federation.router.store.driver.class`.
-The rest of the options are documented in [hdfs-default.xml](./hdfs-default.xml).
+The rest of the options are documented in [hdfs-default.xml](../hadoop-hdfs/hdfs-default.xml).
 
 Once the Router is configured, it can be started:
 
@@ -187,7 +187,7 @@ And to stop it:
 
 ### Mount table management
 
-The mount table entries are pretty much the same as in [ViewFs](./ViewFs.html).
+The mount table entries are pretty much the same as in [ViewFs](../hadoop-hdfs/ViewFs.html).
 A good practice for simplifying the management is to name the federated namespace with the same names as the destination namespaces.
 For example, if we want to mount `/data/app1` in the federated namespace, it is recommended to have that same name in the destination namespace.
 
@@ -290,7 +290,7 @@ Router configuration
 --------------------
 
 One can add the configurations for Router-based federation to **hdfs-site.xml**.
-The main options are documented in [hdfs-default.xml](./hdfs-default.xml).
+The main options are documented in [hdfs-default.xml](../hadoop-hdfs/hdfs-default.xml).
 The configuration values are described in this section.
 
 ### RPC server
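
As a concrete illustration of the mount table management and quota sections
touched above (paths, nameservice IDs and quota values are illustrative):

    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /data/app1 ns1 /data/app1
    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -setQuota /data/app1 -nsQuota 100 -ssQuota 1024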




[48/50] [abbrv] hadoop git commit: HDFS-13503. Fix TestFsck test failures on Windows. Contributed by Xiao Liang.

Posted by sh...@apache.org.
HDFS-13503. Fix TestFsck test failures on Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fd93ee5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fd93ee5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fd93ee5

Branch: refs/heads/YARN-8200
Commit: 9fd93ee533736af85e7b684a0ab8bbc5e2d9a528
Parents: cb3414a
Author: Inigo Goiri <in...@apache.org>
Authored: Tue May 1 08:15:07 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue May 1 08:15:07 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 110 ++++++++++++-------
 2 files changed, 72 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fd93ee5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index a643077..6dfa5b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -2791,7 +2791,8 @@ public class MiniDFSCluster implements AutoCloseable {
    * @return Storage directory
    */
   public File getStorageDir(int dnIndex, int dirIndex) {
-    return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
+    return new File(determineDfsBaseDir(),
+        getStorageDirPath(dnIndex, dirIndex));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fd93ee5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index e9f26c3..b4e4e1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.MiniDFSCluster.HDFS_MINIDFS_BASEDIR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -195,7 +196,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     final String fileName = "/srcdat";
     util.createFiles(fs, fileName);
@@ -283,7 +286,9 @@ public class TestFsck {
         setNumFiles(20).build();
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     util.waitReplication(fs, "/srcdat", (short)3);
@@ -301,7 +306,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
 
     // Create a cluster with the current user, write some files
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     final MiniDFSCluster c2 = cluster;
     final String dir = "/dfsck";
     final Path dirpath = new Path(dir);
@@ -347,8 +354,9 @@ public class TestFsck {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3,
         (5 * dfsBlockSize) + (dfsBlockSize - 1), 5 * dfsBlockSize);
     FileSystem fs = null;
-    cluster = new MiniDFSCluster.Builder(conf).
-        numDataNodes(numDatanodes).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDatanodes).build();
     String topDir = "/srcdat";
     fs = cluster.getFileSystem();
     cluster.waitActive();
@@ -546,7 +554,9 @@ public class TestFsck {
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     String topDir = "/srcdat";
     fs = cluster.getFileSystem();
     cluster.waitActive();
@@ -610,7 +620,9 @@ public class TestFsck {
         setNumFiles(4).build();
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     String topDir = "/srcdat";
     String randomString = "HADOOP  ";
     fs = cluster.getFileSystem();
@@ -661,7 +673,9 @@ public class TestFsck {
     String outStr = null;
     short factor = 1;
 
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/testCorruptBlock");
@@ -732,7 +746,9 @@ public class TestFsck {
     Random random = new Random();
     String outStr = null;
     short factor = 1;
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(2).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/testUnderMinReplicatedBlock");
@@ -803,9 +819,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
     cluster.waitClusterUp();
     final DistributedFileSystem dfs = cluster.getFileSystem();
 
@@ -952,7 +968,8 @@ public class TestFsck {
   @Test
   public void testFsckError() throws Exception {
     // bring up a one-node cluster
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     String fileName = "/test.txt";
     Path filePath = new Path(fileName);
     FileSystem fs = cluster.getFileSystem();
@@ -984,7 +1001,8 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     FileSystem fs = null;
 
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     DFSTestUtil util = new DFSTestUtil.Builder().
@@ -1047,7 +1065,8 @@ public class TestFsck {
   @Test
   public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
     // bring up a one-node cluster
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     String fileName = "/test.txt";
     Path filePath = new Path(fileName);
     FileSystem fs = cluster.getFileSystem();
@@ -1091,8 +1110,9 @@ public class TestFsck {
     DistributedFileSystem dfs = null;
     
     // Startup a minicluster
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numReplicas).build();
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
@@ -1151,9 +1171,9 @@ public class TestFsck {
     DistributedFileSystem dfs = null;
     
     // Startup a minicluster
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-        .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
@@ -1263,7 +1283,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     final String fileName = "/srcdat";
     util.createFiles(fs, fileName);
@@ -1290,7 +1312,8 @@ public class TestFsck {
    */
   @Test
   public void testFsckForSnapshotFiles() throws Exception {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(1)
         .build();
     String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
         "-files");
@@ -1325,9 +1348,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
     DistributedFileSystem dfs = null;
-    cluster =
-      new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-        .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1380,9 +1403,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
     DistributedFileSystem dfs;
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1464,7 +1487,8 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY,
         replFactor);
 
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn)
         .hosts(hosts)
         .racks(racks)
@@ -1584,9 +1608,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1653,7 +1677,8 @@ public class TestFsck {
    */
   @Test
   public void testStoragePoliciesCK() throws Exception {
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(3)
         .storageTypes(
             new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
@@ -1696,9 +1721,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
     DistributedFileSystem dfs;
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1777,7 +1802,8 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY,
         replFactor);
 
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn)
         .hosts(hosts)
         .racks(racks)
@@ -1886,7 +1912,8 @@ public class TestFsck {
 
     int numFiles = 3;
     int numSnapshots = 0;
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     cluster.waitActive();
     hdfs = cluster.getFileSystem();
     DFSTestUtil util = new DFSTestUtil.Builder().
@@ -1966,7 +1993,8 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replication);
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     DistributedFileSystem dfs = cluster.getFileSystem();
     cluster.waitActive();
 
@@ -2061,6 +2089,7 @@ public class TestFsck {
     HostsFileWriter hostsFileWriter = new HostsFileWriter();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
+    conf.set(HDFS_MINIDFS_BASEDIR, GenericTestUtils.getRandomizedTempPath());
     if (defineUpgradeDomain) {
       conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
           CombinedHostFileManager.class, HostConfigManager.class);
@@ -2107,7 +2136,8 @@ public class TestFsck {
   @Test(timeout = 300000)
   public void testFsckCorruptWhenOneReplicaIsCorrupt()
       throws Exception {
-    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        new File(GenericTestUtils.getRandomizedTempPath()))
         .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(2)
         .build()) {
       cluster.waitActive();
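
A minimal sketch of the pattern every hunk above applies: give each
MiniDFSCluster a randomized base directory so repeated or concurrent test
runs cannot collide on the default shared data directory. The class and
test names below are illustrative, not from the patch.

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.test.GenericTestUtils;
    import org.junit.Test;

    public class TestIsolatedBaseDir {
      @Test
      public void testClusterUsesIsolatedBaseDir() throws Exception {
        Configuration conf = new Configuration();
        // A unique temp path per invocation keeps parallel runs from
        // clashing on MiniDFSCluster's default build/test/data directory.
        File builderBaseDir =
            new File(GenericTestUtils.getRandomizedTempPath());
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf, builderBaseDir)
                .numDataNodes(1).build();
        try {
          cluster.waitActive();
          // ... run fsck assertions against cluster.getFileSystem() ...
        } finally {
          cluster.shutdown();
        }
      }
    }

One hunk above uses the equivalent configuration-key form,
conf.set(HDFS_MINIDFS_BASEDIR, GenericTestUtils.getRandomizedTempPath()),
which suits clusters that are constructed inside shared helpers.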




[42/50] [abbrv] hadoop git commit: MAPREDUCE-7072. mapred job -history prints duplicate counter in human output (wilfreds via rkanter)

Posted by sh...@apache.org.
MAPREDUCE-7072. mapred job -history prints duplicate counter in human output (wilfreds via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b47275fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b47275fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b47275fe

Branch: refs/heads/YARN-8200
Commit: b47275fe28f51d14adf777822b45c0812e71a2a6
Parents: 600f4d4
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Apr 27 09:57:31 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Apr 27 09:57:31 2018 -0700

----------------------------------------------------------------------
 .../HumanReadableHistoryViewerPrinter.java      |  3 +-
 .../jobhistory/JSONHistoryViewerPrinter.java    |  3 +-
 .../jobhistory/TestHistoryViewerPrinter.java    | 76 ++++++++++++++++++++
 3 files changed, 80 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b47275fe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
index d3da9f4..fdf3c47 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
@@ -148,7 +148,8 @@ class HumanReadableHistoryViewerPrinter implements HistoryViewerPrinter {
           "Total Value"));
       buff.append("\n------------------------------------------" +
           "---------------------------------------------");
-      for (String groupName : totalCounters.getGroupNames()) {
+      for (CounterGroup counterGroup : totalCounters) {
+        String groupName = counterGroup.getName();
         CounterGroup totalGroup = totalCounters.getGroup(groupName);
         CounterGroup mapGroup = mapCounters.getGroup(groupName);
         CounterGroup reduceGroup = reduceCounters.getGroup(groupName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b47275fe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
index 456dcf7..850fe2f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
@@ -104,7 +104,8 @@ class JSONHistoryViewerPrinter implements HistoryViewerPrinter {
     // Killed jobs might not have counters
     if (totalCounters != null) {
       JSONObject jGroups = new JSONObject();
-      for (String groupName : totalCounters.getGroupNames()) {
+      for (CounterGroup counterGroup : totalCounters) {
+        String groupName = counterGroup.getName();
         CounterGroup totalGroup = totalCounters.getGroup(groupName);
         CounterGroup mapGroup = mapCounters.getGroup(groupName);
         CounterGroup reduceGroup = reduceCounters.getGroup(groupName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b47275fe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
index 358e9b2..74a2605 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
@@ -859,6 +859,46 @@ public class TestHistoryViewerPrinter {
         "}\n", outStr, JSONCompareMode.NON_EXTENSIBLE);
   }
 
+  @Test
+  public void testHumanDupePrinter() throws Exception {
+    JobHistoryParser.JobInfo job = createJobInfo2();
+    // Counters are only part of the overview so printAll can be false or true
+    // this does not affect the test, task counters are not printed
+    HumanReadableHistoryViewerPrinter printer =
+        new HumanReadableHistoryViewerPrinter(job, false, "http://",
+            TimeZone.getTimeZone("GMT"));
+    String outStr = run(printer);
+    // We are not interested in anything but the duplicate counter
+    int count1 = outStr.indexOf(
+        "|Map-Reduce Framework          |Map input records             |");
+    Assert.assertNotEquals("First counter occurrence not found", -1, count1);
+    int count2 = outStr.indexOf(
+        "|Map-Reduce Framework          |Map input records             |",
+        count1 + 1);
+    Assert.assertEquals("Duplicate counter found at: " + count1 +
+        " and " + count2, -1, count2);
+  }
+
+  @Test
+  public void testJSONDupePrinter() throws Exception {
+    JobHistoryParser.JobInfo job = createJobInfo2();
+    // Counters are part of the overview and task info
+    // Tasks only have bogus counters in the test if that is changed printAll
+    // must then be kept as false for this test to pass
+    JSONHistoryViewerPrinter printer =
+        new JSONHistoryViewerPrinter(job, false, "http://");
+    String outStr = run(printer);
+    // We are not interested in anything but the duplicate counter
+    int count1 = outStr.indexOf(
+        "\"counterName\":\"MAP_INPUT_RECORDS\"");
+    Assert.assertNotEquals("First counter occurrence not found", -1, count1);
+    int count2 = outStr.indexOf(
+        "\"counterName\":\"MAP_INPUT_RECORDS\"",
+        count1 + 1);
+    Assert.assertEquals("Duplicate counter found at: " + count1 +
+        " and " + count2, -1, count2);
+  }
+
   private String run(HistoryViewerPrinter printer) throws Exception {
     ByteArrayOutputStream boas = new ByteArrayOutputStream();
     PrintStream out = new PrintStream(boas, true);
@@ -901,6 +941,32 @@ public class TestHistoryViewerPrinter {
     addTaskInfo(job, TaskType.JOB_CLEANUP, 9, TaskStatus.State.SUCCEEDED);
     return job;
   }
+  private static JobHistoryParser.JobInfo createJobInfo2() {
+    JobHistoryParser.JobInfo job = new JobHistoryParser.JobInfo();
+    job.submitTime = 1317928501754L;
+    job.finishTime = job.submitTime + 15000;
+    job.jobid = JobID.forName("job_1317928501754_0001");
+    job.username = "test";
+    job.jobname = "Dupe counter output";
+    job.jobQueueName = "root.test";
+    job.jobConfPath = "/tmp/job.xml";
+    job.launchTime = job.submitTime + 1000;
+    job.totalMaps = 1;
+    job.totalReduces = 0;
+    job.finishedMaps = 1;
+    job.finishedReduces = 0;
+    job.failedMaps = 0;
+    job.failedReduces = 0;
+    job.jobStatus = JobStatus.State.SUCCEEDED.name();
+    job.totalCounters = createDeprecatedCounters();
+    job.mapCounters = createDeprecatedCounters();
+    job.reduceCounters = createDeprecatedCounters();
+    job.tasksMap = new HashMap<>();
+    addTaskInfo(job, TaskType.JOB_SETUP, 1, TaskStatus.State.SUCCEEDED);
+    addTaskInfo(job, TaskType.MAP, 2, TaskStatus.State.SUCCEEDED);
+    addTaskInfo(job, TaskType.JOB_CLEANUP, 3, TaskStatus.State.SUCCEEDED);
+    return job;
+  }
 
   private static Counters createCounters() {
     Counters counters = new Counters();
@@ -910,6 +976,16 @@ public class TestHistoryViewerPrinter {
     return counters;
   }
 
+  private static Counters createDeprecatedCounters() {
+    Counters counters = new Counters();
+    // Deprecated counter: make sure it is only printed once
+    counters.findCounter("org.apache.hadoop.mapred.Task$Counter",
+        "MAP_INPUT_RECORDS").setValue(1);
+    counters.findCounter("File System Counters",
+        "FILE: Number of bytes read").setValue(1);
+    return counters;
+  }
+
   private static void addTaskInfo(JobHistoryParser.JobInfo job,
       TaskType type, int id, TaskStatus.State status) {
     JobHistoryParser.TaskInfo task = new JobHistoryParser.TaskInfo();
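
The printer change is the heart of the fix: iterating the Counters object
yields each CounterGroup exactly once, whereas getGroupNames() can report
a deprecated group under both its legacy and its current name, which is
what the new createDeprecatedCounters() fixture reproduces. A hedged
sketch of the iteration difference, with printing reduced to the group
name:

    import org.apache.hadoop.mapreduce.CounterGroup;
    import org.apache.hadoop.mapreduce.Counters;

    static void printGroupsOnce(Counters totalCounters) {
      // Old form, which can visit a deprecated group twice:
      //   for (String groupName : totalCounters.getGroupNames()) { ... }
      // New form: each group appears exactly once.
      for (CounterGroup counterGroup : totalCounters) {
        String groupName = counterGroup.getName();
        System.out.println(groupName);
      }
    }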




[12/50] [abbrv] hadoop git commit: YARN-7810. Update TestDockerContainerRuntime to test with current user credential. (contributed by Shane Kumpf)

Posted by sh...@apache.org.
YARN-7810.  Update TestDockerContainerRuntime to test with current user credential.
            (contributed by Shane Kumpf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07317562
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07317562
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07317562

Branch: refs/heads/YARN-8200
Commit: 0731756293e695e7bfefb1d05884a0893b3867c6
Parents: 900e6b4
Author: Eric Yang <ey...@apache.org>
Authored: Mon Apr 16 13:39:21 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Mon Apr 16 13:39:21 2018 -0400

----------------------------------------------------------------------
 .../runtime/TestDockerContainerRuntime.java     | 146 +++++++++++++------
 1 file changed, 102 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07317562/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index aef94a7..5920be3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -81,7 +81,9 @@ public class TestDockerContainerRuntime {
   private HashMap<String, String> env;
   private String image;
   private String uidGidPair;
-  private String runAsUser;
+  private String runAsUser = System.getProperty("user.name");
+  private String[] groups = {};
+  private String groupGids;
   private String user;
   private String appId;
   private String containerIdStr = containerId;
@@ -130,8 +132,42 @@ public class TestDockerContainerRuntime {
     when(context.getEnvironment()).thenReturn(env);
     when(container.getUser()).thenReturn(submittingUser);
 
-    uidGidPair = "";
-    runAsUser = "run_as_user";
+    // Get the running user's uid and gid for remap
+    String uid = "";
+    String gid = "";
+    Shell.ShellCommandExecutor shexec1 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-u", runAsUser});
+    Shell.ShellCommandExecutor shexec2 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-g", runAsUser});
+    Shell.ShellCommandExecutor shexec3 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-G", runAsUser});
+    try {
+      shexec1.execute();
+      // get rid of newline at the end
+      uid = shexec1.getOutput().replaceAll("\n$", "");
+    } catch (Exception e) {
+      LOG.info("Could not run id -u command: " + e);
+    }
+    try {
+      shexec2.execute();
+      // get rid of newline at the end
+      gid = shexec2.getOutput().replaceAll("\n$", "");
+    } catch (Exception e) {
+      LOG.info("Could not run id -g command: " + e);
+    }
+    try {
+      shexec3.execute();
+      groups = shexec3.getOutput().replace("\n", " ").split(" ");
+    } catch (Exception e) {
+      LOG.info("Could not run id -G command: " + e);
+    }
+    uidGidPair = uid + ":" + gid;
+    StringBuilder sb = new StringBuilder();
+    for (String group : groups) {
+      sb.append(group).append(",");
+    }
+    groupGids = sb.toString().replaceAll(",$", "");
+
     user = "user";
     appId = "app_id";
     containerIdStr = containerId;
@@ -301,7 +337,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
             (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 13;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -311,6 +347,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + groupGids,
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -326,7 +364,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
   }
@@ -505,7 +543,7 @@ public class TestDockerContainerRuntime {
     //This is the expected docker invocation for this case
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
-    int expected = 13;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -515,6 +553,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + groupGids,
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=test.hostname",
         dockerCommands.get(counter++));
     Assert
@@ -532,7 +572,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
   }
@@ -571,7 +611,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 13;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -581,6 +621,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + groupGids,
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -596,7 +638,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
 
@@ -624,6 +666,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + groupGids,
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -640,7 +684,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
 
@@ -677,7 +721,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 13;
+    int expected = 14;
     Assert.assertEquals(expected, dockerCommands.size());
 
     String command = dockerCommands.get(0);
@@ -786,7 +830,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 15;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -796,6 +840,8 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + groupGids,
+        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -812,7 +858,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
   }
@@ -903,33 +949,39 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    Assert.assertEquals(14, dockerCommands.size());
-    Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
+    int expected = 15;
+    int counter = 0;
+    Assert.assertEquals(expected, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]",
+        dockerCommands.get(counter++));
     Assert.assertEquals("  cap-add=SYS_CHROOT,NET_BIND_SERVICE",
-        dockerCommands.get(1));
-    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(2));
-    Assert.assertEquals("  detach=true", dockerCommands.get(3));
-    Assert.assertEquals("  docker-command=run", dockerCommands.get(4));
-    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(5));
-    Assert.assertEquals("  image=busybox:latest", dockerCommands.get(6));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
+    Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
+    Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + groupGids,
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
+    Assert.assertEquals("  image=busybox:latest",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
-        dockerCommands.get(7));
-    Assert.assertEquals("  name=container_id", dockerCommands.get(8));
-    Assert.assertEquals("  net=host", dockerCommands.get(9));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
+    Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals(
         "  ro-mounts=/test_local_dir/test_resource_file:test_mount",
-        dockerCommands.get(10));
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
             + "/test_filecache_dir:/test_filecache_dir,"
             + "/test_container_work_dir:/test_container_work_dir,"
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
-        dockerCommands.get(11));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(12));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(13));
+        dockerCommands.get(counter));
   }
 
   @Test
@@ -973,34 +1025,40 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    Assert.assertEquals(14, dockerCommands.size());
-    Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
+    int expected = 15;
+    int counter = 0;
+    Assert.assertEquals(expected, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]",
+        dockerCommands.get(counter++));
     Assert.assertEquals("  cap-add=SYS_CHROOT,NET_BIND_SERVICE",
-        dockerCommands.get(1));
-    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(2));
-    Assert.assertEquals("  detach=true", dockerCommands.get(3));
-    Assert.assertEquals("  docker-command=run", dockerCommands.get(4));
-    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(5));
-    Assert.assertEquals("  image=busybox:latest", dockerCommands.get(6));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
+    Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
+    Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + groupGids,
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
+    Assert.assertEquals("  image=busybox:latest",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
-        dockerCommands.get(7));
-    Assert.assertEquals("  name=container_id", dockerCommands.get(8));
-    Assert.assertEquals("  net=host", dockerCommands.get(9));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
+    Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals(
         "  ro-mounts=/test_local_dir/test_resource_file:test_mount1,"
             + "/test_local_dir/test_resource_file:test_mount2",
-        dockerCommands.get(10));
+        dockerCommands.get(counter++));
     Assert.assertEquals(
         "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
             + "/test_filecache_dir:/test_filecache_dir,"
             + "/test_container_work_dir:/test_container_work_dir,"
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
-        dockerCommands.get(11));
-    Assert.assertEquals("  user=run_as_user", dockerCommands.get(12));
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(13));
+        dockerCommands.get(counter));
 
   }
 
@@ -1020,7 +1078,7 @@ public class TestDockerContainerRuntime {
     PrivilegedOperation op = capturePrivilegedOperation();
     Assert.assertEquals(op.getOperationType(),
         PrivilegedOperation.OperationType.SIGNAL_CONTAINER);
-    Assert.assertEquals("run_as_user", op.getArguments().get(0));
+    Assert.assertEquals(runAsUser, op.getArguments().get(0));
     Assert.assertEquals("user", op.getArguments().get(1));
     Assert.assertEquals("2", op.getArguments().get(2));
     Assert.assertEquals("1234", op.getArguments().get(3));




[21/50] [abbrv] hadoop git commit: HDFS-12828. OIV ReverseXML Processor fails with escaped characters

Posted by sh...@apache.org.
HDFS-12828. OIV ReverseXML Processor fails with escaped characters

Signed-off-by: Akira Ajisaka <aa...@apache.org>
(cherry picked from commit 2d9e791a9073de9f65fef5407efd3a42894bc97f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f341c67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f341c67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f341c67

Branch: refs/heads/YARN-8200
Commit: 8f341c673f4db981544faba79d0c20bfe9c3fd99
Parents: b4b1078
Author: Erik Krogen <ek...@linkedin.com>
Authored: Wed Apr 18 14:38:23 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Apr 18 14:40:45 2018 +0900

----------------------------------------------------------------------
 .../tools/offlineImageViewer/OfflineImageReconstructor.java   | 4 ++--
 .../hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java | 7 ++++++-
 2 files changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f341c67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index 1f629b2..d14c61b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -380,8 +380,8 @@ class OfflineImageReconstructor {
           break;
         case XMLEvent.CHARACTERS:
           String val = XMLUtils.
-              unmangleXmlString(ev.asCharacters().getData(), true);
-          parent.setVal(val);
+              unmangleXmlString(ev.asCharacters().getData(), false);
+          parent.setVal(parent.getVal() + val);
           events.nextEvent();
           break;
         case XMLEvent.ATTRIBUTE:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f341c67/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 74e426e..d0a7567 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -153,10 +153,15 @@ public class TestOfflineImageViewer {
       dirCount++;
       writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
 
-      //Create a directory whose name should be escaped in XML
+      //Create directories whose name should be escaped in XML
       Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
       hdfs.mkdirs(invalidXMLDir);
       dirCount++;
+      Path entityRefXMLDir = new Path("/dirContainingEntityRef&here");
+      hdfs.mkdirs(entityRefXMLDir);
+      dirCount++;
+      writtenFiles.put(entityRefXMLDir.toString(),
+          hdfs.getFileStatus(entityRefXMLDir));
 
       //Create a directory with sticky bits
       Path stickyBitDir = new Path("/stickyBit");
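
Two defects are corrected in the reconstructor hunk: character data
surrounding an entity reference such as "&" can arrive as multiple
CHARACTERS events, and the old code overwrote the node value on every
event instead of appending, keeping only the final fragment. A standalone
StAX illustration of the splitting behavior (not Hadoop code; a parser may
or may not split at the entity, so handlers must append either way):

    import java.io.StringReader;
    import javax.xml.stream.XMLEventReader;
    import javax.xml.stream.XMLInputFactory;
    import javax.xml.stream.events.XMLEvent;

    public class EntitySplitDemo {
      public static void main(String[] args) throws Exception {
        XMLEventReader events = XMLInputFactory.newInstance()
            .createXMLEventReader(new StringReader(
                "<d>dirContainingEntityRef&amp;here</d>"));
        StringBuilder val = new StringBuilder();
        while (events.hasNext()) {
          XMLEvent ev = events.nextEvent();
          if (ev.isCharacters()) {
            // Append fragments; overwriting here would drop any text
            // that precedes the entity reference.
            val.append(ev.asCharacters().getData());
          }
        }
        System.out.println(val);  // dirContainingEntityRef&here
      }
    }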




[26/50] [abbrv] hadoop git commit: YARN-6827. [ATS1/1.5] NPE while publishing recovering applications into ATS during RM restart. Contributed by Rohith Sharma K S.

Posted by sh...@apache.org.
YARN-6827. [ATS1/1.5] NPE while publishing recovering applications into ATS during RM restart. Contributed by Rohith Sharma K S.

(cherry picked from commit 7d06806dfdeb3252ac0defe23e8c468eabfa8b5e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/733248ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/733248ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/733248ce

Branch: refs/heads/YARN-8200
Commit: 733248cec721c3c65c77f891194bf2124eaa8cf3
Parents: a3c1735
Author: Sunil G <su...@apache.org>
Authored: Fri Apr 20 00:05:53 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Fri Apr 20 12:12:26 2018 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java   | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/733248ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 844cc4c..6670cfd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1227,8 +1227,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
   protected void serviceStart() throws Exception {
     if (this.rmContext.isHAEnabled()) {
       transitionToStandby(false);
-    } else {
-      transitionToActive();
     }
 
     startWepApp();
@@ -1238,6 +1236,11 @@ public class ResourceManager extends CompositeService implements Recoverable {
       WebAppUtils.setRMWebAppPort(conf, port);
     }
     super.serviceStart();
+
+    // Non HA case, start after RM services are started.
+    if (!this.rmContext.isHAEnabled()) {
+      transitionToActive();
+    }
   }
   
   protected void doSecureLogin() throws IOException {
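
Condensed, the corrected start sequence reads as below: in the non-HA case
the RM now transitions to active only after super.serviceStart() has
brought up its child services, so consumers such as the timeline publisher
exist before recovered applications are republished. Other serviceStart()
work is elided here.

    @Override
    protected void serviceStart() throws Exception {
      if (this.rmContext.isHAEnabled()) {
        transitionToStandby(false);
      }
      startWepApp();
      super.serviceStart();           // child services come up first
      // Non HA case: activate last, after everything recovery needs
      // has been started.
      if (!this.rmContext.isHAEnabled()) {
        transitionToActive();
      }
    }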




[34/50] [abbrv] hadoop git commit: HADOOP-15385. Many tests are failing in hadoop-distcp project in branch-2. Contributed by Jason Lowe.

Posted by sh...@apache.org.
HADOOP-15385. Many tests are failing in hadoop-distcp project in branch-2. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b48854c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b48854c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b48854c

Branch: refs/heads/YARN-8200
Commit: 2b48854cfd09a048d983c2a4870d9c95573b4fff
Parents: af70c69
Author: Junping Du <ju...@apache.org>
Authored: Wed Apr 25 10:11:41 2018 +0800
Committer: Junping Du <ju...@apache.org>
Committed: Wed Apr 25 10:11:41 2018 +0800

----------------------------------------------------------------------
 .../test/java/org/apache/hadoop/tools/TestDistCpViewFs.java  | 8 ++++----
 .../test/java/org/apache/hadoop/tools/TestIntegration.java   | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b48854c/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpViewFs.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpViewFs.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpViewFs.java
index 5511e09..cab2754 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpViewFs.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpViewFs.java
@@ -60,12 +60,12 @@ public class TestDistCpViewFs {
       ConfigUtil.addLink(vConf, "/usr", new URI(fswd.toString())); 
       fs = FileSystem.get(FsConstants.VIEWFS_URI, vConf);
       fs.setWorkingDirectory(new Path("/usr"));
-      listFile = new Path("target/tmp/listing").makeQualified(fs.getUri(),
+      root = new Path("target/TestDistCpViewFs").makeQualified(fs.getUri(),
+          fs.getWorkingDirectory()).toString();
+      listFile = new Path(root, "listing").makeQualified(fs.getUri(),
               fs.getWorkingDirectory());
-      target = new Path("target/tmp/target").makeQualified(fs.getUri(),
+      target = new Path(root, "target").makeQualified(fs.getUri(),
               fs.getWorkingDirectory()); 
-      root = new Path("target/tmp").makeQualified(fs.getUri(),
-              fs.getWorkingDirectory()).toString();
       TestDistCpUtils.delete(fs, root);
     } catch (IOException e) {
       LOG.error("Exception encountered ", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b48854c/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
index ee8e7cc..f15d0d4 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
@@ -74,12 +74,12 @@ public class TestIntegration {
   public static void setup() {
     try {
       fs = FileSystem.get(getConf());
-      listFile = new Path("target/tmp/listing").makeQualified(fs.getUri(),
+      root = new Path("target/TestIntegration").makeQualified(fs.getUri(),
+          fs.getWorkingDirectory()).toString();
+      listFile = new Path(root, "listing").makeQualified(fs.getUri(),
               fs.getWorkingDirectory());
-      target = new Path("target/tmp/target").makeQualified(fs.getUri(),
+      target = new Path(root, "target").makeQualified(fs.getUri(),
               fs.getWorkingDirectory());
-      root = new Path("target/tmp").makeQualified(fs.getUri(),
-              fs.getWorkingDirectory()).toString();
       TestDistCpUtils.delete(fs, root);
     } catch (IOException e) {
       LOG.error("Exception encountered ", e);
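
Both fixtures previously rooted their working data under the shared
target/tmp path, so the two test classes could delete each other's files
within one build; deriving every path from a class-specific root removes
the overlap. The resulting pattern, in short (root, listFile, and target
are the fixture's fields, as in the diff):

    // Class-specific root: TestDistCpUtils.delete(fs, root) now only
    // touches this class's own tree.
    root = new Path("target/TestIntegration").makeQualified(fs.getUri(),
        fs.getWorkingDirectory()).toString();
    listFile = new Path(root, "listing").makeQualified(fs.getUri(),
        fs.getWorkingDirectory());
    target = new Path(root, "target").makeQualified(fs.getUri(),
        fs.getWorkingDirectory());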




[15/50] [abbrv] hadoop git commit: YARN-8164. Fix a potential NPE in AbstractSchedulerPlanFollower. Contributed by lujie.

Posted by sh...@apache.org.
YARN-8164. Fix a potential NPE in AbstractSchedulerPlanFollower. Contributed by lujie.

(cherry picked from commit f1461b2487d6c7a0b87e3799a8fdb5ade40ad898)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93fc8133
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93fc8133
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93fc8133

Branch: refs/heads/YARN-8200
Commit: 93fc813329f73a3c1b6a4f26f6262b94de1669b6
Parents: f37f680
Author: Inigo Goiri <in...@apache.org>
Authored: Mon Apr 16 17:32:19 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon Apr 16 17:32:50 2018 -0700

----------------------------------------------------------------------
 .../reservation/AbstractSchedulerPlanFollower.java              | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93fc8133/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
index 9b6a0b0..11811f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
@@ -263,7 +263,10 @@ public abstract class AbstractSchedulerPlanFollower implements PlanFollower {
         if (shouldMove) {
           moveAppsInQueueSync(expiredReservation, defReservationQueue);
         }
-        if (scheduler.getAppsInQueue(expiredReservation).size() > 0) {
+        List<ApplicationAttemptId> appsInQueue = scheduler.
+              getAppsInQueue(expiredReservation);
+        int size = (appsInQueue == null ? 0 : appsInQueue.size());
+        if (size > 0) {
           scheduler.killAllAppsInQueue(expiredReservation);
           LOG.info("Killing applications in queue: {}", expiredReservation);
         } else {
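
The guard in isolation: getAppsInQueue() can return null once the expired
reservation queue is gone, so the emptiness check must tolerate that. The
surrounding kill-or-cleanup branching is unchanged and elided below.

    List<ApplicationAttemptId> appsInQueue =
        scheduler.getAppsInQueue(expiredReservation);
    // Null-safe check; calling size() directly on a null result is the
    // NPE this patch removes.
    if (appsInQueue != null && !appsInQueue.isEmpty()) {
      scheduler.killAllAppsInQueue(expiredReservation);
    }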




[32/50] [abbrv] hadoop git commit: HDFS-13490. RBF: Fix setSafeMode in the Router. Contributed by Inigo Goiri.

Posted by sh...@apache.org.
HDFS-13490. RBF: Fix setSafeMode in the Router. Contributed by Inigo Goiri.

(cherry picked from commit b06601acce38ed60b726b99e2830f38a1ee3d2b5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc5416d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc5416d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc5416d9

Branch: refs/heads/YARN-8200
Commit: cc5416d94aa73bd3fb550a6f0df8e795f34fc5ab
Parents: db9da43
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Apr 24 11:25:23 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Apr 24 11:29:06 2018 +0800

----------------------------------------------------------------------
 .../federation/router/RouterRpcServer.java      |  3 +-
 .../server/federation/router/TestRouterRpc.java | 17 ++++
 .../server/federation/router/TestSafeMode.java  | 82 ++++++++++++++++++++
 3 files changed, 101 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc5416d9/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 8a0cf27..2897823 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -1356,7 +1356,8 @@ public class RouterRpcServer extends AbstractService
         action, isChecked);
     Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
     Map<FederationNamespaceInfo, Boolean> results =
-        rpcClient.invokeConcurrent(nss, method, true, true, Boolean.class);
+        rpcClient.invokeConcurrent(
+            nss, method, true, !isChecked, Boolean.class);
 
     // We only report true if all the name space are in safe mode
     int numSafemode = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc5416d9/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index d25f806..d689b4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -1034,6 +1034,23 @@ public class TestRouterRpc {
     boolean nnSafemode =
         nnProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
     assertEquals(nnSafemode, routerSafemode);
+
+    routerSafemode =
+        routerProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, true);
+    nnSafemode =
+        nnProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, true);
+    assertEquals(nnSafemode, routerSafemode);
+
+    assertFalse(routerProtocol.setSafeMode(
+        SafeModeAction.SAFEMODE_GET, false));
+    assertTrue(routerProtocol.setSafeMode(
+        SafeModeAction.SAFEMODE_ENTER, false));
+    assertTrue(routerProtocol.setSafeMode(
+        SafeModeAction.SAFEMODE_GET, false));
+    assertFalse(routerProtocol.setSafeMode(
+        SafeModeAction.SAFEMODE_LEAVE, false));
+    assertFalse(routerProtocol.setSafeMode(
+        SafeModeAction.SAFEMODE_GET, false));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc5416d9/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java
new file mode 100644
index 0000000..d040b7a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test the SafeMode.
+ */
+public class TestSafeMode {
+
+  /** Federated HDFS cluster. */
+  private MiniRouterDFSCluster cluster;
+
+  @Before
+  public  void setup() throws Exception {
+    cluster = new MiniRouterDFSCluster(true, 2);
+
+    // Start NNs and DNs and wait until ready
+    cluster.startCluster();
+
+    // Start routers with only an RPC service
+    cluster.startRouters();
+
+    // Register and verify all NNs with all routers
+    cluster.registerNamenodes();
+    cluster.waitNamenodeRegistration();
+
+    // Setup the mount table
+    cluster.installMockLocations();
+
+    // Make one Namenode active per nameservice
+    if (cluster.isHighAvailability()) {
+      for (String ns : cluster.getNameservices()) {
+        cluster.switchToActive(ns, NAMENODES[0]);
+        cluster.switchToStandby(ns, NAMENODES[1]);
+      }
+    }
+    cluster.waitActiveNamespaces();
+  }
+
+  @After
+  public void teardown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test
+  public void testProxySetSafemode() throws Exception {
+    RouterContext routerContext = cluster.getRandomRouter();
+    ClientProtocol routerProtocol = routerContext.getClient().getNamenode();
+    routerProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, true);
+    routerProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
+  }
+}
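
For reference, the assertions in TestRouterRpc above pin down the setSafeMode()
contract that the Router proxies: each call returns whether safe mode is active
after the requested action is applied. A minimal sketch of that contract,
assuming a ClientProtocol proxy obtained as in the test (the class and method
names here are illustrative):

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public final class SafeModeContractSketch {

  /** Enter and leave safe mode, checking the reported state each time. */
  static void toggle(ClientProtocol routerProtocol) throws IOException {
    // SAFEMODE_GET reports the current state without changing it.
    boolean inSafeMode =
        routerProtocol.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
    if (!inSafeMode) {
      // SAFEMODE_ENTER returns true: safe mode is on afterwards.
      routerProtocol.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    }
    // SAFEMODE_LEAVE returns false: safe mode is off afterwards.
    routerProtocol.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  }
}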




[23/50] [abbrv] hadoop git commit: HADOOP-15396. Some java source files are executable

Posted by sh...@apache.org.
HADOOP-15396. Some java source files are executable

Signed-off-by: Akira Ajisaka <aa...@apache.org>
(cherry picked from commit e4c39f3247da77d03c6015de4f18be75924fcb22)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/228869e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/228869e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/228869e5

Branch: refs/heads/YARN-8200
Commit: 228869e5f922a04f3e8ce7e547fdc31ed9f12127
Parents: cbcd16e
Author: Shashikant Banerjee <sb...@hortonworks.com>
Authored: Thu Apr 19 08:35:38 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Apr 19 08:38:11 2018 +0900

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java        | 0
 .../src/main/java/org/apache/hadoop/hdfs/DFSPacket.java              | 0
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java          | 0
 .../org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java | 0
 .../java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java   | 0
 .../org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java    | 0
 6 files changed, 0 insertions(+), 0 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/228869e5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
old mode 100755
new mode 100644

http://git-wip-us.apache.org/repos/asf/hadoop/blob/228869e5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
old mode 100755
new mode 100644

http://git-wip-us.apache.org/repos/asf/hadoop/blob/228869e5/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
old mode 100755
new mode 100644

http://git-wip-us.apache.org/repos/asf/hadoop/blob/228869e5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
old mode 100755
new mode 100644

http://git-wip-us.apache.org/repos/asf/hadoop/blob/228869e5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
old mode 100755
new mode 100644

http://git-wip-us.apache.org/repos/asf/hadoop/blob/228869e5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
old mode 100755
new mode 100644




[37/50] [abbrv] hadoop git commit: HDFS-13326. RBF: Improve the interfaces to modify and view mount tables. Contributed by Gang Li.

Posted by sh...@apache.org.
HDFS-13326. RBF: Improve the interfaces to modify and view mount tables. Contributed by Gang Li.

(cherry picked from commit c394051a3d4d9d531f418503cb519606ae2b2e69)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c3d746f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c3d746f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c3d746f

Branch: refs/heads/YARN-8200
Commit: 1c3d746fc0a4c24b885690a24ce676d58c2f394b
Parents: a5fc638
Author: Inigo Goiri <in...@apache.org>
Authored: Thu Apr 26 12:59:22 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Thu Apr 26 13:14:08 2018 -0700

----------------------------------------------------------------------
 .../hdfs/tools/federation/RouterAdmin.java      | 130 +++++++++++++++-
 .../federation/router/TestRouterAdminCLI.java   | 150 +++++++++++++++++++
 .../src/site/markdown/HDFSCommands.md           |   6 +-
 3 files changed, 276 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
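
Before the diff, a hedged sketch of what the new -update option enables, driven
programmatically the way the new tests do (the RouterAdmin construction and the
mount point values are illustrative assumptions, not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
import org.apache.hadoop.util.ToolRunner;

public final class UpdateMountSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed constructor; the tests below run an already-built admin tool.
    RouterAdmin admin = new RouterAdmin(conf);
    // Point /data at ns0 and ns1, read-only, with RANDOM destination order.
    int exitCode = ToolRunner.run(admin, new String[] {
        "-update", "/data", "ns0,ns1", "/data",
        "-readonly", "-order", "RANDOM",
        "-owner", "hdfs", "-group", "hadoop", "-mode", "0755"});
    System.exit(exitCode);
  }
}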


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c3d746f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index b686737..17707dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -94,7 +94,10 @@ public class RouterAdmin extends Configured implements Tool {
    */
   public void printUsage() {
     String usage = "Federation Admin Tools:\n"
-        + "\t[-add <source> <nameservice> <destination> "
+        + "\t[-add <source> <nameservice1, nameservice2, ...> <destination> "
+        + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
+        + "-owner <owner> -group <group> -mode <mode>]\n"
+        + "\t[-update <source> <nameservice1, nameservice2, ...> <destination> "
         + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
         + "-owner <owner> -group <group> -mode <mode>]\n"
         + "\t[-rm <source>]\n"
@@ -112,7 +115,7 @@ public class RouterAdmin extends Configured implements Tool {
   @Override
   public int run(String[] argv) throws Exception {
     if (argv.length < 1) {
-      System.err.println("Not enough parameters specificed");
+      System.err.println("Not enough parameters specified");
       printUsage();
       return -1;
     }
@@ -124,31 +127,37 @@ public class RouterAdmin extends Configured implements Tool {
     // Verify that we have enough command line parameters
     if ("-add".equals(cmd)) {
       if (argv.length < 4) {
-        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        System.err.println("Not enough parameters specified for cmd " + cmd);
+        printUsage();
+        return exitCode;
+      }
+    } else if ("-update".equals(cmd)) {
+      if (argv.length < 4) {
+        System.err.println("Not enough parameters specified for cmd " + cmd);
         printUsage();
         return exitCode;
       }
     } else if ("-rm".equalsIgnoreCase(cmd)) {
       if (argv.length < 2) {
-        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        System.err.println("Not enough parameters specified for cmd " + cmd);
         printUsage();
         return exitCode;
       }
     } else if ("-setQuota".equalsIgnoreCase(cmd)) {
       if (argv.length < 4) {
-        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        System.err.println("Not enough parameters specified for cmd " + cmd);
         printUsage();
         return exitCode;
       }
     } else if ("-clrQuota".equalsIgnoreCase(cmd)) {
       if (argv.length < 2) {
-        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        System.err.println("Not enough parameters specified for cmd " + cmd);
         printUsage();
         return exitCode;
       }
     } else if ("-safemode".equalsIgnoreCase(cmd)) {
       if (argv.length < 2) {
-        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        System.err.println("Not enough parameters specified for cmd " + cmd);
         printUsage();
         return exitCode;
       }
@@ -181,7 +190,11 @@ public class RouterAdmin extends Configured implements Tool {
     try {
       if ("-add".equals(cmd)) {
         if (addMount(argv, i)) {
-          System.out.println("Successfuly added mount point " + argv[i]);
+          System.out.println("Successfully added mount point " + argv[i]);
+        }
+      } else if ("-update".equals(cmd)) {
+        if (updateMount(argv, i)) {
+          System.out.println("Successfully updated mount point " + argv[i]);
         }
       } else if ("-rm".equals(cmd)) {
         if (removeMount(argv[i])) {
@@ -399,6 +412,107 @@ public class RouterAdmin extends Configured implements Tool {
   }
 
   /**
+   * Update a mount table entry.
+   *
+   * @param parameters Parameters for the mount point.
+   * @param i Index in the parameters.
+   */
+  public boolean updateMount(String[] parameters, int i) throws IOException {
+    // Mandatory parameters
+    String mount = parameters[i++];
+    String[] nss = parameters[i++].split(",");
+    String dest = parameters[i++];
+
+    // Optional parameters
+    boolean readOnly = false;
+    String owner = null;
+    String group = null;
+    FsPermission mode = null;
+    DestinationOrder order = null;
+    while (i < parameters.length) {
+      if (parameters[i].equals("-readonly")) {
+        readOnly = true;
+      } else if (parameters[i].equals("-order")) {
+        i++;
+        try {
+          order = DestinationOrder.valueOf(parameters[i]);
+        } catch(Exception e) {
+          System.err.println("Cannot parse order: " + parameters[i]);
+        }
+      } else if (parameters[i].equals("-owner")) {
+        i++;
+        owner = parameters[i];
+      } else if (parameters[i].equals("-group")) {
+        i++;
+        group = parameters[i];
+      } else if (parameters[i].equals("-mode")) {
+        i++;
+        short modeValue = Short.parseShort(parameters[i], 8);
+        mode = new FsPermission(modeValue);
+      }
+
+      i++;
+    }
+
+    return updateMount(mount, nss, dest, readOnly, order,
+        new ACLEntity(owner, group, mode));
+  }
+
+  /**
+   * Update a mount table entry.
+   *
+   * @param mount Mount point.
+   * @param nss Nameservices where this is mounted to.
+   * @param dest Destination path.
+   * @param readonly If the mount point is read only.
+   * @param order Order of the destination locations.
+   * @param aclInfo the ACL info for mount point.
+   * @return If the mount point was updated.
+   * @throws IOException Error updating the mount point.
+   */
+  public boolean updateMount(String mount, String[] nss, String dest,
+      boolean readonly, DestinationOrder order, ACLEntity aclInfo)
+      throws IOException {
+    MountTableManager mountTable = client.getMountTableManager();
+
+    // Create a new entry
+    Map<String, String> destMap = new LinkedHashMap<>();
+    for (String ns : nss) {
+      destMap.put(ns, dest);
+    }
+    MountTable newEntry = MountTable.newInstance(mount, destMap);
+
+    newEntry.setReadOnly(readonly);
+
+    if (order != null) {
+      newEntry.setDestOrder(order);
+    }
+
+    // Update ACL info of mount table entry
+    if (aclInfo.getOwner() != null) {
+      newEntry.setOwnerName(aclInfo.getOwner());
+    }
+
+    if (aclInfo.getGroup() != null) {
+      newEntry.setGroupName(aclInfo.getGroup());
+    }
+
+    if (aclInfo.getMode() != null) {
+      newEntry.setMode(aclInfo.getMode());
+    }
+
+    UpdateMountTableEntryRequest updateRequest =
+        UpdateMountTableEntryRequest.newInstance(newEntry);
+    UpdateMountTableEntryResponse updateResponse =
+        mountTable.updateMountTableEntry(updateRequest);
+    boolean updated = updateResponse.getStatus();
+    if (!updated) {
+      System.err.println("Cannot update mount point " + mount);
+    }
+    return updated;
+  }
+
+  /**
    * Remove mount point.
    *
    * @param path Path to remove.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c3d746f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 1ff07ac..4e84c33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -529,4 +529,154 @@ public class TestRouterAdminCLI {
       }
     }, 1000, 30000);
   }
+
+  @Test
+  public void testUpdateNonExistingMountTable() throws Exception {
+    System.setOut(new PrintStream(out));
+    String nsId = "ns0";
+    String src = "/test-updateNonExistingMounttable";
+    String dest = "/updateNonExistingMounttable";
+    String[] argv = new String[] {"-update", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest =
+        GetMountTableEntriesRequest.newInstance(src);
+    GetMountTableEntriesResponse getResponse =
+        client.getMountTableManager().getMountTableEntries(getRequest);
+    // Ensure the mount table entry was created with the given destination
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(0).getDest());
+  }
+
+  @Test
+  public void testUpdateNameserviceDestinationForExistingMountTable() throws
+  Exception {
+    // Add a mount table entry first
+    String nsId = "ns0";
+    String src = "/test-updateNameserviceDestinationForExistingMountTable";
+    String dest = "/UpdateNameserviceDestinationForExistingMountTable";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest =
+        GetMountTableEntriesRequest.newInstance(src);
+    GetMountTableEntriesResponse getResponse =
+        client.getMountTableManager().getMountTableEntries(getRequest);
+    // Ensure mount table added successfully
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(0).getDest());
+
+    // Update the destination
+    String newNsId = "ns1";
+    String newDest = "/newDestination";
+    argv = new String[] {"-update", src, newNsId, newDest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    // Ensure the destination updated successfully
+    mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(newNsId,
+        mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(newDest, mountTable.getDestinations().get(0).getDest());
+  }
+
+  @Test
+  public void testUpdateReadonlyUserGroupPermissionMountable()
+      throws Exception {
+    // Add a mount table
+    String nsId = "ns0";
+    String src = "/test-updateReadonlyUserGroupPermissionMountTable";
+    String dest = "/UpdateReadonlyUserGroupPermissionMountTable";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest =
+        GetMountTableEntriesRequest.newInstance(src);
+    GetMountTableEntriesResponse getResponse =
+        client.getMountTableManager().getMountTableEntries(getRequest);
+    // Ensure mount table added successfully
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(0).getDest());
+    assertFalse(mountTable.isReadOnly());
+
+    // Update the readonly, owner, group and permission
+    String testOwner = "test_owner";
+    String testGroup = "test_group";
+    argv = new String[] {"-update", src, nsId, dest, "-readonly",
+        "-owner", testOwner, "-group", testGroup, "-mode", "0455"};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+
+    // Ensure the destination updated successfully
+    mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(0).getDest());
+    assertTrue(mountTable.isReadOnly());
+    assertEquals(testOwner, mountTable.getOwnerName());
+    assertEquals(testGroup, mountTable.getGroupName());
+    assertEquals((short)0455, mountTable.getMode().toShort());
+  }
+
+  @Test
+  public void testUpdateOrderMountTable() throws Exception {
+    testUpdateOrderMountTable(DestinationOrder.HASH);
+    testUpdateOrderMountTable(DestinationOrder.LOCAL);
+    testUpdateOrderMountTable(DestinationOrder.RANDOM);
+    testUpdateOrderMountTable(DestinationOrder.HASH_ALL);
+  }
+
+  private void testUpdateOrderMountTable(DestinationOrder order)
+      throws Exception {
+    // Add a mount table
+    String nsId = "ns0";
+    String src = "/test-updateOrderMountTable-" + order.toString();
+    String dest = "/UpdateOrderMountTable";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest =
+        GetMountTableEntriesRequest.newInstance(src);
+    GetMountTableEntriesResponse getResponse =
+        client.getMountTableManager().getMountTableEntries(getRequest);
+
+    // Ensure mount table added successfully
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(0).getDest());
+    assertEquals(DestinationOrder.HASH, mountTable.getDestOrder());
+
+    // Update the order
+    argv = new String[] {"-update", src, nsId, dest, "-order",
+        order.toString()};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+
+    // Ensure the destination updated successfully
+    mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(0).getDest());
+    assertEquals(order, mountTable.getDestOrder());
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c3d746f/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index cb5d8a7..a58c6c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -409,7 +409,8 @@ Runs the DFS router. See [Router](./HDFSRouterFederation.html#Router) for more i
 Usage:
 
       hdfs dfsrouteradmin
-          [-add <source> <nameservice> <destination> [-readonly] -owner <owner> -group <group> -mode <mode>]
+          [-add <source> <nameservice1, nameservice2, ...> <destination> [-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner <owner> -group <group> -mode <mode>]
+          [-update <source> <nameservice1, nameservice2, ...> <destination> [-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] -owner <owner> -group <group> -mode <mode>]
           [-rm <source>]
           [-ls <path>]
           [-setQuota <path> -nsQuota <nsQuota> -ssQuota <quota in bytes or quota size string>]
@@ -420,7 +421,8 @@ Usage:
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-| `-add` *source* *nameservice* *destination* | Add a mount table entry or update if it exists. |
+| `-add` *source* *nameservices* *destination* | Add a mount table entry or update if it exists. |
+| `-update` *source* *nameservices* *destination* | Update a mount table entry or create one if it does not exist. |
 | `-rm` *source* | Remove mount point of specified path. |
 | `-ls` *path* | List mount points under specified path. |
 | `-setQuota` *path* `-nsQuota` *nsQuota* `-ssQuota` *ssQuota* | Set quota for specified path. See [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html) for the quota detail. |
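
For example (the path, nameservices, and ACL values here are illustrative), an
existing mount point could be updated with:

      hdfs dfsrouteradmin -update /data ns0,ns1 /data -readonly -order HASH_ALL -owner hdfs -group hadoop -mode 0755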




[49/50] [abbrv] hadoop git commit: HDFS-13488. RBF: Reject requests when a Router is overloaded. Contributed by Inigo Goiri.

Posted by sh...@apache.org.
HDFS-13488. RBF: Reject requests when a Router is overloaded. Contributed by Inigo Goiri.

(cherry picked from commit 37269261d1232bc71708f30c76193188258ef4bd)
(cherry picked from commit 5fef28d0d4b27c5df31d325650e46f5ab5f5630f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06632c06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06632c06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06632c06

Branch: refs/heads/YARN-8200
Commit: 06632c06657689956edd01f44f2900392339ea85
Parents: 9fd93ee
Author: Yiqun Lin <yq...@apache.org>
Authored: Wed May 2 14:49:39 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Wed May 2 15:02:24 2018 +0800

----------------------------------------------------------------------
 .../federation/metrics/FederationRPCMBean.java  |   2 +
 .../metrics/FederationRPCMetrics.java           |  10 +
 .../FederationRPCPerformanceMonitor.java        |   5 +
 .../server/federation/router/RBFConfigKeys.java |   3 +
 .../federation/router/RouterRpcClient.java      |  31 ++-
 .../federation/router/RouterRpcMonitor.java     |   6 +
 .../federation/router/RouterRpcServer.java      |  11 +-
 .../router/RouterSafeModeException.java         |  53 ----
 .../src/main/resources/hdfs-rbf-default.xml     |   9 +
 .../server/federation/StateStoreDFSCluster.java |  28 +++
 .../router/TestRouterClientRejectOverload.java  | 243 +++++++++++++++++++
 .../router/TestRouterRPCClientRetries.java      |  51 +---
 .../federation/router/TestRouterSafemode.java   |   3 +-
 13 files changed, 348 insertions(+), 107 deletions(-)
----------------------------------------------------------------------
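
In short, with dfs.federation.router.client.reject.overload enabled the
Router's RPC client pool switches to a bounded work queue and fails fast
instead of queueing requests indefinitely. A minimal sketch of enabling it
(the key and its default are taken from the diff below; using the raw key
instead of RBFConfigKeys is just for brevity):

import org.apache.hadoop.conf.Configuration;

public final class RejectOverloadConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Fail fast with a StandbyException instead of queueing requests
    // when all of the Router's RPC client threads are busy.
    conf.setBoolean("dfs.federation.router.client.reject.overload", true);
    System.out.println(conf.getBoolean(
        "dfs.federation.router.client.reject.overload", false));
  }
}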


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
index 3e031fe..973c398 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
@@ -40,6 +40,8 @@ public interface FederationRPCMBean {
 
   long getProxyOpFailureStandby();
 
+  long getProxyOpFailureClientOverloaded();
+
   long getProxyOpNotImplemented();
 
   long getProxyOpRetries();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
index 94d3383..9ab4e5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
@@ -54,6 +54,8 @@ public class FederationRPCMetrics implements FederationRPCMBean {
   private MutableCounterLong proxyOpFailureStandby;
   @Metric("Number of operations to hit a standby NN")
   private MutableCounterLong proxyOpFailureCommunicate;
+  @Metric("Number of operations to hit a client overloaded Router")
+  private MutableCounterLong proxyOpFailureClientOverloaded;
   @Metric("Number of operations not implemented")
   private MutableCounterLong proxyOpNotImplemented;
   @Metric("Number of operation retries")
@@ -118,6 +120,14 @@ public class FederationRPCMetrics implements FederationRPCMBean {
     return proxyOpFailureCommunicate.value();
   }
 
+  public void incrProxyOpFailureClientOverloaded() {
+    proxyOpFailureClientOverloaded.incr();
+  }
+
+  @Override
+  public long getProxyOpFailureClientOverloaded() {
+    return proxyOpFailureClientOverloaded.value();
+  }
 
   public void incrProxyOpNotImplemented() {
     proxyOpNotImplemented.incr();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
index 547ebb5..2c2741e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
@@ -154,6 +154,11 @@ public class FederationRPCPerformanceMonitor implements RouterRpcMonitor {
   }
 
   @Override
+  public void proxyOpFailureClientOverloaded() {
+    metrics.incrProxyOpFailureClientOverloaded();
+  }
+
+  @Override
   public void proxyOpNotImplemented() {
     metrics.incrProxyOpNotImplemented();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 170b876..363db20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -113,6 +113,9 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic {
   public static final String DFS_ROUTER_CLIENT_MAX_ATTEMPTS =
       FEDERATION_ROUTER_PREFIX + "client.retry.max.attempts";
   public static final int DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT = 3;
+  public static final String DFS_ROUTER_CLIENT_REJECT_OVERLOAD =
+      FEDERATION_ROUTER_PREFIX + "client.reject.overload";
+  public static final boolean DFS_ROUTER_CLIENT_REJECT_OVERLOAD_DEFAULT = false;
 
   // HDFS Router State Store connection
   public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 214e438..e4d304d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -35,13 +35,16 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -98,7 +101,7 @@ public class RouterRpcClient {
   /** Connection pool to the Namenodes per user for performance. */
   private final ConnectionManager connectionManager;
   /** Service to run asynchronous calls. */
-  private final ExecutorService executorService;
+  private final ThreadPoolExecutor executorService;
   /** Retry policy for router -> NN communication. */
   private final RetryPolicy retryPolicy;
   /** Optional perf monitor. */
@@ -131,8 +134,16 @@ public class RouterRpcClient {
     ThreadFactory threadFactory = new ThreadFactoryBuilder()
         .setNameFormat("RPC Router Client-%d")
         .build();
-    this.executorService = Executors.newFixedThreadPool(
-        numThreads, threadFactory);
+    BlockingQueue<Runnable> workQueue;
+    if (conf.getBoolean(
+        RBFConfigKeys.DFS_ROUTER_CLIENT_REJECT_OVERLOAD,
+        RBFConfigKeys.DFS_ROUTER_CLIENT_REJECT_OVERLOAD_DEFAULT)) {
+      workQueue = new ArrayBlockingQueue<>(numThreads);
+    } else {
+      workQueue = new LinkedBlockingQueue<>();
+    }
+    this.executorService = new ThreadPoolExecutor(numThreads, numThreads,
+        0L, TimeUnit.MILLISECONDS, workQueue, threadFactory);
 
     this.rpcMonitor = monitor;
 
@@ -1098,6 +1109,16 @@ public class RouterRpcClient {
       }
 
       return results;
+    } catch (RejectedExecutionException e) {
+      if (rpcMonitor != null) {
+        rpcMonitor.proxyOpFailureClientOverloaded();
+      }
+      int active = executorService.getActiveCount();
+      int total = executorService.getMaximumPoolSize();
+      String msg = "Not enough client threads " + active + "/" + total;
+      LOG.error(msg);
+      throw new StandbyException(
+          "Router " + routerId + " is overloaded: " + msg);
     } catch (InterruptedException ex) {
       LOG.error("Unexpected error while invoking API: {}", ex.getMessage());
       throw new IOException(
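
The mechanism above is plain java.util.concurrent behavior: a
ThreadPoolExecutor over a bounded ArrayBlockingQueue throws
RejectedExecutionException once every thread is busy and the queue is full,
while an unbounded LinkedBlockingQueue would keep queueing. A self-contained
sketch (pool size, task count, and sleep time are illustrative):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class BoundedQueueDemo {
  public static void main(String[] args) {
    int numThreads = 2;
    ThreadPoolExecutor exec = new ThreadPoolExecutor(numThreads, numThreads,
        0L, TimeUnit.MILLISECONDS, new ArrayBlockingQueue<>(numThreads));
    Runnable slow = () -> {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {
      }
    };
    int rejected = 0;
    for (int i = 0; i < 10; i++) {
      try {
        exec.execute(slow);
      } catch (RejectedExecutionException e) {
        // This is the point where the Router maps the rejection to a
        // StandbyException so clients can fail over to another Router.
        rejected++;
      }
    }
    // With 2 threads running and 2 tasks queued, 6 of 10 are rejected.
    System.out.println("rejected " + rejected + " of 10 submissions");
    exec.shutdownNow();
  }
}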

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
index df9aa11..7af71af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
@@ -76,6 +76,12 @@ public interface RouterRpcMonitor {
   void proxyOpFailureCommunicate();
 
   /**
+   * Failed to proxy an operation to a Namenode because the client was
+   * overloaded.
+   */
+  void proxyOpFailureClientOverloaded();
+
+  /**
    * Failed to proxy an operation because it is not implemented.
    */
   void proxyOpNotImplemented();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index b56ee5f..733e234 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -274,7 +274,6 @@ public class RouterRpcServer extends AbstractService
     // We don't want the server to log the full stack trace for some exceptions
     this.rpcServer.addTerseExceptions(
         RemoteException.class,
-        StandbyException.class,
         SafeModeException.class,
         FileNotFoundException.class,
         FileAlreadyExistsException.class,
@@ -283,6 +282,9 @@ public class RouterRpcServer extends AbstractService
         NotReplicatedYetException.class,
         IOException.class);
 
+    this.rpcServer.addSuppressedLoggingExceptions(
+        StandbyException.class);
+
     // The RPC-server port can be ephemeral... ensure we have the correct info
     InetSocketAddress listenAddress = this.rpcServer.getListenerAddress();
     this.rpcAddress = new InetSocketAddress(
@@ -397,7 +399,7 @@ public class RouterRpcServer extends AbstractService
    * @throws UnsupportedOperationException If the operation is not supported.
    */
   protected void checkOperation(OperationCategory op, boolean supported)
-      throws RouterSafeModeException, UnsupportedOperationException {
+      throws StandbyException, UnsupportedOperationException {
     checkOperation(op);
 
     if (!supported) {
@@ -419,7 +421,7 @@ public class RouterRpcServer extends AbstractService
    *                           client requests.
    */
   protected void checkOperation(OperationCategory op)
-      throws RouterSafeModeException {
+      throws StandbyException {
     // Log the function we are currently calling.
     if (rpcMonitor != null) {
       rpcMonitor.startOp();
@@ -443,7 +445,8 @@ public class RouterRpcServer extends AbstractService
       if (rpcMonitor != null) {
         rpcMonitor.routerFailureSafemode();
       }
-      throw new RouterSafeModeException(router.getRouterId(), op);
+      throw new StandbyException("Router " + router.getRouterId() +
+          " is in safe mode and cannot handle " + op + " requests");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafeModeException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafeModeException.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafeModeException.java
deleted file mode 100644
index 7a78b5b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafeModeException.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.router;
-
-import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
-import org.apache.hadoop.ipc.StandbyException;
-
-/**
- * Exception that the Router throws when it is in safe mode. This extends
- * {@link StandbyException} for the client to try another Router when it gets
- * this exception.
- */
-public class RouterSafeModeException extends StandbyException {
-
-  private static final long serialVersionUID = 453568188334993493L;
-
-  /** Identifier of the Router that generated this exception. */
-  private final String routerId;
-
-  /**
-   * Build a new Router safe mode exception.
-   * @param router Identifier of the Router.
-   * @param op Category of the operation (READ/WRITE).
-   */
-  public RouterSafeModeException(String router, OperationCategory op) {
-    super("Router " + router + " is in safe mode and cannot handle " + op
-        + " requests.");
-    this.routerId = router;
-  }
-
-  /**
-   * Get the id of the Router that generated this exception.
-   * @return Id of the Router that generated this exception.
-   */
-  public String getRouterId() {
-    return this.routerId;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
index 92f899d..8806cb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
@@ -431,4 +431,13 @@
     </description>
   </property>
 
+  <property>
+    <name>dfs.federation.router.client.reject.overload</name>
+    <value>false</value>
+    <description>
+      Set to true to reject client requests when we run out of RPC client
+      threads.
+    </description>
+  </property>
+
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java
index bf63b18..9d56f13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/StateStoreDFSCluster.java
@@ -28,6 +28,10 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
@@ -37,6 +41,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * Test utility to mimic a federated HDFS cluster with a router and a state
@@ -145,4 +150,27 @@ public class StateStoreDFSCluster extends MiniRouterDFSCluster {
     entries.add(entry);
     return entries;
   }
+
+  /**
+   * Get the client configuration which targets all the Routers. It uses the HA
+   * setup to fail over between them.
+   * @return Configuration for the client which uses two routers.
+   */
+  public Configuration getRouterClientConf() {
+    List<RouterContext> routers = getRouters();
+    Configuration clientConf = DFSTestUtil.newHAConfiguration("fed");
+    int i = 0;
+    List<String> names = new ArrayList<>(routers.size());
+    for (RouterContext routerContext : routers) {
+      String name = "r" + i++;
+      clientConf.set(
+          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".fed." + name,
+          "localhost:" + routerContext.getRpcPort());
+      names.add(name);
+    }
+    clientConf.set(DFSUtil.addKeySuffixes(
+        HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, "fed"),
+        StringUtils.join(",", names));
+    return clientConf;
+  }
 }
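
A hedged usage sketch for the new getRouterClientConf() helper: the returned HA
configuration lets a DFSClient address the logical nameservice "fed" and fail
over between the two Routers, mirroring how the new test drives it (the wrapper
class and method are illustrative):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;

public final class RouterHaClientSketch {

  /** Renew the client lease through whichever Router the HA proxy picks. */
  static void renewViaRouters(StateStoreDFSCluster cluster) throws Exception {
    Configuration clientConf = cluster.getRouterClientConf();
    DFSClient client = new DFSClient(new URI("hdfs://fed/"), clientConf);
    try {
      client.getNamenode().renewLease(client.getClientName());
    } finally {
      client.close();
    }
  }
}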

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java
new file mode 100644
index 0000000..3c51e13
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java
@@ -0,0 +1,243 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateSlowNamenode;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.junit.After;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Test the Router overload control which rejects requests when the RPC client
+ * is overloaded. This feature is managed by
+ * {@link RBFConfigKeys#DFS_ROUTER_CLIENT_REJECT_OVERLOAD}.
+ */
+public class TestRouterClientRejectOverload {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRouterClientRejectOverload.class);
+
+  private StateStoreDFSCluster cluster;
+
+  @After
+  public void cleanup() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  private void setupCluster(boolean overloadControl) throws Exception {
+    // Build and start a federated cluster
+    cluster = new StateStoreDFSCluster(false, 2);
+    Configuration routerConf = new RouterConfigBuilder()
+        .stateStore()
+        .metrics()
+        .admin()
+        .rpc()
+        .build();
+
+    // Reduce the number of RPC client threads so the Router is easy to overload
+    routerConf.setInt(RBFConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE, 4);
+    // Overload control
+    routerConf.setBoolean(
+        RBFConfigKeys.DFS_ROUTER_CLIENT_REJECT_OVERLOAD, overloadControl);
+
+    // No need for datanodes as we use renewLease() for testing
+    cluster.setNumDatanodesPerNameservice(0);
+
+    cluster.addRouterOverrides(routerConf);
+    cluster.startCluster();
+    cluster.startRouters();
+    cluster.waitClusterUp();
+  }
+
+  @Test
+  public void testWithoutOverloadControl() throws Exception {
+    setupCluster(false);
+
+    // Nobody should get overloaded
+    testOverloaded(0);
+
+    // Set subcluster 0 as slow
+    MiniDFSCluster dfsCluster = cluster.getCluster();
+    NameNode nn0 = dfsCluster.getNameNode(0);
+    simulateSlowNamenode(nn0, 1);
+
+    // Nobody should get overloaded, but it will be really slow
+    testOverloaded(0);
+
+    // No rejected requests expected
+    for (RouterContext router : cluster.getRouters()) {
+      FederationRPCMetrics rpcMetrics =
+          router.getRouter().getRpcServer().getRPCMetrics();
+      assertEquals(0, rpcMetrics.getProxyOpFailureClientOverloaded());
+    }
+  }
+
+  @Test
+  public void testOverloadControl() throws Exception {
+    setupCluster(true);
+
+    List<RouterContext> routers = cluster.getRouters();
+    FederationRPCMetrics rpcMetrics0 =
+        routers.get(0).getRouter().getRpcServer().getRPCMetrics();
+    FederationRPCMetrics rpcMetrics1 =
+        routers.get(1).getRouter().getRpcServer().getRPCMetrics();
+
+    // Nobody should get overloaded
+    testOverloaded(0);
+    assertEquals(0, rpcMetrics0.getProxyOpFailureClientOverloaded());
+    assertEquals(0, rpcMetrics1.getProxyOpFailureClientOverloaded());
+
+    // Set subcluster 0 as slow
+    MiniDFSCluster dfsCluster = cluster.getCluster();
+    NameNode nn0 = dfsCluster.getNameNode(0);
+    simulateSlowNamenode(nn0, 1);
+
+    // The subcluster should be overloaded now and reject 4 to 6 requests
+    testOverloaded(4, 6);
+    assertTrue(rpcMetrics0.getProxyOpFailureClientOverloaded()
+        + rpcMetrics1.getProxyOpFailureClientOverloaded() >= 4);
+
+    // Client using HA with 2 Routers
+    // A single Router gets overloaded, but 2 will handle it
+    Configuration clientConf = cluster.getRouterClientConf();
+
+    // Each Router should get a similar number of ops (>=8) out of 2*10
+    long iniProxyOps0 = rpcMetrics0.getProxyOps();
+    long iniProxyOps1 = rpcMetrics1.getProxyOps();
+    testOverloaded(0, 0, new URI("hdfs://fed/"), clientConf, 10);
+    long proxyOps0 = rpcMetrics0.getProxyOps() - iniProxyOps0;
+    long proxyOps1 = rpcMetrics1.getProxyOps() - iniProxyOps1;
+    assertEquals(2 * 10, proxyOps0 + proxyOps1);
+    assertTrue(proxyOps0 + " operations: not distributed", proxyOps0 >= 8);
+    assertTrue(proxyOps1 + " operations: not distributed", proxyOps1 >= 8);
+  }
+
+  private void testOverloaded(int expOverload) throws Exception {
+    testOverloaded(expOverload, expOverload);
+  }
+
+  private void testOverloaded(int expOverloadMin, int expOverloadMax)
+      throws Exception {
+    RouterContext routerContext = cluster.getRandomRouter();
+    URI address = routerContext.getFileSystemURI();
+    Configuration conf = new HdfsConfiguration();
+    testOverloaded(expOverloadMin, expOverloadMax, address, conf, 10);
+  }
+
+  /**
+   * Test if the Router gets overloaded by submitting requests in parallel.
+   * We check how many requests got rejected at the end.
+   * @param expOverloadMin Min number of requests expected as overloaded.
+   * @param expOverloadMax Max number of requests expected as overloaded.
+   * @param address Destination address.
+   * @param conf Configuration of the client.
+   * @param numOps Number of operations to submit.
+   * @throws Exception If it cannot perform the test.
+   */
+  private void testOverloaded(int expOverloadMin, int expOverloadMax,
+      final URI address, final Configuration conf, final int numOps)
+          throws Exception {
+
+    // Submit renewLease() ops which go to all subclusters
+    final AtomicInteger overloadException = new AtomicInteger();
+    ExecutorService exec = Executors.newFixedThreadPool(numOps);
+    List<Future<?>> futures = new ArrayList<>();
+    for (int i = 0; i < numOps; i++) {
+      // Stagger the operations a little (50ms)
+      final int sleepTime = i * 50;
+      Future<?> future = exec.submit(new Runnable() {
+        @Override
+        public void run() {
+          DFSClient routerClient = null;
+          try {
+            Thread.sleep(sleepTime);
+            routerClient = new DFSClient(address, conf);
+            String clientName = routerClient.getClientName();
+            ClientProtocol routerProto = routerClient.getNamenode();
+            routerProto.renewLease(clientName);
+          } catch (RemoteException re) {
+            IOException ioe = re.unwrapRemoteException();
+            assertTrue("Wrong exception: " + ioe,
+                ioe instanceof StandbyException);
+            assertExceptionContains("is overloaded", ioe);
+            overloadException.incrementAndGet();
+          } catch (IOException e) {
+            fail("Unexpected exception: " + e);
+          } catch (InterruptedException e) {
+            fail("Cannot sleep: " + e);
+          } finally {
+            if (routerClient != null) {
+              try {
+                routerClient.close();
+              } catch (IOException e) {
+                LOG.error("Cannot close the client");
+              }
+            }
+          }
+        }
+      });
+      futures.add(future);
+    }
+    // Wait until all the requests are done
+    while (!futures.isEmpty()) {
+      futures.remove(0).get();
+    }
+    exec.shutdown();
+
+    int num = overloadException.get();
+    if (expOverloadMin == expOverloadMax) {
+      assertEquals(expOverloadMin, num);
+    } else {
+      assertTrue("Expected >=" + expOverloadMin + " but was " + num,
+          num >= expOverloadMin);
+      assertTrue("Expected <=" + expOverloadMax + " but was " + num,
+          num <= expOverloadMax);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
index 91dc2e7..e5ab3ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateSlowNamenode;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.apache.hadoop.test.GenericTestUtils.waitFor;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.util.List;
@@ -44,12 +44,8 @@ import org.apache.hadoop.hdfs.server.federation.metrics.NamenodeBeanMetrics;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
 import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
-import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.After;
@@ -57,11 +53,6 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
-import org.mockito.internal.util.reflection.Whitebox;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
 
@@ -70,9 +61,6 @@ import com.google.common.base.Supplier;
  */
 public class TestRouterRPCClientRetries {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestRouterRPCClientRetries.class);
-
   private static StateStoreDFSCluster cluster;
   private static NamenodeContext nnContext1;
   private static RouterContext routerContext;
@@ -144,7 +132,7 @@ public class TestRouterRPCClientRetries {
       fail("Should have thrown RemoteException error.");
     } catch (RemoteException e) {
       String ns0 = cluster.getNameservices().get(0);
-      GenericTestUtils.assertExceptionContains(
+      assertExceptionContains(
           "No namenode available under nameservice " + ns0, e);
     }
 
@@ -212,14 +200,14 @@ public class TestRouterRPCClientRetries {
     // Making subcluster0 slow to reply, should only get DNs from nn1
     MiniDFSCluster dfsCluster = cluster.getCluster();
     NameNode nn0 = dfsCluster.getNameNode(0);
-    simulateNNSlow(nn0);
+    simulateSlowNamenode(nn0, 3);
     waitUpdateLiveNodes(jsonString2, metrics);
     final String jsonString3 = metrics.getLiveNodes();
     assertEquals(2, getNumDatanodes(jsonString3));
 
     // Making subcluster1 slow to reply, shouldn't get any DNs
     NameNode nn1 = dfsCluster.getNameNode(1);
-    simulateNNSlow(nn1);
+    simulateSlowNamenode(nn1, 3);
     waitUpdateLiveNodes(jsonString3, metrics);
     final String jsonString4 = metrics.getLiveNodes();
     assertEquals(0, getNumDatanodes(jsonString4));
@@ -249,36 +237,11 @@ public class TestRouterRPCClientRetries {
   private static void waitUpdateLiveNodes(
       final String oldValue, final NamenodeBeanMetrics metrics)
           throws Exception {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+    waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
         return !oldValue.equals(metrics.getLiveNodes());
       }
     }, 500, 5 * 1000);
   }
-
-  /**
-   * Simulate that a Namenode is slow by adding a sleep to the check operation
-   * in the NN.
-   * @param nn Namenode to simulate slow.
-   * @throws Exception If we cannot add the sleep time.
-   */
-  private static void simulateNNSlow(final NameNode nn) throws Exception {
-    FSNamesystem namesystem = nn.getNamesystem();
-    HAContext haContext = namesystem.getHAContext();
-    HAContext spyHAContext = spy(haContext);
-    doAnswer(new Answer<Object>() {
-      @Override
-      public Object answer(InvocationOnMock invocation) throws Throwable {
-        LOG.info("Simulating slow namenode {}", invocation.getMock());
-        try {
-          Thread.sleep(3 * 1000);
-        } catch(InterruptedException e) {
-          LOG.error("Simulating a slow namenode aborted");
-        }
-        return null;
-      }
-    }).when(spyHAContext).checkOperation(any(OperationCategory.class));
-    Whitebox.setInternalState(namesystem, "haContext", spyHAContext);
-  }
 }
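
The helper deleted above now lives in FederationTestUtils as simulateSlowNamenode(nn, seconds), called with a 3-second delay at both call sites. Its body is not shown in this diff, so the following is only a sketch of the shared utility, generalizing the removed code by parameterizing the sleep:

    public static void simulateSlowNamenode(final NameNode nn, final int seconds)
        throws Exception {
      FSNamesystem namesystem = nn.getNamesystem();
      HAContext spyHAContext = spy(namesystem.getHAContext());
      doAnswer(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
          // Delay every operation check so the NN appears slow to the Router.
          Thread.sleep(seconds * 1000L);
          return null;
        }
      }).when(spyHAContext).checkOperation(any(OperationCategory.class));
      Whitebox.setInternalState(namesystem, "haContext", spyHAContext);
    }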

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06632c06/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
index e5d8348..f16ceb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
@@ -33,6 +33,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -187,7 +188,7 @@ public class TestRouterSafemode {
     try {
       router.getRpcServer().delete("/testfile.txt", true);
       fail("We should have thrown a safe mode exception");
-    } catch (RouterSafeModeException sme) {
+    } catch (StandbyException sme) {
       exception = true;
     }
     assertTrue("We should have thrown a safe mode exception", exception);




[10/50] [abbrv] hadoop git commit: Revert "YARN-7810. Update TestDockerContainerRuntime to test with current user credential."

Posted by sh...@apache.org.
Revert "YARN-7810.  Update TestDockerContainerRuntime to test with current user credential."

This reverts commit 724bffdb89cd62cc90a1f49c7c5e40998dc1cc0f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc2a2a8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc2a2a8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc2a2a8e

Branch: refs/heads/YARN-8200
Commit: cc2a2a8e063ecdb23216830be8418736af79fe7b
Parents: a772108
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Apr 13 10:13:59 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Apr 13 10:13:59 2018 -0700

----------------------------------------------------------------------
 .../runtime/TestDockerContainerRuntime.java     | 178 +++++++++----------
 1 file changed, 82 insertions(+), 96 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc2a2a8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index ab38ea2..aef94a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -81,8 +81,7 @@ public class TestDockerContainerRuntime {
   private HashMap<String, String> env;
   private String image;
   private String uidGidPair;
-  private String runAsUser = System.getProperty("user.name");
-  private String[] groups = {};
+  private String runAsUser;
   private String user;
   private String appId;
   private String containerIdStr = containerId;
@@ -131,37 +130,8 @@ public class TestDockerContainerRuntime {
     when(context.getEnvironment()).thenReturn(env);
     when(container.getUser()).thenReturn(submittingUser);
 
-    // Get the running user's uid and gid for remap
-    String uid = "";
-    String gid = "";
-    Shell.ShellCommandExecutor shexec1 = new Shell.ShellCommandExecutor(
-        new String[]{"id", "-u", runAsUser});
-    Shell.ShellCommandExecutor shexec2 = new Shell.ShellCommandExecutor(
-        new String[]{"id", "-g", runAsUser});
-    Shell.ShellCommandExecutor shexec3 = new Shell.ShellCommandExecutor(
-        new String[]{"id", "-G", runAsUser});
-    try {
-      shexec1.execute();
-      // get rid of newline at the end
-      uid = shexec1.getOutput().replaceAll("\n$", "");
-    } catch (Exception e) {
-      LOG.info("Could not run id -u command: " + e);
-    }
-    try {
-      shexec2.execute();
-      // get rid of newline at the end
-      gid = shexec2.getOutput().replaceAll("\n$", "");
-    } catch (Exception e) {
-      LOG.info("Could not run id -g command: " + e);
-    }
-    try {
-      shexec3.execute();
-      groups = shexec3.getOutput().replace("\n", " ").split(" ");
-    } catch (Exception e) {
-      LOG.info("Could not run id -G command: " + e);
-    }
-    uidGidPair = uid + ":" + gid;
-
+    uidGidPair = "";
+    runAsUser = "run_as_user";
     user = "user";
     appId = "app_id";
     containerIdStr = containerId;
@@ -331,7 +301,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
             (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -341,8 +311,6 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
-    Assert.assertEquals("  group-add=" + String.join(",", groups),
-        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -358,7 +326,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
   }
@@ -369,6 +337,13 @@ public class TestDockerContainerRuntime {
       IOException {
     conf.setBoolean(YarnConfiguration.NM_DOCKER_ENABLE_USER_REMAPPING,
         true);
+    Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(
+        new String[]{"whoami"});
+    shexec.execute();
+    // get rid of newline at the end
+    runAsUser = shexec.getOutput().replaceAll("\n$", "");
+    builder.setExecutionAttribute(RUN_AS_USER, runAsUser);
+
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf);
@@ -378,6 +353,37 @@ public class TestDockerContainerRuntime {
     List<String> args = op.getArguments();
     String dockerCommandFile = args.get(11);
 
+    String uid = "";
+    String gid = "";
+    String[] groups = {};
+    Shell.ShellCommandExecutor shexec1 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-u", runAsUser});
+    Shell.ShellCommandExecutor shexec2 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-g", runAsUser});
+    Shell.ShellCommandExecutor shexec3 = new Shell.ShellCommandExecutor(
+        new String[]{"id", "-G", runAsUser});
+    try {
+      shexec1.execute();
+      // get rid of newline at the end
+      uid = shexec1.getOutput().replaceAll("\n$", "");
+    } catch (Exception e) {
+      LOG.info("Could not run id -u command: " + e);
+    }
+    try {
+      shexec2.execute();
+      // get rid of newline at the end
+      gid = shexec2.getOutput().replaceAll("\n$", "");
+    } catch (Exception e) {
+      LOG.info("Could not run id -g command: " + e);
+    }
+    try {
+      shexec3.execute();
+      groups = shexec3.getOutput().replace("\n", " ").split(" ");
+    } catch (Exception e) {
+      LOG.info("Could not run id -G command: " + e);
+    }
+    uidGidPair = uid + ":" + gid;
+
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
@@ -499,7 +505,7 @@ public class TestDockerContainerRuntime {
     //This is the expected docker invocation for this case
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -509,8 +515,6 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
-    Assert.assertEquals("  group-add=" + String.join(",", groups),
-        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=test.hostname",
         dockerCommands.get(counter++));
     Assert
@@ -528,7 +532,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
   }
@@ -567,7 +571,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -577,8 +581,6 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
-    Assert.assertEquals("  group-add=" + String.join(",", groups),
-        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -594,7 +596,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
 
@@ -622,8 +624,6 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
-    Assert.assertEquals("  group-add=" + String.join(",", groups),
-        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -640,7 +640,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
 
@@ -677,7 +677,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 13;
     Assert.assertEquals(expected, dockerCommands.size());
 
     String command = dockerCommands.get(0);
@@ -786,7 +786,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
+    int expected = 14;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -796,8 +796,6 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
     Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
     Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
-    Assert.assertEquals("  group-add=" + String.join(",", groups),
-        dockerCommands.get(counter++));
     Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -814,7 +812,7 @@ public class TestDockerContainerRuntime {
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
         dockerCommands.get(counter++));
-    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+    Assert.assertEquals("  user=run_as_user", dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
         dockerCommands.get(counter++));
   }
@@ -905,39 +903,33 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
-    int counter = 0;
-    Assert.assertEquals(expected, dockerCommands.size());
-    Assert.assertEquals("[docker-command-execution]",
-        dockerCommands.get(counter++));
+    Assert.assertEquals(14, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
     Assert.assertEquals("  cap-add=SYS_CHROOT,NET_BIND_SERVICE",
-        dockerCommands.get(counter++));
-    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
-    Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
-    Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
-    Assert.assertEquals("  group-add=" + String.join(",", groups),
-        dockerCommands.get(counter++));
-    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
-    Assert.assertEquals("  image=busybox:latest",
-        dockerCommands.get(counter++));
+        dockerCommands.get(1));
+    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(2));
+    Assert.assertEquals("  detach=true", dockerCommands.get(3));
+    Assert.assertEquals("  docker-command=run", dockerCommands.get(4));
+    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(5));
+    Assert.assertEquals("  image=busybox:latest", dockerCommands.get(6));
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
-        dockerCommands.get(counter++));
-    Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
-    Assert.assertEquals("  net=host", dockerCommands.get(counter++));
+        dockerCommands.get(7));
+    Assert.assertEquals("  name=container_id", dockerCommands.get(8));
+    Assert.assertEquals("  net=host", dockerCommands.get(9));
     Assert.assertEquals(
         "  ro-mounts=/test_local_dir/test_resource_file:test_mount",
-        dockerCommands.get(counter++));
+        dockerCommands.get(10));
     Assert.assertEquals(
         "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
             + "/test_filecache_dir:/test_filecache_dir,"
             + "/test_container_work_dir:/test_container_work_dir,"
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+        dockerCommands.get(11));
+    Assert.assertEquals("  user=run_as_user", dockerCommands.get(12));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(13));
   }
 
   @Test
@@ -981,40 +973,34 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
-    int counter = 0;
-    Assert.assertEquals(expected, dockerCommands.size());
-    Assert.assertEquals("[docker-command-execution]",
-        dockerCommands.get(counter++));
+    Assert.assertEquals(14, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
     Assert.assertEquals("  cap-add=SYS_CHROOT,NET_BIND_SERVICE",
-        dockerCommands.get(counter++));
-    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
-    Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
-    Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
-    Assert.assertEquals("  group-add=" + String.join(",", groups),
-        dockerCommands.get(counter++));
-    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
-    Assert.assertEquals("  image=busybox:latest",
-        dockerCommands.get(counter++));
+        dockerCommands.get(1));
+    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(2));
+    Assert.assertEquals("  detach=true", dockerCommands.get(3));
+    Assert.assertEquals("  docker-command=run", dockerCommands.get(4));
+    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(5));
+    Assert.assertEquals("  image=busybox:latest", dockerCommands.get(6));
     Assert.assertEquals(
         "  launch-command=bash,/test_container_work_dir/launch_container.sh",
-        dockerCommands.get(counter++));
-    Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
-    Assert.assertEquals("  net=host", dockerCommands.get(counter++));
+        dockerCommands.get(7));
+    Assert.assertEquals("  name=container_id", dockerCommands.get(8));
+    Assert.assertEquals("  net=host", dockerCommands.get(9));
     Assert.assertEquals(
         "  ro-mounts=/test_local_dir/test_resource_file:test_mount1,"
             + "/test_local_dir/test_resource_file:test_mount2",
-        dockerCommands.get(counter++));
+        dockerCommands.get(10));
     Assert.assertEquals(
         "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
             + "/test_filecache_dir:/test_filecache_dir,"
             + "/test_container_work_dir:/test_container_work_dir,"
             + "/test_container_log_dir:/test_container_log_dir,"
             + "/test_user_local_dir:/test_user_local_dir",
-        dockerCommands.get(counter++));
-    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+        dockerCommands.get(11));
+    Assert.assertEquals("  user=run_as_user", dockerCommands.get(12));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(13));
 
   }
 
@@ -1034,7 +1020,7 @@ public class TestDockerContainerRuntime {
     PrivilegedOperation op = capturePrivilegedOperation();
     Assert.assertEquals(op.getOperationType(),
         PrivilegedOperation.OperationType.SIGNAL_CONTAINER);
-    Assert.assertEquals(runAsUser, op.getArguments().get(0));
+    Assert.assertEquals("run_as_user", op.getArguments().get(0));
     Assert.assertEquals("user", op.getArguments().get(1));
     Assert.assertEquals("2", op.getArguments().get(2));
     Assert.assertEquals("1234", op.getArguments().get(3));




[18/50] [abbrv] hadoop git commit: HADOOP-15394. Backport PowerShell NodeFencer HADOOP-14309 to branch-2. Contributed by Inigo Goiri.

Posted by sh...@apache.org.
HADOOP-15394. Backport PowerShell NodeFencer HADOOP-14309 to branch-2. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a2edb58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a2edb58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a2edb58

Branch: refs/heads/YARN-8200
Commit: 7a2edb588c38a221a3cfaf73d9571691b06c2f37
Parents: 5ec195e
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Apr 17 11:33:09 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Apr 17 11:33:09 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/ha/NodeFencer.java   |   3 +-
 .../org/apache/hadoop/ha/PowerShellFencer.java  | 155 +++++++++++++++++++
 2 files changed, 157 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a2edb58/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
index 63f6db6..2247a34 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
@@ -69,7 +69,8 @@ public class NodeFencer {
   private static final Map<String, Class<? extends FenceMethod>> STANDARD_METHODS =
     ImmutableMap.<String, Class<? extends FenceMethod>>of(
         "shell", ShellCommandFencer.class,
-        "sshfence", SshFenceByTcpPort.class);
+        "sshfence", SshFenceByTcpPort.class,
+        "powershell", PowerShellFencer.class);
   
   private final List<FenceMethodWithArg> methods;
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a2edb58/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java
new file mode 100644
index 0000000..6de618c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/PowerShellFencer.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.net.InetSocketAddress;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Fencer method that uses PowerShell to remotely connect to a machine and kill
+ * the required process. This only works in Windows.
+ *
+ * The argument passed to this fencer should be a unique string in the
+ * "CommandLine" attribute for the "java.exe" process. For example, the full
+ * path for the Namenode: "org.apache.hadoop.hdfs.server.namenode.NameNode".
+ * The administrator can also shorten the name to "Namenode" if it's unique.
+ */
+public class PowerShellFencer extends Configured implements FenceMethod {
+
+  private static final Logger LOG = LoggerFactory.getLogger(PowerShellFencer
+      .class);
+
+
+  @Override
+  public void checkArgs(String argStr) throws BadFencingConfigurationException {
+    LOG.info("The parameter for the PowerShell fencer is " + argStr);
+  }
+
+  @Override
+  public boolean tryFence(HAServiceTarget target, String argsStr)
+      throws BadFencingConfigurationException {
+
+    String processName = argsStr;
+    InetSocketAddress serviceAddr = target.getAddress();
+    String hostname = serviceAddr.getHostName();
+
+    // Use PowerShell to kill a remote process
+    String ps1script = buildPSScript(processName, hostname);
+    if (ps1script == null) {
+      LOG.error("Cannot build PowerShell script");
+      return false;
+    }
+
+    // Execute PowerShell script
+    LOG.info("Executing " + ps1script);
+    ProcessBuilder builder = new ProcessBuilder("powershell.exe", ps1script);
+    Process p = null;
+    try {
+      p = builder.start();
+      p.getOutputStream().close();
+    } catch (IOException e) {
+      LOG.warn("Unable to execute " + ps1script, e);
+      return false;
+    }
+
+    // Pump logs to stderr
+    StreamPumper errPumper = new StreamPumper(
+        LOG, "fencer", p.getErrorStream(), StreamPumper.StreamType.STDERR);
+    errPumper.start();
+
+    StreamPumper outPumper = new StreamPumper(
+        LOG, "fencer", p.getInputStream(), StreamPumper.StreamType.STDOUT);
+    outPumper.start();
+
+    // Waiting for the process to finish
+    int rc = 0;
+    try {
+      rc = p.waitFor();
+      errPumper.join();
+      outPumper.join();
+    } catch (InterruptedException ie) {
+      LOG.warn("Interrupted while waiting for fencing command: " + ps1script);
+      return false;
+    }
+
+    return rc == 0;
+  }
+
+  /**
+   * Build a PowerShell script to kill a java.exe process in a remote machine.
+   *
+   * @param processName Name of the process to kill. This is an attribute in
+   *                    CommandLine.
+   * @param host Host where the process is.
+   * @return Path of the PowerShell script.
+   */
+  private String buildPSScript(final String processName, final String host) {
+    LOG.info(
+        "Building PowerShell script to kill " + processName + " at " + host);
+    String ps1script = null;
+    BufferedWriter writer = null;
+    try {
+      File file = File.createTempFile("temp-fence-command", ".ps1");
+      file.deleteOnExit();
+      FileOutputStream fos = new FileOutputStream(file, false);
+      OutputStreamWriter osw =
+          new OutputStreamWriter(fos, StandardCharsets.UTF_8);
+      writer = new BufferedWriter(osw);
+
+      // Filter to identify the Namenode process
+      String filter = StringUtils.join(" and ", new String[] {
+          "Name LIKE '%java.exe%'",
+          "CommandLine LIKE '%" + processName+ "%'"});
+
+      // Identify the process
+      String cmd = "Get-WmiObject Win32_Process";
+      cmd += " -Filter \"" + filter + "\"";
+      // Remote location
+      cmd += " -Computer " + host;
+      // Kill it
+      cmd += " |% { $_.Terminate() }";
+
+      LOG.info("PowerShell command: " + cmd);
+      writer.write(cmd);
+      writer.flush();
+
+      ps1script = file.getAbsolutePath();
+    } catch (IOException ioe) {
+      LOG.error("Cannot create PowerShell script", ioe);
+    } finally {
+      if (writer != null) {
+        try {
+          writer.close();
+        } catch (IOException ioe) {
+          LOG.error("Cannot close PowerShell script", ioe);
+        }
+      }
+    }
+    return ps1script;
+  }
+}
\ No newline at end of file
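
To use the new fencer, an administrator registers it through the standard HDFS HA fencing configuration; the "powershell" method name comes from the NodeFencer map above, and the argument is the CommandLine substring described in the class Javadoc. A hedged example (dfs.ha.fencing.methods is the stock HA fencing property; "NameNode" is an illustrative match string):

    Configuration conf = new Configuration();
    // Try the PowerShell fencer, matching java.exe processes whose
    // CommandLine contains "NameNode" on the target host.
    conf.set("dfs.ha.fencing.methods", "powershell(NameNode)");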




[07/50] [abbrv] hadoop git commit: YARN-8147. TestClientRMService#testGetApplications sporadically fails. Contributed by Jason Lowe

Posted by sh...@apache.org.
YARN-8147. TestClientRMService#testGetApplications sporadically fails. Contributed by Jason Lowe

(cherry picked from commit 18844599aef42f79d2af4500aa2eee472dda95cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7393020c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7393020c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7393020c

Branch: refs/heads/YARN-8200
Commit: 7393020cb35031a406583015591ceedeb6c97bd4
Parents: b0dfb18
Author: Eric E Payne <er...@oath.com>
Authored: Thu Apr 12 17:53:57 2018 +0000
Committer: Eric E Payne <er...@oath.com>
Committed: Thu Apr 12 18:19:12 2018 +0000

----------------------------------------------------------------------
 .../server/resourcemanager/TestClientRMService.java   | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7393020c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 6946f3c..091bcd9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -757,7 +757,7 @@ public class TestClientRMService {
   }
 
   @Test
-  public void testGetApplications() throws IOException, YarnException {
+  public void testGetApplications() throws Exception {
     /**
      * 1. Submit 3 applications alternately in two queues
      * 2. Test each of the filters
@@ -806,8 +806,12 @@ public class TestClientRMService {
       SubmitApplicationRequest submitRequest = mockSubmitAppRequest(
           appId, appNames[i], queues[i % queues.length],
           new HashSet<String>(tags.subList(0, i + 1)));
+      // make sure each app is submitted at a different time
+      Thread.sleep(1);
       rmService.submitApplication(submitRequest);
-      submitTimeMillis[i] = System.currentTimeMillis();
+      submitTimeMillis[i] = rmService.getApplicationReport(
+          GetApplicationReportRequest.newInstance(appId))
+          .getApplicationReport().getStartTime();
     }
 
     // Test different cases of ClientRMService#getApplications()
@@ -822,19 +826,19 @@ public class TestClientRMService {
     
     // Check start range
     request = GetApplicationsRequest.newInstance();
-    request.setStartRange(submitTimeMillis[0], System.currentTimeMillis());
+    request.setStartRange(submitTimeMillis[0] + 1, System.currentTimeMillis());
     
     // 2 applications are submitted after first timeMills
     assertEquals("Incorrect number of matching start range", 
         2, rmService.getApplications(request).getApplicationList().size());
     
     // 1 application is submitted after the second timeMills
-    request.setStartRange(submitTimeMillis[1], System.currentTimeMillis());
+    request.setStartRange(submitTimeMillis[1] + 1, System.currentTimeMillis());
     assertEquals("Incorrect number of matching start range", 
         1, rmService.getApplications(request).getApplicationList().size());
     
     // no application is submitted after the third timeMills
-    request.setStartRange(submitTimeMillis[2], System.currentTimeMillis());
+    request.setStartRange(submitTimeMillis[2] + 1, System.currentTimeMillis());
     assertEquals("Incorrect number of matching start range", 
         0, rmService.getApplications(request).getApplicationList().size());
 




[09/50] [abbrv] hadoop git commit: HDFS-13418. NetworkTopology should be configurable when enable DFSNetworkTopology. Contributed by Tao Jie.

Posted by sh...@apache.org.
HDFS-13418. NetworkTopology should be configurable when enable DFSNetworkTopology. Contributed by Tao Jie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7721082
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7721082
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7721082

Branch: refs/heads/YARN-8200
Commit: a7721082dc3d608dc8a3ac7e1ffa0de21781f00b
Parents: 31d061e
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri Apr 13 17:59:35 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Fri Apr 13 17:59:35 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  7 +++
 .../hadoop/hdfs/net/DFSNetworkTopology.java     |  9 +++-
 .../src/main/resources/hdfs-default.xml         | 14 ++++++
 .../blockmanagement/TestDatanodeManager.java    | 52 ++++++++++++++++++++
 4 files changed, 80 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7721082/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 42ce05b..c9fef06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
@@ -1003,6 +1004,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.use.dfs.network.topology";
   public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = true;
 
+  public static final String DFS_NET_TOPOLOGY_IMPL_KEY =
+      "dfs.net.topology.impl";
+
+  public static final Class<DFSNetworkTopology> DFS_NET_TOPOLOGY_IMPL_DEFAULT =
+      DFSNetworkTopology.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7721082/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
index e74cdec..f3074d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
@@ -22,11 +22,13 @@ import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.ReflectionUtils;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -44,8 +46,11 @@ public class DFSNetworkTopology extends NetworkTopology {
   private static final Random RANDOM = new Random();
 
   public static DFSNetworkTopology getInstance(Configuration conf) {
-    DFSNetworkTopology nt = new DFSNetworkTopology();
-    return (DFSNetworkTopology)nt.init(DFSTopologyNodeImpl.FACTORY);
+    DFSNetworkTopology nt = ReflectionUtils.newInstance(conf.getClass(
+        DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
+        DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_DEFAULT,
+        DFSNetworkTopology.class), conf);
+    return (DFSNetworkTopology) nt.init(DFSTopologyNodeImpl.FACTORY);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7721082/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 282514a..66f9718 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4360,6 +4360,20 @@
     <value>true</value>
     <description>
      Enables DFSNetworkTopology to choose nodes for placing replicas.
+      When enabled, NetworkTopology will be instantiated as class defined in
+      property dfs.net.topology.impl, otherwise NetworkTopology will be
+      instantiated as class defined in property net.topology.impl.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.net.topology.impl</name>
+    <value>org.apache.hadoop.hdfs.net.DFSNetworkTopology</value>
+    <description>
+      The implementation class of NetworkTopology used in HDFS. By default,
+      the class org.apache.hadoop.hdfs.net.DFSNetworkTopology is specified and
+      used in block placement.
+      This property only works when dfs.use.dfs.network.topology is true.
     </description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7721082/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index 30e2aaf..6e1685b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -52,11 +54,13 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.Shell;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
+
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
 
@@ -491,4 +495,52 @@ public class TestDatanodeManager {
     Assert.assertEquals("Unexpected host or host in unexpected position",
         "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
   }
+
+  @Test
+  public void testNetworkTopologyInstantiation() throws Exception {
+    // case 1, dfs.use.dfs.network.topology=true, use the default
+    // DFSNetworkTopology impl.
+    Configuration conf1 = new HdfsConfiguration();
+    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
+    DatanodeManager dm1 = mockDatanodeManager(fsn, conf1);
+    assertEquals(DFSNetworkTopology.class, dm1.getNetworkTopology().getClass());
+
+    // case 2, dfs.use.dfs.network.topology=false, use the default
+    // NetworkTopology impl.
+    Configuration conf2 = new HdfsConfiguration();
+    conf2.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
+    DatanodeManager dm2 = mockDatanodeManager(fsn, conf2);
+    assertEquals(NetworkTopology.class, dm2.getNetworkTopology()
+        .getClass());
+
+    // case 3, dfs.use.dfs.network.topology=false, and specify the
+    // net.topology.impl property.
+    Configuration conf3 = new HdfsConfiguration();
+    conf3.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
+        MockDfsNetworkTopology.class, NetworkTopology.class);
+    conf3.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
+    DatanodeManager dm3 = mockDatanodeManager(fsn, conf3);
+    assertEquals(MockDfsNetworkTopology.class, dm3.getNetworkTopology()
+        .getClass());
+
+    // case 4, dfs.use.dfs.network.topology=true, and specify the
+    // dfs.net.topology.impl property.
+    Configuration conf4 = new HdfsConfiguration();
+    conf4.setClass(DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
+        MockDfsNetworkTopology.class, NetworkTopology.class);
+    conf4.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, true);
+    DatanodeManager dm4 = mockDatanodeManager(fsn, conf4);
+    assertEquals(MockDfsNetworkTopology.class, dm4.getNetworkTopology()
+        .getClass());
+  }
+
+  /**
+   * A NetworkTopology implementation for test.
+   *
+   */
+  public static class MockDfsNetworkTopology extends DFSNetworkTopology {
+    public MockDfsNetworkTopology(){
+      super();
+    }
+  }
 }
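
Together the two keys form a two-level switch: dfs.use.dfs.network.topology picks the DFS-aware path, and dfs.net.topology.impl (or net.topology.impl on the legacy path) picks the class. A sketch of plugging in a custom implementation, mirroring case 4 of the test above; CustomTopology is a hypothetical subclass of DFSNetworkTopology:

    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, true);
    // The class must extend DFSNetworkTopology on this code path.
    conf.setClass(DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
        CustomTopology.class, NetworkTopology.class);
    DFSNetworkTopology topology = DFSNetworkTopology.getInstance(conf);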




[31/50] [abbrv] hadoop git commit: HDFS-13336. Test cases of TestWriteToReplica failed in windows. Contributed by Xiao Liang.

Posted by sh...@apache.org.
HDFS-13336. Test cases of TestWriteToReplica failed in windows. Contributed by Xiao Liang.

(cherry picked from commit df92a17e02fe86279a6f4e413719d0a465b50837)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db9da432
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db9da432
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db9da432

Branch: refs/heads/YARN-8200
Commit: db9da432b9ae343bf13c4f3ed45e4c8b8c20a84f
Parents: 99e82e2
Author: Inigo Goiri <in...@apache.org>
Authored: Mon Apr 23 19:12:16 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon Apr 23 19:14:03 2018 -0700

----------------------------------------------------------------------
 .../fsdataset/impl/TestWriteToReplica.java       | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db9da432/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index e7c680c..f6b1688 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.util.ArrayList;
@@ -63,7 +64,8 @@ public class TestWriteToReplica {
   // test close
   @Test
   public void testClose() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration(),
+        new File(GenericTestUtils.getRandomizedTempPath())).build();
     
     try {
       cluster.waitActive();
@@ -85,7 +87,8 @@ public class TestWriteToReplica {
   // test append
   @Test
   public void testAppend() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration(),
+        new File(GenericTestUtils.getRandomizedTempPath())).build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
@@ -105,7 +108,8 @@ public class TestWriteToReplica {
   // test writeToRbw
   @Test
   public void testWriteToRbw() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration(),
+        new File(GenericTestUtils.getRandomizedTempPath())).build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
@@ -125,7 +129,8 @@ public class TestWriteToReplica {
   // test writeToTemporary
   @Test
   public void testWriteToTemporary() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration(),
+        new File(GenericTestUtils.getRandomizedTempPath())).build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
@@ -507,7 +512,8 @@ public class TestWriteToReplica {
   @Test
   public  void testReplicaMapAfterDatanodeRestart() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        new File(GenericTestUtils.getRandomizedTempPath()))
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
         .build();
     try {
@@ -561,7 +567,8 @@ public class TestWriteToReplica {
   @Test
   public void testRecoverInconsistentRbw() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        new File(GenericTestUtils.getRandomizedTempPath())).build();
     cluster.waitActive();
     DataNode dn = cluster.getDataNodes().get(0);
     FsDatasetImpl fsDataset = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
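
The fix is the same in every test: give each MiniDFSCluster its own randomized base directory, so files still locked by a previous run on Windows cannot collide with the next cluster's storage directories. The recurring pattern, exactly as applied above:

    // Isolate this cluster under a unique temp directory.
    File baseDir = new File(GenericTestUtils.getRandomizedTempPath());
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new HdfsConfiguration(), baseDir).build();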




[38/50] [abbrv] hadoop git commit: HDFS-13499. RBF: Show disabled name services in the UI. Contributed by Inigo Goiri.

Posted by sh...@apache.org.
HDFS-13499. RBF: Show disabled name services in the UI. Contributed by Inigo Goiri.

(cherry picked from commit 0a293de00d5f123c7c8248459aa8ae88a45f39b1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87c9fff3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87c9fff3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87c9fff3

Branch: refs/heads/YARN-8200
Commit: 87c9fff3525945f431c36ec56d0b2710cd5627fc
Parents: 1c3d746
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri Apr 27 10:38:15 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Fri Apr 27 10:41:16 2018 +0800

----------------------------------------------------------------------
 .../src/main/webapps/router/federationhealth.html               | 1 +
 .../hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js | 3 +++
 .../hadoop-hdfs-rbf/src/main/webapps/static/rbf.css             | 5 +++++
 3 files changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c9fff3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index f1cf482..37fcb92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -132,6 +132,7 @@
     <li class="federationhealth-namenode-icon federationhealth-namenode-active">Active</li>
     <li class="federationhealth-namenode-icon federationhealth-namenode-standby">Standby</li>
     <li class="federationhealth-namenode-icon federationhealth-namenode-safemode">Safe mode</li>
+    <li class="federationhealth-namenode-icon federationhealth-namenode-disabled">Disabled</li>
     <li class="federationhealth-namenode-icon federationhealth-namenode-unavailable">Unavailable</li>
   </ul>
 </div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c9fff3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
index a0b0128..6779b61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
@@ -130,6 +130,9 @@
           } else if (nodes[i].state === "UNAVAILABLE") {
             n.title = capitalise(n.state);
             n.iconState = "unavailable";
+          } else if (nodes[i].state === "DISABLED") {
+            n.title = capitalise(n.state);
+            n.iconState = "disabled";
           }
           if (n.namenodeId === "null") {
             n.namenodeId = "";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c9fff3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css
index f923085..43112af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css
@@ -53,6 +53,11 @@
     content: "\e090";
 }
 
+.federationhealth-namenode-disabled:before {
+    color: #eea236;
+    content: "\e136";
+}
+
 .federationhealth-namenode-legend {
     list-style-type: none;
     text-align: right;




[33/50] [abbrv] hadoop git commit: YARN-7598. Document how to use classpath isolation for aux-services in YARN. Contributed by Xuan Gong.

Posted by sh...@apache.org.
YARN-7598. Document how to use classpath isolation for aux-services in YARN. Contributed by Xuan Gong.

(cherry picked from commit 56788d759f47b4b158617701f543a9dcb4df69cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af70c69f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af70c69f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af70c69f

Branch: refs/heads/YARN-8200
Commit: af70c69fb2b58f9ec25302d218abf372adf72bfc
Parents: cc5416d
Author: Junping Du <ju...@apache.org>
Authored: Tue Apr 24 18:29:14 2018 +0800
Committer: Junping Du <ju...@apache.org>
Committed: Tue Apr 24 18:31:29 2018 +0800

----------------------------------------------------------------------
 .../src/site/markdown/NodeManager.md            | 49 +++++++++++++++++++-
 1 file changed, 48 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af70c69f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md
index 3261cd7..12201b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md
@@ -87,4 +87,51 @@ Step 4.  Auxiliary services.
 
   * NodeManagers in a YARN cluster can be configured to run auxiliary services. For a completely functional NM restart, YARN relies on any auxiliary service configured to also support recovery. This usually includes (1) avoiding usage of ephemeral ports so that previously running clients (in this case, usually containers) are not disrupted after restart and (2) having the auxiliary service itself support recoverability by reloading any previous state when NodeManager restarts and reinitializes the auxiliary service.
 
-  * A simple example for the above is the auxiliary service 'ShuffleHandler' for MapReduce (MR). ShuffleHandler respects the above two requirements already, so users/admins don't have do anything for it to support NM restart: (1) The configuration property **mapreduce.shuffle.port** controls which port the ShuffleHandler on a NodeManager host binds to, and it defaults to a non-ephemeral port. (2) The ShuffleHandler service also already supports recovery of previous state after NM restarts.
+  * A simple example for the above is the auxiliary service 'ShuffleHandler' for MapReduce (MR). ShuffleHandler respects the above two requirements already, so users/admins don't have to do anything for it to support NM restart: (1) The configuration property **mapreduce.shuffle.port** controls which port the ShuffleHandler on a NodeManager host binds to, and it defaults to a non-ephemeral port. (2) The ShuffleHandler service also already supports recovery of previous state after NM restarts.
+
+
+Auxiliary Service Classpath Isolation
+-------------------------------------
+
+### Introduction
+To launch auxiliary services on a NodeManager, users have to add their jars to the NodeManager's classpath directly, thus putting them on the system classloader. But if multiple versions of a plugin are present on the classpath, there is no control over which version actually gets loaded, and any conflicts between the dependencies introduced by the auxiliary services and the NodeManager itself can break the NodeManager, the auxiliary services, or both. To solve this issue, we can instantiate auxiliary services using a classloader that is separate from the system classloader.
+
+### Configuration
+This section describes the configuration variables for aux-service classpath isolation.
+
+The following settings need to be set in *yarn-site.xml*.
+
+|Configuration Name | Description |
+|:---- |:---- |
+| `yarn.nodemanager.aux-services.%s.classpath` | Provide a local directory that contains the service jar as well as all of its dependencies' jars. We can specify a single jar file or use ${local_dir_to_jar}/* to load all jars under the dependency directory. |
+| `yarn.nodemanager.aux-services.%s.remote-classpath` | Provide a remote absolute or relative path to the jar file (zip, tar.gz, tgz, tar and gz files are also supported). For the same aux-service class, only one of yarn.nodemanager.aux-services.%s.classpath and yarn.nodemanager.aux-services.%s.remote-classpath may be specified; if both are set, a YarnRuntimeException is thrown. Please also make sure that the owner of the jar file is the same as the NodeManager user, and that the permission bits satisfy (permbits & 0022) == 0 (for example 600, so it is not writable by group or other). |
+| `yarn.nodemanager.aux-services.%s.system-classes` | Normally, we do not need to set this configuration. A class is loaded from the customized classpath only if it does not belong to the system-classes. For example, by default the package org.apache.hadoop is in the system-classes, so if your class CustomAuxService is in the package org.apache.hadoop, it will not be loaded from the customized classpath. To solve this, either change the package of CustomAuxService, or configure your own system-classes that exclude org.apache.hadoop. |
+
+### Configuration Examples
+
+	<property>
+		<name>yarn.nodemanager.aux-services</name>
+		<value>mapreduce_shuffle,CustomAuxService</value>
+	</property>
+
+	<property>
+		<name>yarn.nodemanager.aux-services.CustomAuxService.classpath</name>
+		<value>${local_dir_to_jar}/CustomAuxService.jar</value>
+	</property>
+
+    <!--
+	<property>
+		<name>yarn.nodemanager.aux-services.CustomAuxService.remote-classpath</name>
+		<value>${remote-dir_to_jar}/CustomAuxService.jar</value>
+	</property>
+    -->
+
+	<property>
+		<name>yarn.nodemanager.aux-services.CustomAuxService.class</name>
+		<value>org.aux.CustomAuxService</value>
+	</property>
+
+	<property>
+		<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+		<value>org.apache.hadoop.mapred.ShuffleHandler</value>
+	</property>
\ No newline at end of file
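
A jar wired in through these properties just needs to contain an ordinary YARN auxiliary service. As a hedged sketch, a class like the org.aux.CustomAuxService named in the configuration above (the name comes from the example; the body is illustrative, not part of this commit) extends org.apache.hadoop.yarn.server.api.AuxiliaryService:

    package org.aux;

    import java.nio.ByteBuffer;

    import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
    import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
    import org.apache.hadoop.yarn.server.api.AuxiliaryService;

    public class CustomAuxService extends AuxiliaryService {

      public CustomAuxService() {
        super("CustomAuxService");
      }

      @Override
      public void initializeApplication(
          ApplicationInitializationContext initAppContext) {
        // Set up any per-application state when an application starts.
      }

      @Override
      public void stopApplication(
          ApplicationTerminationContext stopAppContext) {
        // Tear down the per-application state when the application finishes.
      }

      @Override
      public ByteBuffer getMetaData() {
        // Service metadata handed back to application masters on registration.
        return ByteBuffer.allocate(0);
      }
    }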




[22/50] [abbrv] hadoop git commit: HADOOP-15180. branch-2 : daemon processes' sysout overwrites 'ulimit -a' in daemon's out file. Contributed by Ranith Sardar

Posted by sh...@apache.org.
HADOOP-15180. branch-2 : daemon processes' sysout overwrites 'ulimit -a' in daemon's out file. Contributed by Ranith Sardar


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbcd16e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbcd16e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbcd16e6

Branch: refs/heads/YARN-8200
Commit: cbcd16e6b7c84878bb2e26c398e3bdf943e364d0
Parents: 8f341c6
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Apr 18 23:01:50 2018 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed Apr 18 23:01:50 2018 +0530

----------------------------------------------------------------------
 .../hadoop-common/src/main/bin/hadoop-daemon.sh                  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbcd16e6/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
index bcb618e..9bb951b 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
@@ -156,10 +156,10 @@ case $startStop in
         else
           hdfsScript="$HADOOP_HDFS_HOME"/bin/hdfs
         fi
-        nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+        nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" >> "$log" 2>&1 < /dev/null &
       ;;
       (*)
-        nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+        nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" >> "$log" 2>&1 < /dev/null &
       ;;
     esac
     echo $! > $pid




[50/50] [abbrv] hadoop git commit: MAPREDUCE-7073. Optimize TokenCache#obtainTokensForNamenodesInternal

Posted by sh...@apache.org.
MAPREDUCE-7073. Optimize TokenCache#obtainTokensForNamenodesInternal

Signed-off-by: Akira Ajisaka <aa...@apache.org>
(cherry picked from commit 1a95a4524a8c6c7be601ce8b92640a6a76164a2c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58bcb90e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58bcb90e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58bcb90e

Branch: refs/heads/YARN-8200
Commit: 58bcb90e7039bd4cec116fe66f1b1428dd493372
Parents: 06632c0
Author: Bibin A Chundatt <bi...@apache.org>
Authored: Wed May 2 16:14:28 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed May 2 16:45:24 2018 +0900

----------------------------------------------------------------------
 .../hadoop/mapreduce/security/TokenCache.java     | 14 +++++++++-----
 .../hadoop/mapreduce/security/TestTokenCache.java | 18 +++++++++---------
 2 files changed, 18 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58bcb90e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
index 78f6c16..8cd407c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
@@ -24,6 +24,7 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -96,8 +97,9 @@ public class TokenCache {
     for(Path p: ps) {
       fsSet.add(p.getFileSystem(conf));
     }
+    String masterPrincipal = Master.getMasterPrincipal(conf);
     for (FileSystem fs : fsSet) {
-      obtainTokensForNamenodesInternal(fs, credentials, conf);
+      obtainTokensForNamenodesInternal(fs, credentials, conf, masterPrincipal);
     }
   }
 
@@ -122,15 +124,17 @@ public class TokenCache {
    * @param conf
    * @throws IOException
    */
-  static void obtainTokensForNamenodesInternal(FileSystem fs, 
-      Credentials credentials, Configuration conf) throws IOException {
+  static void obtainTokensForNamenodesInternal(FileSystem fs,
+      Credentials credentials, Configuration conf, String renewer)
+      throws IOException {
     // RM skips renewing token with empty renewer
     String delegTokenRenewer = "";
     if (!isTokenRenewalExcluded(fs, conf)) {
-      delegTokenRenewer = Master.getMasterPrincipal(conf);
-      if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
+      if (StringUtils.isEmpty(renewer)) {
         throw new IOException(
             "Can't get Master Kerberos principal for use as renewer");
+      } else {
+        delegTokenRenewer = renewer;
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58bcb90e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
index 127f8ae..a44e533 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
@@ -56,8 +56,8 @@ public class TestTokenCache {
   @Test
   public void testObtainTokens() throws Exception {
     Credentials credentials = new Credentials();
-    FileSystem fs = mock(FileSystem.class);  
-    TokenCache.obtainTokensForNamenodesInternal(fs, credentials, conf);
+    FileSystem fs = mock(FileSystem.class);
+    TokenCache.obtainTokensForNamenodesInternal(fs, credentials, conf, renewer);
     verify(fs).addDelegationTokens(eq(renewer), eq(credentials));
   }
 
@@ -105,23 +105,23 @@ public class TestTokenCache {
     checkToken(creds, newerToken1);
     
     // get token for fs1, see that fs2's token was loaded 
-    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer);
     checkToken(creds, newerToken1, token2);
     
     // get token for fs2, nothing should change since already present
-    TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf, renewer);
     checkToken(creds, newerToken1, token2);
     
     // get token for fs3, should only add token for fs3
-    TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf, renewer);
     Token<?> token3 = creds.getToken(new Text(fs3.getCanonicalServiceName()));
     assertTrue(token3 != null);
     checkToken(creds, newerToken1, token2, token3);
     
     // be paranoid, check one last time that nothing changes
-    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
-    TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
-    TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer);
+    TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf, renewer);
+    TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf, renewer);
     checkToken(creds, newerToken1, token2, token3);
   }
 
@@ -202,7 +202,7 @@ public class TestTokenCache {
     // wait to set, else the obtain tokens call above will fail with FNF
     conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
     creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
-    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
+    TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer);
     String fs_addr = fs1.getCanonicalServiceName();
     Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
     assertNotNull("Token for nn is null", nnt);
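
The public entry point is unchanged by this patch; only the internal helper now takes the renewer, so Master.getMasterPrincipal(conf) is resolved once per job rather than once per filesystem. A hedged caller-side sketch of the unchanged public API (the paths are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.security.TokenCache;
    import org.apache.hadoop.security.Credentials;

    public class ObtainTokensExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Credentials credentials = new Credentials();
        // Two paths on different namenodes; after this change the renewer
        // principal is looked up once and reused for both filesystems.
        Path[] paths = {
            new Path("hdfs://nn1:8020/input"),
            new Path("hdfs://nn2:8020/input")};
        TokenCache.obtainTokensForNamenodes(credentials, paths, conf);
      }
    }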




[29/50] [abbrv] hadoop git commit: HDFS-13478. RBF: Disabled Nameservice store API. Contributed by Inigo Goiri.

Posted by sh...@apache.org.
HDFS-13478. RBF: Disabled Nameservice store API. Contributed by Inigo Goiri.

(cherry picked from commit b6dae26f4490bbd03360b8df19ef00cf843430f3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9752503
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9752503
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9752503

Branch: refs/heads/YARN-8200
Commit: a9752503034d13baad80a2884651c5906599039c
Parents: 06f3f45
Author: Yiqun Lin <yq...@apache.org>
Authored: Sat Apr 21 13:19:09 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Sat Apr 21 14:53:12 2018 +0800

----------------------------------------------------------------------
 ...uterAdminProtocolServerSideTranslatorPB.java | 67 ++++++++++++++
 .../RouterAdminProtocolTranslatorPB.java        | 64 ++++++++++++-
 .../federation/router/NameserviceManager.java   | 51 +++++++++++
 .../federation/router/RouterAdminServer.java    | 53 ++++++++++-
 .../server/federation/router/RouterClient.java  |  4 +
 .../store/DisabledNameserviceStore.java         | 65 ++++++++++++++
 .../federation/store/StateStoreService.java     |  3 +
 .../impl/DisabledNameserviceStoreImpl.java      | 68 ++++++++++++++
 .../protocol/DisableNameserviceRequest.java     | 47 ++++++++++
 .../protocol/DisableNameserviceResponse.java    | 50 +++++++++++
 .../protocol/EnableNameserviceRequest.java      | 47 ++++++++++
 .../protocol/EnableNameserviceResponse.java     | 50 +++++++++++
 .../GetDisabledNameservicesRequest.java         | 30 +++++++
 .../GetDisabledNameservicesResponse.java        | 51 +++++++++++
 .../pb/DisableNameserviceRequestPBImpl.java     | 73 +++++++++++++++
 .../pb/DisableNameserviceResponsePBImpl.java    | 74 +++++++++++++++
 .../impl/pb/EnableNameserviceRequestPBImpl.java | 73 +++++++++++++++
 .../pb/EnableNameserviceResponsePBImpl.java     | 73 +++++++++++++++
 .../GetDisabledNameservicesRequestPBImpl.java   | 66 ++++++++++++++
 .../GetDisabledNameservicesResponsePBImpl.java  | 84 +++++++++++++++++
 .../federation/store/records/BaseRecord.java    | 13 +++
 .../store/records/DisabledNameservice.java      | 81 +++++++++++++++++
 .../impl/pb/DisabledNameservicePBImpl.java      | 95 ++++++++++++++++++++
 .../hdfs/tools/federation/RouterAdmin.java      | 74 ++++++++++++++-
 .../src/main/proto/FederationProtocol.proto     | 35 +++++++-
 .../src/main/proto/RouterProtocol.proto         | 15 ++++
 .../federation/router/TestRouterAdmin.java      | 49 ++++++++++
 .../federation/router/TestRouterAdminCLI.java   | 48 +++++++++-
 .../store/driver/TestStateStoreDriverBase.java  | 27 +++++-
 29 files changed, 1519 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
index 159d5c2..0204ce8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
@@ -23,8 +23,14 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeRequestProto;
@@ -38,8 +44,14 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProt
 import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
@@ -52,8 +64,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableE
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.DisableNameserviceRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.DisableNameserviceResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnableNameserviceRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnableNameserviceResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDisabledNameservicesRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDisabledNameservicesResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeRequestPBImpl;
@@ -208,4 +226,53 @@ public class RouterAdminProtocolServerSideTranslatorPB implements
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public DisableNameserviceResponseProto disableNameservice(
+      RpcController controller, DisableNameserviceRequestProto request)
+      throws ServiceException {
+    try {
+      DisableNameserviceRequest req =
+          new DisableNameserviceRequestPBImpl(request);
+      DisableNameserviceResponse response = server.disableNameservice(req);
+      DisableNameserviceResponsePBImpl responsePB =
+          (DisableNameserviceResponsePBImpl) response;
+      return responsePB.getProto();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public EnableNameserviceResponseProto enableNameservice(
+      RpcController controller, EnableNameserviceRequestProto request)
+          throws ServiceException {
+    try {
+      EnableNameserviceRequest req =
+          new EnableNameserviceRequestPBImpl(request);
+      EnableNameserviceResponse response = server.enableNameservice(req);
+      EnableNameserviceResponsePBImpl responsePB =
+          (EnableNameserviceResponsePBImpl) response;
+      return responsePB.getProto();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetDisabledNameservicesResponseProto getDisabledNameservices(
+      RpcController controller, GetDisabledNameservicesRequestProto request)
+      throws ServiceException {
+    try {
+      GetDisabledNameservicesRequest req =
+          new GetDisabledNameservicesRequestPBImpl(request);
+      GetDisabledNameservicesResponse response =
+          server.getDisabledNameservices(req);
+      GetDisabledNameservicesResponsePBImpl responsePB =
+          (GetDisabledNameservicesResponsePBImpl)response;
+      return responsePB.getProto();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
index d6210ce..6e24438 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
@@ -24,8 +24,14 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeRequestProto;
@@ -37,11 +43,18 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProt
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
 import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
@@ -54,7 +67,12 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableE
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.DisableNameserviceRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.DisableNameserviceResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnableNameserviceRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnableNameserviceResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetDisabledNameservicesResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeResponsePBImpl;
@@ -80,7 +98,7 @@ import com.google.protobuf.ServiceException;
 @InterfaceStability.Stable
 public class RouterAdminProtocolTranslatorPB
     implements ProtocolMetaInterface, MountTableManager,
-    Closeable, ProtocolTranslator, RouterStateManager {
+    Closeable, ProtocolTranslator, RouterStateManager, NameserviceManager {
   final private RouterAdminProtocolPB rpcProxy;
 
   public RouterAdminProtocolTranslatorPB(RouterAdminProtocolPB proxy) {
@@ -205,4 +223,48 @@ public class RouterAdminProtocolTranslatorPB
       throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
     }
   }
+
+  @Override
+  public DisableNameserviceResponse disableNameservice(
+      DisableNameserviceRequest request) throws IOException {
+    DisableNameserviceRequestPBImpl requestPB =
+        (DisableNameserviceRequestPBImpl)request;
+    DisableNameserviceRequestProto proto = requestPB.getProto();
+    try {
+      DisableNameserviceResponseProto response =
+          rpcProxy.disableNameservice(null, proto);
+      return new DisableNameserviceResponsePBImpl(response);
+    } catch (ServiceException e) {
+      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+    }
+  }
+
+  @Override
+  public EnableNameserviceResponse enableNameservice(
+      EnableNameserviceRequest request) throws IOException {
+    EnableNameserviceRequestPBImpl requestPB =
+        (EnableNameserviceRequestPBImpl)request;
+    EnableNameserviceRequestProto proto = requestPB.getProto();
+    try {
+      EnableNameserviceResponseProto response =
+          rpcProxy.enableNameservice(null, proto);
+      return new EnableNameserviceResponsePBImpl(response);
+    } catch (ServiceException e) {
+      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+    }
+  }
+
+  @Override
+  public GetDisabledNameservicesResponse getDisabledNameservices(
+      GetDisabledNameservicesRequest request) throws IOException {
+    GetDisabledNameservicesRequestProto proto =
+        GetDisabledNameservicesRequestProto.newBuilder().build();
+    try {
+      GetDisabledNameservicesResponseProto response =
+          rpcProxy.getDisabledNameservices(null, proto);
+      return new GetDisabledNameservicesResponsePBImpl(response);
+    } catch (ServiceException e) {
+      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java
new file mode 100644
index 0000000..cab336c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse;
+
+/**
+ * Interface for enabling and disabling name services.
+ */
+public interface NameserviceManager {
+
+  /**
+   * Disable a name service.
+   */
+  DisableNameserviceResponse disableNameservice(
+      DisableNameserviceRequest request) throws IOException;
+
+  /**
+   * Enable a name service.
+   */
+  EnableNameserviceResponse enableNameservice(EnableNameserviceRequest request)
+      throws IOException;
+
+  /**
+   * Get the list of disabled name services.
+   */
+  GetDisabledNameservicesResponse getDisabledNameservices(
+      GetDisabledNameservicesRequest request) throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 2c195c6..da67796 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -29,11 +30,18 @@ import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminPro
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
@@ -62,7 +70,7 @@ import com.google.protobuf.BlockingService;
  * router. It is created, started, and stopped by {@link Router}.
  */
 public class RouterAdminServer extends AbstractService
-    implements MountTableManager, RouterStateManager {
+    implements MountTableManager, RouterStateManager, NameserviceManager {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(RouterAdminServer.class);
@@ -73,6 +81,8 @@ public class RouterAdminServer extends AbstractService
 
   private MountTableStore mountTableStore;
 
+  private DisabledNameserviceStore disabledStore;
+
   /** The Admin server that listens to requests from clients. */
   private final Server adminServer;
   private final InetSocketAddress adminAddress;
@@ -166,6 +176,19 @@ public class RouterAdminServer extends AbstractService
     return this.mountTableStore;
   }
 
+  private DisabledNameserviceStore getDisabledNameserviceStore()
+      throws IOException {
+    if (this.disabledStore == null) {
+      this.disabledStore = router.getStateStore().getRegisteredRecordStore(
+          DisabledNameserviceStore.class);
+      if (this.disabledStore == null) {
+        throw new IOException(
+            "Disabled Nameservice state store is not available.");
+      }
+    }
+    return this.disabledStore;
+  }
+
   /**
    * Get the RPC address of the admin service.
    * @return Administration service RPC address.
@@ -256,6 +279,34 @@ public class RouterAdminServer extends AbstractService
             && !serverInSafeMode);
   }
 
+  @Override
+  public DisableNameserviceResponse disableNameservice(
+      DisableNameserviceRequest request) throws IOException {
+    // TODO check permissions
+    String nsId = request.getNameServiceId();
+    // TODO check that the name service exists
+    boolean success = getDisabledNameserviceStore().disableNameservice(nsId);
+    return DisableNameserviceResponse.newInstance(success);
+  }
+
+  @Override
+  public EnableNameserviceResponse enableNameservice(
+      EnableNameserviceRequest request) throws IOException {
+    // TODO check permissions
+    String nsId = request.getNameServiceId();
+    // TODO check that the name service exists
+    boolean success = getDisabledNameserviceStore().enableNameservice(nsId);
+    return EnableNameserviceResponse.newInstance(success);
+  }
+
+  @Override
+  public GetDisabledNameservicesResponse getDisabledNameservices(
+      GetDisabledNameservicesRequest request) throws IOException {
+    // TODO check permissions
+    Set<String> nsIds = getDisabledNameserviceStore().getDisabledNameservices();
+    return GetDisabledNameservicesResponse.newInstance(nsIds);
+  }
+
   /**
    * Get a new permission checker used for making mount table access
    * control. This method will be invoked during each RPC call in router

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java
index b36e459..95fe1a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java
@@ -73,6 +73,10 @@ public class RouterClient implements Closeable {
     return proxy;
   }
 
+  public NameserviceManager getNameserviceManager() {
+    return proxy;
+  }
+
   @Override
   public synchronized void close() throws IOException {
     RPC.stopProxy(proxy);
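
With getNameserviceManager() exposed on the client, an admin-side caller (the RouterAdmin changes in this commit do essentially this) can drive the new API end to end. A hedged sketch, assuming an already-connected RouterClient; the nameservice id "ns0" is illustrative, and EnableNameserviceRequest.newInstance(String) is assumed symmetric to the Disable variant shown later in this diff:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
    import org.apache.hadoop.hdfs.server.federation.router.RouterClient;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceRequest;

    public final class NameserviceToggleExample {

      private NameserviceToggleExample() {
      }

      static void toggle(RouterClient client) throws IOException {
        NameserviceManager manager = client.getNameserviceManager();

        // Record "ns0" as disabled in the State Store.
        manager.disableNameservice(
            DisableNameserviceRequest.newInstance("ns0"));

        // ... and later remove it from the disabled set again.
        manager.enableNameservice(
            EnableNameserviceRequest.newInstance("ns0"));
      }
    }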

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/DisabledNameserviceStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/DisabledNameserviceStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/DisabledNameserviceStore.java
new file mode 100644
index 0000000..20afe98
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/DisabledNameserviceStore.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
+import org.apache.hadoop.hdfs.server.federation.store.records.DisabledNameservice;
+
+/**
+ * State store to track disabled name services.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class DisabledNameserviceStore
+    extends CachedRecordStore<DisabledNameservice> {
+
+  public DisabledNameserviceStore(StateStoreDriver driver) {
+    super(DisabledNameservice.class, driver);
+  }
+
+  /**
+   * Disable a name service.
+   *
+   * @param nsId Identifier of the name service.
+   * @return If the name service was successfully disabled.
+   * @throws IOException If the state store could not be queried.
+   */
+  public abstract boolean disableNameservice(String nsId) throws IOException;
+
+  /**
+   * Enable a name service.
+   *
+   * @param nsId Identifier of the name service.
+   * @return If the name service was successfully brought back.
+   * @throws IOException If the state store could not be queried.
+   */
+  public abstract boolean enableNameservice(String nsId) throws IOException;
+
+  /**
+   * Get a list of disabled name services.
+   *
+   * @return List of disabled name services.
+   * @throws IOException If the state store could not be queried.
+   */
+  public abstract Set<String> getDisabledNameservices() throws IOException;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
index a0744a6..64c22ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMBean;
 import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMetrics;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
+import org.apache.hadoop.hdfs.server.federation.store.impl.DisabledNameserviceStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.impl.MembershipStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.impl.RouterStoreImpl;
@@ -75,6 +76,7 @@ import com.google.common.annotations.VisibleForTesting;
  * See {@link org.apache.hadoop.fs.viewfs.ViewFs ViewFs}.
  * <li>{@link RebalancerStore}: Log of the rebalancing operations.
  * <li>{@link RouterStore}: Router state in the federation.
+ * <li>{@link DisabledNameserviceStore}: Disabled name services.
  * <li>{@link TokenStore}: Tokens in the federation.
  * </ul>
  */
@@ -152,6 +154,7 @@ public class StateStoreService extends CompositeService {
     addRecordStore(MembershipStoreImpl.class);
     addRecordStore(MountTableStoreImpl.class);
     addRecordStore(RouterStoreImpl.class);
+    addRecordStore(DisabledNameserviceStoreImpl.class);
 
     // Check the connection to the State Store periodically
     this.monitorService = new StateStoreConnectionMonitorService(this);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/DisabledNameserviceStoreImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/DisabledNameserviceStoreImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/DisabledNameserviceStoreImpl.java
new file mode 100644
index 0000000..5eca869
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/DisabledNameserviceStoreImpl.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.impl;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
+import org.apache.hadoop.hdfs.server.federation.store.records.DisabledNameservice;
+
+/**
+ * Implementation of {@link DisabledNameserviceStore}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DisabledNameserviceStoreImpl extends DisabledNameserviceStore {
+
+  public DisabledNameserviceStoreImpl(StateStoreDriver driver) {
+    super(driver);
+  }
+
+  @Override
+  public boolean disableNameservice(String nsId)
+      throws IOException {
+
+    DisabledNameservice record =
+        DisabledNameservice.newInstance(nsId);
+    return getDriver().put(record, false, false);
+  }
+
+  @Override
+  public boolean enableNameservice(String nsId)
+      throws IOException {
+
+    DisabledNameservice record =
+        DisabledNameservice.newInstance(nsId);
+    return getDriver().remove(record);
+  }
+
+  @Override
+  public Set<String> getDisabledNameservices() throws IOException {
+    Set<String> disabledNameservices = new TreeSet<>();
+    for (DisabledNameservice record : getCachedRecords()) {
+      String nsId = record.getNameserviceId();
+      disabledNameservices.add(nsId);
+    }
+    return disabledNameservices;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/DisableNameserviceRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/DisableNameserviceRequest.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/DisableNameserviceRequest.java
new file mode 100644
index 0000000..7e4267d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/DisableNameserviceRequest.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API request for disabling a name service and updating its state in the
+ * State Store.
+ */
+public abstract class DisableNameserviceRequest {
+
+  public static DisableNameserviceRequest newInstance() {
+    return StateStoreSerializer.newRecord(DisableNameserviceRequest.class);
+  }
+
+  public static DisableNameserviceRequest newInstance(String nsId) {
+    DisableNameserviceRequest request = newInstance();
+    request.setNameServiceId(nsId);
+    return request;
+  }
+
+  @Public
+  @Unstable
+  public abstract String getNameServiceId();
+
+  @Public
+  @Unstable
+  public abstract void setNameServiceId(String nsId);
+}
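
The request/response classes in this commit all share the same factory
pattern: the abstract class asks StateStoreSerializer for a concrete,
protobuf-backed instance, so callers never reference the PB classes
directly. A short sketch, with a hypothetical nameservice ID:

    DisableNameserviceRequest request =
        DisableNameserviceRequest.newInstance("ns1");
    String nsId = request.getNameServiceId();  // returns "ns1"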

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/DisableNameserviceResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/DisableNameserviceResponse.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/DisableNameserviceResponse.java
new file mode 100644
index 0000000..cec1519
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/DisableNameserviceResponse.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API response for disabling a name service and updating its state in the
+ * State Store.
+ */
+public abstract class DisableNameserviceResponse {
+
+  public static DisableNameserviceResponse newInstance() throws IOException {
+    return StateStoreSerializer.newRecord(DisableNameserviceResponse.class);
+  }
+
+  public static DisableNameserviceResponse newInstance(boolean status)
+      throws IOException {
+    DisableNameserviceResponse response = newInstance();
+    response.setStatus(status);
+    return response;
+  }
+
+  @Public
+  @Unstable
+  public abstract boolean getStatus();
+
+  @Public
+  @Unstable
+  public abstract void setStatus(boolean result);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/EnableNameserviceRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/EnableNameserviceRequest.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/EnableNameserviceRequest.java
new file mode 100644
index 0000000..dd889da
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/EnableNameserviceRequest.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API request for enabling a name service and updating its state in the
+ * State Store.
+ */
+public abstract class EnableNameserviceRequest {
+
+  public static EnableNameserviceRequest newInstance() {
+    return StateStoreSerializer.newRecord(EnableNameserviceRequest.class);
+  }
+
+  public static EnableNameserviceRequest newInstance(String nsId) {
+    EnableNameserviceRequest request = newInstance();
+    request.setNameServiceId(nsId);
+    return request;
+  }
+
+  @Public
+  @Unstable
+  public abstract String getNameServiceId();
+
+  @Public
+  @Unstable
+  public abstract void setNameServiceId(String nsId);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/EnableNameserviceResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/EnableNameserviceResponse.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/EnableNameserviceResponse.java
new file mode 100644
index 0000000..c0c31b2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/EnableNameserviceResponse.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API response for enabling a name service and updating its state in the
+ * State Store.
+ */
+public abstract class EnableNameserviceResponse {
+
+  public static EnableNameserviceResponse newInstance() throws IOException {
+    return StateStoreSerializer.newRecord(EnableNameserviceResponse.class);
+  }
+
+  public static EnableNameserviceResponse newInstance(boolean status)
+      throws IOException {
+    EnableNameserviceResponse response = newInstance();
+    response.setStatus(status);
+    return response;
+  }
+
+  @Public
+  @Unstable
+  public abstract boolean getStatus();
+
+  @Public
+  @Unstable
+  public abstract void setStatus(boolean result);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDisabledNameservicesRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDisabledNameservicesRequest.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDisabledNameservicesRequest.java
new file mode 100644
index 0000000..2681542
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDisabledNameservicesRequest.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API request for getting the disabled name services.
+ */
+public abstract class GetDisabledNameservicesRequest {
+
+  public static GetDisabledNameservicesRequest newInstance() {
+    return StateStoreSerializer.newRecord(GetDisabledNameservicesRequest.class);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDisabledNameservicesResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDisabledNameservicesResponse.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDisabledNameservicesResponse.java
new file mode 100644
index 0000000..260e569
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetDisabledNameservicesResponse.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API response for getting the disabled nameservices in the State Store.
+ */
+public abstract class GetDisabledNameservicesResponse {
+
+  public static GetDisabledNameservicesResponse newInstance() {
+    return StateStoreSerializer.newRecord(
+        GetDisabledNameservicesResponse.class);
+  }
+
+  public static GetDisabledNameservicesResponse newInstance(
+      Set<String> nsIds) {
+    GetDisabledNameservicesResponse response = newInstance();
+    response.setNameservices(nsIds);
+    return response;
+  }
+
+  @Public
+  @Unstable
+  public abstract Set<String> getNameservices();
+
+  @Public
+  @Unstable
+  public abstract void setNameservices(Set<String> nameservices);
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/DisableNameserviceRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/DisableNameserviceRequestPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/DisableNameserviceRequestPBImpl.java
new file mode 100644
index 0000000..5e5aa01
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/DisableNameserviceRequestPBImpl.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceRequestProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceRequestProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * DisableNameserviceRequest.
+ */
+public class DisableNameserviceRequestPBImpl extends DisableNameserviceRequest
+    implements PBRecord {
+
+  private FederationProtocolPBTranslator<DisableNameserviceRequestProto,
+      Builder, DisableNameserviceRequestProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              DisableNameserviceRequestProto.class);
+
+  public DisableNameserviceRequestPBImpl() {
+  }
+
+  public DisableNameserviceRequestPBImpl(DisableNameserviceRequestProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public DisableNameserviceRequestProto getProto() {
+    return translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public String getNameServiceId() {
+    return this.translator.getProtoOrBuilder().getNameServiceId();
+  }
+
+  @Override
+  public void setNameServiceId(String nsId) {
+    this.translator.getBuilder().setNameServiceId(nsId);
+  }
+}
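
Each PBImpl below delegates its field accessors to a
FederationProtocolPBTranslator. A hedged sketch of the round trip these
classes support (the "ns1" value is hypothetical):

    DisableNameserviceRequestPBImpl impl = new DisableNameserviceRequestPBImpl();
    impl.setNameServiceId("ns1");            // writes through the proto builder
    DisableNameserviceRequestProto proto = impl.getProto();  // builds the message
    // Wrapping an existing proto restores the same view of the fields.
    DisableNameserviceRequestPBImpl copy =
        new DisableNameserviceRequestPBImpl(proto);
    assert "ns1".equals(copy.getNameServiceId());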

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/DisableNameserviceResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/DisableNameserviceResponsePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/DisableNameserviceResponsePBImpl.java
new file mode 100644
index 0000000..7b274d8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/DisableNameserviceResponsePBImpl.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceResponseProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisableNameserviceResponseProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * {@link DisableNameserviceResponse}.
+ */
+public class DisableNameserviceResponsePBImpl
+    extends DisableNameserviceResponse implements PBRecord {
+
+  private FederationProtocolPBTranslator<DisableNameserviceResponseProto,
+      Builder, DisableNameserviceResponseProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              DisableNameserviceResponseProto.class);
+
+  public DisableNameserviceResponsePBImpl() {
+  }
+
+  public DisableNameserviceResponsePBImpl(
+      DisableNameserviceResponseProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public DisableNameserviceResponseProto getProto() {
+    return translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public boolean getStatus() {
+    return this.translator.getProtoOrBuilder().getStatus();
+  }
+
+  @Override
+  public void setStatus(boolean status) {
+    this.translator.getBuilder().setStatus(status);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceRequestPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceRequestPBImpl.java
new file mode 100644
index 0000000..510a263
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceRequestPBImpl.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceRequestProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceRequestProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * EnableNameserviceRequest.
+ */
+public class EnableNameserviceRequestPBImpl extends EnableNameserviceRequest
+    implements PBRecord {
+
+  private FederationProtocolPBTranslator<EnableNameserviceRequestProto,
+      Builder, EnableNameserviceRequestProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              EnableNameserviceRequestProto.class);
+
+  public EnableNameserviceRequestPBImpl() {
+  }
+
+  public EnableNameserviceRequestPBImpl(EnableNameserviceRequestProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public EnableNameserviceRequestProto getProto() {
+    return translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public String getNameServiceId() {
+    return this.translator.getProtoOrBuilder().getNameServiceId();
+  }
+
+  @Override
+  public void setNameServiceId(String nsId) {
+    this.translator.getBuilder().setNameServiceId(nsId);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceResponsePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceResponsePBImpl.java
new file mode 100644
index 0000000..9429b76
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceResponsePBImpl.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceResponseProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnableNameserviceResponseProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * EnableNameserviceResponse.
+ */
+public class EnableNameserviceResponsePBImpl extends EnableNameserviceResponse
+    implements PBRecord {
+
+  private FederationProtocolPBTranslator<EnableNameserviceResponseProto,
+      Builder, EnableNameserviceResponseProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              EnableNameserviceResponseProto.class);
+
+  public EnableNameserviceResponsePBImpl() {
+  }
+
+  public EnableNameserviceResponsePBImpl(EnableNameserviceResponseProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public EnableNameserviceResponseProto getProto() {
+    return translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public boolean getStatus() {
+    return this.translator.getProtoOrBuilder().getStatus();
+  }
+
+  @Override
+  public void setStatus(boolean status) {
+    this.translator.getBuilder().setStatus(status);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDisabledNameservicesRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDisabledNameservicesRequestPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDisabledNameservicesRequestPBImpl.java
new file mode 100644
index 0000000..829fe36
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDisabledNameservicesRequestPBImpl.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesRequestProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesRequestProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * GetDisabledNameservicesRequest.
+ */
+public class GetDisabledNameservicesRequestPBImpl
+    extends GetDisabledNameservicesRequest implements PBRecord {
+
+  private FederationProtocolPBTranslator<GetDisabledNameservicesRequestProto,
+      Builder, GetDisabledNameservicesRequestProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              GetDisabledNameservicesRequestProto.class);
+
+  public GetDisabledNameservicesRequestPBImpl() {
+    // As this request has no parameters, initialize the proto builder.
+    this.translator.getBuilder();
+  }
+
+  public GetDisabledNameservicesRequestPBImpl(
+      GetDisabledNameservicesRequestProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public GetDisabledNameservicesRequestProto getProto() {
+    return translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDisabledNameservicesResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDisabledNameservicesResponsePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDisabledNameservicesResponsePBImpl.java
new file mode 100644
index 0000000..ccc0fcc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetDisabledNameservicesResponsePBImpl.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesResponseProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetDisabledNameservicesResponseProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * GetDisabledNameservicesResponse.
+ */
+public class GetDisabledNameservicesResponsePBImpl
+    extends GetDisabledNameservicesResponse implements PBRecord {
+
+  private FederationProtocolPBTranslator<GetDisabledNameservicesResponseProto,
+      Builder, GetDisabledNameservicesResponseProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              GetDisabledNameservicesResponseProto.class);
+
+  public GetDisabledNameservicesResponsePBImpl() {
+  }
+
+  public GetDisabledNameservicesResponsePBImpl(
+      GetDisabledNameservicesResponseProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public GetDisabledNameservicesResponseProto getProto() {
+    return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public Set<String> getNameservices() {
+    List<String> nsIds =
+        this.translator.getProtoOrBuilder().getNameServiceIdsList();
+    return new TreeSet<>(nsIds);
+  }
+
+  @Override
+  public void setNameservices(Set<String> nameservices) {
+    this.translator.getBuilder().clearNameServiceIds();
+    for (String nsId : nameservices) {
+      this.translator.getBuilder().addNameServiceIds(nsId);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
index d5e60ce..64ecc1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
@@ -21,6 +21,8 @@ import java.util.Map;
 
 import org.apache.hadoop.util.Time;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Abstract base of a data record in the StateStore. All StateStore records are
  * derived from this class. Data records are persisted in the data store and
@@ -110,6 +112,17 @@ public abstract class BaseRecord implements Comparable<BaseRecord> {
   }
 
   /**
+   * Whether the record has fields other than the primary keys. This is
+   * used by TestStateStoreDriverBase to skip the modification check.
+   *
+   * @return True if the record has fields other than the primary keys.
+   */
+  @VisibleForTesting
+  public boolean hasOtherFields() {
+    return true;
+  }
+
+  /**
    * Generates a cache key from a map of values.
    *
    * @param keys Map of values.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/DisabledNameservice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/DisabledNameservice.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/DisabledNameservice.java
new file mode 100644
index 0000000..c8504b7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/DisabledNameservice.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records;
+
+import java.io.IOException;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * Data record indicating a specific name service ID has been disabled and
+ * is no longer valid. Allows quick disabling of name services.
+ */
+public abstract class DisabledNameservice extends BaseRecord {
+
+  public DisabledNameservice() {
+    super();
+  }
+
+  public static DisabledNameservice newInstance() throws IOException {
+    DisabledNameservice record =
+        StateStoreSerializer.newRecord(DisabledNameservice.class);
+    record.init();
+    return record;
+  }
+
+  public static DisabledNameservice newInstance(String nsId)
+      throws IOException {
+    DisabledNameservice record = newInstance();
+    record.setNameserviceId(nsId);
+    return record;
+  }
+
+  /**
+   * Get the identifier of the name service to disable.
+   *
+   * @return Identifier of the name service to disable.
+   */
+  public abstract String getNameserviceId();
+
+  /**
+   * Set the identifier of the name service to disable.
+   *
+   * @param nameServiceId Identifier of the name service to disable.
+   */
+  public abstract void setNameserviceId(String nameServiceId);
+
+  @Override
+  public SortedMap<String, String> getPrimaryKeys() {
+    SortedMap<String, String> keyMap = new TreeMap<>();
+    keyMap.put("nameServiceId", this.getNameserviceId());
+    return keyMap;
+  }
+
+  @Override
+  public boolean hasOtherFields() {
+    // We don't have fields other than the primary keys
+    return false;
+  }
+
+  @Override
+  public long getExpirationMs() {
+    return -1;
+  }
+}
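
A short sketch of the persistence contract this record implements
(values hypothetical, IOException handling elided):

    DisabledNameservice record = DisabledNameservice.newInstance("ns1");
    // The primary-key map is the record's only state, which is why
    // hasOtherFields() returns false: {"nameServiceId" -> "ns1"}
    SortedMap<String, String> keys = record.getPrimaryKeys();
    // getExpirationMs() is -1: a disabled entry never expires on its own
    // and is only cleared by an explicit enableNameservice() call.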

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/DisabledNameservicePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/DisabledNameservicePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/DisabledNameservicePBImpl.java
new file mode 100644
index 0000000..d1c9770
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/DisabledNameservicePBImpl.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisabledNameserviceRecordProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisabledNameserviceRecordProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.DisabledNameserviceRecordProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.FederationProtocolPBTranslator;
+import org.apache.hadoop.hdfs.server.federation.store.records.DisabledNameservice;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the {@link DisabledNameservice} record.
+ */
+public class DisabledNameservicePBImpl extends DisabledNameservice
+    implements PBRecord {
+
+  private FederationProtocolPBTranslator<DisabledNameserviceRecordProto,
+      Builder, DisabledNameserviceRecordProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              DisabledNameserviceRecordProto.class);
+
+  public DisabledNameservicePBImpl() {
+  }
+
+  public DisabledNameservicePBImpl(
+      DisabledNameserviceRecordProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public DisabledNameserviceRecordProto getProto() {
+    return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public String getNameserviceId() {
+    return this.translator.getProtoOrBuilder().getNameServiceId();
+  }
+
+  @Override
+  public void setNameserviceId(String nameServiceId) {
+    this.translator.getBuilder().setNameServiceId(nameServiceId);
+  }
+
+  @Override
+  public void setDateModified(long time) {
+    this.translator.getBuilder().setDateModified(time);
+  }
+
+  @Override
+  public long getDateModified() {
+    return this.translator.getProtoOrBuilder().getDateModified();
+  }
+
+  @Override
+  public void setDateCreated(long time) {
+    this.translator.getBuilder().setDateCreated(time);
+  }
+
+  @Override
+  public long getDateCreated() {
+    return this.translator.getProtoOrBuilder().getDateCreated();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index ce8ffa1..b686737 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -32,14 +32,21 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.RouterClient;
 import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaUsage;
 import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
@@ -95,7 +102,9 @@ public class RouterAdmin extends Configured implements Tool {
         + "\t[-setQuota <path> -nsQuota <nsQuota> -ssQuota "
         + "<quota in bytes or quota size string>]\n"
         + "\t[-clrQuota <path>]\n"
-        + "\t[-safemode enter | leave | get]\n";
+        + "\t[-safemode enter | leave | get]\n"
+        + "\t[-nameservice enable | disable <nameservice>]\n"
+        + "\t[-getDisabledNameservices]\n";
 
     System.out.println(usage);
   }
@@ -143,6 +152,12 @@ public class RouterAdmin extends Configured implements Tool {
         printUsage();
         return exitCode;
       }
+    } else if ("-nameservice".equalsIgnoreCase(cmd)) {
+      if (argv.length < 3) {
+        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        printUsage();
+        return exitCode;
+      }
     }
 
     // Initialize RouterClient
@@ -190,6 +205,12 @@ public class RouterAdmin extends Configured implements Tool {
         }
       } else if ("-safemode".equals(cmd)) {
         manageSafeMode(argv[i]);
+      } else if ("-nameservice".equals(cmd)) {
+        String subcmd = argv[i];
+        String nsId = argv[i + 1];
+        manageNameservice(subcmd, nsId);
+      } else if ("-getDisabledNameservices".equals(cmd)) {
+        getDisabledNameservices();
       } else {
         printUsage();
         return exitCode;
@@ -612,6 +633,57 @@ public class RouterAdmin extends Configured implements Tool {
   }
 
   /**
+   * Manage the name service: enable or disable it.
+   * @param cmd Input subcommand, either "enable" or "disable".
+   * @param nsId Identifier of the name service to manage.
+   * @throws IOException If the request to the State Store fails.
+   */
+  private void manageNameservice(String cmd, String nsId) throws IOException {
+    if (cmd.equals("enable")) {
+      if (enableNameservice(nsId)) {
+        System.out.println("Successfully enabled nameservice " + nsId);
+      } else {
+        System.err.println("Cannot enable " + nsId);
+      }
+    } else if (cmd.equals("disable")) {
+      if (disableNameservice(nsId)) {
+        System.out.println("Successfully disabled nameservice " + nsId);
+      } else {
+        System.err.println("Cannot disable " + nsId);
+      }
+    } else {
+      throw new IllegalArgumentException("Unknown command: " + cmd);
+    }
+  }
+
+  private boolean disableNameservice(String nsId) throws IOException {
+    NameserviceManager nameserviceManager = client.getNameserviceManager();
+    DisableNameserviceResponse response =
+        nameserviceManager.disableNameservice(
+            DisableNameserviceRequest.newInstance(nsId));
+    return response.getStatus();
+  }
+
+  private boolean enableNameservice(String nsId) throws IOException {
+    NameserviceManager nameserviceManager = client.getNameserviceManager();
+    EnableNameserviceResponse response =
+        nameserviceManager.enableNameservice(
+            EnableNameserviceRequest.newInstance(nsId));
+    return response.getStatus();
+  }
+
+  private void getDisabledNameservices() throws IOException {
+    NameserviceManager nameserviceManager = client.getNameserviceManager();
+    GetDisabledNameservicesRequest request =
+        GetDisabledNameservicesRequest.newInstance();
+    GetDisabledNameservicesResponse response =
+        nameserviceManager.getDisabledNameservices(request);
+    System.out.println("List of disabled nameservices:");
+    for (String nsId : response.getNameservices()) {
+      System.out.println(nsId);
+    }
+  }
+
+  /**
    * Inner class that stores ACL info of mount table.
    */
   static class ACLEntity {
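
A usage sketch of the new admin subcommands, assuming the standard
"hdfs dfsrouteradmin" entry point; the nameservice ID "ns1" is
hypothetical (the output strings match the println calls above):

    $ hdfs dfsrouteradmin -nameservice disable ns1
    Successfully disabled nameservice ns1
    $ hdfs dfsrouteradmin -getDisabledNameservices
    List of disabled nameservices:
    ns1
    $ hdfs dfsrouteradmin -nameservice enable ns1
    Successfully enabled nameservice ns1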

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
index cd8df5d..2ea240c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
@@ -241,4 +241,37 @@ message GetSafeModeRequestProto {
 
 message GetSafeModeResponseProto {
   optional bool isInSafeMode = 1;
-}
\ No newline at end of file
+}
+
+/////////////////////////////////////////////////
+// Disabled Nameservice State
+/////////////////////////////////////////////////
+
+message DisabledNameserviceRecordProto {
+  optional uint64 dateCreated = 1;
+  optional uint64 dateModified = 2;
+  optional string nameServiceId = 3;
+}
+
+message DisableNameserviceRequestProto {
+  optional string nameServiceId = 1;
+}
+
+message DisableNameserviceResponseProto {
+  optional bool status = 1;
+}
+
+message EnableNameserviceRequestProto {
+  optional string nameServiceId = 1;
+}
+
+message EnableNameserviceResponseProto {
+  optional bool status = 1;
+}
+
+message GetDisabledNameservicesRequestProto {
+}
+
+message GetDisabledNameservicesResponseProto {
+  repeated string nameServiceIds = 1;
+}




[04/50] [abbrv] hadoop git commit: HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho and Ted Yu.

Posted by sh...@apache.org.
HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho and Ted Yu.

(cherry picked from commit 113af12cfb240ea9a7189bb2701693466eb8e993)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f055a53b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f055a53b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f055a53b

Branch: refs/heads/YARN-8200
Commit: f055a53b43322a8d80a4aa362ec2a71d446334e7
Parents: a48deb1
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Apr 12 17:47:37 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Apr 12 17:48:45 2018 +0900

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java          | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f055a53b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 199f459..0c69b6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -350,7 +350,7 @@ public class DFSck extends Configured implements Tool {
     BufferedReader input = new BufferedReader(new InputStreamReader(
                                               stream, "UTF-8"));
     String line = null;
-    String lastLine = null;
+    String lastLine = NamenodeFsck.CORRUPT_STATUS;
     int errCode = -1;
     try {
       while ((line = input.readLine()) != null) {
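
The failure mode being fixed: if fsck produces no output, the read loop
never assigns lastLine, and the status checks that run after the loop
would dereference null. A minimal sketch of the pattern (simplified,
not the full DFSck body):

    String lastLine = NamenodeFsck.CORRUPT_STATUS;  // safe default
    String line;
    while ((line = reader.readLine()) != null) {
      lastLine = line;  // remember the final status line
    }
    // With the default in place, an empty stream falls through to the
    // corrupt/error path instead of throwing a NullPointerException.
    int errCode = lastLine.endsWith(NamenodeFsck.HEALTHY_STATUS) ? 0 : 1;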




[41/50] [abbrv] hadoop git commit: MAPREDUCE-7042. Killed MR job data does not move to mapreduce.jobhistory.done-dir when ATS v2 is enabled. Contributed by Rohith Sharma K S.

Posted by sh...@apache.org.
MAPREDUCE-7042. Killed MR job data does not move to mapreduce.jobhistory.done-dir when ATS v2 is enabled. Contributed by Rohith Sharma K S.

(cherry picked from commit 83e60cd2db20f655e272958ef43b1b5a084ef3e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/600f4d40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/600f4d40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/600f4d40

Branch: refs/heads/YARN-8200
Commit: 600f4d402f1926c8dac678689844d0901504f803
Parents: a91d5c7
Author: Sunil G <su...@apache.org>
Authored: Thu Apr 26 19:07:02 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Fri Apr 27 12:00:00 2018 +0530

----------------------------------------------------------------------
 .../jobhistory/JobHistoryEventHandler.java      | 66 +++++++++++++++++---
 .../jobhistory/TestJobHistoryEventHandler.java  | 32 +++++++++-
 2 files changed, 89 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/600f4d40/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 4529d55..9d2b3be 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -119,7 +120,11 @@ public class JobHistoryEventHandler extends AbstractService
 
   protected BlockingQueue<JobHistoryEvent> eventQueue =
     new LinkedBlockingQueue<JobHistoryEvent>();
+
+  protected boolean handleTimelineEvent = false;
+  protected AsyncDispatcher atsEventDispatcher = null;
   protected Thread eventHandlingThread;
+
   private volatile boolean stopped;
   private final Object lock = new Object();
 
@@ -279,6 +284,7 @@ public class JobHistoryEventHandler extends AbstractService
               ((MRAppMaster.RunningAppContext) context).getTimelineClient();
           timelineClient.init(conf);
         }
+        handleTimelineEvent = true;
         LOG.info("Timeline service is enabled; version: " +
             YarnConfiguration.getTimelineServiceVersion(conf));
       } else {
@@ -302,10 +308,23 @@ public class JobHistoryEventHandler extends AbstractService
           "'json' or 'binary'.  Falling back to default value '" +
           JHAdminConfig.DEFAULT_MR_HS_JHIST_FORMAT + "'.");
     }
-
+    // initiate the atsEventDispatcher for timeline event
+    // if timeline service is enabled.
+    if (handleTimelineEvent) {
+      atsEventDispatcher = createDispatcher();
+      EventHandler<JobHistoryEvent> timelineEventHandler =
+          new ForwardingEventHandler();
+      atsEventDispatcher.register(EventType.class, timelineEventHandler);
+      atsEventDispatcher.setDrainEventsOnStop();
+      atsEventDispatcher.init(conf);
+    }
     super.serviceInit(conf);
   }
 
+  protected AsyncDispatcher createDispatcher() {
+    return new AsyncDispatcher("Job ATS Event Dispatcher");
+  }
+
   private void mkdir(FileSystem fs, Path path, FsPermission fsp)
       throws IOException {
     if (!fs.exists(path)) {
@@ -371,6 +390,10 @@ public class JobHistoryEventHandler extends AbstractService
         }
     }, "eventHandlingThread");
     eventHandlingThread.start();
+
+    if (handleTimelineEvent) {
+      atsEventDispatcher.start();
+    }
     super.serviceStart();
   }
 
@@ -453,6 +476,11 @@ public class JobHistoryEventHandler extends AbstractService
         LOG.info("Exception while closing file " + e.getMessage());
       }
     }
+
+    if (handleTimelineEvent && atsEventDispatcher != null) {
+      atsEventDispatcher.stop();
+    }
+
     if (timelineClient != null) {
       timelineClient.stop();
     } else if (timelineV2Client != null) {
@@ -572,6 +600,10 @@ public class JobHistoryEventHandler extends AbstractService
       }
 
       eventQueue.put(event);
+      // Process it for ATS (if enabled)
+      if (handleTimelineEvent) {
+        atsEventDispatcher.getEventHandler().handle(event);
+      }
     } catch (InterruptedException e) {
       throw new YarnRuntimeException(e);
     }
@@ -614,13 +646,6 @@ public class JobHistoryEventHandler extends AbstractService
         }
         processEventForJobSummary(event.getHistoryEvent(), mi.getJobSummary(),
             event.getJobID());
-        if (timelineV2Client != null) {
-          processEventForNewTimelineService(historyEvent, event.getJobID(),
-              event.getTimestamp());
-        } else if (timelineClient != null) {
-          processEventForTimelineServer(historyEvent, event.getJobID(),
-              event.getTimestamp());
-        }
         if (LOG.isDebugEnabled()) {
           LOG.debug("In HistoryEventHandler "
               + event.getHistoryEvent().getEventType());
@@ -702,6 +727,23 @@ public class JobHistoryEventHandler extends AbstractService
     }
   }
 
+  private void handleTimelineEvent(JobHistoryEvent event) {
+    HistoryEvent historyEvent = event.getHistoryEvent();
+    if (handleTimelineEvent) {
+      if (timelineV2Client != null) {
+        processEventForNewTimelineService(historyEvent, event.getJobID(),
+            event.getTimestamp());
+      } else if (timelineClient != null) {
+        processEventForTimelineServer(historyEvent, event.getJobID(),
+            event.getTimestamp());
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("In HistoryEventHandler, handle timelineEvent:"
+          + event.getHistoryEvent().getEventType());
+    }
+  }
+
   public void processEventForJobSummary(HistoryEvent event, JobSummary summary, 
       JobId jobId) {
     // context.getJob could be used for some of this info as well.
@@ -1708,4 +1750,12 @@ public class JobHistoryEventHandler extends AbstractService
   boolean getFlushTimerStatus() {
     return isTimerActive;
   }
+
+  private final class ForwardingEventHandler
+      implements EventHandler<JobHistoryEvent> {
+    @Override
+    public void handle(JobHistoryEvent event) {
+      handleTimelineEvent(event);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/600f4d40/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index 51ac2ce..3fecef7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -75,6 +75,8 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
@@ -586,6 +588,7 @@ public class TestJobHistoryEventHandler {
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
               t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1),
               currentTime - 10));
+      jheh.getDispatcher().await();
       TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
               null, null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -602,6 +605,7 @@ public class TestJobHistoryEventHandler {
               "user", 200, "/foo/job.xml",
               new HashMap<JobACL, AccessControlList>(), "default"),
               currentTime + 10));
+      jheh.getDispatcher().await();
       entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -620,6 +624,7 @@ public class TestJobHistoryEventHandler {
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
               new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"),
               currentTime - 20));
+      jheh.getDispatcher().await();
       entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -642,6 +647,7 @@ public class TestJobHistoryEventHandler {
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
               new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0,
               0, new Counters(), new Counters(), new Counters()), currentTime));
+      jheh.getDispatcher().await();
       entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -667,7 +673,9 @@ public class TestJobHistoryEventHandler {
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
             new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId),
-            0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));
+            0, 0, 0, JobStateInternal.KILLED.toString()),
+            currentTime + 20));
+      jheh.getDispatcher().await();
       entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -697,6 +705,7 @@ public class TestJobHistoryEventHandler {
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
             new TaskStartedEvent(t.taskID, 0, TaskType.MAP, "")));
+      jheh.getDispatcher().await();
       entities = ts.getEntities("MAPREDUCE_TASK", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -710,6 +719,7 @@ public class TestJobHistoryEventHandler {
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
             new TaskStartedEvent(t.taskID, 0, TaskType.REDUCE, "")));
+      jheh.getDispatcher().await();
       entities = ts.getEntities("MAPREDUCE_TASK", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -1027,6 +1037,7 @@ class JHEvenHandlerForTest extends JobHistoryEventHandler {
 
   private EventWriter eventWriter;
   private boolean mockHistoryProcessing = true;
+  private DrainDispatcher dispatcher;
   public JHEvenHandlerForTest(AppContext context, int startCount) {
     super(context, startCount);
     JobHistoryEventHandler.fileMap.clear();
@@ -1039,12 +1050,31 @@ class JHEvenHandlerForTest extends JobHistoryEventHandler {
   }
 
   @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+
+  }
+
+  @Override
   protected void serviceStart() {
     if (timelineClient != null) {
       timelineClient.start();
     } else if (timelineV2Client != null) {
       timelineV2Client.start();
     }
+    if (handleTimelineEvent) {
+      atsEventDispatcher.start();
+    }
+  }
+
+  @Override
+  protected AsyncDispatcher createDispatcher() {
+    dispatcher = new DrainDispatcher();
+    return dispatcher;
+  }
+
+  public DrainDispatcher getDispatcher() {
+    return dispatcher;
   }
 
   @Override


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[08/50] [abbrv] hadoop git commit: HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

Posted by sh...@apache.org.
HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

(cherry picked from commit 1a407bc9906306801690bc75ff0f0456f8f265fd)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31d061e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31d061e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31d061e4

Branch: refs/heads/YARN-8200
Commit: 31d061e4e844147501518f64ec41cf8867cc70bd
Parents: 7393020
Author: Erik Krogen <ek...@linkedin.com>
Authored: Thu Apr 12 23:27:51 2018 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Thu Apr 12 23:39:47 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java  | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------
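
A hedged sketch of the behavior the one-liner fixes (assumes the
hadoop-hdfs test jar on the classpath, and name directories left over from
an earlier formatted run): MiniDFSCluster.Builder formats storage by
default, so a REGULAR startup must opt out explicitly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

public class FormatOptionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    StartupOption dfsOpts = StartupOption.REGULAR; // hypothetical: no -format flag given
    MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
        .format(dfsOpts == StartupOption.FORMAT) // the fix: wipe name dirs only on -format
        .startupOption(dfsOpts)
        .build();
    dfs.shutdown();
  }
}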


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d061e4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index 96d0027..becc768 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -156,6 +156,7 @@ public class MiniHadoopClusterManager {
     if (!noDFS) {
       dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
           .nameNodeHttpPort(nnHttpPort).numDataNodes(numDataNodes)
+          .format(dfsOpts == StartupOption.FORMAT)
           .startupOption(dfsOpts).build();
       LOG.info("Started MiniDFSCluster -- namenode on port "
           + dfs.getNameNodePort());


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/50] [abbrv] hadoop git commit: YARN-8165. Incorrect queue name logging in AbstractContainerAllocator. Contributed by Weiwei Yang.

Posted by sh...@apache.org.
YARN-8165. Incorrect queue name logging in AbstractContainerAllocator. Contributed by Weiwei Yang.

(cherry picked from commit dd5e18c4aecba56f140c3cc11affc2cb5e61c79d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f37f680a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f37f680a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f37f680a

Branch: refs/heads/YARN-8200
Commit: f37f680a4e795daa7f40d3bb903cb7da1a619ead
Parents: b9dc0e5
Author: Inigo Goiri <in...@apache.org>
Authored: Mon Apr 16 16:52:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon Apr 16 17:07:38 2018 -0700

----------------------------------------------------------------------
 .../capacity/allocator/AbstractContainerAllocator.java      | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
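
A toy illustration of the logging bug, under the assumption that
AbstractContainerAllocator does not override toString(): concatenating the
allocator object itself prints a class name and identity hash where the
queue name was intended. All names here are hypothetical simplifications.

public class QueueNameLoggingSketch {
  static class Allocator {
    final String queueName = "default"; // stands in for appInfo.getQueueName()
  }

  public static void main(String[] args) {
    Allocator allocator = new Allocator();
    System.out.println("queue=" + allocator);           // e.g. queue=Allocator@6d06d69c (the bug)
    System.out.println("queue=" + allocator.queueName); // queue=default (the fix)
  }
}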


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f37f680a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
index 5809d86..d3ab01e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
@@ -97,7 +97,8 @@ public abstract class AbstractContainerAllocator {
         // This is a reserved container
         LOG.info("Reserved container " + " application="
             + application.getApplicationId() + " resource=" + allocatedResource
-            + " queue=" + this.toString() + " cluster=" + clusterResource);
+            + " queue=" + appInfo.getQueueName()
+            + " cluster=" + clusterResource);
         assignment.getAssignmentInformation().addReservationDetails(
             updatedContainer, application.getCSLeafQueue().getQueuePath());
         assignment.getAssignmentInformation().incrReservations();
@@ -125,9 +126,9 @@ public abstract class AbstractContainerAllocator {
         // Inform the ordering policy
         LOG.info("assignedContainer" + " application attempt=" + application
             .getApplicationAttemptId() + " container=" + updatedContainer
-            .getContainerId() + " queue=" + this + " clusterResource="
-            + clusterResource + " type=" + assignment.getType()
-            + " requestedPartition="
+            .getContainerId() + " queue=" + appInfo.getQueueName()
+            + " clusterResource=" + clusterResource
+            + " type=" + assignment.getType() + " requestedPartition="
             + updatedContainer.getNodeLabelExpression());
 
         assignment.getAssignmentInformation().addAllocationDetails(


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[36/50] [abbrv] hadoop git commit: HDFS-13492. Limit httpfs binds to certain IP addresses in branch-2. Contributed by Wei-Chiu Chuang.

Posted by sh...@apache.org.
HDFS-13492. Limit httpfs binds to certain IP addresses in branch-2. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5fc6382
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5fc6382
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5fc6382

Branch: refs/heads/YARN-8200
Commit: a5fc6382351150ec3fede697dbf03a4670264f16
Parents: 33ffc96
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Thu Apr 26 09:55:03 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Thu Apr 26 09:55:03 2018 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml | 3 ++-
 .../hadoop-hdfs-httpfs/src/main/tomcat/ssl-server.xml             | 3 ++-
 .../hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm        | 3 ++-
 3 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5fc6382/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml
index 67f2159..f160893 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml
@@ -69,7 +69,8 @@
          APR (HTTP/AJP) Connector: /docs/apr.html
          Define a non-SSL HTTP/1.1 Connector on port ${httpfs.http.port}
     -->
-    <Connector port="${httpfs.http.port}" protocol="HTTP/1.1"
+    <Connector address="${httpfs.http.hostname}"
+               port="${httpfs.http.port}" protocol="HTTP/1.1"
                connectionTimeout="20000"
                maxHttpHeaderSize="${httpfs.max.http.header.size}"
                redirectPort="8443"/>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5fc6382/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ssl-server.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ssl-server.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ssl-server.xml
index 408d4e3..a85ada9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ssl-server.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ssl-server.xml
@@ -68,7 +68,8 @@
          This connector uses the JSSE configuration, when using APR, the
          connector should be using the OpenSSL style configuration
          described in the APR documentation -->
-    <Connector port="${httpfs.http.port}" protocol="HTTP/1.1" SSLEnabled="true"
+    <Connector address="${httpfs.http.hostname}"
+               port="${httpfs.http.port}" protocol="HTTP/1.1" SSLEnabled="true"
                maxThreads="150" scheme="https" secure="true"
                maxHttpHeaderSize="${httpfs.max.http.header.size}"
                clientAuth="${httpfs.ssl.client.auth}"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5fc6382/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
index 35c3822..305342d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
@@ -72,13 +72,14 @@ Embedded Tomcat Configuration
 
 To configure the embedded Tomcat go to the `tomcat/conf`.
 
-HttpFS preconfigures the HTTP and Admin ports in Tomcat's `server.xml` to 14000 and 14001.
+HttpFS preconfigures the HTTP and Admin ports in Tomcat's `server.xml` to 14000 and 14001, and it binds to all IP addresses on the host.
 
 Tomcat logs are also preconfigured to go to HttpFS's `logs/` directory.
 
 HttpFS default value for the maxHttpHeaderSize parameter in Tomcat's `server.xml` is set to 65536 by default.
 
 The following environment variables (which can be set in HttpFS's `etc/hadoop/httpfs-env.sh` script) can be used to alter those values:
+* HTTPFS\_HTTP\_HOSTNAME
 
 * HTTPFS\_HTTP\_PORT
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[40/50] [abbrv] hadoop git commit: YARN-8215. ATS v2 returns invalid YARN_CONTAINER_ALLOCATED_HOST_HTTP_ADDRESS from NM. Contributed by Rohith Sharma K S.

Posted by sh...@apache.org.
YARN-8215. ATS v2 returns invalid YARN_CONTAINER_ALLOCATED_HOST_HTTP_ADDRESS from NM. Contributed by Rohith Sharma K S.

(cherry picked from commit 7fc09c4f7255645025ddf902dff70a17092ea039)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a91d5c7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a91d5c7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a91d5c7e

Branch: refs/heads/YARN-8200
Commit: a91d5c7e2c044391dc97a555c8d1269578f48410
Parents: ab2b429
Author: Sunil G <su...@apache.org>
Authored: Fri Apr 27 10:33:42 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Fri Apr 27 11:19:12 2018 +0530

----------------------------------------------------------------------
 .../timelineservice/NMTimelinePublisher.java         | 15 ++++++++++-----
 .../timelineservice/TestNMTimelinePublisher.java     |  1 -
 2 files changed, 10 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
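
A minimal sketch of the address derivation the patch moves into
serviceInit, using only the WebAppUtils call visible in the diff; the host
value is a hypothetical stand-in for context.getNodeId().getHost():

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

public class NmHttpAddressSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Typically "0.0.0.0:8042"; only the port is kept, because the web
    // server (and hence context.getHttpPort()) is not up yet at init time.
    String webApp = WebAppUtils.getNMWebAppURLWithoutScheme(conf);
    String httpPort = webApp.contains(":") ? webApp.split(":")[1] : "";
    String host = "nm-host.example.com"; // hypothetical NodeId host
    System.out.println(host + ":" + httpPort);
  }
}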


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a91d5c7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index 52895c4..94b7025 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -24,6 +24,8 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -79,6 +81,7 @@ public class NMTimelinePublisher extends CompositeService {
   private NodeId nodeId;
 
   private String httpAddress;
+  private String httpPort;
 
   private UserGroupInformation nmLoginUGI;
 
@@ -100,6 +103,12 @@ public class NMTimelinePublisher extends CompositeService {
         UserGroupInformation.getLoginUser() :
         UserGroupInformation.getCurrentUser();
     LOG.info("Initialized NMTimelinePublisher UGI to " + nmLoginUGI);
+
+    String webAppURLWithoutScheme =
+        WebAppUtils.getNMWebAppURLWithoutScheme(conf);
+    if (webAppURLWithoutScheme.contains(":")) {
+      httpPort = webAppURLWithoutScheme.split(":")[1];
+    }
     super.serviceInit(conf);
   }
 
@@ -109,6 +118,7 @@ public class NMTimelinePublisher extends CompositeService {
     // context will be updated after containerManagerImpl is started
     // hence NMMetricsPublisher is added subservice of containerManagerImpl
     this.nodeId = context.getNodeId();
+    this.httpAddress = nodeId.getHost() + ":" + httpPort;
   }
 
   @Override
@@ -330,11 +340,6 @@ public class NMTimelinePublisher extends CompositeService {
 
   public void publishContainerEvent(ContainerEvent event) {
     // publish only when the desired event is received
-    if (this.httpAddress == null) {
-      // update httpAddress for first time. When this service started,
-      // web server will not be started.
-      this.httpAddress = nodeId.getHost() + ":" + context.getHttpPort();
-    }
     switch (event.getType()) {
     case INIT_CONTAINER:
       publishContainerCreatedEvent(event);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a91d5c7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
index 50f9e8b..5e97158 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
@@ -59,7 +59,6 @@ public class TestNMTimelinePublisher {
     @SuppressWarnings("unchecked")
     final DummyTimelineClient timelineClient = new DummyTimelineClient(null);
     when(context.getNodeId()).thenReturn(NodeId.newInstance("localhost", 0));
-    when(context.getHttpPort()).thenReturn(0);
 
     Configuration conf = new Configuration();
     conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/50] [abbrv] hadoop git commit: Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun."

Posted by sh...@apache.org.
Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun."

This reverts commit 7e692425d538454abf69b07f6e8fd686a1171ac8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20472bdf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20472bdf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20472bdf

Branch: refs/heads/YARN-8200
Commit: 20472bdfd2e8f084e106a8c732498e1601018d0c
Parents: f055a53
Author: Inigo Goiri <in...@apache.org>
Authored: Thu Apr 12 08:44:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Thu Apr 12 08:44:17 2018 -0700

----------------------------------------------------------------------
 .../ha/RequestHedgingProxyProvider.java         |  3 --
 .../ha/TestRequestHedgingProxyProvider.java     | 34 --------------------
 2 files changed, 37 deletions(-)
----------------------------------------------------------------------
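
For context, a standalone toy of the hedging behavior this revert restores
(hypothetical stand-ins; the real provider wraps ClientProtocol proxies):
every invocation fans out to all configured namenodes again and the first
successful response wins, rather than sticking to the first proxy that
ever succeeded.

import java.util.Arrays;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class HedgingSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    Callable<String> slowNn = () -> { Thread.sleep(500); return "nn1"; };
    Callable<String> fastNn = () -> "nn2";
    // invokeAny returns the first successful result and cancels the rest.
    System.out.println(pool.invokeAny(Arrays.asList(slowNn, fastNn)));
    pool.shutdownNow();
  }
}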


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20472bdf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index f34adce..010e9e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -78,9 +78,6 @@ public class RequestHedgingProxyProvider<T> extends
     public Object
     invoke(Object proxy, final Method method, final Object[] args)
             throws Throwable {
-      if (currentUsedProxy != null) {
-        return method.invoke(currentUsedProxy.proxy, args);
-      }
       Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
       int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20472bdf/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index a8a5c6e..65fbbf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -42,13 +42,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -102,37 +99,6 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
-  public void testRequestNNAfterOneSuccess() throws Exception {
-    final AtomicInteger count = new AtomicInteger(0);
-    final ClientProtocol goodMock = mock(ClientProtocol.class);
-    when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
-      @Override
-      public long[] answer(InvocationOnMock invocation) throws Throwable {
-        count.incrementAndGet();
-        Thread.sleep(1000);
-        return new long[]{1};
-      }
-    });
-    final ClientProtocol badMock = mock(ClientProtocol.class);
-    when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
-      @Override
-      public long[] answer(InvocationOnMock invocation) throws Throwable {
-        count.incrementAndGet();
-        throw new IOException("Bad mock !!");
-      }
-    });
-
-    RequestHedgingProxyProvider<ClientProtocol> provider =
-        new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
-            createFactory(badMock, goodMock, goodMock, badMock));
-    ClientProtocol proxy = provider.getProxy().proxy;
-    proxy.getStats();
-    assertEquals(2, count.get());
-    proxy.getStats();
-    assertEquals(3, count.get());
-  }
-
-  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
     final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
     Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[13/50] [abbrv] hadoop git commit: HDFS-13311. RBF: TestRouterAdminCLI#testCreateInvalidEntry fails on Windows. Contributed by Inigo Goiri

Posted by sh...@apache.org.
HDFS-13311. RBF: TestRouterAdminCLI#testCreateInvalidEntry fails on Windows. Contributed by Inigo Goiri


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9dc0e59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9dc0e59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9dc0e59

Branch: refs/heads/YARN-8200
Commit: b9dc0e597ef02eaccb6e7ba0af3f943a21d2d325
Parents: 0731756
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Mon Apr 16 16:08:36 2018 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Mon Apr 16 16:08:36 2018 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/util/GenericOptionsParser.java    | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------
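
A standalone sketch of the guard this one-liner adds (a simplified rewrite
of the argument pre-processing loop in GenericOptionsParser, with
hypothetical class and method names):

import java.util.ArrayList;
import java.util.List;

public class NullArgGuardSketch {
  static List<String> preProcess(String[] args) {
    List<String> newArgs = new ArrayList<>(args.length);
    for (int i = 0; i < args.length; i++) {
      if (args[i] == null) {
        continue; // the fix: skip null slots instead of NPE'ing on args[i].equals(...)
      }
      newArgs.add(args[i]);
    }
    return newArgs;
  }

  public static void main(String[] args) {
    System.out.println(preProcess(new String[] {"-D", "a=b", null}));
  }
}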


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9dc0e59/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index 7b0a25c..a8a513d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -525,6 +525,9 @@ public class GenericOptionsParser {
     }
     List<String> newArgs = new ArrayList<String>(args.length);
     for (int i=0; i < args.length; i++) {
+      if (args[i] == null) {
+        continue;
+      }
       String prop = null;
       if (args[i].equals("-D")) {
         newArgs.add(args[i]);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[24/50] [abbrv] hadoop git commit: HDFS-13430. Fix TestEncryptionZonesWithKMS failure due to HADOOP-14445.

Posted by sh...@apache.org.
HDFS-13430. Fix TestEncryptionZonesWithKMS failure due to HADOOP-14445.

(cherry picked from commit 650359371175fba416331e73aa03d2a96ccb90e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2444d70a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2444d70a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2444d70a

Branch: refs/heads/YARN-8200
Commit: 2444d70a40e2d264cff03f80b1bf59a72318c510
Parents: 228869e
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Apr 13 09:04:52 2018 -0700
Committer: Rushabh Shah <sh...@apache.org>
Committed: Wed Apr 18 22:43:24 2018 -0500

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2444d70a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index c10d331..64f0864 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -110,6 +110,7 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
 import static org.junit.Assert.assertNotNull;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
@@ -191,6 +192,8 @@ public class TestEncryptionZones {
     // Lower the batch size for testing
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
         2);
+    // disable kms client copy legacy token logic because it's irrelevant.
+    conf.setBoolean(KMS_CLIENT_COPY_LEGACY_TOKEN_KEY, false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/50] [abbrv] hadoop git commit: YARN-8156. Increase the default value of yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan Hebri.

Posted by sh...@apache.org.
YARN-8156. Increase the default value of yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan Hebri.

(cherry picked from commit 669eb7bdea34f26e9b9b8a2260ae4356791622e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/900e6b43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/900e6b43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/900e6b43

Branch: refs/heads/YARN-8200
Commit: 900e6b435a32bb0726275faf86bb5b7382a5572e
Parents: cc2a2a8
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Sat Apr 14 10:31:28 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Sat Apr 14 11:15:30 2018 +0530

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java   | 2 +-
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml         | 2 +-
 .../collector/TestPerNodeTimelineCollectorsAuxService.java         | 2 ++
 .../hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm     | 2 +-
 4 files changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
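
A short sketch mirroring the test-side change: suites that depend on fast
collector teardown now pin the linger period back to one second, since the
cluster-wide default becomes 60 seconds.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LingerPeriodSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // The per-app collector now outlives the AM container by 60s by default;
    // tests shorten it so aux-service shutdown checks don't time out.
    conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS, 1000L);
    System.out.println(conf.getLong(
        YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS, 60000L));
  }
}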


http://git-wip-us.apache.org/repos/asf/hadoop/blob/900e6b43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b058e83..3f0c735 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2338,7 +2338,7 @@ public class YarnConfiguration extends Configuration {
   public static final String ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
       TIMELINE_SERVICE_PREFIX + "app-collector.linger-period.ms";
 
-  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS = 1000;
+  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS = 60000;
 
   public static final String NUMBER_OF_ASYNC_ENTITIES_TO_MERGE =
       TIMELINE_SERVICE_PREFIX

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900e6b43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 24279f5..3e5e5ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2380,7 +2380,7 @@
     <description>Time period till which the application collector will be alive
      in NM, after the  application master container finishes.</description>
     <name>yarn.timeline-service.app-collector.linger-period.ms</name>
-    <value>1000</value>
+    <value>60000</value>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900e6b43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
index 0320739..9d2bb24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
@@ -70,6 +70,8 @@ public class TestPerNodeTimelineCollectorsAuxService {
     conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
     conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
         FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+    conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,
+        1000L);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900e6b43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm
index f073f54..62fff1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm
@@ -138,7 +138,7 @@ New configuration parameters that are introduced with v.2 are marked bold.
 | `yarn.timeline-service.reader.bind-host` | The actual address the timeline reader will bind to. If this optional address is set, reader server will bind to this address and the port specified in yarn.timeline-service.reader.webapp.address. This is most useful for making the service listen on all interfaces by setting to 0.0.0.0. |
 | **`yarn.timeline-service.hbase.configuration.file`** | Optional URL to an hbase-site.xml configuration file to be used to connect to the timeline-service hbase cluster. If empty or not specified, then the HBase configuration will be loaded from the classpath. When specified the values in the specified configuration file will override those from the ones that are present on the classpath. Defaults to `null`. |
 | **`yarn.timeline-service.writer.flush-interval-seconds`** | The setting that controls how often the timeline collector flushes the timeline writer. Defaults to `60`. |
-| **`yarn.timeline-service.app-collector.linger-period.ms`** | Time period till which the application collector will be alive in NM, after the  application master container finishes. Defaults to `1000` (1 second). |
+| **`yarn.timeline-service.app-collector.linger-period.ms`** | Time period till which the application collector will be alive in NM, after the application master container finishes. Defaults to `60000` (60 seconds). |
 | **`yarn.timeline-service.timeline-client.number-of-async-entities-to-merge`** | Time line V2 client tries to merge these many number of async entities (if available) and then call the REST ATS V2 API to submit. Defaults to `10`. |
 | **`yarn.timeline-service.hbase.coprocessor.app-final-value-retention-milliseconds`** | The setting that controls how long the final value of a metric of a completed app is retained before merging into the flow sum. Defaults to `259200000` (3 days). This should be set in the HBase cluster. |
 | **`yarn.rm.system-metrics-publisher.emit-container-events`** | The setting that controls whether yarn container metrics is published to the timeline server or not by RM. This configuration setting is for ATS V2. Defaults to `false`. |


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[03/50] [abbrv] hadoop git commit: YARN-7527. Over-allocate node resource in async-scheduling mode of CapacityScheduler. Contributed by Tao Yang.

Posted by sh...@apache.org.
YARN-7527. Over-allocate node resource in async-scheduling mode of CapacityScheduler. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a48deb15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a48deb15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a48deb15

Branch: refs/heads/YARN-8200
Commit: a48deb1552c8b920316a630daf9748f821f741af
Parents: be627cc
Author: Weiwei Yang <ww...@apache.org>
Authored: Thu Apr 12 10:12:46 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Thu Apr 12 10:12:46 2018 +0800

----------------------------------------------------------------------
 .../scheduler/common/fica/FiCaSchedulerApp.java |  6 +-
 .../TestCapacitySchedulerAsyncScheduling.java   | 74 ++++++++++++++++++++
 2 files changed, 78 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
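
A standalone toy model of the race the fix closes (all names hypothetical;
the real check is commonCheckContainerAllocation inside
FiCaSchedulerApp#accept): a proposal must be re-validated against current
node headroom when committing, and a failed check must veto the proposal
instead of being ignored.

public class OverAllocationSketch {
  static class Node { long freeGb = 9; } // mirrors the 9 GB NM in the new test

  static boolean tryCommit(Node node, long askGb) {
    synchronized (node) {
      if (node.freeGb < askGb) {
        return false; // the fix: a failed allocation check now rejects the proposal
      }
      node.freeGb -= askGb; // deduct only after the check passes
      return true;
    }
  }

  public static void main(String[] args) {
    Node node = new Node();
    System.out.println(tryCommit(node, 5)); // true: 9 GB -> 4 GB free
    System.out.println(tryCommit(node, 5)); // false: previously this over-allocated
  }
}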


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48deb15/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 726f7e2..cab4dd9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -424,8 +424,10 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
 
           // Common part of check container allocation regardless if it is a
           // increase container or regular container
-          commonCheckContainerAllocation(cluster, allocation,
-              schedulerContainer);
+          if (!commonCheckContainerAllocation(cluster, allocation,
+              schedulerContainer)) {
+            return false;
+          }
         } else {
           // Container reserved first time will be NEW, after the container
           // accepted & confirmed, it will become RESERVED state

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48deb15/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index da06557..7e092e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -411,6 +411,80 @@ public class TestCapacitySchedulerAsyncScheduling {
     rm.stop();
   }
 
+  @Test (timeout = 30000)
+  public void testNodeResourceOverAllocated()
+      throws Exception {
+    // disable async-scheduling for simulating complex scene
+    Configuration disableAsyncConf = new Configuration(conf);
+    disableAsyncConf.setBoolean(
+        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false);
+
+    // init RM & NMs & Nodes
+    final MockRM rm = new MockRM(disableAsyncConf);
+    rm.start();
+    final MockNM nm1 = rm.registerNode("h1:1234", 9 * GB);
+    final MockNM nm2 = rm.registerNode("h2:1234", 9 * GB);
+    List<MockNM> nmLst = new ArrayList<>();
+    nmLst.add(nm1);
+    nmLst.add(nm2);
+
+    // init scheduler & nodes
+    while (
+        ((CapacityScheduler) rm.getRMContext().getScheduler()).getNodeTracker()
+            .nodeCount() < 2) {
+      Thread.sleep(10);
+    }
+    Assert.assertEquals(2,
+        ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+            .getNodeTracker().nodeCount());
+    CapacityScheduler scheduler =
+        (CapacityScheduler) rm.getRMContext().getScheduler();
+    SchedulerNode sn1 = scheduler.getSchedulerNode(nm1.getNodeId());
+
+    // launch app
+    RMApp app = rm.submitApp(200, "app", "user", null, false, "default",
+        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS, null, null, true, true);
+    MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1);
+    FiCaSchedulerApp schedulerApp =
+        scheduler.getApplicationAttempt(am.getApplicationAttemptId());
+    // allocate 2 containers and running on nm1
+    Resource containerResource = Resources.createResource(5 * GB);
+    am.allocate(Arrays.asList(ResourceRequest
+            .newInstance(Priority.newInstance(0), "*", containerResource, 2)),
+        null);
+
+    // generate over-allocated proposals for nm1
+    for (int containerNo = 2; containerNo <= 3; containerNo++) {
+      Container container = Container.newInstance(
+          ContainerId.newContainerId(am.getApplicationAttemptId(), containerNo),
+          sn1.getNodeID(), sn1.getHttpAddress(), containerResource,
+          Priority.newInstance(0), null);
+      RMContainer rmContainer = new RMContainerImpl(container,
+          SchedulerRequestKey.create(ResourceRequest
+              .newInstance(Priority.newInstance(0), "*", containerResource, 1)),
+          am.getApplicationAttemptId(), sn1.getNodeID(), "user",
+          rm.getRMContext());
+      SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> newContainer =
+          new SchedulerContainer<>(schedulerApp,
+              scheduler.getNode(sn1.getNodeID()), rmContainer, "", true);
+      ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode>
+          newContainerProposal =
+          new ContainerAllocationProposal<>(newContainer, null, null,
+              NodeType.OFF_SWITCH, NodeType.OFF_SWITCH,
+              SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, containerResource);
+      List<ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode>>
+          newProposals = new ArrayList<>();
+      newProposals.add(newContainerProposal);
+      ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request =
+          new ResourceCommitRequest<>(newProposals, null, null);
+      scheduler.tryCommit(scheduler.getClusterResource(), request);
+    }
+    // make sure node resource can't be over-allocated!
+    Assert.assertTrue("Node resource is Over-allocated!",
+        sn1.getUnallocatedResource().getMemorySize() > 0);
+    rm.stop();
+  }
+
   private void allocateAndLaunchContainers(MockAM am, MockNM nm, MockRM rm,
       int nContainer, Resource resource, int priority, int startContainerId)
       throws Exception {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/50] [abbrv] hadoop git commit: HDFS-13484. RBF: Disable Nameservices from the federation. Contributed by Inigo Goiri.

Posted by sh...@apache.org.
HDFS-13484. RBF: Disable Nameservices from the federation. Contributed by Inigo Goiri.

(cherry picked from commit 30fef0bf1e5c8c0ca073df99ad9b33cb0e4431a5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33ffc960
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33ffc960
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33ffc960

Branch: refs/heads/YARN-8200
Commit: 33ffc960854b299573d4b0f449c34e08c42ee885
Parents: 2b48854
Author: Yiqun Lin <yq...@apache.org>
Authored: Wed Apr 25 15:22:26 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Wed Apr 25 15:25:16 2018 +0800

----------------------------------------------------------------------
 .../federation/metrics/FederationMetrics.java   |   2 +-
 .../resolver/ActiveNamenodeResolver.java        |   8 +
 .../FederationNamenodeServiceState.java         |   3 +-
 .../resolver/MembershipNamenodeResolver.java    | 110 ++++++---
 .../federation/router/RouterAdminServer.java    |  50 +++-
 .../router/RouterPermissionChecker.java         |  59 ++++-
 .../federation/router/RouterRpcServer.java      |  11 +-
 .../src/site/markdown/HDFSRouterFederation.md   |  11 +
 .../server/federation/FederationTestUtils.java  |  42 ++++
 .../server/federation/MiniRouterDFSCluster.java |   8 +
 .../hdfs/server/federation/MockResolver.java    |   6 +
 .../router/TestDisableNameservices.java         | 236 +++++++++++++++++++
 .../federation/router/TestRouterAdmin.java      |  50 +++-
 .../federation/router/TestRouterAdminCLI.java   |  11 +
 .../src/site/markdown/HDFSCommands.md           |   4 +
 15 files changed, 570 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index 39e060f..7f2cba2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -686,7 +686,7 @@ public class FederationMetrics implements FederationMBean {
           namenodeResolver.getNamenodesForNameserviceId(nsId);
       if (nns != null) {
         FederationNamenodeContext nn = nns.get(0);
-        if (nn != null && nn instanceof MembershipState) {
+        if (nn instanceof MembershipState) {
           resultList.add((MembershipState) nn);
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
index 1773b34..f1a5329 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
@@ -109,6 +109,14 @@ public interface ActiveNamenodeResolver {
   Set<FederationNamespaceInfo> getNamespaces() throws IOException;
 
   /**
+   * Get the set of all nameservices that are disabled.
+   *
+   * @return Identifiers of the disabled nameservices in the federation.
+   * @throws IOException If the disabled list is not available.
+   */
+  Set<String> getDisabledNamespaces() throws IOException;
+
+  /**
    * Assign a unique identifier for the parent router service.
    * Required to report the status to the namenode resolver.
    *

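A minimal usage sketch of the new resolver method, for illustration only (not part of the commit); the helper name isDisabled is hypothetical, and any ActiveNamenodeResolver implementation is assumed:

    import java.io.IOException;
    import java.util.Set;
    import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;

    /** Hypothetical helper: true if the given nameservice is disabled. */
    static boolean isDisabled(ActiveNamenodeResolver resolver, String nsId)
        throws IOException {
      Set<String> disabled = resolver.getDisabledNamespaces();
      return disabled != null && disabled.contains(nsId);
    }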
http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
index c773f82..7907e30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java
@@ -27,7 +27,8 @@ public enum FederationNamenodeServiceState {
   ACTIVE, // HAServiceState.ACTIVE or operational.
   STANDBY, // HAServiceState.STANDBY.
   UNAVAILABLE, // When the namenode cannot be reached.
-  EXPIRED; // When the last update is too old.
+  EXPIRED, // When the last update is too old.
+  DISABLED; // When the nameservice is disabled.
 
   public static FederationNamenodeServiceState getState(HAServiceState state) {
     switch(state) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
index 98ddd22..0cdbdfd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
@@ -29,10 +29,13 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
+import org.apache.hadoop.hdfs.server.federation.store.RecordStore;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreCache;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException;
@@ -63,6 +66,8 @@ public class MembershipNamenodeResolver
   private final StateStoreService stateStore;
   /** Membership State Store interface. */
   private MembershipStore membershipInterface;
+  /** Disabled Nameservice State Store interface. */
+  private DisabledNameserviceStore disabledNameserviceInterface;
 
   /** Parent router ID. */
   private String routerId;
@@ -88,22 +93,38 @@ public class MembershipNamenodeResolver
 
   private synchronized MembershipStore getMembershipStore() throws IOException {
     if (this.membershipInterface == null) {
-      this.membershipInterface = this.stateStore.getRegisteredRecordStore(
-          MembershipStore.class);
-      if (this.membershipInterface == null) {
-        throw new IOException("State Store does not have an interface for " +
-            MembershipStore.class.getSimpleName());
-      }
+      this.membershipInterface = getStoreInterface(MembershipStore.class);
     }
     return this.membershipInterface;
   }
 
+  private synchronized DisabledNameserviceStore getDisabledNameserviceStore()
+      throws IOException {
+    if (this.disabledNameserviceInterface == null) {
+      this.disabledNameserviceInterface =
+          getStoreInterface(DisabledNameserviceStore.class);
+    }
+    return this.disabledNameserviceInterface;
+  }
+
+  private <T extends RecordStore<?>> T getStoreInterface(Class<T> clazz)
+      throws IOException {
+    T store = this.stateStore.getRegisteredRecordStore(clazz);
+    if (store == null) {
+      throw new IOException("State Store does not have an interface for " +
+          clazz.getSimpleName());
+    }
+    return store;
+  }
+
   @Override
   public boolean loadCache(boolean force) {
     // Our cache depends on the store, update it first
     try {
       MembershipStore membership = getMembershipStore();
       membership.loadCache(force);
+      DisabledNameserviceStore disabled = getDisabledNameserviceStore();
+      disabled.loadCache(force);
     } catch (IOException e) {
       LOG.error("Cannot update membership from the State Store", e);
     }
@@ -151,30 +172,48 @@ public class MembershipNamenodeResolver
       final String nsId) throws IOException {
 
     List<? extends FederationNamenodeContext> ret = cacheNS.get(nsId);
-    if (ret == null) {
-      try {
-        MembershipState partial = MembershipState.newInstance();
-        partial.setNameserviceId(nsId);
-        GetNamenodeRegistrationsRequest request =
-            GetNamenodeRegistrationsRequest.newInstance(partial);
+    if (ret != null) {
+      return ret;
+    }
 
-        final List<MembershipState> result =
-            getRecentRegistrationForQuery(request, true, false);
-        if (result == null || result.isEmpty()) {
-          LOG.error("Cannot locate eligible NNs for {}", nsId);
-          return null;
-        } else {
-          cacheNS.put(nsId, result);
-          ret = result;
-        }
-      } catch (StateStoreUnavailableException e) {
-        LOG.error("Cannot get active NN for {}, State Store unavailable", nsId);
-      }
+    // Not cached, generate the value
+    final List<MembershipState> result;
+    try {
+      MembershipState partial = MembershipState.newInstance();
+      partial.setNameserviceId(nsId);
+      GetNamenodeRegistrationsRequest request =
+          GetNamenodeRegistrationsRequest.newInstance(partial);
+      result = getRecentRegistrationForQuery(request, true, false);
+    } catch (StateStoreUnavailableException e) {
+      LOG.error("Cannot get active NN for {}, State Store unavailable", nsId);
+      return null;
     }
-    if (ret == null) {
+    if (result == null || result.isEmpty()) {
+      LOG.error("Cannot locate eligible NNs for {}", nsId);
       return null;
     }
-    return Collections.unmodifiableList(ret);
+
+    // Mark disabled name services
+    try {
+      Set<String> disabled =
+          getDisabledNameserviceStore().getDisabledNameservices();
+      if (disabled == null) {
+        LOG.error("Cannot get disabled name services");
+      } else {
+        for (MembershipState nn : result) {
+          if (disabled.contains(nn.getNameserviceId())) {
+            nn.setState(FederationNamenodeServiceState.DISABLED);
+          }
+        }
+      }
+    } catch (StateStoreUnavailableException e) {
+      LOG.error("Cannot get disabled name services, State Store unavailable");
+    }
+
+    // Cache the response
+    ret = Collections.unmodifiableList(result);
+    cacheNS.put(nsId, result);
+    return ret;
   }
 
   @Override
@@ -260,7 +299,24 @@ public class MembershipNamenodeResolver
     GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance();
     GetNamespaceInfoResponse response =
         getMembershipStore().getNamespaceInfo(request);
-    return response.getNamespaceInfo();
+    Set<FederationNamespaceInfo> nss = response.getNamespaceInfo();
+
+    // Filter disabled namespaces
+    Set<FederationNamespaceInfo> ret = new TreeSet<>();
+    Set<String> disabled = getDisabledNamespaces();
+    for (FederationNamespaceInfo ns : nss) {
+      if (!disabled.contains(ns.getNameserviceId())) {
+        ret.add(ns);
+      }
+    }
+
+    return ret;
+  }
+
+  @Override
+  public Set<String> getDisabledNamespaces() throws IOException {
+    DisabledNameserviceStore store = getDisabledNameserviceStore();
+    return store.getDisabledNameservices();
   }
 
   /**

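Since the resolver now marks registrations from disabled nameservices as DISABLED, callers can skip them explicitly. A sketch under the assumption that FederationNamenodeContext exposes its service state via getState(); the routing comment is illustrative, not part of the commit:

    // Skip namenodes whose nameservice has been disabled.
    for (FederationNamenodeContext nn :
        resolver.getNamenodesForNameserviceId(nsId)) {
      if (nn.getState() != FederationNamenodeServiceState.DISABLED) {
        // This namenode remains eligible to receive RPCs.
      }
    }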
http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index da67796..3da9a5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
@@ -282,28 +284,60 @@ public class RouterAdminServer extends AbstractService
   @Override
   public DisableNameserviceResponse disableNameservice(
       DisableNameserviceRequest request) throws IOException {
-    // TODO check permissions
+
+    RouterPermissionChecker pc = getPermissionChecker();
+    if (pc != null) {
+      pc.checkSuperuserPrivilege();
+    }
+
     String nsId = request.getNameServiceId();
-    // TODO check that the name service exists
-    boolean success = getDisabledNameserviceStore().disableNameservice(nsId);
+    boolean success = false;
+    if (namespaceExists(nsId)) {
+      success = getDisabledNameserviceStore().disableNameservice(nsId);
+    } else {
+      LOG.error("Cannot disable {}, it does not exists", nsId);
+    }
     return DisableNameserviceResponse.newInstance(success);
   }
 
+  private boolean namespaceExists(final String nsId) throws IOException {
+    boolean found = false;
+    ActiveNamenodeResolver resolver = router.getNamenodeResolver();
+    Set<FederationNamespaceInfo> nss = resolver.getNamespaces();
+    for (FederationNamespaceInfo ns : nss) {
+      if (nsId.equals(ns.getNameserviceId())) {
+        found = true;
+        break;
+      }
+    }
+    return found;
+  }
+
   @Override
   public EnableNameserviceResponse enableNameservice(
       EnableNameserviceRequest request) throws IOException {
-    // TODO check permissions
+    RouterPermissionChecker pc = getPermissionChecker();
+    if (pc != null) {
+      pc.checkSuperuserPrivilege();
+    }
+
     String nsId = request.getNameServiceId();
-    // TODO check that the name service exists
-    boolean success = getDisabledNameserviceStore().enableNameservice(nsId);
+    DisabledNameserviceStore store = getDisabledNameserviceStore();
+    Set<String> disabled = store.getDisabledNameservices();
+    boolean success = false;
+    if (disabled.contains(nsId)) {
+      success = store.enableNameservice(nsId);
+    } else {
+      LOG.error("Cannot enable {}, it was not disabled", nsId);
+    }
     return EnableNameserviceResponse.newInstance(success);
   }
 
   @Override
   public GetDisabledNameservicesResponse getDisabledNameservices(
       GetDisabledNameservicesRequest request) throws IOException {
-    // TODO check permissions
-    Set<String> nsIds = getDisabledNameserviceStore().getDisabledNameservices();
+    Set<String> nsIds =
+        getDisabledNameserviceStore().getDisabledNameservices();
     return GetDisabledNameservicesResponse.newInstance(nsIds);
   }
 

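The admin-side flow above can be exercised through the router admin client. A minimal sketch, assuming an already-connected RouterClient; the helper name disableNs is hypothetical, while the request/response classes appear in the tests further below:

    /** Hypothetical helper: disable one nameservice via the admin API. */
    static boolean disableNs(RouterClient client, String nsId)
        throws IOException {
      NameserviceManager nsManager = client.getNameserviceManager();
      DisableNameserviceRequest req =
          DisableNameserviceRequest.newInstance(nsId);
      // getStatus() is false when the nameservice does not exist; a
      // caller without superuser privilege gets an AccessControlException.
      return nsManager.disableNameservice(req).getStatus();
    }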
http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java
index 9d81dce..63d190c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java
@@ -17,12 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -35,9 +40,23 @@ public class RouterPermissionChecker extends FSPermissionChecker {
   /** Mount table default permission. */
   public static final short MOUNT_TABLE_PERMISSION_DEFAULT = 00755;
 
-  public RouterPermissionChecker(String routerOwner, String supergroup,
+  /** Name of the super user. */
+  private final String superUser;
+  /** Name of the super group. */
+  private final String superGroup;
+
+  public RouterPermissionChecker(String user, String group,
       UserGroupInformation callerUgi) {
-    super(routerOwner, supergroup, callerUgi, null);
+    super(user, group, callerUgi, null);
+    this.superUser = user;
+    this.superGroup = group;
+  }
+
+  public RouterPermissionChecker(String user, String group)
+      throws IOException {
+    super(user, group, UserGroupInformation.getCurrentUser(), null);
+    this.superUser = user;
+    this.superGroup = group;
   }
 
   /**
@@ -79,4 +98,40 @@ public class RouterPermissionChecker extends FSPermissionChecker {
             + ": user " + getUser() + " does not have " + access.toString()
             + " permissions.");
   }
+
+  /**
+   * Check the superuser privileges of the current RPC caller. This method is
+   * based on Datanode#checkSuperuserPrivilege().
+   * @throws AccessControlException If the user is not authorized.
+   */
+  @Override
+  public void checkSuperuserPrivilege() throws AccessControlException {
+
+    // Try to get the ugi in the RPC call.
+    UserGroupInformation ugi = null;
+    try {
+      ugi = NameNode.getRemoteUser();
+    } catch (IOException e) {
+      // Ignore as we catch it afterwards
+    }
+    if (ugi == null) {
+      LOG.error("Cannot get the remote user name");
+      throw new AccessControlException("Cannot get the remote user name");
+    }
+
+    // Is this by the Router user itself?
+    if (ugi.getUserName().equals(superUser)) {
+      return;
+    }
+
+    // Is the user a member of the super group?
+    List<String> groups = Arrays.asList(ugi.getGroupNames());
+    if (groups.contains(superGroup)) {
+      return;
+    }
+
+    // Not a superuser
+    throw new AccessControlException(
+        ugi.getUserName() + " is not a super user");
+  }
 }

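A short sketch of how the checker guards an admin operation: the caller passes if it is the Router user itself or a member of the supergroup. This assumes the two-argument constructor added above (which may throw IOException); user and group would come from the Router configuration:

    // Hypothetical guard, as used by the admin server above.
    RouterPermissionChecker pc = new RouterPermissionChecker(user, group);
    // Throws AccessControlException for any caller that is neither the
    // Router user nor a member of the supergroup.
    pc.checkSuperuserPrivilege();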
http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 2897823..b56ee5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -30,6 +30,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.Array;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -2138,7 +2139,15 @@ public class RouterRpcServer extends AbstractService
         }
       }
 
-      return location.getDestinations();
+      // Filter disabled subclusters
+      Set<String> disabled = namenodeResolver.getDisabledNamespaces();
+      List<RemoteLocation> locs = new ArrayList<>();
+      for (RemoteLocation loc : location.getDestinations()) {
+        if (!disabled.contains(loc.getNameserviceId())) {
+          locs.add(loc);
+        }
+      }
+      return locs;
     } catch (IOException ioe) {
       if (this.rpcMonitor != null) {
         this.rpcMonitor.routerFailureStateStore();

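The filtering step above, restated as a standalone helper for illustration only; the helper name filterDisabled is hypothetical:

    /** Hypothetical helper: drop destinations in disabled nameservices. */
    static List<RemoteLocation> filterDisabled(
        List<RemoteLocation> locations, Set<String> disabled) {
      List<RemoteLocation> locs = new ArrayList<>();
      for (RemoteLocation loc : locations) {
        if (!disabled.contains(loc.getNameserviceId())) {
          locs.add(loc);
        }
      }
      return locs;
    }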
http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index fdaaa11..43e89ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -229,6 +229,17 @@ Ls command will show below information for each mount table entry:
     Source                    Destinations              Owner                     Group                     Mode                      Quota/Usage
     /path                     ns0->/path                root                      supergroup                rwxr-xr-x                 [NsQuota: 50/0, SsQuota: 100 B/0 B]
 
+### Disabling nameservices
+
+To prevent accessing a nameservice (subcluster), it can be disabled from the federation.
+For example, one can disable `ns1`, list the disabled nameservices, and enable it again:
+
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -nameservice disable ns1
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -getDisabledNameservices
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -nameservice enable ns1
+
+This is useful when decommissioning subclusters or when one subcluster is misbehaving (e.g., low performance or unavailability).
+
 Client configuration
 --------------------
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
index b138e4d..ce320f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs.server.federation;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
 
 import java.io.BufferedReader;
 import java.io.FileNotFoundException;
@@ -49,9 +52,18 @@ import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
 import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.mockito.internal.util.reflection.Whitebox;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
 
@@ -60,6 +72,9 @@ import com.google.common.base.Supplier;
  */
 public final class FederationTestUtils {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FederationTestUtils.class);
+
   public final static String[] NAMESERVICES = {"ns0", "ns1"};
   public final static String[] NAMENODES = {"nn0", "nn1", "nn2", "nn3"};
   public final static String[] ROUTERS =
@@ -274,4 +289,31 @@ public final class FederationTestUtils {
       throws IOException {
     return fs.delete(new Path(path), true);
   }
+
+  /**
+   * Simulate that a Namenode is slow by adding a sleep to the check operation
+   * in the NN.
+   * @param nn Namenode to make slow.
+   * @param seconds Number of seconds of delay added to each check operation.
+   * @throws Exception If we cannot add the sleep time.
+   */
+  public static void simulateSlowNamenode(final NameNode nn, final int seconds)
+      throws Exception {
+    FSNamesystem namesystem = nn.getNamesystem();
+    HAContext haContext = namesystem.getHAContext();
+    HAContext spyHAContext = spy(haContext);
+    doAnswer(new Answer<Object>() {
+      @Override
+      public Object answer(InvocationOnMock invocation) throws Throwable {
+        LOG.info("Simulating slow namenode {}", invocation.getMock());
+        try {
+          Thread.sleep(seconds * 1000);
+        } catch (InterruptedException e) {
+          LOG.error("Simulating a slow namenode aborted");
+        }
+        return null;
+      }
+    }).when(spyHAContext).checkOperation(any(OperationCategory.class));
+    Whitebox.setInternalState(namesystem, "haContext", spyHAContext);
+  }
 }

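A minimal sketch of how a test would use the new utility, mirroring the setup in TestDisableNameservices further below:

    MiniDFSCluster dfsCluster = cluster.getCluster();
    NameNode nn0 = dfsCluster.getNameNode(0);
    // Every checkOperation() on nn0 now sleeps one second, so the
    // whole subcluster appears slow to the Router.
    FederationTestUtils.simulateSlowNamenode(nn0, 1);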
http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
index 0a4de33..e34713d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
@@ -185,6 +185,10 @@ public class MiniRouterDFSCluster {
       return this.fileContext;
     }
 
+    public URI getFileSystemURI() {
+      return fileSystemUri;
+    }
+
     public String getHttpAddress() {
       InetSocketAddress httpAddress = router.getHttpServerAddress();
       return NetUtils.getHostPortString(httpAddress);
@@ -236,6 +240,10 @@ public class MiniRouterDFSCluster {
       return adminClient;
     }
 
+    public void resetAdminClient() {
+      adminClient = null;
+    }
+
     public DFSClient getClient() throws IOException, URISyntaxException {
       if (client == null) {
         LOG.info("Connecting to router at {}", fileSystemUri);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index 0ce0944..36cce39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -27,6 +27,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
@@ -263,6 +264,11 @@ public class MockResolver
   }
 
   @Override
+  public Set<String> getDisabledNamespaces() throws IOException {
+    return new TreeSet<>();
+  }
+
+  @Override
   public PathLocation getDestinationForPath(String path) throws IOException {
     List<RemoteLocation> remoteLocations = new LinkedList<>();
     // We go from the leaves to the root

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java
new file mode 100644
index 0000000..15b104d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateSlowNamenode;
+import static org.apache.hadoop.util.Time.monotonicNow;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics;
+import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test the behavior when disabling name services.
+ */
+public class TestDisableNameservices {
+
+  private static StateStoreDFSCluster cluster;
+  private static RouterContext routerContext;
+  private static RouterClient routerAdminClient;
+  private static ClientProtocol routerProtocol;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    // Build and start a federated cluster
+    cluster = new StateStoreDFSCluster(false, 2);
+    Configuration routerConf = new RouterConfigBuilder()
+        .stateStore()
+        .metrics()
+        .admin()
+        .rpc()
+        .build();
+    // Reduce the number of RPC threads to saturate the Router easily
+    routerConf.setInt(RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY, 8);
+    routerConf.setInt(RBFConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE, 4);
+
+    // Set the DNs to belong to only one subcluster
+    cluster.setIndependentDNs();
+
+    cluster.addRouterOverrides(routerConf);
+    // override some settings for the client
+    cluster.startCluster();
+    cluster.startRouters();
+    cluster.waitClusterUp();
+
+    routerContext = cluster.getRandomRouter();
+    routerProtocol = routerContext.getClient().getNamenode();
+    routerAdminClient = routerContext.getAdminClient();
+
+    setupNamespace();
+
+    // Simulate one of the subclusters being slow
+    MiniDFSCluster dfsCluster = cluster.getCluster();
+    NameNode nn0 = dfsCluster.getNameNode(0);
+    simulateSlowNamenode(nn0, 1);
+  }
+
+  private static void setupNamespace() throws IOException {
+
+    // Setup a mount table to map to the two namespaces
+    MountTableManager mountTable = routerAdminClient.getMountTableManager();
+    Map<String, String> destinations = new TreeMap<>();
+    destinations.put("ns0", "/");
+    destinations.put("ns1", "/");
+    MountTable newEntry = MountTable.newInstance("/", destinations);
+    newEntry.setDestOrder(DestinationOrder.RANDOM);
+    AddMountTableEntryRequest request =
+        AddMountTableEntryRequest.newInstance(newEntry);
+    mountTable.addMountTableEntry(request);
+
+    // Refresh the cache in the Router
+    Router router = routerContext.getRouter();
+    MountTableResolver mountTableResolver =
+        (MountTableResolver) router.getSubclusterResolver();
+    mountTableResolver.loadCache(true);
+
+    // Add a folder to each namespace
+    NamenodeContext nn0 = cluster.getNamenode("ns0", null);
+    nn0.getFileSystem().mkdirs(new Path("/dirns0"));
+    NamenodeContext nn1 = cluster.getNamenode("ns1", null);
+    nn1.getFileSystem().mkdirs(new Path("/dirns1"));
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (cluster != null) {
+      cluster.stopRouter(routerContext);
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @After
+  public void cleanup() throws IOException {
+    Router router = routerContext.getRouter();
+    StateStoreService stateStore = router.getStateStore();
+    DisabledNameserviceStore store =
+        stateStore.getRegisteredRecordStore(DisabledNameserviceStore.class);
+    store.loadCache(true);
+
+    Set<String> disabled = store.getDisabledNameservices();
+    for (String nsId : disabled) {
+      store.enableNameservice(nsId);
+    }
+    store.loadCache(true);
+  }
+
+  @Test
+  public void testWithoutDisabling() throws IOException {
+
+    // ns0 is slow and renewLease should take a long time
+    long t0 = monotonicNow();
+    routerProtocol.renewLease("client0");
+    long t = monotonicNow() - t0;
+    assertTrue("It took too little: " + t + "ms",
+        t > TimeUnit.SECONDS.toMillis(1));
+
+    // Return the results from all subclusters even if slow
+    FileSystem routerFs = routerContext.getFileSystem();
+    FileStatus[] filesStatus = routerFs.listStatus(new Path("/"));
+    assertEquals(2, filesStatus.length);
+    assertEquals("dirns0", filesStatus[0].getPath().getName());
+    assertEquals("dirns1", filesStatus[1].getPath().getName());
+  }
+
+  @Test
+  public void testDisabling() throws Exception {
+
+    disableNameservice("ns0");
+
+    // renewLease should be fast as we are skipping ns0
+    long t0 = monotonicNow();
+    routerProtocol.renewLease("client0");
+    long t = monotonicNow() - t0;
+    assertTrue("It took too long: " + t + "ms",
+        t < TimeUnit.SECONDS.toMillis(1));
+
+    // We should not report anything from ns0
+    FileSystem routerFs = routerContext.getFileSystem();
+    FileStatus[] filesStatus = routerFs.listStatus(new Path("/"));
+    assertEquals(1, filesStatus.length);
+    assertEquals("dirns1", filesStatus[0].getPath().getName());
+  }
+
+  @Test
+  public void testMetrics() throws Exception {
+    disableNameservice("ns0");
+
+    int numActive = 0;
+    int numDisabled = 0;
+    Router router = routerContext.getRouter();
+    FederationMetrics metrics = router.getMetrics();
+    String jsonString = metrics.getNameservices();
+    JSONObject jsonObject = new JSONObject(jsonString);
+    Iterator<?> keys = jsonObject.keys();
+    while (keys.hasNext()) {
+      String key = (String) keys.next();
+      JSONObject json = jsonObject.getJSONObject(key);
+      String nsId = json.getString("nameserviceId");
+      String state = json.getString("state");
+      if (nsId.equals("ns0")) {
+        assertEquals("DISABLED", state);
+        numDisabled++;
+      } else {
+        assertEquals("ACTIVE", state);
+        numActive++;
+      }
+    }
+    assertEquals(1, numActive);
+    assertEquals(1, numDisabled);
+  }
+
+  private static void disableNameservice(final String nsId)
+      throws IOException {
+    NameserviceManager nsManager = routerAdminClient.getNameserviceManager();
+    DisableNameserviceRequest req =
+        DisableNameserviceRequest.newInstance(nsId);
+    nsManager.disableNameservice(req);
+
+    Router router = routerContext.getRouter();
+    StateStoreService stateStore = router.getStateStore();
+    DisabledNameserviceStore store =
+        stateStore.getRegisteredRecordStore(DisabledNameserviceStore.class);
+    store.loadCache(true);
+    MembershipNamenodeResolver resolver =
+        (MembershipNamenodeResolver) router.getNamenodeResolver();
+    resolver.loadCache(true);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
index 5e27173..769bfe7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
@@ -17,21 +17,27 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
@@ -52,6 +58,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableE
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -86,6 +93,14 @@ public class TestRouterAdmin {
     mockMountTable = cluster.generateMockMountTable();
     Router router = routerContext.getRouter();
     stateStore = router.getStateStore();
+
+    // Add two name services to test disabling them
+    ActiveNamenodeResolver membership = router.getNamenodeResolver();
+    membership.registerNamenode(
+        createNamenodeReport("ns0", "nn1", HAServiceState.ACTIVE));
+    membership.registerNamenode(
+        createNamenodeReport("ns1", "nn1", HAServiceState.ACTIVE));
+    stateStore.refreshCaches(true);
   }
 
   @AfterClass
@@ -97,6 +112,8 @@ public class TestRouterAdmin {
   public void testSetup() throws Exception {
     assertTrue(
         synchronizeRecords(stateStore, mockMountTable, MountTable.class));
+    // Avoid running with random users
+    routerContext.resetAdminClient();
   }
 
   @Test
@@ -375,6 +392,37 @@ public class TestRouterAdmin {
     assertTrue(enableResp.getStatus());
     disabled = getDisabledNameservices(nsManager);
     assertTrue(disabled.isEmpty());
+
+    // Non-existing name services should fail
+    disableReq = DisableNameserviceRequest.newInstance("nsunknown");
+    disableResp = nsManager.disableNameservice(disableReq);
+    assertFalse(disableResp.getStatus());
+  }
+
+  @Test
+  public void testNameserviceManagerUnauthorized() throws Exception {
+
+    // Try to disable a name service with a random user
+    final String username = "baduser";
+    UserGroupInformation user =
+        UserGroupInformation.createRemoteUser(username);
+    user.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        RouterClient client = routerContext.getAdminClient();
+        NameserviceManager nameservices = client.getNameserviceManager();
+        DisableNameserviceRequest disableReq =
+            DisableNameserviceRequest.newInstance("ns0");
+        try {
+          nameservices.disableNameservice(disableReq);
+          fail("We should not be able to disable nameservices");
+        } catch (IOException ioe) {
+          assertExceptionContains(
+              username + " is not a super user", ioe);
+        }
+        return null;
+      }
+    });
   }
 
   private Set<String> getDisabledNameservices(NameserviceManager nsManager)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index cd5edf0..1ff07ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -27,10 +28,12 @@ import java.net.InetSocketAddress;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
@@ -93,6 +96,14 @@ public class TestRouterAdminCLI {
         routerSocket);
     admin = new RouterAdmin(routerConf);
     client = routerContext.getAdminClient();
+
+    // Add two fake name services to test disabling them
+    ActiveNamenodeResolver membership = router.getNamenodeResolver();
+    membership.registerNamenode(
+        createNamenodeReport("ns0", "nn1", HAServiceState.ACTIVE));
+    membership.registerNamenode(
+        createNamenodeReport("ns1", "nn1", HAServiceState.ACTIVE));
+    stateStore.refreshCaches(true);
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33ffc960/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 0bada59..cb5d8a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -415,6 +415,8 @@ Usage:
           [-setQuota <path> -nsQuota <nsQuota> -ssQuota <quota in bytes or quota size string>]
           [-clrQuota <path>]
           [-safemode enter | leave | get]
+          [-nameservice disable | enable <nameservice>]
+          [-getDisabledNameservices]
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
@@ -424,6 +426,8 @@ Usage:
 | `-setQuota` *path* `-nsQuota` *nsQuota* `-ssQuota` *ssQuota* | Set quota for specified path. See [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html) for the quota detail. |
 | `-clrQuota` *path* | Clear quota of given mount point. See [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html) for the quota detail. |
 | `-safemode` `enter` `leave` `get` | Manually set the Router entering or leaving safe mode. The option *get* will be used for verifying if the Router is in safe mode state. |
| `-nameservice` `disable` `enable` *nameservice* | Disable/enable a name service from the federation. If disabled, requests will not go to that name service. |
+| `-getDisabledNameservices` | Get the name services that are disabled in the federation. |
 
 The commands for managing Router-based federation. See [Mount table management](./HDFSRouterFederation.html#Mount_table_management) for more info.
 




[28/50] [abbrv] hadoop git commit: HDFS-13478. RBF: Disabled Nameservice store API. Contributed by Inigo Goiri.

Posted by sh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
index a4e4d65..f3a2b6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
@@ -59,4 +59,19 @@ service RouterAdminProtocolService {
    * Verify if current Router state is safe mode state.
    */
   rpc getSafeMode(GetSafeModeRequestProto) returns (GetSafeModeResponseProto);
+
+  /**
+   * Disable a name service.
+   */
+  rpc disableNameservice(DisableNameserviceRequestProto) returns (DisableNameserviceResponseProto);
+
+  /**
+   * Enable a name service.
+   */
+  rpc enableNameservice(EnableNameserviceRequestProto) returns (EnableNameserviceResponseProto);
+
+  /**
+   * Get the list of disabled name services.
+   */
+  rpc getDisabledNameservices(GetDisabledNameservicesRequestProto) returns (GetDisabledNameservicesResponseProto);
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
index 10b71d7..5e27173 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
@@ -35,9 +36,16 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.impl.DisabledNameserviceStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
@@ -337,4 +345,45 @@ public class TestRouterAdmin {
         mountTable.getMountTableEntries(request);
     return response.getEntries();
   }
+
+  @Test
+  public void testNameserviceManager() throws IOException {
+
+    RouterClient client = routerContext.getAdminClient();
+    NameserviceManager nsManager = client.getNameserviceManager();
+
+    // There shouldn't be any disabled name services
+    Set<String> disabled = getDisabledNameservices(nsManager);
+    assertTrue(disabled.isEmpty());
+
+    // Disable one and see it
+    DisableNameserviceRequest disableReq =
+        DisableNameserviceRequest.newInstance("ns0");
+    DisableNameserviceResponse disableResp =
+        nsManager.disableNameservice(disableReq);
+    assertTrue(disableResp.getStatus());
+    // Refresh the cache
+    disabled = getDisabledNameservices(nsManager);
+    assertEquals(1, disabled.size());
+    assertTrue(disabled.contains("ns0"));
+
+    // Enable one and we should have no disabled name services
+    EnableNameserviceRequest enableReq =
+        EnableNameserviceRequest.newInstance("ns0");
+    EnableNameserviceResponse enableResp =
+        nsManager.enableNameservice(enableReq);
+    assertTrue(enableResp.getStatus());
+    disabled = getDisabledNameservices(nsManager);
+    assertTrue(disabled.isEmpty());
+  }
+
+  private Set<String> getDisabledNameservices(NameserviceManager nsManager)
+      throws IOException {
+    stateStore.loadCache(DisabledNameserviceStoreImpl.class, true);
+    GetDisabledNameservicesRequest getReq =
+        GetDisabledNameservicesRequest.newInstance();
+    GetDisabledNameservicesResponse response =
+        nsManager.getDisabledNameservices(getReq);
+    return response.getNameservices();
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index b36e434..cd5edf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -22,20 +22,20 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
-
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.impl.DisabledNameserviceStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
@@ -65,7 +65,9 @@ public class TestRouterAdminCLI {
   private static final String TEST_USER = "test-user";
 
   private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
   private static final PrintStream OLD_OUT = System.out;
+  private static final PrintStream OLD_ERR = System.err;
 
   @BeforeClass
   public static void globalSetUp() throws Exception {
@@ -102,8 +104,9 @@ public class TestRouterAdminCLI {
 
   @After
   public void tearDown() {
-    // set back system out
+    // set back system out/err
     System.setOut(OLD_OUT);
+    System.setErr(OLD_ERR);
   }
 
   @Test
@@ -462,6 +465,45 @@ public class TestRouterAdminCLI {
     assertEquals(-1, ToolRunner.run(admin, argv));
   }
 
+  @Test
+  public void testNameserviceManager() throws Exception {
+    // Disable a name service and check if it's disabled
+    assertEquals(0, ToolRunner.run(admin,
+        new String[] {"-nameservice", "disable", "ns0"}));
+
+    stateStore.loadCache(DisabledNameserviceStoreImpl.class, true);
+    System.setOut(new PrintStream(out));
+    assertEquals(0, ToolRunner.run(admin,
+        new String[] {"-getDisabledNameservices"}));
+    assertTrue("ns0 should be reported: " + out,
+        out.toString().contains("ns0"));
+
+    // Enable the name service and check that it is no longer disabled
+    assertEquals(0, ToolRunner.run(admin,
+        new String[] {"-nameservice", "enable", "ns0"}));
+
+    out.reset();
+    stateStore.loadCache(DisabledNameserviceStoreImpl.class, true);
+    assertEquals(0, ToolRunner.run(admin,
+        new String[] {"-getDisabledNameservices"}));
+    assertFalse("ns0 should not be reported: " + out,
+        out.toString().contains("ns0"));
+
+    // Wrong commands
+    System.setErr(new PrintStream(err));
+    assertEquals(-1, ToolRunner.run(admin,
+        new String[] {"-nameservice", "enable"}));
+    String msg = "Not enough parameters specificed for cmd -nameservice";
+    assertTrue("Got error: " + err.toString(),
+        err.toString().startsWith(msg));
+
+    err.reset();
+    assertEquals(-1, ToolRunner.run(admin,
+        new String[] {"-nameservice", "wrong", "ns0"}));
+    assertTrue("Got error: " + err.toString(),
+        err.toString().startsWith("nameservice: Unknown command: wrong"));
+  }
+
   /**
    * Wait for the Router transforming to expected state.
    * @param expectedState Expected Router state.
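
The err stream handling added above follows the same capture-and-restore pattern already used for stdout. A self-contained JUnit 4 sketch of the pattern, assuming nothing beyond junit itself (the test class here is hypothetical, not part of the patch):

    import static org.junit.Assert.assertTrue;

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class TestStreamCapture {
      private final ByteArrayOutputStream out = new ByteArrayOutputStream();
      private final ByteArrayOutputStream err = new ByteArrayOutputStream();
      private static final PrintStream OLD_OUT = System.out;
      private static final PrintStream OLD_ERR = System.err;

      @Before
      public void setUp() {
        // Redirect both streams so assertions can inspect what a CLI printed.
        System.setOut(new PrintStream(out));
        System.setErr(new PrintStream(err));
      }

      @After
      public void tearDown() {
        // Restore the real streams so later tests are unaffected.
        System.setOut(OLD_OUT);
        System.setErr(OLD_ERR);
      }

      @Test
      public void testCapture() {
        System.out.println("normal output");
        System.err.println("error output");
        assertTrue(out.toString().contains("normal output"));
        assertTrue(err.toString().contains("error output"));
      }
    }

Calling out.reset() and err.reset() between commands, as the test above does, keeps each assertion scoped to a single invocation.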

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9752503/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
index fd29e37..d6c829b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterServiceState;
 import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
+import org.apache.hadoop.hdfs.server.federation.store.records.DisabledNameservice;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.federation.store.records.Query;
@@ -139,6 +140,11 @@ public class TestStateStoreDriverBase {
       StateStoreVersion version = generateFakeRecord(StateStoreVersion.class);
       routerState.setStateStoreVersion(version);
       return (T) routerState;
+    } else if (recordClass == DisabledNameservice.class) {
+      return (T) DisabledNameservice.newInstance(generateRandomString());
+    } else if (recordClass == StateStoreVersion.class) {
+      return (T) StateStoreVersion.newInstance(
+          generateRandomLong(), generateRandomLong());
     }
 
     return null;
@@ -186,6 +192,8 @@ public class TestStateStoreDriverBase {
   public static void removeAll(StateStoreDriver driver) throws IOException {
     driver.removeAll(MembershipState.class);
     driver.removeAll(MountTable.class);
+    driver.removeAll(RouterState.class);
+    driver.removeAll(DisabledNameservice.class);
   }
 
   public <T extends BaseRecord> void testInsert(
@@ -290,7 +298,7 @@ public class TestStateStoreDriverBase {
 
     // Verify no update occurred, all original records are unchanged
     QueryResult<T> newRecords = driver.get(clazz);
-    assertTrue(newRecords.getRecords().size() == 10);
+    assertEquals(10, newRecords.getRecords().size());
     assertEquals("A single entry was improperly updated in the store", 10,
         countMatchingEntries(records.getRecords(), newRecords.getRecords()));
 
@@ -300,9 +308,12 @@ public class TestStateStoreDriverBase {
     // Verify that one entry no longer matches the original set
     newRecords = driver.get(clazz);
     assertEquals(10, newRecords.getRecords().size());
-    assertEquals(
-        "Record of type " + clazz + " not updated in the store", 9,
-        countMatchingEntries(records.getRecords(), newRecords.getRecords()));
+    T record = records.getRecords().get(0);
+    if (record.hasOtherFields()) {
+      assertEquals(
+          "Record of type " + clazz + " not updated in the store", 9,
+          countMatchingEntries(records.getRecords(), newRecords.getRecords()));
+    }
   }
 
   private int countMatchingEntries(
@@ -379,6 +390,8 @@ public class TestStateStoreDriverBase {
       throws IllegalArgumentException, IllegalAccessException, IOException {
     testInsert(driver, MembershipState.class);
     testInsert(driver, MountTable.class);
+    testInsert(driver, RouterState.class);
+    testInsert(driver, DisabledNameservice.class);
   }
 
   public void testPut(StateStoreDriver driver)
@@ -386,18 +399,24 @@ public class TestStateStoreDriverBase {
       IOException, SecurityException {
     testPut(driver, MembershipState.class);
     testPut(driver, MountTable.class);
+    testPut(driver, RouterState.class);
+    testPut(driver, DisabledNameservice.class);
   }
 
   public void testRemove(StateStoreDriver driver)
       throws IllegalArgumentException, IllegalAccessException, IOException {
     testRemove(driver, MembershipState.class);
     testRemove(driver, MountTable.class);
+    testRemove(driver, RouterState.class);
+    testRemove(driver, DisabledNameservice.class);
   }
 
   public void testFetchErrors(StateStoreDriver driver)
       throws IllegalArgumentException, IllegalAccessException, IOException {
     testFetchErrors(driver, MembershipState.class);
     testFetchErrors(driver, MountTable.class);
+    testFetchErrors(driver, RouterState.class);
+    testFetchErrors(driver, DisabledNameservice.class);
   }
 
   public void testMetrics(StateStoreDriver driver)
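
The hasOtherFields() guard added to testPut reflects a property of the new record types: a DisabledNameservice carries no state beyond its key, so "modifying" one yields an identical record and the matching-entry count cannot drop to 9. A hedged sketch of the distinction, using illustrative stand-ins rather than the Hadoop record classes:

    // A record whose primary key is its entire state: an update cannot
    // make it distinguishable from the original, so update-detection
    // assertions are skipped for records like this.
    final class KeyOnlyRecord {
      final String nameserviceId;
      KeyOnlyRecord(String nameserviceId) {
        this.nameserviceId = nameserviceId;
      }
      boolean hasOtherFields() { return false; }
    }

    // A record with mutable non-key state: updating 'address' yields a
    // record that no longer matches the stored original, so the test can
    // assert that exactly one entry changed.
    final class KeyValueRecord {
      final String routerId;
      String address;
      KeyValueRecord(String routerId, String address) {
        this.routerId = routerId;
        this.address = address;
      }
      boolean hasOtherFields() { return true; }
    }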




[02/50] [abbrv] hadoop git commit: HDFS-13427. Fix the section titles of transparent encryption document.

Posted by sh...@apache.org.
HDFS-13427. Fix the section titles of transparent encryption document.

(cherry picked from commit c7cd362afd21add324c3a82c594b133d41cf8d03)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be627ccf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be627ccf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be627ccf

Branch: refs/heads/YARN-8200
Commit: be627ccfe373cb5dc4e8bb2a17495ef898caaee1
Parents: 724bffd
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Apr 12 10:44:56 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Apr 12 10:49:07 2018 +0900

----------------------------------------------------------------------
 .../src/site/markdown/TransparentEncryption.md  | 53 ++++++++++----------
 1 file changed, 27 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be627ccf/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
index 3f9fbf0..70c24bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
@@ -17,12 +17,12 @@ Transparent Encryption in HDFS
 
 <!-- MACRO{toc|fromDepth=0|toDepth=2} -->
 
-<a name="Overview"></a>Overview
+Overview
 --------
 
 HDFS implements *transparent*, *end-to-end* encryption. Once configured, data read from and written to special HDFS directories is *transparently* encrypted and decrypted without requiring changes to user application code. This encryption is also *end-to-end*, which means the data can only be encrypted and decrypted by the client. HDFS never stores or has access to unencrypted data or unencrypted data encryption keys. This satisfies two typical requirements for encryption: *at-rest encryption* (meaning data on persistent media, such as a disk) as well as *in-transit encryption* (e.g. when data is travelling over the network).
 
-<a name="Background"></a>Background
+Background
 ----------
 
 Encryption can be done at different layers in a traditional data management software/hardware stack. Choosing to encrypt at a given layer comes with different advantages and disadvantages.
@@ -39,17 +39,17 @@ HDFS-level encryption fits between database-level and filesystem-level encryptio
 
 HDFS-level encryption also prevents attacks at the filesystem-level and below (so-called "OS-level attacks"). The operating system and disk only interact with encrypted bytes, since the data is already encrypted by HDFS.
 
-<a name="Use_Cases"></a>Use Cases
+Use Cases
 ---------
 
 Data encryption is required by a number of different government, financial, and regulatory entities. For example, the health-care industry has HIPAA regulations, the card payment industry has PCI DSS regulations, and the US government has FISMA regulations. Having transparent encryption built into HDFS makes it easier for organizations to comply with these regulations.
 
 Encryption can also be performed at the application-level, but by integrating it into HDFS, existing applications can operate on encrypted data without changes. This integrated architecture implies stronger encrypted file semantics and better coordination with other HDFS functions.
 
-<a name="Architecture"></a>Architecture
+Architecture
 ------------
 
-### <a name="Architecture_overview"></a>Overview
+### Overview
 
 For transparent encryption, we introduce a new abstraction to HDFS: the *encryption zone*. An encryption zone is a special directory whose contents will be transparently encrypted upon write and transparently decrypted upon read. Each encryption zone is associated with a single *encryption zone key* which is specified when the zone is created. Each file within an encryption zone has its own unique *data encryption key (DEK)*. DEKs are never handled directly by HDFS. Instead, HDFS only ever handles an *encrypted data encryption key (EDEK)*. Clients decrypt an EDEK, and then use the subsequent DEK to read and write data. HDFS datanodes simply see a stream of encrypted bytes.
 
@@ -65,7 +65,7 @@ A new cluster service is required to manage encryption keys: the Hadoop Key Mana
 
 The KMS will be described in more detail below.
 
-### <a name="Accessing_data_within_an_encryption_zone"></a>Accessing data within an encryption zone
+### Accessing data within an encryption zone
 
 When creating a new file in an encryption zone, the NameNode asks the KMS to generate a new EDEK encrypted with the encryption zone's key. The EDEK is then stored persistently as part of the file's metadata on the NameNode.
 
@@ -75,7 +75,7 @@ All of the above steps for the read and write path happen automatically through
 
 Access to encrypted file data and metadata is controlled by normal HDFS filesystem permissions. This means that if HDFS is compromised (for example, by gaining unauthorized access to an HDFS superuser account), a malicious user only gains access to ciphertext and encrypted keys. However, since access to encryption zone keys is controlled by a separate set of permissions on the KMS and key store, this does not pose a security threat.
 
-### <a name="Key_Management_Server_KeyProvider_EDEKs"></a>Key Management Server, KeyProvider, EDEKs
+### Key Management Server, KeyProvider, EDEKs
 
 The KMS is a proxy that interfaces with a backing key store on behalf of HDFS daemons and clients. Both the backing key store and the KMS implement the Hadoop KeyProvider API. See the [KMS documentation](../../hadoop-kms/index.html) for more information.
 
@@ -85,21 +85,21 @@ The KMS implements additional functionality which enables creation and decryptio
 
 In the context of HDFS encryption, EEKs are *encrypted data encryption keys (EDEKs)*, where a *data encryption key (DEK)* is what is used to encrypt and decrypt file data. Typically, the key store is configured to only allow end users access to the keys used to encrypt DEKs. This means that EDEKs can be safely stored and handled by HDFS, since the HDFS user will not have access to unencrypted encryption keys.
 
-<a name="Configuration"></a>Configuration
+Configuration
 -------------
 
 A necessary prerequisite is an instance of the KMS, as well as a backing key store for the KMS. See the [KMS documentation](../../hadoop-kms/index.html) for more information.
 
 Once a KMS has been set up and the NameNode and HDFS clients have been correctly configured, an admin can use the `hadoop key` and `hdfs crypto` command-line tools to create encryption keys and set up new encryption zones. Existing data can be encrypted by copying it into the new encryption zones using tools like distcp.
 
-### <a name="Configuring_the_cluster_KeyProvider"></a>Configuring the cluster KeyProvider
+### Configuring the cluster KeyProvider
 
 #### hadoop.security.key.provider.path
 
 The KeyProvider to use when interacting with encryption keys used when reading and writing to an encryption zone.
 HDFS clients will use the provider path returned from the NameNode via getServerDefaults. If the NameNode does not support returning the key provider URI, the client's configuration will be used.
 
-### <a name="Selecting_an_encryption_algorithm_and_codec"></a>Selecting an encryption algorithm and codec
+### Selecting an encryption algorithm and codec
 
 #### hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE
 
@@ -129,7 +129,7 @@ Default: `8192`
 
 The buffer size used by CryptoInputStream and CryptoOutputStream.
 
-### <a name="Namenode_configuration"></a>Namenode configuration
+### Namenode configuration
 
 #### dfs.namenode.list.encryption.zones.num.responses
 
@@ -137,10 +137,10 @@ Default: `100`
 
 When listing encryption zones, the maximum number of zones that will be returned in a batch. Fetching the list incrementally in batches improves namenode performance.
 
-<a name="crypto_command-line_interface"></a>`crypto` command-line interface
+`crypto` command-line interface
 -------------------------------
 
-### <a name="createZone"></a>createZone
+### createZone
 
 Usage: `[-createZone -keyName <keyName> -path <path>]`
 
@@ -151,13 +151,13 @@ Create a new encryption zone.
 | *path* | The path of the encryption zone to create. It must be an empty directory. A trash directory is provisioned under this path.|
 | *keyName* | Name of the key to use for the encryption zone. Uppercase key names are unsupported. |
 
-### <a name="listZones"></a>listZones
+### listZones
 
 Usage: `[-listZones]`
 
 List all encryption zones. Requires superuser permissions.
 
-### <a name="provisionTrash"></a>provisionTrash
+### provisionTrash
 
 Usage: `[-provisionTrash -path <path>]`
 
@@ -167,7 +167,7 @@ Provision a trash directory for an encryption zone.
 |:---- |:---- |
 | *path* | The path to the root of the encryption zone. |
 
-### <a name="getFileEncryptionInfo"></a>getFileEncryptionInfo
+### getFileEncryptionInfo
 
 Usage: `[-getFileEncryptionInfo -path <path>]`
 
@@ -177,7 +177,7 @@ Get encryption information from a file. This can be used to find out whether a f
 |:---- |:---- |
 | *path* | The path of the file to get encryption information. |
 
-<a name="Example_usage"></a>Example usage
+Example usage
 -------------
 
 These instructions assume that you are running as the normal user or HDFS superuser as is appropriate. Use `sudo` as needed for your environment.
@@ -200,10 +200,10 @@ These instructions assume that you are running as the normal user or HDFS superu
     hdfs crypto -getFileEncryptionInfo -path /zone/helloWorld
     # console output: {cipherSuite: {name: AES/CTR/NoPadding, algorithmBlockSize: 16}, cryptoProtocolVersion: CryptoProtocolVersion{description='Encryption zones', version=1, unknownValue=null}, edek: 2010d301afbd43b58f10737ce4e93b39, iv: ade2293db2bab1a2e337f91361304cb3, keyName: mykey, ezKeyVersionName: mykey@0}
 
-<a name="Distcp_considerations"></a>Distcp considerations
+Distcp considerations
 ---------------------
 
-### <a name="Running_as_the_superuser"></a>Running as the superuser
+### Running as the superuser
 
 One common use case for distcp is to replicate data between clusters for backup and disaster recovery purposes. This is typically performed by the cluster administrator, who is an HDFS superuser.
 
@@ -211,11 +211,11 @@ To enable this same workflow when using HDFS encryption, we introduced a new vir
 
 When using `/.reserved/raw` to distcp encrypted data, it's important to preserve extended attributes with the [-px](../../hadoop-distcp/DistCp.html#Command_Line_Options) flag. This is because encrypted file attributes (such as the EDEK) are exposed through extended attributes within `/.reserved/raw`, and must be preserved to be able to decrypt the file. This means that if the distcp is initiated at or above the encryption zone root, it will automatically create an encryption zone at the destination if it does not already exist. However, it's still recommended that the admin first create identical encryption zones on the destination cluster to avoid any potential mishaps.
 
-### <a name="Copying_into_encrypted_locations"></a>Copying into encrypted locations
+### Copying into encrypted locations
 
 By default, distcp compares checksums provided by the filesystem to verify that the data was successfully copied to the destination. When copying from an unencrypted or encrypted location into an encrypted location, the filesystem checksums will not match, since the underlying block data differs: a new EDEK is used to encrypt at the destination. In this case, specify the [-skipcrccheck](../../hadoop-distcp/DistCp.html#Command_Line_Options) and [-update](../../hadoop-distcp/DistCp.html#Command_Line_Options) distcp flags to avoid verifying checksums.
 
-<a name="Rename_and_Trash_considerations"></a>Rename and Trash considerations
+Rename and Trash considerations
 ---------------------
 
 HDFS restricts file and directory renames across encryption zone boundaries. This includes renaming an encrypted file / directory into an unencrypted directory (e.g., `hdfs dfs -mv /zone/encryptedFile /home/bob`), renaming an unencrypted file or directory into an encryption zone (e.g., `hdfs dfs -mv /home/bob/unEncryptedFile /zone`), and renaming between two different encryption zones (e.g., `hdfs dfs -mv /home/alice/zone1/foo /home/alice/zone2`). In these examples, `/zone`, `/home/alice/zone1`, and `/home/alice/zone2` are encryption zones, while `/home/bob` is not. A rename is only allowed if the source and destination paths are in the same encryption zone, or both paths are unencrypted (not in any encryption zone).
@@ -227,10 +227,11 @@ To comply with the above rule, each encryption zone has its own `.Trash` directo
 If the encryption zone is the root directory (e.g., the `/` directory), the trash path of the root directory is `/.Trash`, not the `.Trash` directory under the user's home directory, and the behavior of renaming sub-directories or sub-files in the root directory remains consistent with the behavior in a general encryption zone, such as the `/zone` directory mentioned at the top of this section.
 
 The `crypto` command before Hadoop 2.8.0 does not provision the `.Trash` directory automatically. If an encryption zone is created before Hadoop 2.8.0, and then the cluster is upgraded to Hadoop 2.8.0 or above, the trash directory can be provisioned using `-provisionTrash` option (e.g., `hdfs crypto -provisionTrash -path /zone`).
-<a name="Attack_vectors"></a>Attack vectors
+
+Attack vectors
 --------------
 
-### <a name="Hardware_access_exploits"></a>Hardware access exploits
+### Hardware access exploits
 
 These exploits assume that the attacker has gained physical access to hard drives from cluster machines, i.e. datanodes and namenodes.
 
@@ -244,7 +245,7 @@ These exploits assume that attacker has gained physical access to hard drives fr
 
     * By itself, this does not expose cleartext, as it also requires access to DEKs.
 
-### <a name="Root_access_exploits"></a>Root access exploits
+### Root access exploits
 
 These exploits assume that the attacker has gained root shell access to cluster machines, i.e. datanodes and namenodes. Many of these exploits cannot be addressed in HDFS, since a malicious root user has access to the in-memory state of processes holding encryption keys and cleartext. For these exploits, the only mitigation technique is carefully restricting and monitoring root shell access.
 
@@ -268,7 +269,7 @@ These exploits assume that attacker has gained root shell access to cluster mach
 
     * By itself, insufficient to read cleartext without the EDEK's encryption key and encrypted block files.
 
-### <a name="HDFS_admin_exploits"></a>HDFS admin exploits
+### HDFS admin exploits
 
 These exploits assume that the attacker has compromised HDFS, but does not have root or `hdfs` user shell access.
 
@@ -280,6 +281,6 @@ These exploits assume that the attacker has compromised HDFS, but does not have
 
     * By itself, insufficient to read cleartext without EDEK encryption keys.
 
-### <a name="Rogue_user_exploits"></a>Rogue user exploits
+### Rogue user exploits
 
 A rogue user can collect keys of files they have access to, and use them later to decrypt the encrypted data of those files. As the user had access to those files, they already had access to the file contents. This can be mitigated through periodic key rolling policies.
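
The shell-based example usage earlier in this document also has a programmatic equivalent. A rough Java sketch, assuming a reachable KMS configured via hadoop.security.key.provider.path and a Hadoop 2.8+ client; error handling is omitted and the key and zone names are placeholders:

    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class CreateZoneSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // 1. Create the encryption zone key in the KMS-backed provider
        //    (the rough equivalent of: hadoop key create mykey).
        KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
        provider.createKey("mykey", KeyProvider.options(conf));
        provider.flush();

        // 2. Create an empty directory and mark it as an encryption zone,
        //    provisioning its trash directory in the same step (the rough
        //    equivalent of: hdfs crypto -createZone -keyName mykey -path /zone).
        FileSystem fs = FileSystem.get(conf);
        Path zone = new Path("/zone");
        fs.mkdirs(zone);
        HdfsAdmin admin = new HdfsAdmin(fs.getUri(), conf);
        admin.createEncryptionZone(zone, "mykey",
            EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
      }
    }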




[17/50] [abbrv] hadoop git commit: YARN-7189. Container-executor doesn't remove Docker containers that error out early. Contributed by Eric Badger

Posted by sh...@apache.org.
YARN-7189. Container-executor doesn't remove Docker containers that error out early. Contributed by Eric Badger

(cherry picked from commit 391ac5cdd2f31db2341bb731daee094b9ca309ec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ec195ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ec195ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ec195ed

Branch: refs/heads/YARN-8200
Commit: 5ec195edbcd982a3e7c2a4ea760e3ce860c87143
Parents: 88cb461
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Apr 17 09:45:55 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Apr 17 09:53:19 2018 -0500

----------------------------------------------------------------------
 .../impl/container-executor.c                   | 59 +++++++++++++++-----
 1 file changed, 44 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec195ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index c1a42ca..109ff73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1444,7 +1444,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
   if (exit_code != 0) {
     fprintf(ERRORFILE, "Could not create script path\n");
     fflush(ERRORFILE);
-    goto cleanup;
+    goto pre_launch_cleanup;
   }
 
   fprintf(LOGFILE, "Creating local dirs...\n");
@@ -1455,7 +1455,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
   if (exit_code != 0) {
     fprintf(ERRORFILE, "Could not create local files and directories %d %d\n", container_file_source, cred_file_source);
     fflush(ERRORFILE);
-    goto cleanup;
+    goto pre_launch_cleanup;
   }
 
   docker_command = construct_docker_command(command_file);
@@ -1467,14 +1467,14 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
     exit_code = OUT_OF_MEMORY;
     fprintf(ERRORFILE, "Container out of memory");
     fflush(ERRORFILE);
-    goto cleanup;
+    goto pre_launch_cleanup;
   }
 
   fprintf(LOGFILE, "Changing effective user to root...\n");
   if (change_effective_user(0, user_gid) != 0) {
     fprintf(ERRORFILE, "Could not change to effective users %d, %d\n", 0, user_gid);
     fflush(ERRORFILE);
-    goto cleanup;
+    goto pre_launch_cleanup;
   }
 
   snprintf(docker_command_with_binary, command_size, "%s %s", docker_binary, docker_command);
@@ -1487,7 +1487,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
      "Could not invoke docker %s.\n", docker_command_with_binary);
     fflush(ERRORFILE);
     exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
-    goto cleanup;
+    goto post_launch_cleanup;
   }
 
   snprintf(docker_inspect_command, command_size,
@@ -1504,7 +1504,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
      "Could not inspect docker to get pid %s.\n", docker_inspect_command);
     fflush(ERRORFILE);
     exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
-    goto cleanup;
+    goto post_launch_cleanup;
   }
 
   if (pid != 0) {
@@ -1519,7 +1519,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
         if (strcmp(*cgroup_ptr, "none") != 0 &&
              write_pid_to_cgroup_as_root(*cgroup_ptr, pid) != 0) {
           exit_code = WRITE_CGROUP_FAILED;
-          goto cleanup;
+          goto post_launch_cleanup;
         }
       }
     }
@@ -1532,7 +1532,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
       exit_code = WRITE_PIDFILE_FAILED;
       fprintf(ERRORFILE, "Could not write pid to %s", pid_file);
       fflush(ERRORFILE);
-      goto cleanup;
+      goto post_launch_cleanup;
     }
 
     snprintf(docker_wait_command, command_size,
@@ -1578,20 +1578,49 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
     }
   }
 
+post_launch_cleanup:
+
   fprintf(LOGFILE, "Removing docker container post-exit...\n");
   snprintf(docker_rm_command, command_size,
     "%s rm %s", docker_binary, container_id);
-  FILE* rm_docker = popen(docker_rm_command, "w");
-  if (pclose (rm_docker) != 0)
-  {
-    fprintf (ERRORFILE,
-     "Could not remove container %s.\n", docker_rm_command);
+  int rc, i, sleep_time = 1, max_iterations = 5;
+  for (i = 0; i < max_iterations; i++) {
+    if (i > 0) {
+      sleep(sleep_time);
+      sleep_time *= 2;
+    }
+    FILE* rm_docker = popen(docker_rm_command, "w");
+    if (rm_docker == 0) {
+      fprintf(ERRORFILE,
+        "popen() failed: %s\n", strerror(errno));
+      fflush(ERRORFILE);
+      continue;
+    }
+    rc = pclose(rm_docker);
+    if (rc == -1) {
+      fprintf(ERRORFILE,
+        "pclose() failed: %s\n", strerror(errno));
+      fflush(ERRORFILE);
+    } else if (WIFEXITED(rc)) {
+      if (WEXITSTATUS(rc) == 0) {
+        break;
+      } else {
+        fprintf(ERRORFILE,
+          "docker rm command failed with exit status: %d\n", WEXITSTATUS(rc));
+        fflush(ERRORFILE);
+      }
+    }
+  }
+
+  if (i == max_iterations) {
+    // Tried 5 times and failed.
+    fprintf(ERRORFILE,
+     "Could not remove container after %d tries: %s\n", max_iterations, docker_rm_command);
     fflush(ERRORFILE);
     exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
-    goto cleanup;
   }
 
-cleanup:
+pre_launch_cleanup:
 
   if (exit_code_file != NULL && write_exit_code_file_as_nm(exit_code_file, exit_code) < 0) {
     fprintf (ERRORFILE,
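
The retry loop above is the core of the fix: "docker rm" can fail transiently while the daemon is still tearing the container down, so removal is attempted up to five times with doubling sleeps between attempts. For readers more comfortable in Java than C, a hedged sketch of the same backoff pattern; the class and the use of ProcessBuilder are illustrative, not how container-executor actually invokes Docker:

    import java.io.IOException;

    public final class RetryingRemove {
      private static final int MAX_ITERATIONS = 5;

      public static boolean removeContainer(String containerId)
          throws InterruptedException {
        long sleepMillis = 1000L;
        for (int i = 0; i < MAX_ITERATIONS; i++) {
          if (i > 0) {
            Thread.sleep(sleepMillis);  // back off before every retry
            sleepMillis *= 2;           // 1s, 2s, 4s, 8s between attempts
          }
          try {
            Process p = new ProcessBuilder("docker", "rm", containerId)
                .inheritIO()
                .start();
            if (p.waitFor() == 0) {
              return true;              // removal succeeded
            }
          } catch (IOException e) {
            // docker binary missing or not executable; retry after backoff
          }
        }
        return false;                   // gave up after MAX_ITERATIONS tries
      }
    }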




[16/50] [abbrv] hadoop git commit: HDFS-13435. RBF: Improve the error loggings for printing the stack trace.

Posted by sh...@apache.org.
HDFS-13435. RBF: Improve the error loggings for printing the stack trace.

(cherry picked from commit c4d3636c21acaeb2b7d56d19cd4996aa25151bd1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88cb461c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88cb461c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88cb461c

Branch: refs/heads/YARN-8200
Commit: 88cb461c877a0e540c2defe58a875f3806e06755
Parents: 93fc813
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Apr 17 11:23:22 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Apr 17 11:27:30 2018 +0800

----------------------------------------------------------------------
 .../hdfs/server/federation/metrics/FederationMetrics.java      | 2 +-
 .../hdfs/server/federation/resolver/order/LocalResolver.java   | 3 +--
 .../hdfs/server/federation/router/RouterHeartbeatService.java  | 4 ++--
 .../hadoop/hdfs/server/federation/router/RouterRpcClient.java  | 4 ++--
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java  | 6 +++---
 .../hadoop/hdfs/server/federation/store/StateStoreService.java | 2 +-
 .../federation/store/driver/impl/StateStoreFileBaseImpl.java   | 2 +-
 7 files changed, 11 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cb461c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index f59429e..39e060f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -444,7 +444,7 @@ public class FederationMetrics implements FederationMBean {
         dev = (float) Math.sqrt(dev / usages.length);
       }
     } catch (IOException e) {
-      LOG.info("Cannot get the live nodes: {}", e.getMessage());
+      LOG.error("Cannot get the live nodes: {}", e.getMessage());
     }
 
     final Map<String, Object> innerInfo = new HashMap<>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cb461c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
index b6bd4b3..afc49c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
@@ -210,8 +210,7 @@ public class LocalResolver extends RouterResolver<String, String> {
         }
       }
     } catch (IOException ioe) {
-      LOG.error("Cannot get Namenodes from the State Store: {}",
-          ioe.getMessage());
+      LOG.error("Cannot get Namenodes from the State Store", ioe);
     }
     return ret;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cb461c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java
index fe172c2..a7f02d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java
@@ -100,7 +100,7 @@ public class RouterHeartbeatService extends PeriodicService {
           LOG.debug("Router heartbeat for router {}", routerId);
         }
       } catch (IOException e) {
-        LOG.error("Cannot heartbeat router {}: {}", routerId, e.getMessage());
+        LOG.error("Cannot heartbeat router {}", routerId, e);
       }
     } else {
       LOG.warn("Cannot heartbeat router {}: State Store unavailable", routerId);
@@ -132,7 +132,7 @@ public class RouterHeartbeatService extends PeriodicService {
         }
       }
     } catch (Exception e) {
-      LOG.error("Cannot get version for {}: {}", clazz, e.getMessage());
+      LOG.error("Cannot get version for {}", clazz, e);
     }
     return version;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cb461c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 5fbbeed..214e438 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -1059,8 +1059,8 @@ public class RouterRpcClient {
           results.put(location, clazz.cast(result));
         } catch (CancellationException ce) {
           T loc = orderedLocations.get(i);
-          String msg =
-              "Invocation to \"" + loc + "\" for \"" + method + "\" timed out";
+          String msg = "Invocation to \"" + loc + "\" for \""
+              + method.getMethodName() + "\" timed out";
           LOG.error(msg);
           IOException ioe = new SubClusterTimeoutException(msg);
           exceptions.put(location, ioe);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cb461c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 9d964d9..d626699 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2161,7 +2161,7 @@ public class RouterRpcServer extends AbstractService
           return entry.isAll();
         }
       } catch (IOException e) {
-        LOG.error("Cannot get mount point: {}", e.getMessage());
+        LOG.error("Cannot get mount point", e);
       }
     }
     return false;
@@ -2182,7 +2182,7 @@ public class RouterRpcServer extends AbstractService
           return true;
         }
       } catch (IOException e) {
-        LOG.error("Cannot get mount point: {}", e.getMessage());
+        LOG.error("Cannot get mount point", e);
       }
     }
     return false;
@@ -2211,7 +2211,7 @@ public class RouterRpcServer extends AbstractService
           ret.put(child, entry.getDateModified());
         }
       } catch (IOException e) {
-        LOG.error("Cannot get mount point: {}", e.getMessage());
+        LOG.error("Cannot get mount point", e);
       }
     }
     return ret;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cb461c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
index ccbde09..a0744a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
@@ -183,7 +183,7 @@ public class StateStoreService extends CompositeService {
     } catch (NotCompliantMBeanException e) {
       throw new RuntimeException("Bad StateStoreMBean setup", e);
     } catch (MetricsException e) {
-      LOG.info("Failed to register State Store bean {}", e.getMessage());
+      LOG.error("Failed to register State Store bean {}", e.getMessage());
     }
 
     super.serviceInit(this.conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cb461c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
index 6638d1c..15fc9c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
@@ -361,7 +361,7 @@ public abstract class StateStoreFileBaseImpl
           try {
             writer.close();
           } catch (IOException e) {
-            LOG.error("Cannot close the writer for {}", recordPathTemp);
+            LOG.error("Cannot close the writer for {}", recordPathTemp, e);
           }
         }
       }
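
The pattern behind most of these changes is the SLF4J convention that a Throwable passed as the final argument, beyond the "{}" placeholders, is logged with its full stack trace, while passing e.getMessage() as a placeholder value discards the trace. A minimal sketch of the difference, assuming only slf4j-api plus a binding on the classpath (the class itself is hypothetical):

    import java.io.IOException;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingSketch.class);

      void heartbeat(String routerId) {
        try {
          throw new IOException("State Store unavailable");
        } catch (IOException e) {
          // Message only: the stack trace is lost.
          LOG.error("Cannot heartbeat router {}: {}", routerId, e.getMessage());
          // Throwable as the last argument: the stack trace is printed.
          LOG.error("Cannot heartbeat router {}", routerId, e);
        }
      }
    }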

