Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2016/06/24 01:36:37 UTC

[41/49] hadoop git commit: HDFS-10540. Diskbalancer: The CLI error message when the disk balancer is not enabled is not clear. Contributed by Anu Engineer.

HDFS-10540. Diskbalancer: The CLI error message when the disk balancer is not enabled is not clear. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb68e5b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb68e5b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb68e5b3

Branch: refs/heads/HDFS-1312
Commit: cb68e5b3bdb0079af867a9e49559827ecee03010
Parents: 3225c24
Author: Anu Engineer <ae...@apache.org>
Authored: Fri Jun 17 23:25:26 2016 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Jun 23 18:27:00 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/DiskBalancer.java      |  2 +
 .../server/diskbalancer/command/Command.java    |  2 +-
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  | 62 ++++++++------------
 3 files changed, 28 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb68e5b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 5fde7c5..b31b997 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -256,6 +256,8 @@ public class DiskBalancer {
       }
       ObjectMapper mapper = new ObjectMapper();
       return mapper.writeValueAsString(pathMap);
+    } catch (DiskBalancerException ex) {
+      throw ex;
     } catch (IOException e) {
       throw new DiskBalancerException("Internal error, Unable to " +
           "create JSON string.", e,

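The hunk above rethrows DiskBalancerException before the generic IOException handler, so a specific failure (for example, the disk balancer being disabled) reaches the CLI with its original message instead of the generic "Internal error" wrapper. The following is an illustrative sketch of that pattern, not project code; the class and method names are hypothetical.

import java.io.IOException;

public class SpecificBeforeGenericDemo {

  // Hypothetical stand-in for DiskBalancerException.
  static class NotEnabledException extends IOException {
    NotEnabledException(String msg) { super(msg); }
  }

  static String buildReport(boolean enabled) throws IOException {
    try {
      if (!enabled) {
        throw new NotEnabledException("Disk balancer is not enabled.");
      }
      return "{\"volumes\":[]}";
    } catch (NotEnabledException ex) {
      throw ex;  // rethrow as-is so the caller sees the specific reason
    } catch (IOException e) {
      // Only genuinely unexpected failures get the generic wrapper.
      throw new IOException("Internal error, unable to create JSON string.", e);
    }
  }

  public static void main(String[] args) {
    try {
      buildReport(false);
    } catch (IOException ex) {
      System.out.println(ex.getMessage());  // prints the specific message
    }
  }
}
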
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb68e5b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index d2813e7..19f9945 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -171,7 +171,7 @@ public abstract class Command extends Configured {
       diskBalancerLogs = new Path(path);
     }
     if (fs.exists(diskBalancerLogs)) {
-      LOG.error("Another Diskbalancer instance is running ? - Target " +
+      LOG.debug("Another Diskbalancer instance is running ? - Target " +
           "Directory already exists. {}", diskBalancerLogs);
       throw new IOException("Another DiskBalancer files already exist at the " +
           "target location. " + diskBalancerLogs.toString());

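The change above drops the log level from error to debug because the same condition is already reported to the caller through the IOException thrown on the next line; logging it at error would report it twice. A minimal sketch of that log-once pattern, assuming the slf4j API already used in the file (class and method names here are hypothetical):

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogOnceDemo {
  private static final Logger LOG = LoggerFactory.getLogger(LogOnceDemo.class);

  static void ensureFreshWorkDir(boolean alreadyExists, String dir)
      throws IOException {
    if (alreadyExists) {
      // Keep the low-level detail at debug; the exception message is what
      // the CLI user actually sees.
      LOG.debug("Target directory already exists: {}", dir);
      throw new IOException("DiskBalancer files already exist at the "
          + "target location: " + dir);
    }
  }
}
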
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb68e5b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
index d83a49c..67703c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
@@ -36,9 +36,7 @@ import org.apache.hadoop.util.ToolRunner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 import java.io.PrintStream;
-import java.net.URISyntaxException;
 
 /**
  * DiskBalancer is a tool that can be used to ensure that data is spread evenly
@@ -169,7 +167,7 @@ public class DiskBalancer extends Configured implements Tool {
       res = ToolRunner.run(shell, argv);
     } catch (Exception ex) {
       LOG.error(ex.toString());
-      System.exit(1);
+      res = 1;
     }
     System.exit(res);
   }
@@ -449,51 +447,41 @@ public class DiskBalancer extends Configured implements Tool {
    * @param cmd  - CommandLine
    * @param opts options of command line
    * @param out  the output stream used for printing
-   * @throws IOException
-   * @throws URISyntaxException
    */
   private int dispatch(CommandLine cmd, Options opts, final PrintStream out)
-      throws IOException, URISyntaxException {
+      throws Exception {
     Command currentCommand = null;
+    if (cmd.hasOption(DiskBalancer.PLAN)) {
+      currentCommand = new PlanCommand(getConf());
+    }
 
-    try {
-
-      if (cmd.hasOption(DiskBalancer.PLAN)) {
-        currentCommand = new PlanCommand(getConf());
-      }
-
-      if (cmd.hasOption(DiskBalancer.EXECUTE)) {
-        currentCommand = new ExecuteCommand(getConf());
-      }
-
-      if (cmd.hasOption(DiskBalancer.QUERY)) {
-        currentCommand = new QueryCommand(getConf());
-      }
+    if (cmd.hasOption(DiskBalancer.EXECUTE)) {
+      currentCommand = new ExecuteCommand(getConf());
+    }
 
-      if (cmd.hasOption(DiskBalancer.CANCEL)) {
-        currentCommand = new CancelCommand(getConf());
-      }
+    if (cmd.hasOption(DiskBalancer.QUERY)) {
+      currentCommand = new QueryCommand(getConf());
+    }
 
-      if (cmd.hasOption(DiskBalancer.REPORT)) {
-        currentCommand = new ReportCommand(getConf(), out);
-      }
+    if (cmd.hasOption(DiskBalancer.CANCEL)) {
+      currentCommand = new CancelCommand(getConf());
+    }
 
-      if (cmd.hasOption(DiskBalancer.HELP)) {
-        currentCommand = new HelpCommand(getConf());
-      }
+    if (cmd.hasOption(DiskBalancer.REPORT)) {
+      currentCommand = new ReportCommand(getConf(), out);
+    }
 
-      // Invoke Main help here.
-      if (currentCommand == null) {
-        new HelpCommand(getConf()).execute(null);
-        return 1;
-      }
+    if (cmd.hasOption(DiskBalancer.HELP)) {
+      currentCommand = new HelpCommand(getConf());
+    }
 
-      currentCommand.execute(cmd);
-    } catch (Exception ex) {
-      System.err.printf(ex.getMessage());
+    // Invoke main help here.
+    if (currentCommand == null) {
+      new HelpCommand(getConf()).execute(null);
       return 1;
     }
+
+    currentCommand.execute(cmd);
     return 0;
   }
-
 }

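The tools/DiskBalancer.java change removes the try/catch (and the System.err.printf call) from dispatch() so exceptions propagate to main(), which logs the failure once and returns a non-zero code through the existing System.exit(res) path instead of calling System.exit(1) from inside the handler. Below is a minimal sketch of that single-exit-point structure with hypothetical names, not the project code:

public class CliSkeleton {

  static int dispatch(String[] argv) throws Exception {
    if (argv.length == 0) {
      printHelp();
      return 1;  // no command given: show help and fail
    }
    // ... select and execute the requested command; exceptions propagate ...
    return 0;
  }

  static void printHelp() {
    System.out.println("usage: tool <command> [options]");
  }

  public static void main(String[] argv) {
    int res;
    try {
      res = dispatch(argv);
    } catch (Exception ex) {
      System.err.println(ex.toString());  // log once, at the entry point
      res = 1;                            // fail via the single exit below
    }
    System.exit(res);
  }
}
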
