Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/08/15 02:25:08 UTC

svn commit: r1373172 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/balancer/ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/main/java/org/apach...

Author: suresh
Date: Wed Aug 15 00:25:07 2012
New Revision: 1373172

URL: http://svn.apache.org/viewvc?rev=1373172&view=rev
Log:
Reverting previous incomplete change r1373170 for HDFS-3723
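
For context: HDFS-3723 had wired a common -h/-help guard into the main() entry
point of each HDFS tool, and the hunks below strip that wiring back out. A
minimal sketch of the reverted pattern, reconstructed from the removed lines
(the USAGE string here is abbreviated):

    private static final String USAGE = "Usage: java NameNode [...]";

    public static void main(String[] argv) throws Exception {
      // If the sole argument is -h or -help, print USAGE plus the generic
      // ToolRunner options and exit successfully.
      if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
        System.exit(0);
      }
      // ...normal startup continues here.
    }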

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Aug 15 00:25:07 2012
@@ -114,9 +114,6 @@ Trunk (unreleased changes)
     HDFS-3789. JournalManager#format() should be able to throw IOException
     (Ivan Kelly via todd)
 
-    HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
-    suresh)
-
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Aug 15 00:25:07 2012
@@ -18,21 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
-
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -46,17 +33,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.StringTokenizer;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -77,7 +57,8 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.ToolRunner;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -443,6 +424,7 @@ public class DFSUtil {
    * 
    * @param conf configuration
    * @return list of InetSocketAddresses
+   * @throws IOException if no addresses are configured
    */
   public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
       Configuration conf) {
@@ -1091,44 +1073,4 @@ public class DFSUtil {
       return null;
     }
   }
-  
-  public static Options helpOptions = new Options();
-  public static Option helpOpt = new Option("h", "help", false,
-      "get help information");
-
-  static {
-    helpOptions.addOption(helpOpt);
-  }
-
-  /**
-   * Parse the arguments for commands
-   * 
-   * @param args the argument to be parsed
-   * @param helpDescription help information to be printed out
-   * @param out Printer
-   * @param printGenericCommandUsage whether to print the 
-   *              generic command usage defined in ToolRunner
-   * @return true when the argument matches help option, false if not
-   */
-  public static boolean parseHelpArgument(String[] args,
-      String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
-    if (args.length == 1) {
-      try {
-        CommandLineParser parser = new PosixParser();
-        CommandLine cmdLine = parser.parse(helpOptions, args);
-        if (cmdLine.hasOption(helpOpt.getOpt())
-            || cmdLine.hasOption(helpOpt.getLongOpt())) {
-          // should print out the help information
-          out.println(helpDescription + "\n");
-          if (printGenericCommandUsage) {
-            ToolRunner.printGenericCommandUsage(out);
-          }
-          return true;
-        }
-      } catch (ParseException pe) {
-        return false;
-      }
-    }
-    return false;
-  }
 }
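
The parseHelpArgument helper deleted in the hunk above is plain commons-cli;
here is the same pattern as a self-contained, runnable sketch (the class name
HelpCheck is hypothetical and not part of the commit, and the ToolRunner
generic-usage printing is omitted so that commons-cli is the only dependency):

    import java.io.PrintStream;

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.CommandLineParser;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;
    import org.apache.commons.cli.PosixParser;

    public class HelpCheck { // hypothetical name, for illustration only
      private static final Options HELP_OPTIONS = new Options();
      private static final Option HELP_OPT =
          new Option("h", "help", false, "get help information");

      static {
        HELP_OPTIONS.addOption(HELP_OPT);
      }

      /** Returns true iff the single argument is -h/--help. */
      static boolean parseHelpArgument(String[] args, String helpDescription,
          PrintStream out) {
        if (args.length != 1) {
          return false;
        }
        try {
          CommandLineParser parser = new PosixParser();
          CommandLine cmdLine = parser.parse(HELP_OPTIONS, args);
          if (cmdLine.hasOption(HELP_OPT.getOpt())
              || cmdLine.hasOption(HELP_OPT.getLongOpt())) {
            out.println(helpDescription + "\n");
            return true;
          }
        } catch (ParseException pe) {
          // Unrecognized option: treat as "not a help request".
        }
        return false;
      }

      public static void main(String[] args) {
        if (parseHelpArgument(args, "Usage: java HelpCheck", System.out)) {
          System.exit(0);
        }
        System.out.println("normal startup path");
      }
    }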

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Wed Aug 15 00:25:07 2012
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.balancer;
 
-import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
 
 import java.io.BufferedInputStream;
@@ -27,7 +26,6 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.PrintStream;
 import java.net.Socket;
 import java.net.URI;
 import java.text.DateFormat;
@@ -70,6 +68,7 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.io.IOUtils;
@@ -80,6 +79,7 @@ import org.apache.hadoop.util.StringUtil
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import static com.google.common.base.Preconditions.checkArgument;
 
 /** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
  * when some datanodes become full or when new empty nodes join the cluster.
@@ -189,13 +189,6 @@ public class Balancer {
    */
   public static final int MAX_NUM_CONCURRENT_MOVES = 5;
   
-  private static final String USAGE = "Usage: java "
-      + Balancer.class.getSimpleName()
-      + "\n\t[-policy <policy>]\tthe balancing policy: "
-      + BalancingPolicy.Node.INSTANCE.getName() + " or "
-      + BalancingPolicy.Pool.INSTANCE.getName()
-      + "\n\t[-threshold <threshold>]\tPercentage of disk capacity";
-  
   private final NameNodeConnector nnc;
   private final BalancingPolicy policy;
   private final double threshold;
@@ -1557,7 +1550,7 @@ public class Balancer {
             }
           }
         } catch(RuntimeException e) {
-          printUsage(System.err);
+          printUsage();
           throw e;
         }
       }
@@ -1565,8 +1558,13 @@ public class Balancer {
       return new Parameters(policy, threshold);
     }
 
-    private static void printUsage(PrintStream out) {
-      out.println(USAGE + "\n");
+    private static void printUsage() {
+      System.out.println("Usage: java " + Balancer.class.getSimpleName());
+      System.out.println("    [-policy <policy>]\tthe balancing policy: "
+          + BalancingPolicy.Node.INSTANCE.getName() + " or " 
+          + BalancingPolicy.Pool.INSTANCE.getName());
+      System.out.println(
+          "    [-threshold <threshold>]\tPercentage of disk capacity");
     }
   }
 
@@ -1575,10 +1573,6 @@ public class Balancer {
    * @param args Command line arguments
    */
   public static void main(String[] args) {
-    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     try {
       System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
     } catch (Throwable e) {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Aug 15 00:25:07 2012
@@ -46,7 +46,6 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
@@ -56,7 +55,6 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
@@ -100,8 +98,8 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
@@ -126,6 +124,9 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -170,9 +171,9 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
 /**********************************************************
@@ -229,8 +230,6 @@ public class DataNode extends Configured
         
   static final Log ClientTraceLog =
     LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
-  
-  private static final String USAGE = "Usage: java DataNode [-rollback | -regular]";
 
   /**
    * Use {@link NetUtils#createSocketAddr(String)} instead.
@@ -1542,7 +1541,7 @@ public class DataNode extends Configured
     }
     
     if (!parseArguments(args, conf)) {
-      printUsage(System.err);
+      printUsage();
       return null;
     }
     Collection<URI> dataDirs = getStorageDirs(conf);
@@ -1656,8 +1655,9 @@ public class DataNode extends Configured
         + xmitsInProgress.get() + "}";
   }
 
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
+  private static void printUsage() {
+    System.err.println("Usage: java DataNode");
+    System.err.println("           [-rollback]");
   }
 
   /**
@@ -1742,10 +1742,6 @@ public class DataNode extends Configured
   }
   
   public static void main(String args[]) {
-    if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     secureMain(args, null);
   }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Aug 15 00:25:07 2012
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
-import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
@@ -39,8 +38,6 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -72,9 +69,12 @@ import org.apache.hadoop.security.Securi
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
-import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.ExitUtil.ExitException;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -188,22 +188,6 @@ public class NameNode {
     DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
   
-  private static final String USAGE = "Usage: java NameNode ["
-      + StartupOption.BACKUP.getName() + "] | ["
-      + StartupOption.CHECKPOINT.getName() + "] | ["
-      + StartupOption.FORMAT.getName() + " ["
-      + StartupOption.CLUSTERID.getName() + " cid ] ["
-      + StartupOption.FORCE.getName() + "] ["
-      + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
-      + StartupOption.UPGRADE.getName() + "] | ["
-      + StartupOption.ROLLBACK.getName() + "] | ["
-      + StartupOption.FINALIZE.getName() + "] | ["
-      + StartupOption.IMPORT.getName() + "] | ["
-      + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
-      + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
-      + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
-      + " ] ]";
-  
   public long getProtocolVersion(String protocol, 
                                  long clientVersion) throws IOException {
     if (protocol.equals(ClientProtocol.class.getName())) {
@@ -911,8 +895,25 @@ public class NameNode {
     return false;
   }
 
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
+  private static void printUsage() {
+    System.err.println(
+      "Usage: java NameNode [" +
+      StartupOption.BACKUP.getName() + "] | [" +
+      StartupOption.CHECKPOINT.getName() + "] | [" +
+      StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName() +  
+      " cid ] [" + StartupOption.FORCE.getName() + "] [" +
+      StartupOption.NONINTERACTIVE.getName() + "] ] | [" +
+      StartupOption.UPGRADE.getName() + "] | [" +
+      StartupOption.ROLLBACK.getName() + "] | [" +
+      StartupOption.FINALIZE.getName() + "] | [" +
+      StartupOption.IMPORT.getName() + "] | [" +
+      StartupOption.INITIALIZESHAREDEDITS.getName() + 
+        " [" + StartupOption.FORCE.getName() + "] [" +
+             StartupOption.NONINTERACTIVE.getName() + "]" +
+      "] | [" +
+      StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" + 
+      StartupOption.RECOVER.getName() + " [ " +
+        StartupOption.FORCE.getName() + " ] ]");
   }
 
   private static StartupOption parseArguments(String args[]) {
@@ -1060,7 +1061,7 @@ public class NameNode {
       conf = new HdfsConfiguration();
     StartupOption startOpt = parseArguments(argv);
     if (startOpt == null) {
-      printUsage(System.err);
+      printUsage();
       return null;
     }
     setStartupOption(conf, startOpt);
@@ -1174,10 +1175,6 @@ public class NameNode {
   /**
    */
   public static void main(String argv[]) throws Exception {
-    if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     try {
       StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
       NameNode namenode = createNameNode(argv, null);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Wed Aug 15 00:25:07 2012
@@ -562,9 +562,6 @@ public class SecondaryNameNode implement
     if (opts == null) {
       LOG.fatal("Failed to parse options");
       terminate(1);
-    } else if (opts.shouldPrintHelp()) {
-      opts.usage();
-      System.exit(0);
     }
     
     StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -598,7 +595,6 @@ public class SecondaryNameNode implement
     private final Option geteditsizeOpt;
     private final Option checkpointOpt;
     private final Option formatOpt;
-    private final Option helpOpt;
 
 
     Command cmd;
@@ -609,7 +605,6 @@ public class SecondaryNameNode implement
     
     private boolean shouldForce;
     private boolean shouldFormat;
-    private boolean shouldPrintHelp;
 
     CommandLineOpts() {
       geteditsizeOpt = new Option("geteditsize",
@@ -617,32 +612,20 @@ public class SecondaryNameNode implement
       checkpointOpt = OptionBuilder.withArgName("force")
         .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");;
       formatOpt = new Option("format", "format the local storage during startup");
-      helpOpt = new Option("h", "help", false, "get help information");
       
       options.addOption(geteditsizeOpt);
       options.addOption(checkpointOpt);
       options.addOption(formatOpt);
-      options.addOption(helpOpt);
     }
     
     public boolean shouldFormat() {
       return shouldFormat;
     }
 
-    public boolean shouldPrintHelp() {
-      return shouldPrintHelp;
-    }
-    
     public void parse(String ... argv) throws ParseException {
       CommandLineParser parser = new PosixParser();
       CommandLine cmdLine = parser.parse(options, argv);
       
-      if (cmdLine.hasOption(helpOpt.getOpt())
-          || cmdLine.hasOption(helpOpt.getLongOpt())) {
-        shouldPrintHelp = true;
-        return;
-      }
-      
       boolean hasGetEdit = cmdLine.hasOption(geteditsizeOpt.getOpt());
       boolean hasCheckpoint = cmdLine.hasOption(checkpointOpt.getOpt()); 
       if (hasGetEdit && hasCheckpoint) {
@@ -679,13 +662,8 @@ public class SecondaryNameNode implement
     }
     
     void usage() {
-      String header = "The Secondary NameNode is a helper "
-          + "to the primary NameNode. The Secondary is responsible "
-          + "for supporting periodic checkpoints of the HDFS metadata. "
-          + "The current design allows only one Secondary NameNode "
-          + "per HDFS cluster.";
       HelpFormatter formatter = new HelpFormatter();
-      formatter.printHelp("secondarynamenode", header, options, "", false);
+      formatter.printHelp("secondarynamenode", options);
     }
   }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java Wed Aug 15 00:25:07 2012
@@ -42,10 +42,6 @@ public class DFSHAAdmin extends HAAdmin 
   protected void setErrOut(PrintStream errOut) {
     this.errOut = errOut;
   }
-  
-  protected void setOut(PrintStream out) {
-    this.out = out;
-  }
 
   @Override
   public void setConf(Configuration conf) {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java Wed Aug 15 00:25:07 2012
@@ -162,10 +162,6 @@ public class DFSZKFailoverController ext
 
   public static void main(String args[])
       throws Exception {
-    if (DFSUtil.parseHelpArgument(args, 
-        ZKFailoverController.USAGE, System.out, true)) {
-      System.exit(0);
-    }
     
     GenericOptionsParser parser = new GenericOptionsParser(
         new HdfsConfiguration(), args);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Wed Aug 15 00:25:07 2012
@@ -73,25 +73,6 @@ public class DFSck extends Configured im
     HdfsConfiguration.init();
   }
 
-  private static final String USAGE = "Usage: DFSck <path> "
-      + "[-list-corruptfileblocks | "
-      + "[-move | -delete | -openforwrite] "
-      + "[-files [-blocks [-locations | -racks]]]]\n"
-      + "\t<path>\tstart checking from this path\n"
-      + "\t-move\tmove corrupted files to /lost+found\n"
-      + "\t-delete\tdelete corrupted files\n"
-      + "\t-files\tprint out files being checked\n"
-      + "\t-openforwrite\tprint out files opened for write\n"
-      + "\t-list-corruptfileblocks\tprint out list of missing "
-      + "blocks and files they belong to\n"
-      + "\t-blocks\tprint out block report\n"
-      + "\t-locations\tprint out locations for every block\n"
-      + "\t-racks\tprint out network topology for data-node locations\n"
-      + "\t\tBy default fsck ignores files opened for write, "
-      + "use -openforwrite to report such files. They are usually "
-      + " tagged CORRUPT or HEALTHY depending on their block "
-      + "allocation status";
-  
   private final UserGroupInformation ugi;
   private final PrintStream out;
 
@@ -112,9 +93,25 @@ public class DFSck extends Configured im
   /**
    * Print fsck usage information
    */
-  static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
-    ToolRunner.printGenericCommandUsage(out);
+  static void printUsage() {
+    System.err.println("Usage: DFSck <path> [-list-corruptfileblocks | " +
+        "[-move | -delete | -openforwrite] " +
+        "[-files [-blocks [-locations | -racks]]]]");
+    System.err.println("\t<path>\tstart checking from this path");
+    System.err.println("\t-move\tmove corrupted files to /lost+found");
+    System.err.println("\t-delete\tdelete corrupted files");
+    System.err.println("\t-files\tprint out files being checked");
+    System.err.println("\t-openforwrite\tprint out files opened for write");
+    System.err.println("\t-list-corruptfileblocks\tprint out list of missing "
+        + "blocks and files they belong to");
+    System.err.println("\t-blocks\tprint out block report");
+    System.err.println("\t-locations\tprint out locations for every block");
+    System.err.println("\t-racks\tprint out network topology for data-node locations");
+    System.err.println("\t\tBy default fsck ignores files opened for write, " +
+                       "use -openforwrite to report such files. They are usually " +
+                       " tagged CORRUPT or HEALTHY depending on their block " +
+                        "allocation status");
+    ToolRunner.printGenericCommandUsage(System.err);
   }
   /**
    * @param args
@@ -122,7 +119,7 @@ public class DFSck extends Configured im
   @Override
   public int run(final String[] args) throws IOException {
     if (args.length == 0) {
-      printUsage(System.err);
+      printUsage();
       return -1;
     }
 
@@ -261,12 +258,12 @@ public class DFSck extends Configured im
         } else {
           System.err.println("fsck: can only operate on one path at a time '"
               + args[idx] + "'");
-          printUsage(System.err);
+          printUsage();
           return -1;
         }
       } else {
         System.err.println("fsck: Illegal option '" + args[idx] + "'");
-        printUsage(System.err);
+        printUsage();
         return -1;
       }
     }
@@ -307,14 +304,10 @@ public class DFSck extends Configured im
     // -files option is also used by GenericOptionsParser
     // Make sure that is not the first argument for fsck
     int res = -1;
-    if ((args.length == 0) || ("-files".equals(args[0]))) {
-      printUsage(System.err);
-      ToolRunner.printGenericCommandUsage(System.err);
-    } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      res = 0;
-    } else {
+    if ((args.length == 0 ) || ("-files".equals(args[0]))) 
+      printUsage();
+    else
       res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
-    }
     System.exit(res);
   }
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Wed Aug 15 00:25:07 2012
@@ -40,6 +40,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -47,7 +48,9 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
+import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
@@ -68,10 +71,8 @@ public class DelegationTokenFetcher {
   private static final String CANCEL = "cancel";
   private static final String RENEW = "renew";
   private static final String PRINT = "print";
-  private static final String HELP = "help";
-  private static final String HELP_SHORT = "h";
 
-  private static void printUsage(PrintStream err) {
+  private static void printUsage(PrintStream err) throws IOException {
     err.println("fetchdt retrieves delegation tokens from the NameNode");
     err.println();
     err.println("fetchdt <opts> <token file>");
@@ -106,7 +107,6 @@ public class DelegationTokenFetcher {
     fetcherOptions.addOption(CANCEL, false, "cancel the token");
     fetcherOptions.addOption(RENEW, false, "renew the token");
     fetcherOptions.addOption(PRINT, false, "print the token");
-    fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
     GenericOptionsParser parser = new GenericOptionsParser(conf,
         fetcherOptions, args);
     CommandLine cmd = parser.getCommandLine();
@@ -119,14 +119,9 @@ public class DelegationTokenFetcher {
     final boolean cancel = cmd.hasOption(CANCEL);
     final boolean renew = cmd.hasOption(RENEW);
     final boolean print = cmd.hasOption(PRINT);
-    final boolean help = cmd.hasOption(HELP);
     String[] remaining = parser.getRemainingArgs();
 
     // check option validity
-    if (help) {
-      printUsage(System.out);
-      System.exit(0);
-    }
     if (cancel && renew || cancel && print || renew && print || cancel && renew
         && print) {
       System.err.println("ERROR: Only specify cancel, renew or print.");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java Wed Aug 15 00:25:07 2012
@@ -324,10 +324,6 @@ public class GetConf extends Configured 
   }
 
   public static void main(String[] args) throws Exception {
-    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-    
     int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
     System.exit(res);
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java Wed Aug 15 00:25:07 2012
@@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -44,8 +43,6 @@ import org.apache.hadoop.util.ToolRunner
 public class GetGroups extends GetGroupsBase {
   
   private static final Log LOG = LogFactory.getLog(GetGroups.class);
-  
-  static final String USAGE = "Usage: hdfs groups [username ...]";
 
   static{
     HdfsConfiguration.init();
@@ -89,10 +86,6 @@ public class GetGroups extends GetGroups
   }
 
   public static void main(String[] argv) throws Exception {
-    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-    
     int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv);
     System.exit(res);
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java?rev=1373172&r1=1373171&r2=1373172&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java Wed Aug 15 00:25:07 2012
@@ -55,9 +55,7 @@ public class TestDFSHAAdmin {
   
   private DFSHAAdmin tool;
   private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
-  private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
   private String errOutput;
-  private String output;
   private HAServiceProtocol mockProtocol;
   private ZKFCProtocol mockZkfcProtocol;
   
@@ -113,14 +111,12 @@ public class TestDFSHAAdmin {
     };
     tool.setConf(getHAConf());
     tool.setErrOut(new PrintStream(errOutBytes));
-    tool.setOut(new PrintStream(outBytes));
   }
 
   private void assertOutputContains(String string) {
-    if (!errOutput.contains(string) && !output.contains(string)) {
-      fail("Expected output to contain '" + string + 
-          "' but err_output was:\n" + errOutput + 
-          "\n and output was: \n" + output);
+    if (!errOutput.contains(string)) {
+      fail("Expected output to contain '" + string + "' but was:\n" +
+          errOutput);
     }
   }
   
@@ -147,7 +143,7 @@ public class TestDFSHAAdmin {
 
   @Test
   public void testHelp() throws Exception {
-    assertEquals(0, runTool("-help"));
+    assertEquals(-1, runTool("-help"));
     assertEquals(0, runTool("-help", "transitionToActive"));
     assertOutputContains("Transitions the service into Active");
   }
@@ -382,12 +378,10 @@ public class TestDFSHAAdmin {
   
   private Object runTool(String ... args) throws Exception {
     errOutBytes.reset();
-    outBytes.reset();
     LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
     int ret = tool.run(args);
     errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
-    output = new String(outBytes.toByteArray(), Charsets.UTF_8);
-    LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
+    LOG.info("Output:\n" + errOutput);
     return ret;
   }