Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/08/15 02:28:42 UTC
svn commit: r1373173 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/server/balancer/
src/main/java/org/apache/hadoop/hdfs/server/datanode/
src/main/java/org/apach...
Author: suresh
Date: Wed Aug 15 00:28:41 2012
New Revision: 1373173
URL: http://svn.apache.org/viewvc?rev=1373173&view=rev
Log:
HDFS-3723. Add support -h, -help to all the commands. Contributed by Jing Zhao
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Aug 15 00:28:41 2012
@@ -114,6 +114,9 @@ Trunk (unreleased changes)
HDFS-3789. JournalManager#format() should be able to throw IOException
(Ivan Kelly via todd)
+ HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
+ suresh)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Aug 15 00:28:41 2012
@@ -18,8 +18,21 @@
package org.apache.hadoop.hdfs;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
+
import java.io.IOException;
+import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.net.URI;
@@ -33,10 +46,17 @@ import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
-import java.util.StringTokenizer;
import javax.net.SocketFactory;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -57,8 +77,7 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
@@ -424,7 +443,6 @@ public class DFSUtil {
*
* @param conf configuration
* @return list of InetSocketAddresses
- * @throws IOException if no addresses are configured
*/
public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
Configuration conf) {
@@ -1073,4 +1091,44 @@ public class DFSUtil {
return null;
}
}
+
+ public static Options helpOptions = new Options();
+ public static Option helpOpt = new Option("h", "help", false,
+ "get help information");
+
+ static {
+ helpOptions.addOption(helpOpt);
+ }
+
+ /**
+ * Parse the arguments for commands
+ *
+ * @param args the argument to be parsed
+ * @param helpDescription help information to be printed out
+ * @param out Printer
+ * @param printGenericCommandUsage whether to print the
+ * generic command usage defined in ToolRunner
+ * @return true when the argument matches help option, false if not
+ */
+ public static boolean parseHelpArgument(String[] args,
+ String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
+ if (args.length == 1) {
+ try {
+ CommandLineParser parser = new PosixParser();
+ CommandLine cmdLine = parser.parse(helpOptions, args);
+ if (cmdLine.hasOption(helpOpt.getOpt())
+ || cmdLine.hasOption(helpOpt.getLongOpt())) {
+ // should print out the help information
+ out.println(helpDescription + "\n");
+ if (printGenericCommandUsage) {
+ ToolRunner.printGenericCommandUsage(out);
+ }
+ return true;
+ }
+ } catch (ParseException pe) {
+ return false;
+ }
+ }
+ return false;
+ }
}
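
For context, a minimal sketch of how a command-line entry point adopts the new helper (ExampleTool and its USAGE string are hypothetical; DFSUtil.parseHelpArgument is the method added above):

    import org.apache.hadoop.hdfs.DFSUtil;

    public class ExampleTool {
      private static final String USAGE = "Usage: java ExampleTool [-option <value>]";

      public static void main(String[] args) {
        // parseHelpArgument prints USAGE (plus ToolRunner's generic command usage
        // when the last flag is true) and returns true only when the single
        // argument is -h or -help; otherwise the tool starts up normally.
        if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
          System.exit(0);
        }
        // ... normal tool startup would continue here
      }
    }

Note the args.length == 1 guard in parseHelpArgument: only a lone -h/-help triggers the help path, so a -h appearing anywhere in a longer command line is left to the tool's own argument parser, and any other unrecognized single argument simply makes PosixParser throw, which the helper swallows by returning false.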
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Wed Aug 15 00:28:41 2012
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.balancer;
+import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
import java.io.BufferedInputStream;
@@ -26,6 +27,7 @@ import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.io.PrintStream;
import java.net.Socket;
import java.net.URI;
import java.text.DateFormat;
@@ -68,7 +70,6 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.io.IOUtils;
@@ -79,7 +80,6 @@ import org.apache.hadoop.util.StringUtil
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import static com.google.common.base.Preconditions.checkArgument;
/** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
* when some datanodes become full or when new empty nodes join the cluster.
@@ -189,6 +189,13 @@ public class Balancer {
*/
public static final int MAX_NUM_CONCURRENT_MOVES = 5;
+ private static final String USAGE = "Usage: java "
+ + Balancer.class.getSimpleName()
+ + "\n\t[-policy <policy>]\tthe balancing policy: "
+ + BalancingPolicy.Node.INSTANCE.getName() + " or "
+ + BalancingPolicy.Pool.INSTANCE.getName()
+ + "\n\t[-threshold <threshold>]\tPercentage of disk capacity";
+
private final NameNodeConnector nnc;
private final BalancingPolicy policy;
private final double threshold;
@@ -1550,7 +1557,7 @@ public class Balancer {
}
}
} catch(RuntimeException e) {
- printUsage();
+ printUsage(System.err);
throw e;
}
}
@@ -1558,13 +1565,8 @@ public class Balancer {
return new Parameters(policy, threshold);
}
- private static void printUsage() {
- System.out.println("Usage: java " + Balancer.class.getSimpleName());
- System.out.println(" [-policy <policy>]\tthe balancing policy: "
- + BalancingPolicy.Node.INSTANCE.getName() + " or "
- + BalancingPolicy.Pool.INSTANCE.getName());
- System.out.println(
- " [-threshold <threshold>]\tPercentage of disk capacity");
+ private static void printUsage(PrintStream out) {
+ out.println(USAGE + "\n");
}
}
@@ -1573,6 +1575,10 @@ public class Balancer {
* @param args Command line arguments
*/
public static void main(String[] args) {
+ if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+ System.exit(0);
+ }
+
try {
System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
} catch (Throwable e) {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Aug 15 00:28:41 2012
@@ -46,6 +46,7 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
+import static org.apache.hadoop.util.ExitUtil.terminate;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
@@ -55,6 +56,7 @@ import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
@@ -98,8 +100,8 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
@@ -124,9 +126,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -171,9 +170,9 @@ import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
-import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
/**********************************************************
@@ -230,6 +229,8 @@ public class DataNode extends Configured
static final Log ClientTraceLog =
LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
+
+ private static final String USAGE = "Usage: java DataNode [-rollback | -regular]";
/**
* Use {@link NetUtils#createSocketAddr(String)} instead.
@@ -1541,7 +1542,7 @@ public class DataNode extends Configured
}
if (!parseArguments(args, conf)) {
- printUsage();
+ printUsage(System.err);
return null;
}
Collection<URI> dataDirs = getStorageDirs(conf);
@@ -1655,9 +1656,8 @@ public class DataNode extends Configured
+ xmitsInProgress.get() + "}";
}
- private static void printUsage() {
- System.err.println("Usage: java DataNode");
- System.err.println(" [-rollback]");
+ private static void printUsage(PrintStream out) {
+ out.println(USAGE + "\n");
}
/**
@@ -1742,6 +1742,10 @@ public class DataNode extends Configured
}
public static void main(String args[]) {
+ if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
+ System.exit(0);
+ }
+
secureMain(args, null);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Aug 15 00:28:41 2012
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
+import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
@@ -38,6 +39,8 @@ import org.apache.hadoop.fs.CommonConfig
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Trash;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -69,12 +72,9 @@ import org.apache.hadoop.security.Securi
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.util.ExitUtil.ExitException;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.ExitUtil.ExitException;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -188,6 +188,22 @@ public class NameNode {
DFS_HA_AUTO_FAILOVER_ENABLED_KEY
};
+ private static final String USAGE = "Usage: java NameNode ["
+ + StartupOption.BACKUP.getName() + "] | ["
+ + StartupOption.CHECKPOINT.getName() + "] | ["
+ + StartupOption.FORMAT.getName() + " ["
+ + StartupOption.CLUSTERID.getName() + " cid ] ["
+ + StartupOption.FORCE.getName() + "] ["
+ + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
+ + StartupOption.UPGRADE.getName() + "] | ["
+ + StartupOption.ROLLBACK.getName() + "] | ["
+ + StartupOption.FINALIZE.getName() + "] | ["
+ + StartupOption.IMPORT.getName() + "] | ["
+ + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
+ + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
+ + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
+ + " ] ]";
+
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(ClientProtocol.class.getName())) {
@@ -895,25 +911,8 @@ public class NameNode {
return false;
}
- private static void printUsage() {
- System.err.println(
- "Usage: java NameNode [" +
- StartupOption.BACKUP.getName() + "] | [" +
- StartupOption.CHECKPOINT.getName() + "] | [" +
- StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName() +
- " cid ] [" + StartupOption.FORCE.getName() + "] [" +
- StartupOption.NONINTERACTIVE.getName() + "] ] | [" +
- StartupOption.UPGRADE.getName() + "] | [" +
- StartupOption.ROLLBACK.getName() + "] | [" +
- StartupOption.FINALIZE.getName() + "] | [" +
- StartupOption.IMPORT.getName() + "] | [" +
- StartupOption.INITIALIZESHAREDEDITS.getName() +
- " [" + StartupOption.FORCE.getName() + "] [" +
- StartupOption.NONINTERACTIVE.getName() + "]" +
- "] | [" +
- StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" +
- StartupOption.RECOVER.getName() + " [ " +
- StartupOption.FORCE.getName() + " ] ]");
+ private static void printUsage(PrintStream out) {
+ out.println(USAGE + "\n");
}
private static StartupOption parseArguments(String args[]) {
@@ -1061,7 +1060,7 @@ public class NameNode {
conf = new HdfsConfiguration();
StartupOption startOpt = parseArguments(argv);
if (startOpt == null) {
- printUsage();
+ printUsage(System.err);
return null;
}
setStartupOption(conf, startOpt);
@@ -1175,6 +1174,10 @@ public class NameNode {
/**
*/
public static void main(String argv[]) throws Exception {
+ if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
+ System.exit(0);
+ }
+
try {
StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
NameNode namenode = createNameNode(argv, null);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Wed Aug 15 00:28:41 2012
@@ -562,6 +562,9 @@ public class SecondaryNameNode implement
if (opts == null) {
LOG.fatal("Failed to parse options");
terminate(1);
+ } else if (opts.shouldPrintHelp()) {
+ opts.usage();
+ System.exit(0);
}
StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -595,6 +598,7 @@ public class SecondaryNameNode implement
private final Option geteditsizeOpt;
private final Option checkpointOpt;
private final Option formatOpt;
+ private final Option helpOpt;
Command cmd;
@@ -605,6 +609,7 @@ public class SecondaryNameNode implement
private boolean shouldForce;
private boolean shouldFormat;
+ private boolean shouldPrintHelp;
CommandLineOpts() {
geteditsizeOpt = new Option("geteditsize",
@@ -612,20 +617,32 @@ public class SecondaryNameNode implement
checkpointOpt = OptionBuilder.withArgName("force")
.hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");;
formatOpt = new Option("format", "format the local storage during startup");
+ helpOpt = new Option("h", "help", false, "get help information");
options.addOption(geteditsizeOpt);
options.addOption(checkpointOpt);
options.addOption(formatOpt);
+ options.addOption(helpOpt);
}
public boolean shouldFormat() {
return shouldFormat;
}
+ public boolean shouldPrintHelp() {
+ return shouldPrintHelp;
+ }
+
public void parse(String ... argv) throws ParseException {
CommandLineParser parser = new PosixParser();
CommandLine cmdLine = parser.parse(options, argv);
+ if (cmdLine.hasOption(helpOpt.getOpt())
+ || cmdLine.hasOption(helpOpt.getLongOpt())) {
+ shouldPrintHelp = true;
+ return;
+ }
+
boolean hasGetEdit = cmdLine.hasOption(geteditsizeOpt.getOpt());
boolean hasCheckpoint = cmdLine.hasOption(checkpointOpt.getOpt());
if (hasGetEdit && hasCheckpoint) {
@@ -662,8 +679,13 @@ public class SecondaryNameNode implement
}
void usage() {
+ String header = "The Secondary NameNode is a helper "
+ + "to the primary NameNode. The Secondary is responsible "
+ + "for supporting periodic checkpoints of the HDFS metadata. "
+ + "The current design allows only one Secondary NameNode "
+ + "per HDFS cluster.";
HelpFormatter formatter = new HelpFormatter();
- formatter.printHelp("secondarynamenode", options);
+ formatter.printHelp("secondarynamenode", header, options, "", false);
}
}
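
As a rough standalone illustration of the commons-cli call used in usage() above (the header text is abbreviated here; printHelp(cmdLineSyntax, header, options, footer, autoUsage) is the standard HelpFormatter signature):

    import org.apache.commons.cli.HelpFormatter;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;

    public class UsageDemo {
      public static void main(String[] args) {
        Options options = new Options();
        options.addOption(new Option("h", "help", false, "get help information"));
        String header = "The Secondary NameNode is a helper to the primary NameNode.";
        // Prints "usage: secondarynamenode", then the header, then the option list.
        new HelpFormatter().printHelp("secondarynamenode", header, options, "", false);
      }
    }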
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java Wed Aug 15 00:28:41 2012
@@ -42,6 +42,10 @@ public class DFSHAAdmin extends HAAdmin
protected void setErrOut(PrintStream errOut) {
this.errOut = errOut;
}
+
+ protected void setOut(PrintStream out) {
+ this.out = out;
+ }
@Override
public void setConf(Configuration conf) {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java Wed Aug 15 00:28:41 2012
@@ -162,6 +162,10 @@ public class DFSZKFailoverController ext
public static void main(String args[])
throws Exception {
+ if (DFSUtil.parseHelpArgument(args,
+ ZKFailoverController.USAGE, System.out, true)) {
+ System.exit(0);
+ }
GenericOptionsParser parser = new GenericOptionsParser(
new HdfsConfiguration(), args);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Wed Aug 15 00:28:41 2012
@@ -73,6 +73,25 @@ public class DFSck extends Configured im
HdfsConfiguration.init();
}
+ private static final String USAGE = "Usage: DFSck <path> "
+ + "[-list-corruptfileblocks | "
+ + "[-move | -delete | -openforwrite] "
+ + "[-files [-blocks [-locations | -racks]]]]\n"
+ + "\t<path>\tstart checking from this path\n"
+ + "\t-move\tmove corrupted files to /lost+found\n"
+ + "\t-delete\tdelete corrupted files\n"
+ + "\t-files\tprint out files being checked\n"
+ + "\t-openforwrite\tprint out files opened for write\n"
+ + "\t-list-corruptfileblocks\tprint out list of missing "
+ + "blocks and files they belong to\n"
+ + "\t-blocks\tprint out block report\n"
+ + "\t-locations\tprint out locations for every block\n"
+ + "\t-racks\tprint out network topology for data-node locations\n"
+ + "\t\tBy default fsck ignores files opened for write, "
+ + "use -openforwrite to report such files. They are usually "
+ + " tagged CORRUPT or HEALTHY depending on their block "
+ + "allocation status";
+
private final UserGroupInformation ugi;
private final PrintStream out;
@@ -93,25 +112,9 @@ public class DFSck extends Configured im
/**
* Print fsck usage information
*/
- static void printUsage() {
- System.err.println("Usage: DFSck <path> [-list-corruptfileblocks | " +
- "[-move | -delete | -openforwrite] " +
- "[-files [-blocks [-locations | -racks]]]]");
- System.err.println("\t<path>\tstart checking from this path");
- System.err.println("\t-move\tmove corrupted files to /lost+found");
- System.err.println("\t-delete\tdelete corrupted files");
- System.err.println("\t-files\tprint out files being checked");
- System.err.println("\t-openforwrite\tprint out files opened for write");
- System.err.println("\t-list-corruptfileblocks\tprint out list of missing "
- + "blocks and files they belong to");
- System.err.println("\t-blocks\tprint out block report");
- System.err.println("\t-locations\tprint out locations for every block");
- System.err.println("\t-racks\tprint out network topology for data-node locations");
- System.err.println("\t\tBy default fsck ignores files opened for write, " +
- "use -openforwrite to report such files. They are usually " +
- " tagged CORRUPT or HEALTHY depending on their block " +
- "allocation status");
- ToolRunner.printGenericCommandUsage(System.err);
+ static void printUsage(PrintStream out) {
+ out.println(USAGE + "\n");
+ ToolRunner.printGenericCommandUsage(out);
}
/**
* @param args
@@ -119,7 +122,7 @@ public class DFSck extends Configured im
@Override
public int run(final String[] args) throws IOException {
if (args.length == 0) {
- printUsage();
+ printUsage(System.err);
return -1;
}
@@ -258,12 +261,12 @@ public class DFSck extends Configured im
} else {
System.err.println("fsck: can only operate on one path at a time '"
+ args[idx] + "'");
- printUsage();
+ printUsage(System.err);
return -1;
}
} else {
System.err.println("fsck: Illegal option '" + args[idx] + "'");
- printUsage();
+ printUsage(System.err);
return -1;
}
}
@@ -304,10 +307,14 @@ public class DFSck extends Configured im
// -files option is also used by GenericOptionsParser
// Make sure that is not the first argument for fsck
int res = -1;
- if ((args.length == 0 ) || ("-files".equals(args[0])))
- printUsage();
- else
+ if ((args.length == 0) || ("-files".equals(args[0]))) {
+ printUsage(System.err);
+ ToolRunner.printGenericCommandUsage(System.err);
+ } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+ res = 0;
+ } else {
res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
+ }
System.exit(res);
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Wed Aug 15 00:28:41 2012
@@ -40,7 +40,6 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.HftpFileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -48,9 +47,7 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
-import org.apache.hadoop.hdfs.web.URLUtils;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
@@ -71,8 +68,10 @@ public class DelegationTokenFetcher {
private static final String CANCEL = "cancel";
private static final String RENEW = "renew";
private static final String PRINT = "print";
+ private static final String HELP = "help";
+ private static final String HELP_SHORT = "h";
- private static void printUsage(PrintStream err) throws IOException {
+ private static void printUsage(PrintStream err) {
err.println("fetchdt retrieves delegation tokens from the NameNode");
err.println();
err.println("fetchdt <opts> <token file>");
@@ -107,6 +106,7 @@ public class DelegationTokenFetcher {
fetcherOptions.addOption(CANCEL, false, "cancel the token");
fetcherOptions.addOption(RENEW, false, "renew the token");
fetcherOptions.addOption(PRINT, false, "print the token");
+ fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
GenericOptionsParser parser = new GenericOptionsParser(conf,
fetcherOptions, args);
CommandLine cmd = parser.getCommandLine();
@@ -119,9 +119,14 @@ public class DelegationTokenFetcher {
final boolean cancel = cmd.hasOption(CANCEL);
final boolean renew = cmd.hasOption(RENEW);
final boolean print = cmd.hasOption(PRINT);
+ final boolean help = cmd.hasOption(HELP);
String[] remaining = parser.getRemainingArgs();
// check option validity
+ if (help) {
+ printUsage(System.out);
+ System.exit(0);
+ }
if (cancel && renew || cancel && print || renew && print || cancel && renew
&& print) {
System.err.println("ERROR: Only specify cancel, renew or print.");
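
fetchdt checks cmd.hasOption(HELP) with the long name only; in commons-cli an option registered with both a short and a long name matches under either, so this covers -h as well. A minimal standalone sketch of that pattern (the option strings mirror the diff; the class is illustrative and uses PosixParser directly rather than GenericOptionsParser):

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;
    import org.apache.commons.cli.PosixParser;

    public class FetchdtHelpSketch {
      public static void main(String[] args) throws ParseException {
        Options fetcherOptions = new Options();
        fetcherOptions.addOption("h", "help", false, "print out help information");
        CommandLine cmd = new PosixParser().parse(fetcherOptions, args);
        // Matches whether the user typed -h or -help, since both names
        // belong to the same registered Option.
        if (cmd.hasOption("help")) {
          System.out.println("fetchdt <opts> <token file>");
          System.exit(0);
        }
      }
    }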
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java Wed Aug 15 00:28:41 2012
@@ -324,6 +324,10 @@ public class GetConf extends Configured
}
public static void main(String[] args) throws Exception {
+ if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+ System.exit(0);
+ }
+
int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
System.exit(res);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java Wed Aug 15 00:28:41 2012
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -43,6 +44,8 @@ import org.apache.hadoop.util.ToolRunner
public class GetGroups extends GetGroupsBase {
private static final Log LOG = LogFactory.getLog(GetGroups.class);
+
+ static final String USAGE = "Usage: hdfs groups [username ...]";
static{
HdfsConfiguration.init();
@@ -86,6 +89,10 @@ public class GetGroups extends GetGroups
}
public static void main(String[] argv) throws Exception {
+ if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
+ System.exit(0);
+ }
+
int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv);
System.exit(res);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java?rev=1373173&r1=1373172&r2=1373173&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java Wed Aug 15 00:28:41 2012
@@ -55,7 +55,9 @@ public class TestDFSHAAdmin {
private DFSHAAdmin tool;
private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+ private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
private String errOutput;
+ private String output;
private HAServiceProtocol mockProtocol;
private ZKFCProtocol mockZkfcProtocol;
@@ -111,12 +113,14 @@ public class TestDFSHAAdmin {
};
tool.setConf(getHAConf());
tool.setErrOut(new PrintStream(errOutBytes));
+ tool.setOut(new PrintStream(outBytes));
}
private void assertOutputContains(String string) {
- if (!errOutput.contains(string)) {
- fail("Expected output to contain '" + string + "' but was:\n" +
- errOutput);
+ if (!errOutput.contains(string) && !output.contains(string)) {
+ fail("Expected output to contain '" + string +
+ "' but err_output was:\n" + errOutput +
+ "\n and output was: \n" + output);
}
}
@@ -143,7 +147,7 @@ public class TestDFSHAAdmin {
@Test
public void testHelp() throws Exception {
- assertEquals(-1, runTool("-help"));
+ assertEquals(0, runTool("-help"));
assertEquals(0, runTool("-help", "transitionToActive"));
assertOutputContains("Transitions the service into Active");
}
@@ -378,10 +382,12 @@ public class TestDFSHAAdmin {
private Object runTool(String ... args) throws Exception {
errOutBytes.reset();
+ outBytes.reset();
LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
int ret = tool.run(args);
errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
- LOG.info("Output:\n" + errOutput);
+ output = new String(outBytes.toByteArray(), Charsets.UTF_8);
+ LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
return ret;
}