You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2011/06/07 01:11:10 UTC
svn commit: r1132829 - in /hadoop/hdfs/trunk: CHANGES.txt
bin/refresh-namenodes.sh src/java/org/apache/hadoop/hdfs/tools/GetConf.java
src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java
Author: suresh
Date: Mon Jun 6 23:11:09 2011
New Revision: 1132829
URL: http://svn.apache.org/viewvc?rev=1132829&view=rev
Log:
HDFS-1998. Federation: Make refresh-namenodes.sh refresh all the namenodes. Contributed by Tanping Wang.
Modified:
hadoop/hdfs/trunk/CHANGES.txt
hadoop/hdfs/trunk/bin/refresh-namenodes.sh
hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/GetConf.java
hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java
Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1132829&r1=1132828&r2=1132829&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon Jun 6 23:11:09 2011
@@ -258,7 +258,7 @@ Trunk (unreleased changes)
HDFS-1813. Federation: Authentication using BlockToken in RPC to datanode
fails. (jitendra)
- HDFS_1630. Support fsedits checksum. (hairong)
+ HDFS-1630. Support fsedits checksum. (hairong)
HDFS-1606. Provide a stronger data guarantee in the write pipeline by
adding a new datanode when an existing datanode failed. (szetszwo)
@@ -693,6 +693,9 @@ Trunk (unreleased changes)
HDFS-1149. Lease reassignment should be persisted to the edit log.
(Aaron T. Myers via todd)
+ HDFS-1998. Federation: Make refresh-namenodes.sh refresh all the
+ namenode. (Tanping Wang via suresh)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
Modified: hadoop/hdfs/trunk/bin/refresh-namenodes.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/bin/refresh-namenodes.sh?rev=1132829&r1=1132828&r2=1132829&view=diff
==============================================================================
--- hadoop/hdfs/trunk/bin/refresh-namenodes.sh (original)
+++ hadoop/hdfs/trunk/bin/refresh-namenodes.sh Mon Jun 6 23:11:09 2011
@@ -29,19 +29,21 @@ else
. "$bin/hdfs-config.sh"
fi
-namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
-
-for namenode in $namenodes ; do
- echo "Refreshing namenode [$namenode]"
- "$HADOOP_PREFIX/bin/hdfs" dfsadmin -refreshNodes
- if [ "$?" != '0' ] ; then errorFlag='1' ; fi
-done
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -nnRpcAddresses)
+if [ "$?" != '0' ] ; then errorFlag='1' ;
+else
+ for namenode in $namenodes ; do
+ echo "Refreshing namenode [$namenode]"
+ "$HADOOP_PREFIX/bin/hdfs" dfsadmin -fs hdfs://$namenode -refreshNodes
+ if [ "$?" != '0' ] ; then errorFlag='1' ; fi
+ done
+fi
if [ "$errorFlag" = '1' ] ; then
echo "Error: refresh of namenodes failed, see error messages above."
exit 1
else
- echo "Refresh of all namenodes succeeded."
+ echo "Refresh of namenodes done."
fi
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/GetConf.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/GetConf.java?rev=1132829&r1=1132828&r2=1132829&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/GetConf.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/GetConf.java Mon Jun 6 23:11:09 2011
@@ -67,7 +67,10 @@ public class GetConf extends Configured
EXCLUDE_FILE("-excludeFile",
new CommandHandler("DFSConfigKeys.DFS_HOSTS_EXCLUDE"),
"gets the exclude file path that defines the datanodes " +
- "that need to decommissioned.");
+ "that need to decommissioned."),
+ NNRPCADDRESSES("-nnRpcAddresses",
+ new NNRpcAddressesCommandHandler(),
+ "gets the namenode rpc addresses");
private final String cmd;
private final CommandHandler handler;
@@ -179,6 +182,27 @@ public class GetConf extends Configured
}
}
+ /**
+ * Handler for {@link Command#NNRPCADDRESSES}
+ * If rpc addresses are defined in configuration, we return them. Otherwise,
+ * return empty string.
+ */
+ static class NNRpcAddressesCommandHandler extends CommandHandler {
+ @Override
+ public int doWorkInternal(GetConf tool) throws IOException {
+ Configuration config = tool.getConf();
+ List<InetSocketAddress> rpclist = DFSUtil.getNNServiceRpcAddresses(config);
+ if (rpclist != null) {
+ for (InetSocketAddress rpc : rpclist) {
+ tool.printOut(rpc.getHostName()+":"+rpc.getPort());
+ }
+ return 0;
+ }
+ tool.printError("Did not get namenode service rpc addresses.");
+ return -1;
+ }
+ }
+
private final PrintStream out; // Stream for printing command output
private final PrintStream err; // Stream for printing error
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java?rev=1132829&r1=1132828&r2=1132829&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java Mon Jun 6 23:11:09 2011
@@ -45,7 +45,7 @@ import org.junit.Test;
*/
public class TestGetConf {
enum TestType {
- NAMENODE, BACKUP, SECONDARY
+ NAMENODE, BACKUP, SECONDARY, NNRPCADDRESSES
}
/** Setup federation nameServiceIds in the configuration */
@@ -103,6 +103,8 @@ public class TestGetConf {
return DFSUtil.getBackupNodeAddresses(conf);
case SECONDARY:
return DFSUtil.getSecondaryNameNodeAddresses(conf);
+ case NNRPCADDRESSES:
+ return DFSUtil.getNNServiceRpcAddresses(conf);
}
return null;
}
@@ -140,6 +142,9 @@ public class TestGetConf {
case SECONDARY:
args[0] = Command.SECONDARY.getName();
break;
+ case NNRPCADDRESSES:
+ args[0] = Command.NNRPCADDRESSES.getName();
+ break;
}
return runTool(conf, args, success);
}
@@ -147,9 +152,16 @@ public class TestGetConf {
/**
* Using {@link GetConf} methods get the list of given {@code type} of
* addresses
+ *
+ * @param type, TestType
+ * @param conf, configuration
+ * @param checkPort, If checkPort is true, verify NNPRCADDRESSES whose
+ * expected value is hostname:rpc-port. If checkPort is false, the
+ * expected is hostname only.
+ * @param expected, expected addresses
*/
private void getAddressListFromTool(TestType type, HdfsConfiguration conf,
- List<InetSocketAddress> expected) throws Exception {
+ boolean checkPort, List<InetSocketAddress> expected) throws Exception {
String out = getAddressListFromTool(type, conf, expected.size() != 0);
List<String> values = new ArrayList<String>();
@@ -165,15 +177,19 @@ public class TestGetConf {
int i = 0;
String[] expectedHosts = new String[expected.size()];
for (InetSocketAddress addr : expected) {
- expectedHosts[i++] = addr.getHostName();
+ if (!checkPort) {
+ expectedHosts[i++] = addr.getHostName();
+ }else {
+ expectedHosts[i++] = addr.getHostName()+":"+addr.getPort();
+ }
}
// Compare two arrays
assertTrue(Arrays.equals(expectedHosts, actual));
}
-
+
private void verifyAddresses(HdfsConfiguration conf, TestType type,
- String... expected) throws Exception {
+ boolean checkPort, String... expected) throws Exception {
// Ensure DFSUtil returned the right set of addresses
List<InetSocketAddress> list = getAddressListFromConf(type, conf);
String[] actual = toStringArray(list);
@@ -182,7 +198,7 @@ public class TestGetConf {
assertTrue(Arrays.equals(expected, actual));
// Test GetConf returned addresses
- getAddressListFromTool(type, conf, list);
+ getAddressListFromTool(type, conf, checkPort, list);
}
private static String getNameServiceId(int index) {
@@ -199,6 +215,7 @@ public class TestGetConf {
getAddressListFromTool(TestType.NAMENODE, conf, false);
System.out.println(getAddressListFromTool(TestType.BACKUP, conf, false));
getAddressListFromTool(TestType.SECONDARY, conf, false);
+ getAddressListFromTool(TestType.NNRPCADDRESSES, conf, false);
for (Command cmd : Command.values()) {
CommandHandler handler = Command.getHandler(cmd.getName());
if (handler.key != null) {
@@ -230,26 +247,29 @@ public class TestGetConf {
// Returned namenode address should match default address
conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:1000");
- verifyAddresses(conf, TestType.NAMENODE, "localhost:1000");
+ verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000");
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000");
// Returned address should match backupnode RPC address
- conf.set(DFS_NAMENODE_BACKUP_ADDRESS_KEY, "localhost:1001");
- verifyAddresses(conf, TestType.BACKUP, "localhost:1001");
+ conf.set(DFS_NAMENODE_BACKUP_ADDRESS_KEY,"localhost:1001");
+ verifyAddresses(conf, TestType.BACKUP, false, "localhost:1001");
// Returned address should match secondary http address
conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "localhost:1002");
- verifyAddresses(conf, TestType.SECONDARY, "localhost:1002");
+ verifyAddresses(conf, TestType.SECONDARY, false, "localhost:1002");
// Returned namenode address should match service RPC address
conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:1000");
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
- verifyAddresses(conf, TestType.NAMENODE, "localhost:1000");
+ verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000");
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000");
// Returned address should match RPC address
conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
- verifyAddresses(conf, TestType.NAMENODE, "localhost:1001");
+ verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1001");
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1001");
}
/**
@@ -272,13 +292,15 @@ public class TestGetConf {
DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000);
String[] secondaryAddresses = setupAddress(conf,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000);
- verifyAddresses(conf, TestType.NAMENODE, nnAddresses);
- verifyAddresses(conf, TestType.BACKUP, backupAddresses);
- verifyAddresses(conf, TestType.SECONDARY, secondaryAddresses);
-
- // Test to ensure namenode, backup and secondary namenode addresses are
- // returned from federation configuration. Returned namenode addresses are
- // based on regular RPC address in the absence of service RPC address
+ verifyAddresses(conf, TestType.NAMENODE, false, nnAddresses);
+ verifyAddresses(conf, TestType.BACKUP, false, backupAddresses);
+ verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
+
+ // Test to ensure namenode, backup, secondary namenode addresses and
+ // namenode rpc addresses are returned from federation configuration.
+ // Returned namenode addresses are based on regular RPC address
+ // in the absence of service RPC address.
conf = new HdfsConfiguration(false);
setupNameServices(conf, nsCount);
nnAddresses = setupAddress(conf,
@@ -287,14 +309,15 @@ public class TestGetConf {
DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000);
secondaryAddresses = setupAddress(conf,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000);
- verifyAddresses(conf, TestType.NAMENODE, nnAddresses);
- verifyAddresses(conf, TestType.BACKUP, backupAddresses);
- verifyAddresses(conf, TestType.SECONDARY, secondaryAddresses);
+ verifyAddresses(conf, TestType.NAMENODE, false, nnAddresses);
+ verifyAddresses(conf, TestType.BACKUP, false, backupAddresses);
+ verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
}
/**
- * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP}
- * and {@link Command#SECONDARY}
+ * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP},
+ * {@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES}
*/
public void testTool() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration(false);