You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2006/12/18 21:05:41 UTC
svn commit: r488401 - in /lucene/hadoop/trunk: CHANGES.txt
src/java/org/apache/hadoop/dfs/DFSck.java
src/java/org/apache/hadoop/dfs/DataNode.java
src/java/org/apache/hadoop/dfs/FSNamesystem.java
src/java/org/apache/hadoop/dfs/NameNode.java
Author: cutting
Date: Mon Dec 18 12:05:40 2006
New Revision: 488401
URL: http://svn.apache.org/viewvc?view=rev&rev=488401
Log:
HADOOP-825. Fix HDFS daemons when configured with new URI syntax.
Modified:
lucene/hadoop/trunk/CHANGES.txt
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=488401&r1=488400&r2=488401
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Mon Dec 18 12:05:40 2006
@@ -105,6 +105,9 @@
sorts which take longer than the task timeout do not fail.
(Devaraj Das via cutting)
+30. HADOOP-825. Fix HDFS daemons when configured with new URI syntax.
+ (omalley via cutting)
+
Release 0.9.2 - 2006-12-15
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java?view=diff&rev=488401&r1=488400&r2=488401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java Mon Dec 18 12:05:40 2006
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
+import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
@@ -68,13 +69,10 @@
}
private String getInfoServer() throws IOException {
- String fsName = conf.get("fs.default.name", "local");
- if (fsName.equals("local")) {
- throw new IOException("This tool only checks DFS, but your config uses 'local' FS.");
- }
- String[] splits = fsName.split(":", 2);
+ InetSocketAddress addr =
+ DataNode.createSocketAddr(conf.get("fs.default.name"));
int infoPort = conf.getInt("dfs.info.port", 50070);
- return splits[0]+":"+infoPort;
+ return addr.getHostName() + ":" + infoPort;
}
/**
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?view=diff&rev=488401&r1=488400&r2=488401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Mon Dec 18 12:05:40 2006
@@ -19,6 +19,7 @@
import org.apache.commons.logging.*;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.metrics.Metrics;
@@ -75,18 +76,30 @@
//
/**
- * Util method to build socket addr from string
+ * Util method to build socket addr from either:
+ * <host>:<port>
+ * <fs>://<host>:<port>/<path>
*/
- public static InetSocketAddress createSocketAddr(String s) throws IOException {
- String target = s;
+ public static InetSocketAddress createSocketAddr(String target
+ ) throws IOException {
int colonIndex = target.indexOf(':');
if (colonIndex < 0) {
- throw new RuntimeException("Not a host:port pair: " + s);
+ throw new RuntimeException("Not a host:port pair: " + target);
+ }
+ String hostname;
+ int port;
+ if (!target.contains("/")) {
+ // must be the old style <host>:<port>
+ hostname = target.substring(0, colonIndex);
+ port = Integer.parseInt(target.substring(colonIndex + 1));
+ } else {
+ // a new uri
+ URI addr = new Path(target).toUri();
+ hostname = addr.getHost();
+ port = addr.getPort();
}
- String host = target.substring(0, colonIndex);
- int port = Integer.parseInt(target.substring(colonIndex + 1));
- return new InetSocketAddress(host, port);
+ return new InetSocketAddress(hostname, port);
}
DatanodeProtocol namenode;
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=488401&r1=488400&r2=488401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Mon Dec 18 12:05:40 2006
@@ -194,9 +194,11 @@
* dirs is a list of directories where the filesystem directory state
* is stored
*/
- public FSNamesystem(File[] dirs, NameNode nn, Configuration conf) throws IOException {
+ public FSNamesystem(File[] dirs,
+ String hostname,
+ int port,
+ NameNode nn, Configuration conf) throws IOException {
fsNamesystemObject = this;
- InetSocketAddress addr = DataNode.createSocketAddr(conf.get("fs.default.name", "local"));
this.maxReplication = conf.getInt("dfs.replication.max", 512);
this.minReplication = conf.getInt("dfs.replication.min", 1);
if( minReplication <= 0 )
@@ -220,8 +222,8 @@
this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
10 * heartbeatInterval;
- this.localMachine = addr.getHostName();
- this.port = addr.getPort();
+ this.localMachine = hostname;
+ this.port = port;
this.dir = new FSDirectory(dirs);
this.dir.loadFSImage( conf );
this.safeMode = new SafeModeInfo( conf );
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?view=diff&rev=488401&r1=488400&r2=488401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Mon Dec 18 12:05:40 2006
@@ -26,6 +26,7 @@
import org.apache.hadoop.util.StringUtils;
import java.io.*;
+import java.net.*;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.Metrics;
@@ -129,25 +130,38 @@
}
}
- private NameNodeMetrics myMetrics = null;
+ private NameNodeMetrics myMetrics = new NameNodeMetrics();
+
+ /**
+ * Initialize the server
+ * @param dirs the list of working directories
+ * @param hostname which hostname to bind to
+ * @param port the port number to bind to
+ * @param conf the configuration
+ */
+ private void init(File[] dirs, String hostname, int port,
+ Configuration conf) throws IOException {
+ this.namesystem = new FSNamesystem(dirs, hostname, port, this, conf);
+ this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
+ this.server = RPC.getServer(this, hostname, port, handlerCount,
+ false, conf);
+ this.server.start();
+ }
/**
* Create a NameNode at the default location
*/
public NameNode(Configuration conf) throws IOException {
- this(getDirs(conf),DataNode.createSocketAddr(conf.get("fs.default.name", "local")).getHostName(),
- DataNode.createSocketAddr(conf.get("fs.default.name", "local")).getPort(), conf);
+ InetSocketAddress addr =
+ DataNode.createSocketAddr(conf.get("fs.default.name"));
+ init(getDirs(conf), addr.getHostName(), addr.getPort(), conf);
}
/**
* Create a NameNode at the specified location and start it.
*/
public NameNode(File[] dirs, String bindAddress, int port, Configuration conf) throws IOException {
- this.namesystem = new FSNamesystem(dirs, this, conf);
- this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
- this.server = RPC.getServer(this, bindAddress, port, handlerCount, false, conf);
- this.server.start();
- myMetrics = new NameNodeMetrics();
+ init(dirs, bindAddress, port, conf);
}
/** Return the configured directories where name data is stored. */