Posted to hdfs-commits@hadoop.apache.org by at...@apache.org on 2012/02/26 05:57:23 UTC
svn commit: r1293742 - in
/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/
src/main/java/org/apache/hadoop/hdf...
Author: atm
Date: Sun Feb 26 04:57:17 2012
New Revision: 1293742
URL: http://svn.apache.org/viewvc?rev=1293742&view=rev
Log:
Merge trunk into HA branch.
Modified:
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sun Feb 26 04:57:17 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1152502-1293274
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1152502-1293741
/hadoop/core/branches/branch-0.19/hdfs:713112
/hadoop/hdfs/branches/HDFS-1052:987665-1095512
/hadoop/hdfs/branches/HDFS-265:796829-820463
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Feb 26 04:57:17 2012
@@ -1,10 +1,13 @@
Hadoop HDFS Change Log
Trunk (unreleased changes)
+
INCOMPATIBLE CHANGES
+
HDFS-2676. Remove Avro RPC. (suresh)
NEW FEATURES
+
HDFS-395. DFS Scalability: Incremental block reports. (Tomasz Nykiel
via hairong)
@@ -132,8 +135,17 @@ Trunk (unreleased changes)
HDFS-2878. Fix TestBlockRecovery and move it back into main test directory.
(todd)
- HDFS-2655. BlockReaderLocal#skip performs unnecessary IO. (Brandon Li
- via jitendra)
+ HDFS-2655. BlockReaderLocal#skip performs unnecessary IO.
+ (Brandon Li via jitendra)
+
+ HDFS-3003. Remove getHostPortString() from NameNode, replace it with
+ NetUtils.getHostPortString(). (Brandon Li via atm)
+
+ HDFS-3009. Remove duplicate code in DFSClient#isLocalAddress by using
+ NetUtils. (Hari Mankude via suresh)
+
+ HDFS-3002. TestNameNodeMetrics need not wait for metrics update.
+ (suresh)
OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the
@@ -212,6 +224,20 @@ Trunk (unreleased changes)
HDFS-2968. Protocol translator for BlockRecoveryCommand broken when
multiple blocks need recovery. (todd)
+Release 0.23.3 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -274,6 +300,11 @@ Release 0.23.2 - UNRELEASED
dfs.client.block.write.replace-datanode-on-failure.enable should be true.
(szetszwo)
+ HDFS-3008. Negative caching of local addrs doesn't work. (eli)
+
+ HDFS-3006. In WebHDFS, when the return body is empty, set the Content-Type
+ to application/octet-stream instead of application/json. (szetszwo)
+
Release 0.23.1 - 2012-02-17
INCOMPATIBLE CHANGES
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sun Feb 26 04:57:17 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1159757-1293274
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1159757-1293741
/hadoop/core/branches/branch-0.19/hdfs/src/java:713112
/hadoop/core/branches/branch-0.19/hdfs/src/main/java:713112
/hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Sun Feb 26 04:57:17 2012
@@ -576,26 +576,19 @@ public class DFSClient implements java.i
private static boolean isLocalAddress(InetSocketAddress targetAddr) {
InetAddress addr = targetAddr.getAddress();
Boolean cached = localAddrMap.get(addr.getHostAddress());
- if (cached != null && cached) {
+ if (cached != null) {
if (LOG.isTraceEnabled()) {
- LOG.trace("Address " + targetAddr + " is local");
+ LOG.trace("Address " + targetAddr +
+ (cached ? " is local" : " is not local"));
}
- return true;
+ return cached;
}
+
+ boolean local = NetUtils.isLocalAddress(addr);
- // Check if the address is any local or loop back
- boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
-
- // Check if the address is defined on any interface
- if (!local) {
- try {
- local = NetworkInterface.getByInetAddress(addr) != null;
- } catch (SocketException e) {
- local = false;
- }
- }
if (LOG.isTraceEnabled()) {
- LOG.trace("Address " + targetAddr + " is local");
+ LOG.trace("Address " + targetAddr +
+ (local ? " is local" : " is not local"));
}
localAddrMap.put(addr.getHostAddress(), local);
return local;
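
For reference, once the hunk above is applied, DFSClient#isLocalAddress reads roughly as follows (a sketch reconstructed from the diff context and added lines, not a verbatim copy of the file):

  private static boolean isLocalAddress(InetSocketAddress targetAddr) {
    InetAddress addr = targetAddr.getAddress();
    Boolean cached = localAddrMap.get(addr.getHostAddress());
    if (cached != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("Address " + targetAddr +
            (cached ? " is local" : " is not local"));
      }
      return cached;
    }

    // HDFS-3009: the any-local/loopback and interface checks now live in
    // NetUtils.isLocalAddress() instead of being duplicated here.
    boolean local = NetUtils.isLocalAddress(addr);

    if (LOG.isTraceEnabled()) {
      LOG.trace("Address " + targetAddr +
          (local ? " is local" : " is not local"));
    }
    localAddrMap.put(addr.getHostAddress(), local);
    return local;
  }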
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Sun Feb 26 04:57:17 2012
@@ -519,7 +519,7 @@ public class DFSUtil {
// Use default address as fall back
String defaultAddress;
try {
- defaultAddress = NameNode.getHostPortString(NameNode.getAddress(conf));
+ defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
} catch (IllegalArgumentException e) {
defaultAddress = null;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Sun Feb 26 04:57:17 2012
@@ -117,7 +117,7 @@ public class DatanodeWebHdfsMethods {
@PUT
@Path("/")
@Consumes({"*/*"})
- @Produces({MediaType.APPLICATION_JSON})
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response putRoot(
final InputStream in,
@Context final UserGroupInformation ugi,
@@ -147,7 +147,7 @@ public class DatanodeWebHdfsMethods {
@PUT
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({"*/*"})
- @Produces({MediaType.APPLICATION_JSON})
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response put(
final InputStream in,
@Context final UserGroupInformation ugi,
@@ -209,7 +209,7 @@ public class DatanodeWebHdfsMethods {
final InetSocketAddress nnHttpAddr = NameNode.getHttpAddress(conf);
final URI uri = new URI(WebHdfsFileSystem.SCHEME, null,
nnHttpAddr.getHostName(), nnHttpAddr.getPort(), fullpath, null, null);
- return Response.created(uri).type(MediaType.APPLICATION_JSON).build();
+ return Response.created(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
@@ -222,7 +222,7 @@ public class DatanodeWebHdfsMethods {
@POST
@Path("/")
@Consumes({"*/*"})
- @Produces({MediaType.APPLICATION_JSON})
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response postRoot(
final InputStream in,
@Context final UserGroupInformation ugi,
@@ -243,7 +243,7 @@ public class DatanodeWebHdfsMethods {
@POST
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({"*/*"})
- @Produces({MediaType.APPLICATION_JSON})
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response post(
final InputStream in,
@Context final UserGroupInformation ugi,
@@ -287,7 +287,7 @@ public class DatanodeWebHdfsMethods {
IOUtils.cleanup(LOG, out);
IOUtils.cleanup(LOG, dfsclient);
}
- return Response.ok().type(MediaType.APPLICATION_JSON).build();
+ return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Sun Feb 26 04:57:17 2012
@@ -108,13 +108,13 @@ public class BackupNode extends NameNode
@Override // NameNode
protected void setRpcServerAddress(Configuration conf,
InetSocketAddress addr) {
- conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(addr));
+ conf.set(BN_ADDRESS_NAME_KEY, NetUtils.getHostPortString(addr));
}
@Override // Namenode
protected void setRpcServiceServerAddress(Configuration conf,
InetSocketAddress addr) {
- conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(addr));
+ conf.set(BN_SERVICE_RPC_ADDRESS_KEY, NetUtils.getHostPortString(addr));
}
@Override // NameNode
@@ -126,7 +126,7 @@ public class BackupNode extends NameNode
@Override // NameNode
protected void setHttpServerAddress(Configuration conf){
- conf.set(BN_HTTP_ADDRESS_NAME_KEY, getHostPortString(getHttpAddress()));
+ conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress()));
}
@Override // NameNode
@@ -287,8 +287,8 @@ public class BackupNode extends NameNode
InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
this.namenode = new NamenodeProtocolTranslatorPB(nnAddress, conf,
UserGroupInformation.getCurrentUser());
- this.nnRpcAddress = getHostPortString(nnAddress);
- this.nnHttpAddress = getHostPortString(super.getHttpServerAddress(conf));
+ this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
+ this.nnHttpAddress = NetUtils.getHostPortString(super.getHttpServerAddress(conf));
// get version and id info from the name-node
NamespaceInfo nsInfo = null;
while(!isStopRequested()) {
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sun Feb 26 04:57:17 2012
@@ -153,6 +153,8 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Util;
@@ -5125,6 +5127,30 @@ public class FSNamesystem implements Nam
public String getBlockPoolId() {
return blockPoolId;
}
+
+ @Override // NameNodeMXBean
+ public String getNameDirStatuses() {
+ Map<String, Map<File, StorageDirType>> statusMap =
+ new HashMap<String, Map<File, StorageDirType>>();
+
+ Map<File, StorageDirType> activeDirs = new HashMap<File, StorageDirType>();
+ for (Iterator<StorageDirectory> it
+ = getFSImage().getStorage().dirIterator(); it.hasNext();) {
+ StorageDirectory st = it.next();
+ activeDirs.put(st.getRoot(), st.getStorageDirType());
+ }
+ statusMap.put("active", activeDirs);
+
+ List<Storage.StorageDirectory> removedStorageDirs
+ = getFSImage().getStorage().getRemovedStorageDirs();
+ Map<File, StorageDirType> failedDirs = new HashMap<File, StorageDirType>();
+ for (StorageDirectory st : removedStorageDirs) {
+ failedDirs.put(st.getRoot(), st.getStorageDirType());
+ }
+ statusMap.put("failed", failedDirs);
+
+ return JSON.toString(statusMap);
+ }
/** @return the block manager. */
public BlockManager getBlockManager() {
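
The new attribute is surfaced through the NameNodeMXBean, so it can be read with a plain JMX attribute lookup. A minimal in-process sketch, mirroring what the TestNameNodeMXBean change later in this commit does; the MBean name and the surrounding class are assumptions here, not part of this patch, and the code must run inside the same JVM as the NameNode (e.g. a test using MiniDFSCluster):

  import java.lang.management.ManagementFactory;
  import javax.management.MBeanServer;
  import javax.management.ObjectName;

  public class NameDirStatusProbe {
    public static void main(String[] args) throws Exception {
      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      // Assumed name of the NameNode info bean.
      ObjectName mxbeanName =
          new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      // HDFS-2978: returns a JSON string of the form
      // {"active":{<dir>:<type>, ...},"failed":{...}}
      String statuses = (String) mbs.getAttribute(mxbeanName, "NameDirStatuses");
      System.out.println(statuses);
    }
  }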
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java Sun Feb 26 04:57:17 2012
@@ -71,7 +71,7 @@ public class FileChecksumServlets {
String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
}
- String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+ String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
return new URL(scheme, hostname, port,
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Sun Feb 26 04:57:17 2012
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
@@ -72,7 +73,7 @@ public class FileDataServlet extends Dfs
// Add namenode address to the url params
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
getServletContext());
- String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+ String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
return new URL(scheme, hostname, port,
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Sun Feb 26 04:57:17 2012
@@ -294,13 +294,6 @@ public class NameNode {
+ namenode.getHostName()+portString);
}
- /**
- * Compose a "host:port" string from the address.
- */
- public static String getHostPortString(InetSocketAddress addr) {
- return addr.getHostName() + ":" + addr.getPort();
- }
-
//
// Common NameNode methods implementation for the active name-node role.
//
@@ -329,7 +322,7 @@ public class NameNode {
*/
protected void setRpcServiceServerAddress(Configuration conf,
InetSocketAddress serviceRPCAddress) {
- setServiceAddress(conf, getHostPortString(serviceRPCAddress));
+ setServiceAddress(conf, NetUtils.getHostPortString(serviceRPCAddress));
}
protected void setRpcServerAddress(Configuration conf,
@@ -349,7 +342,7 @@ public class NameNode {
protected void setHttpServerAddress(Configuration conf) {
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
- getHostPortString(getHttpAddress()));
+ NetUtils.getHostPortString(getHttpAddress()));
}
protected void loadNamesystem(Configuration conf) throws IOException {
@@ -362,8 +355,8 @@ public class NameNode {
NamenodeRegistration setRegistration() {
nodeRegistration = new NamenodeRegistration(
- getHostPortString(rpcServer.getRpcAddress()),
- getHostPortString(getHttpAddress()),
+ NetUtils.getHostPortString(rpcServer.getRpcAddress()),
+ NetUtils.getHostPortString(getHttpAddress()),
getFSImage().getStorage(), getRole());
return nodeRegistration;
}
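
Callers of the removed NameNode.getHostPortString() migrate to the equivalent helper in org.apache.hadoop.net.NetUtils, as the test and tool hunks below show. A minimal before/after sketch (class and variable names are illustrative only):

  import java.net.InetSocketAddress;
  import org.apache.hadoop.net.NetUtils;

  public class HostPortStringExample {
    public static void main(String[] args) {
      InetSocketAddress rpcAddr = new InetSocketAddress("localhost", 8020);
      // Before this change: NameNode.getHostPortString(rpcAddr)
      // After HDFS-3003 the helper lives in NetUtils:
      System.out.println(NetUtils.getHostPortString(rpcAddr)); // prints localhost:8020
    }
  }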
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java Sun Feb 26 04:57:17 2012
@@ -166,4 +166,12 @@ public interface NameNodeMXBean {
* @return the block pool id
*/
public String getBlockPoolId();
+
+ /**
+ * Get status information about the directories storing image and edits logs
+ * of the NN.
+ *
+ * @return the name dir status information, as a JSON string.
+ */
+ public String getNameDirStatuses();
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Sun Feb 26 04:57:17 2012
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -409,7 +410,7 @@ class NamenodeJspHelper {
nodeToRedirect = nn.getHttpAddress().getHostName();
redirectPort = nn.getHttpAddress().getPort();
}
- String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+ String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
redirectLocation = "http://" + fqdn + ":" + redirectPort
+ "/browseDirectory.jsp?namenodeInfoPort="
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Sun Feb 26 04:57:17 2012
@@ -215,7 +215,7 @@ public class NamenodeWebHdfsMethods {
@PUT
@Path("/")
@Consumes({"*/*"})
- @Produces({MediaType.APPLICATION_JSON})
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response putRoot(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -263,7 +263,7 @@ public class NamenodeWebHdfsMethods {
@PUT
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({"*/*"})
- @Produces({MediaType.APPLICATION_JSON})
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response put(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -324,7 +324,7 @@ public class NamenodeWebHdfsMethods {
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
fullpath, op.getValue(), -1L,
permission, overwrite, bufferSize, replication, blockSize);
- return Response.temporaryRedirect(uri).build();
+ return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case MKDIRS:
{
@@ -336,7 +336,7 @@ public class NamenodeWebHdfsMethods {
{
np.createSymlink(destination.getValue(), fullpath,
PermissionParam.getDefaultFsPermission(), createParent.getValue());
- return Response.ok().type(MediaType.APPLICATION_JSON).build();
+ return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case RENAME:
{
@@ -348,7 +348,7 @@ public class NamenodeWebHdfsMethods {
} else {
np.rename2(fullpath, destination.getValue(),
s.toArray(new Options.Rename[s.size()]));
- return Response.ok().type(MediaType.APPLICATION_JSON).build();
+ return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
}
case SETREPLICATION:
@@ -364,17 +364,17 @@ public class NamenodeWebHdfsMethods {
}
np.setOwner(fullpath, owner.getValue(), group.getValue());
- return Response.ok().type(MediaType.APPLICATION_JSON).build();
+ return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETPERMISSION:
{
np.setPermission(fullpath, permission.getFsPermission());
- return Response.ok().type(MediaType.APPLICATION_JSON).build();
+ return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETTIMES:
{
np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
- return Response.ok().type(MediaType.APPLICATION_JSON).build();
+ return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case RENEWDELEGATIONTOKEN:
{
@@ -389,7 +389,7 @@ public class NamenodeWebHdfsMethods {
final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(delegationTokenArgument.getValue());
np.cancelDelegationToken(token);
- return Response.ok().type(MediaType.APPLICATION_JSON).build();
+ return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
@@ -406,7 +406,7 @@ public class NamenodeWebHdfsMethods {
@POST
@Path("/")
@Consumes({"*/*"})
- @Produces({MediaType.APPLICATION_JSON})
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response postRoot(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -427,7 +427,7 @@ public class NamenodeWebHdfsMethods {
@POST
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({"*/*"})
- @Produces({MediaType.APPLICATION_JSON})
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response post(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -459,7 +459,7 @@ public class NamenodeWebHdfsMethods {
{
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
fullpath, op.getValue(), -1L, bufferSize);
- return Response.temporaryRedirect(uri).build();
+ return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
@@ -542,7 +542,7 @@ public class NamenodeWebHdfsMethods {
{
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
fullpath, op.getValue(), offset.getValue(), offset, length, bufferSize);
- return Response.temporaryRedirect(uri).build();
+ return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case GET_BLOCK_LOCATIONS:
{
@@ -578,7 +578,7 @@ public class NamenodeWebHdfsMethods {
{
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
fullpath, op.getValue(), -1L);
- return Response.temporaryRedirect(uri).build();
+ return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case GETDELEGATIONTOKEN:
{
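
After HDFS-3006, WebHDFS operations that return an empty body (for example SETPERMISSION above) advertise Content-Type application/octet-stream instead of application/json. A hedged client-side check, mirroring the assertions added to TestWebHdfsFileSystemContract at the end of this commit; the host, port, path, and user are assumptions, not part of the patch:

  import java.net.HttpURLConnection;
  import java.net.URL;

  public class WebHdfsContentTypeCheck {
    public static void main(String[] args) throws Exception {
      // Hypothetical NameNode HTTP address and path; SETPERMISSION returns an
      // empty body, which is now typed application/octet-stream.
      URL url = new URL("http://nn.example.com:50070/webhdfs/v1/tmp/dir"
          + "?op=SETPERMISSION&permission=755&user.name=hdfs");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("PUT");
      conn.connect();
      System.out.println("status = " + conn.getResponseCode());   // expect 200
      System.out.println("length = " + conn.getContentLength());  // expect 0
      System.out.println("type   = " + conn.getContentType());    // expect application/octet-stream
      conn.disconnect();
    }
  }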
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sun Feb 26 04:57:17 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1293274
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1293741
/hadoop/core/branches/branch-0.19/hdfs/src/main/native:713112
/hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
/hadoop/core/trunk/src/c++/libhdfs:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sun Feb 26 04:57:17 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1293274
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1293741
/hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/datanode:713112
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
/hadoop/core/trunk/src/webapps/datanode:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sun Feb 26 04:57:17 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1293274
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1293741
/hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
/hadoop/core/trunk/src/webapps/hdfs:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sun Feb 26 04:57:17 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1293274
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1293741
/hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
/hadoop/core/trunk/src/webapps/secondary:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sun Feb 26 04:57:17 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1293274
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1293741
/hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
/hadoop/core/trunk/src/test/hdfs:776175-785643
/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Sun Feb 26 04:57:17 2012
@@ -756,10 +756,10 @@ public class MiniDFSCluster {
// After the NN has started, set back the bound ports into
// the conf
conf.set(DFSUtil.addKeySuffixes(
- DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), NameNode
+ DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), NetUtils
.getHostPortString(nn.getNameNodeAddress()));
conf.set(DFSUtil.addKeySuffixes(
- DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NameNode
+ DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils
.getHostPortString(nn.getHttpAddress()));
DFSUtil.setGenericConf(conf, nameserviceId, nnId,
DFS_NAMENODE_HTTP_ADDRESS_KEY);
@@ -779,7 +779,7 @@ public class MiniDFSCluster {
*/
public URI getURI(int nnIndex) {
InetSocketAddress addr = nameNodes[nnIndex].nameNode.getNameNodeAddress();
- String hostPort = NameNode.getHostPortString(addr);
+ String hostPort = NetUtils.getHostPortString(addr);
URI uri = null;
try {
uri = new URI("hdfs://" + hostPort);
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java Sun Feb 26 04:57:17 2012
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.ServletUtil;
import org.junit.Test;
import org.mockito.Mockito;
@@ -134,7 +135,7 @@ public class TestDatanodeJsp {
Mockito.doReturn("100").when(reqMock).getParameter("chunkSizeToView");
Mockito.doReturn("1").when(reqMock).getParameter("startOffset");
Mockito.doReturn("1024").when(reqMock).getParameter("blockSize");
- Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF)))
+ Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
.when(reqMock).getParameter("nnaddr");
Mockito.doReturn(testFile.toString()).when(reqMock).getPathInfo();
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Sun Feb 26 04:57:17 2012
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Before;
@@ -333,7 +334,7 @@ public class TestBackupNode {
InetSocketAddress add = backup.getNameNodeAddress();
// Write to BN
FileSystem bnFS = FileSystem.get(new Path("hdfs://"
- + NameNode.getHostPortString(add)).toUri(), conf);
+ + NetUtils.getHostPortString(add)).toUri(), conf);
boolean canWrite = true;
try {
TestCheckpoint.writeFile(bnFS, file3, replication);
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Sun Feb 26 04:57:17 2012
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.hadoop.util.StringUtils;
@@ -1433,7 +1434,7 @@ public class TestCheckpoint extends Test
.format(true).build();
NamenodeProtocols nn = cluster.getNameNodeRpc();
- String fsName = NameNode.getHostPortString(
+ String fsName = NetUtils.getHostPortString(
cluster.getNameNode().getHttpAddress());
// Make a finalized log on the server side.
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Sun Feb 26 04:57:17 2012
@@ -17,23 +17,33 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.junit.Assert.*;
+
+import java.io.File;
import java.lang.management.ManagementFactory;
+import java.net.URI;
+import java.util.Collection;
+import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.util.VersionInfo;
import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
+
import junit.framework.Assert;
/**
* Class for testing {@link NameNodeMXBean} implementation
*/
public class TestNameNodeMXBean {
+ @SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testNameNodeMXBeanInfo() throws Exception {
Configuration conf = new Configuration();
@@ -88,8 +98,46 @@ public class TestNameNodeMXBean {
String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
"DeadNodes"));
Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+ // get attribute NameDirStatuses
+ String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
+ "NameDirStatuses"));
+ Assert.assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
+ Map<String, Map<String, String>> statusMap =
+ (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
+ Collection<URI> nameDirUris = cluster.getNameDirs(0);
+ for (URI nameDirUri : nameDirUris) {
+ File nameDir = new File(nameDirUri);
+ System.out.println("Checking for the presence of " + nameDir +
+ " in active name dirs.");
+ assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
+ }
+ assertEquals(2, statusMap.get("active").size());
+ assertEquals(0, statusMap.get("failed").size());
+
+ // This will cause the first dir to fail.
+ File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
+ assertEquals(0, FileUtil.chmod(failedNameDir.getAbsolutePath(), "000"));
+ cluster.getNameNodeRpc().rollEditLog();
+
+ nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
+ "NameDirStatuses"));
+ statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
+ for (URI nameDirUri : nameDirUris) {
+ File nameDir = new File(nameDirUri);
+ String expectedStatus =
+ nameDir.equals(failedNameDir) ? "failed" : "active";
+ System.out.println("Checking for the presence of " + nameDir +
+ " in " + expectedStatus + " name dirs.");
+ assertTrue(statusMap.get(expectedStatus).containsKey(
+ nameDir.getAbsolutePath()));
+ }
+ assertEquals(1, statusMap.get("active").size());
+ assertEquals(1, statusMap.get("failed").size());
} finally {
if (cluster != null) {
+ for (URI dir : cluster.getNameDirs(0)) {
+ FileUtil.chmod(new File(dir).toString(), "700");
+ }
cluster.shutdown();
}
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java Sun Feb 26 04:57:17 2012
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSInputSt
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.net.NetUtils;
import org.junit.Test;
import org.mockito.Mockito;
import org.mortbay.jetty.InclusiveByteRange;
@@ -263,7 +264,7 @@ public class TestStreamFile {
Mockito.doReturn(CONF).when(mockServletContext).getAttribute(
JspHelper.CURRENT_CONF);
- Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF)))
+ Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
.when(mockHttpServletRequest).getParameter("nnaddr");
Mockito.doReturn(testFile.toString()).when(mockHttpServletRequest)
.getPathInfo();
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Sun Feb 26 04:57:17 2012
@@ -27,6 +27,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import org.mockito.Mockito;
@@ -54,7 +55,7 @@ public class TestTransferFsImage {
new File("/xxxxx-does-not-exist/blah"));
try {
- String fsName = NameNode.getHostPortString(
+ String fsName = NetUtils.getHostPortString(
cluster.getNameNode().getHttpAddress());
String id = "getimage=1&txid=0";
@@ -86,7 +87,7 @@ public class TestTransferFsImage {
);
try {
- String fsName = NameNode.getHostPortString(
+ String fsName = NetUtils.getHostPortString(
cluster.getNameNode().getHttpAddress());
String id = "getimage=1&txid=0";
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Sun Feb 26 04:57:17 2012
@@ -103,12 +103,6 @@ public class TestNameNodeMetrics {
DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong());
}
- private void updateMetrics() throws Exception {
- // Wait for metrics update (corresponds to dfs.namenode.replication.interval
- // for some block related metrics to get updated)
- Thread.sleep(1000);
- }
-
private void readFile(FileSystem fileSys,Path name) throws IOException {
//Just read file so that getNumBlockLocations are incremented
DataInputStream stm = fileSys.open(name);
@@ -125,7 +119,6 @@ public class TestNameNodeMetrics {
createFile(file, 3200, (short)3);
final long blockCount = 32;
int blockCapacity = namesystem.getBlockCapacity();
- updateMetrics();
assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
@@ -140,7 +133,6 @@ public class TestNameNodeMetrics {
while (threshold < blockCount) {
blockCapacity <<= 1;
}
- updateMetrics();
long filesTotal = file.depth() + 1; // Add 1 for root
rb = getMetrics(NS_METRICS);
assertGauge("FilesTotal", filesTotal, rb);
@@ -150,7 +142,6 @@ public class TestNameNodeMetrics {
filesTotal--; // reduce the filecount for deleted file
waitForDeletion();
- updateMetrics();
rb = getMetrics(NS_METRICS);
assertGauge("FilesTotal", filesTotal, rb);
assertGauge("BlocksTotal", 0L, rb);
@@ -179,7 +170,7 @@ public class TestNameNodeMetrics {
} finally {
cluster.getNamesystem().writeUnlock();
}
- updateMetrics();
+ Thread.sleep(1000); // Wait for block to be marked corrupt
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("CorruptBlocks", 1L, rb);
assertGauge("PendingReplicationBlocks", 1L, rb);
@@ -201,7 +192,6 @@ public class TestNameNodeMetrics {
createFile(file, 100, (short)2);
long totalBlocks = 1;
NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
- updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("ExcessBlocks", totalBlocks, rb);
fs.delete(file, true);
@@ -224,7 +214,7 @@ public class TestNameNodeMetrics {
} finally {
cluster.getNamesystem().writeUnlock();
}
- updateMetrics();
+ Thread.sleep(1000); // Wait for block to be marked corrupt
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("UnderReplicatedBlocks", 1L, rb);
assertGauge("MissingBlocks", 1L, rb);
@@ -246,7 +236,6 @@ public class TestNameNodeMetrics {
Path target = getTestPath("target");
createFile(target, 100, (short)1);
fs.rename(src, target, Rename.OVERWRITE);
- updateMetrics();
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
assertCounter("FilesRenamed", 1L, rb);
assertCounter("FilesDeleted", 1L, rb);
@@ -274,7 +263,6 @@ public class TestNameNodeMetrics {
//Perform create file operation
createFile(file1_Path,100,(short)2);
- updateMetrics();
//Create file does not change numGetBlockLocations metric
//expect numGetBlockLocations = 0 for previous and current interval
@@ -283,14 +271,12 @@ public class TestNameNodeMetrics {
// Open and read file operation increments GetBlockLocations
// Perform read file operation on earlier created file
readFile(fs, file1_Path);
- updateMetrics();
// Verify read file operation has incremented numGetBlockLocations by 1
assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS));
// opening and reading file twice will increment numGetBlockLocations by 2
readFile(fs, file1_Path);
readFile(fs, file1_Path);
- updateMetrics();
assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
}
@@ -308,7 +294,6 @@ public class TestNameNodeMetrics {
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
- updateMetrics();
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS));
@@ -316,7 +301,6 @@ public class TestNameNodeMetrics {
assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS));
cluster.getNameNodeRpc().rollEditLog();
- updateMetrics();
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
@@ -326,7 +310,6 @@ public class TestNameNodeMetrics {
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
- updateMetrics();
long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
getMetrics(NS_METRICS));
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java Sun Feb 26 04:57:17 2012
@@ -35,10 +35,10 @@ import static org.apache.hadoop.hdfs.DFS
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.tools.GetConf;
import org.apache.hadoop.hdfs.tools.GetConf.Command;
import org.apache.hadoop.hdfs.tools.GetConf.CommandHandler;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
@@ -88,7 +88,7 @@ public class TestGetConf {
private String[] toStringArray(List<ConfiguredNNAddress> list) {
String[] ret = new String[list.size()];
for (int i = 0; i < list.size(); i++) {
- ret[i] = NameNode.getHostPortString(list.get(i).getAddress());
+ ret[i] = NetUtils.getHostPortString(list.get(i).getAddress());
}
return ret;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1293742&r1=1293741&r2=1293742&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Sun Feb 26 04:57:17 2012
@@ -27,6 +27,7 @@ import java.net.URL;
import java.util.Map;
import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
@@ -314,6 +315,8 @@ public class TestWebHdfsFileSystemContra
conn.setRequestMethod(op.getType().toString());
conn.connect();
assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+ assertEquals(0, conn.getContentLength());
+ assertEquals(MediaType.APPLICATION_OCTET_STREAM, conn.getContentType());
assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort());
conn.disconnect();
}