You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2011/10/26 22:11:46 UTC
svn commit: r1189421 - in
/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/main/java/org/apache/hadoop/hdfs/s...
Author: szetszwo
Date: Wed Oct 26 20:11:45 2011
New Revision: 1189421
URL: http://svn.apache.org/viewvc?rev=1189421&view=rev
Log:
svn merge -c 1173468 from trunk for HDFS-2340.
Added:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
- copied unchanged from r1173468, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Oct 26 20:11:45 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1172916,1173402,1176550,1176733,1177487,1177531,1177859,1177864
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1172916,1173402,1173468,1176550,1176733,1177487,1177531,1177859,1177864
/hadoop/core/branches/branch-0.19/hdfs:713112
/hadoop/hdfs/branches/HDFS-1052:987665-1095512
/hadoop/hdfs/branches/HDFS-265:796829-820463
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Oct 26 20:11:45 2011
@@ -319,6 +319,9 @@ Release 0.23.0 - Unreleased
HDFS-2318. Provide authentication to webhdfs using SPNEGO and delegation
tokens. (szetszwo)
+ HDFS-2340. Support getFileBlockLocations and getDelegationToken in webhdfs.
+ (szetszwo)
+
IMPROVEMENTS
HDFS-1875. MiniDFSCluster hard-codes dfs.datanode.address to localhost
Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Oct 26 20:11:45 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1172916,1173402,1176550,1176733,1177487,1177531,1177859,1177864
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1172916,1173402,1173468,1176550,1176733,1177487,1177531,1177859,1177864
/hadoop/core/branches/branch-0.19/hdfs/src/java:713112
/hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
/hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Wed Oct 26 20:11:45 2011
@@ -115,6 +115,26 @@ public class DatanodeInfo extends Datano
this.location = location;
this.hostName = hostName;
}
+
+ /** Constructor */
+ public DatanodeInfo(final String name, final String storageID,
+ final int infoPort, final int ipcPort,
+ final long capacity, final long dfsUsed, final long remaining,
+ final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
+ final String networkLocation, final String hostName,
+ final AdminStates adminState) {
+ super(name, storageID, infoPort, ipcPort);
+
+ this.capacity = capacity;
+ this.dfsUsed = dfsUsed;
+ this.remaining = remaining;
+ this.blockPoolUsed = blockPoolUsed;
+ this.lastUpdate = lastUpdate;
+ this.xceiverCount = xceiverCount;
+ this.location = networkLocation;
+ this.hostName = hostName;
+ this.adminState = adminState;
+ }
/** The raw capacity. */
public long getCapacity() { return capacity; }
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Wed Oct 26 20:11:45 2011
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -911,8 +912,11 @@ class NameNodeRpcServer implements Namen
}
private static String getClientMachine() {
- String clientMachine = Server.getRemoteAddress();
- if (clientMachine == null) {
+ String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
+ if (clientMachine == null) { //not a web client
+ clientMachine = Server.getRemoteAddress();
+ }
+ if (clientMachine == null) { //not a RPC client
clientMachine = "";
}
return clientMachine;
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Wed Oct 26 20:11:45 2011
@@ -78,6 +78,7 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
+import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -92,7 +93,14 @@ import org.apache.hadoop.security.token.
/** Web-hdfs NameNode implementation. */
@Path("")
public class NamenodeWebHdfsMethods {
- private static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
+ public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
+
+ private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>();
+
+ /** @return the remote client address. */
+ public static String getRemoteAddress() {
+ return REMOTE_ADDRESS.get();
+ }
private @Context ServletContext context;
private @Context HttpServletRequest request;
@@ -215,6 +223,8 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
+ REMOTE_ADDRESS.set(request.getRemoteAddr());
+ try {
final String fullpath = path.getAbsolutePath();
final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -272,6 +282,10 @@ public class NamenodeWebHdfsMethods {
default:
throw new UnsupportedOperationException(op + " is not supported");
}
+
+ } finally {
+ REMOTE_ADDRESS.set(null);
+ }
}
});
}
@@ -301,6 +315,8 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
+ REMOTE_ADDRESS.set(request.getRemoteAddr());
+ try {
final String fullpath = path.getAbsolutePath();
final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -315,6 +331,10 @@ public class NamenodeWebHdfsMethods {
default:
throw new UnsupportedOperationException(op + " is not supported");
}
+
+ } finally {
+ REMOTE_ADDRESS.set(null);
+ }
}
});
}
@@ -335,10 +355,12 @@ public class NamenodeWebHdfsMethods {
final OffsetParam offset,
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
final LengthParam length,
+ @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
+ final RenewerParam renewer,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
) throws IOException, URISyntaxException, InterruptedException {
- return get(ugi, delegation, ROOT, op, offset, length, bufferSize);
+ return get(ugi, delegation, ROOT, op, offset, length, renewer, bufferSize);
}
/** Handle HTTP GET request. */
@@ -356,19 +378,23 @@ public class NamenodeWebHdfsMethods {
final OffsetParam offset,
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
final LengthParam length,
+ @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
+ final RenewerParam renewer,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
) throws IOException, URISyntaxException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path + ", ugi=" + ugi
- + Param.toSortedString(", ", offset, length, bufferSize));
+ + Param.toSortedString(", ", offset, length, renewer, bufferSize));
}
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
+ REMOTE_ADDRESS.set(request.getRemoteAddr());
+ try {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final String fullpath = path.getAbsolutePath();
@@ -381,6 +407,15 @@ public class NamenodeWebHdfsMethods {
op.getValue(), offset.getValue(), offset, length, bufferSize);
return Response.temporaryRedirect(uri).build();
}
+ case GETFILEBLOCKLOCATIONS:
+ {
+ final long offsetValue = offset.getValue();
+ final Long lengthValue = length.getValue();
+ final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
+ offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
+ final String js = JsonUtil.toJsonString(locatedblocks);
+ return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+ }
case GETFILESTATUS:
{
final HdfsFileStatus status = np.getFileInfo(fullpath);
@@ -392,9 +427,20 @@ public class NamenodeWebHdfsMethods {
final StreamingOutput streaming = getListingStream(np, fullpath);
return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
}
+ case GETDELEGATIONTOKEN:
+ {
+ final Token<? extends TokenIdentifier> token = generateDelegationToken(
+ namenode, ugi, renewer.getValue());
+ final String js = JsonUtil.toJsonString(token);
+ return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+ }
default:
throw new UnsupportedOperationException(op + " is not supported");
}
+
+ } finally {
+ REMOTE_ADDRESS.set(null);
+ }
}
});
}
@@ -462,6 +508,9 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException {
+ REMOTE_ADDRESS.set(request.getRemoteAddr());
+ try {
+
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final String fullpath = path.getAbsolutePath();
@@ -475,6 +524,10 @@ public class NamenodeWebHdfsMethods {
default:
throw new UnsupportedOperationException(op + " is not supported");
}
+
+ } finally {
+ REMOTE_ADDRESS.set(null);
+ }
}
});
}
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Wed Oct 26 20:11:45 2011
@@ -17,19 +17,31 @@
*/
package org.apache.hadoop.hdfs.web;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
import org.mortbay.util.ajax.JSON;
/** JSON Utilities */
public class JsonUtil {
- private static final ThreadLocal<Map<String, Object>> jsonMap
- = new ThreadLocal<Map<String, Object>>() {
+ private static class ThreadLocalMap extends ThreadLocal<Map<String, Object>> {
@Override
protected Map<String, Object> initialValue() {
return new TreeMap<String, Object>();
@@ -41,7 +53,54 @@ public class JsonUtil {
m.clear();
return m;
}
- };
+ }
+
+ private static final ThreadLocalMap jsonMap = new ThreadLocalMap();
+ private static final ThreadLocalMap tokenMap = new ThreadLocalMap();
+ private static final ThreadLocalMap datanodeInfoMap = new ThreadLocalMap();
+ private static final ThreadLocalMap extendedBlockMap = new ThreadLocalMap();
+ private static final ThreadLocalMap locatedBlockMap = new ThreadLocalMap();
+
+ private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
+
+ /** Convert a token object to a Json string. */
+ public static String toJsonString(final Token<? extends TokenIdentifier> token
+ ) throws IOException {
+ if (token == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = tokenMap.get();
+ m.put("urlString", token.encodeToUrlString());
+ return JSON.toString(m);
+ }
+
+ /** Convert a Json map to a Token. */
+ public static Token<? extends TokenIdentifier> toToken(
+ final Map<?, ?> m) throws IOException {
+ if (m == null) {
+ return null;
+ }
+
+ final Token<DelegationTokenIdentifier> token
+ = new Token<DelegationTokenIdentifier>();
+ token.decodeFromUrlString((String)m.get("urlString"));
+ return token;
+ }
+
+ /** Convert a Json map to a Token of DelegationTokenIdentifier. */
+ @SuppressWarnings("unchecked")
+ public static Token<DelegationTokenIdentifier> toDelegationToken(
+ final Map<?, ?> m) throws IOException {
+ return (Token<DelegationTokenIdentifier>)toToken(m);
+ }
+
+ /** Convert a Json map to a Token of BlockTokenIdentifier. */
+ @SuppressWarnings("unchecked")
+ public static Token<BlockTokenIdentifier> toBlockToken(
+ final Map<?, ?> m) throws IOException {
+ return (Token<BlockTokenIdentifier>)toToken(m);
+ }
/** Convert an exception object to a Json string. */
public static String toJsonString(final Exception e) {
@@ -77,11 +136,10 @@ public class JsonUtil {
/** Convert a HdfsFileStatus object to a Json string. */
public static String toJsonString(final HdfsFileStatus status) {
- final Map<String, Object> m = jsonMap.get();
if (status == null) {
- m.put("isNull", true);
+ return null;
} else {
- m.put("isNull", false);
+ final Map<String, Object> m = jsonMap.get();
m.put("localName", status.getLocalName());
m.put("isDir", status.isDir());
m.put("isSymlink", status.isSymlink());
@@ -97,8 +155,8 @@ public class JsonUtil {
m.put("modificationTime", status.getModificationTime());
m.put("blockSize", status.getBlockSize());
m.put("replication", status.getReplication());
+ return JSON.toString(m);
}
- return JSON.toString(m);
}
@SuppressWarnings("unchecked")
@@ -106,9 +164,9 @@ public class JsonUtil {
return (Map<String, Object>) JSON.parse(jsonString);
}
- /** Convert a Json string to a HdfsFileStatus object. */
+ /** Convert a Json map to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<String, Object> m) {
- if ((Boolean)m.get("isNull")) {
+ if (m == null) {
return null;
}
@@ -130,4 +188,214 @@ public class JsonUtil {
permission, owner, group,
symlink, DFSUtil.string2Bytes(localName));
}
+
+ /** Convert an ExtendedBlock to a Json string. */
+ public static String toJsonString(final ExtendedBlock extendedblock) {
+ if (extendedblock == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = extendedBlockMap.get();
+ m.put("blockPoolId", extendedblock.getBlockPoolId());
+ m.put("blockId", extendedblock.getBlockId());
+ m.put("numBytes", extendedblock.getNumBytes());
+ m.put("generationStamp", extendedblock.getGenerationStamp());
+ return JSON.toString(m);
+ }
+
+ /** Convert a Json map to an ExtendedBlock object. */
+ public static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
+ if (m == null) {
+ return null;
+ }
+
+ final String blockPoolId = (String)m.get("blockPoolId");
+ final long blockId = (Long)m.get("blockId");
+ final long numBytes = (Long)m.get("numBytes");
+ final long generationStamp = (Long)m.get("generationStamp");
+ return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
+ }
+
+ /** Convert a DatanodeInfo to a Json string. */
+ public static String toJsonString(final DatanodeInfo datanodeinfo) {
+ if (datanodeinfo == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = datanodeInfoMap.get();
+ m.put("name", datanodeinfo.getName());
+ m.put("storageID", datanodeinfo.getStorageID());
+ m.put("infoPort", datanodeinfo.getInfoPort());
+
+ m.put("ipcPort", datanodeinfo.getIpcPort());
+
+ m.put("capacity", datanodeinfo.getCapacity());
+ m.put("dfsUsed", datanodeinfo.getDfsUsed());
+ m.put("remaining", datanodeinfo.getRemaining());
+ m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
+ m.put("lastUpdate", datanodeinfo.getLastUpdate());
+ m.put("xceiverCount", datanodeinfo.getXceiverCount());
+ m.put("networkLocation", datanodeinfo.getNetworkLocation());
+ m.put("hostName", datanodeinfo.getHostName());
+ m.put("adminState", datanodeinfo.getAdminState().name());
+ return JSON.toString(m);
+ }
+
+ /** Convert a Json map to an DatanodeInfo object. */
+ public static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
+ if (m == null) {
+ return null;
+ }
+
+ return new DatanodeInfo(
+ (String)m.get("name"),
+ (String)m.get("storageID"),
+ (int)(long)(Long)m.get("infoPort"),
+ (int)(long)(Long)m.get("ipcPort"),
+
+ (Long)m.get("capacity"),
+ (Long)m.get("dfsUsed"),
+ (Long)m.get("remaining"),
+ (Long)m.get("blockPoolUsed"),
+ (Long)m.get("lastUpdate"),
+ (int)(long)(Long)m.get("xceiverCount"),
+ (String)m.get("networkLocation"),
+ (String)m.get("hostName"),
+ AdminStates.valueOf((String)m.get("adminState")));
+ }
+
+ /** Convert a DatanodeInfo[] to a Json string. */
+ public static String toJsonString(final DatanodeInfo[] array
+ ) throws IOException {
+ if (array == null) {
+ return null;
+ } else if (array.length == 0) {
+ return "[]";
+ } else {
+ final StringBuilder b = new StringBuilder().append('[').append(
+ toJsonString(array[0]));
+ for(int i = 1; i < array.length; i++) {
+ b.append(", ").append(toJsonString(array[i]));
+ }
+ return b.append(']').toString();
+ }
+ }
+
+ /** Convert an Object[] to a DatanodeInfo[]. */
+ public static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
+ if (objects == null) {
+ return null;
+ } else if (objects.length == 0) {
+ return EMPTY_DATANODE_INFO_ARRAY;
+ } else {
+ final DatanodeInfo[] array = new DatanodeInfo[objects.length];
+ for(int i = 0; i < array.length; i++) {
+ array[i] = (DatanodeInfo)toDatanodeInfo((Map<?, ?>) objects[i]);
+ }
+ return array;
+ }
+ }
+
+ /** Convert a LocatedBlock to a Json string. */
+ public static String toJsonString(final LocatedBlock locatedblock
+ ) throws IOException {
+ if (locatedblock == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = locatedBlockMap.get();
+ m.put("blockToken", toJsonString(locatedblock.getBlockToken()));
+ m.put("isCorrupt", locatedblock.isCorrupt());
+ m.put("startOffset", locatedblock.getStartOffset());
+ m.put("block", toJsonString(locatedblock.getBlock()));
+
+ m.put("locations", toJsonString(locatedblock.getLocations()));
+ return JSON.toString(m);
+ }
+
+ /** Convert a Json map to LocatedBlock. */
+ public static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
+ if (m == null) {
+ return null;
+ }
+
+ final ExtendedBlock b = toExtendedBlock((Map<?, ?>)JSON.parse((String)m.get("block")));
+ final DatanodeInfo[] locations = toDatanodeInfoArray(
+ (Object[])JSON.parse((String)m.get("locations")));
+ final long startOffset = (Long)m.get("startOffset");
+ final boolean isCorrupt = (Boolean)m.get("isCorrupt");
+
+ final LocatedBlock locatedblock = new LocatedBlock(b, locations, startOffset, isCorrupt);
+ locatedblock.setBlockToken(toBlockToken((Map<?, ?>)JSON.parse((String)m.get("blockToken"))));
+ return locatedblock;
+ }
+
+ /** Convert a List of LocatedBlock to a Json string. */
+ public static String toJsonString(final List<LocatedBlock> array
+ ) throws IOException {
+ if (array == null) {
+ return null;
+ } else if (array.size() == 0) {
+ return "[]";
+ } else {
+ final StringBuilder b = new StringBuilder().append('[').append(
+ toJsonString(array.get(0)));
+ for(int i = 1; i < array.size(); i++) {
+ b.append(",\n ").append(toJsonString(array.get(i)));
+ }
+ return b.append(']').toString();
+ }
+ }
+
+ /** Convert an Object[] to a List of LocatedBlock.
+ * @throws IOException */
+ public static List<LocatedBlock> toLocatedBlockList(final Object[] objects
+ ) throws IOException {
+ if (objects == null) {
+ return null;
+ } else if (objects.length == 0) {
+ return Collections.emptyList();
+ } else {
+ final List<LocatedBlock> list = new ArrayList<LocatedBlock>(objects.length);
+ for(int i = 0; i < objects.length; i++) {
+ list.add((LocatedBlock)toLocatedBlock((Map<?, ?>)objects[i]));
+ }
+ return list;
+ }
+ }
+
+ /** Convert LocatedBlocks to a Json string. */
+ public static String toJsonString(final LocatedBlocks locatedblocks
+ ) throws IOException {
+ if (locatedblocks == null) {
+ return null;
+ }
+
+ final Map<String, Object> m = jsonMap.get();
+ m.put("fileLength", locatedblocks.getFileLength());
+ m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
+
+ m.put("locatedBlocks", toJsonString(locatedblocks.getLocatedBlocks()));
+ m.put("lastLocatedBlock", toJsonString(locatedblocks.getLastLocatedBlock()));
+ m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
+ return JSON.toString(m);
+ }
+
+ /** Convert a Json map to LocatedBlocks. */
+ public static LocatedBlocks toLocatedBlocks(final Map<String, Object> m
+ ) throws IOException {
+ if (m == null) {
+ return null;
+ }
+
+ final long fileLength = (Long)m.get("fileLength");
+ final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
+ final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
+ (Object[])JSON.parse((String) m.get("locatedBlocks")));
+ final LocatedBlock lastLocatedBlock = toLocatedBlock(
+ (Map<?, ?>)JSON.parse((String)m.get("lastLocatedBlock")));
+ final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
+ return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
+ lastLocatedBlock, isLastBlockComplete);
+ }
}
\ No newline at end of file
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Wed Oct 26 20:11:45 2011
@@ -27,9 +27,12 @@ import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -45,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -54,7 +58,9 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
+import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
import org.apache.hadoop.hdfs.web.resources.Param;
@@ -63,13 +69,16 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
+import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.mortbay.util.ajax.JSON;
@@ -167,7 +176,7 @@ public class WebHdfsFileSystem extends H
final Param<?,?>... parameters) throws IOException {
//initialize URI path and query
final String path = "/" + PATH_PREFIX
- + makeQualified(fspath).toUri().getPath();
+ + (fspath == null? "/": makeQualified(fspath).toUri().getPath());
final String query = op.toQueryString()
+ '&' + new UserParam(ugi)
+ Param.toSortedString("&", parameters);
@@ -396,4 +405,41 @@ public class WebHdfsFileSystem extends H
}
return statuses;
}
+
+ @Override
+ public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
+ ) throws IOException {
+ final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
+ final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
+ final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
+ token.setService(new Text(getCanonicalServiceName()));
+ return token;
+ }
+
+ @Override
+ public List<Token<?>> getDelegationTokens(final String renewer
+ ) throws IOException {
+ final Token<?>[] t = {getDelegationToken(renewer)};
+ return Arrays.asList(t);
+ }
+
+ @Override
+ public BlockLocation[] getFileBlockLocations(final FileStatus status,
+ final long offset, final long length) throws IOException {
+ if (status == null) {
+ return null;
+ }
+ return getFileBlockLocations(status.getPath(), offset, length);
+ }
+
+ @Override
+ public BlockLocation[] getFileBlockLocations(final Path p,
+ final long offset, final long length) throws IOException {
+ statistics.incrementReadOps(1);
+
+ final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
+ final Map<String, Object> m = run(op, p, new OffsetParam(offset),
+ new LengthParam(length));
+ return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
+ }
}
\ No newline at end of file
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Wed Oct 26 20:11:45 2011
@@ -27,10 +27,13 @@ public class GetOpParam extends HttpOpPa
/** Get operations. */
public static enum Op implements HttpOpParam.Op {
OPEN(HttpURLConnection.HTTP_OK),
+ GETFILEBLOCKLOCATIONS(HttpURLConnection.HTTP_OK),
GETFILESTATUS(HttpURLConnection.HTTP_OK),
LISTSTATUS(HttpURLConnection.HTTP_OK),
+ GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
+
NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
final int expectedHttpResponseCode;
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java Wed Oct 26 20:11:45 2011
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.web.resources;
-/** Recursive parameter. */
+/** Overwrite parameter. */
public class OverwriteParam extends BooleanParam {
/** Parameter name. */
public static final String NAME = "overwrite";
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Wed Oct 26 20:11:45 2011
@@ -23,12 +23,12 @@ package org.apache.hadoop.hdfs.security;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
+import java.net.URI;
import java.security.PrivilegedExceptionAction;
-import junit.framework.Assert;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,12 +38,16 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
+import org.apache.log4j.Level;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -56,12 +60,13 @@ public class TestDelegationToken {
@Before
public void setUp() throws Exception {
config = new HdfsConfiguration();
+ config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
config.set("hadoop.security.auth_to_local",
"RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
- cluster = new MiniDFSCluster.Builder(config).build();
+ cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
cluster.waitActive();
dtSecretManager = NameNodeAdapter.getDtSecretManager(
cluster.getNamesystem());
@@ -155,6 +160,31 @@ public class TestDelegationToken {
@SuppressWarnings("deprecation")
@Test
+ public void testDelegationTokenWebHdfsApi() throws Exception {
+ ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
+ final String uri = WebHdfsFileSystem.SCHEME + "://"
+ + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+ //get file system as JobTracker
+ final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+ "JobTracker", new String[]{"user"});
+ final WebHdfsFileSystem webhdfs = ugi.doAs(
+ new PrivilegedExceptionAction<WebHdfsFileSystem>() {
+ @Override
+ public WebHdfsFileSystem run() throws Exception {
+ return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
+ }
+ });
+
+ final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
+ DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+ byte[] tokenId = token.getIdentifier();
+ identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
+ LOG.info("A valid token should have non-null password, and should be renewed successfully");
+ Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+ dtSecretManager.renewToken(token, "JobTracker");
+ }
+
+ @Test
public void testDelegationTokenWithDoAs() throws Exception {
final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
final Token<DelegationTokenIdentifier> token =
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1189421&r1=1189420&r2=1189421&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Wed Oct 26 20:11:45 2011
@@ -23,6 +23,7 @@ import java.net.URI;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
@@ -114,4 +115,16 @@ public class TestWebHdfsFileSystemContra
// also okay for HDFS.
}
}
+
+ public void testGetFileBlockLocations() throws IOException {
+ final String f = "/test/testGetFileBlockLocations";
+ createFile(path(f));
+ final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
+ final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
+ new Path(f), 0L, 1L);
+ assertEquals(expected.length, computed.length);
+ for(int i = 0; i < computed.length; i++) {
+ assertEquals(expected[i].toString(), computed[i].toString());
+ }
+ }
}