Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2014/07/21 23:44:57 UTC
svn commit: r1612403 [2/2] - in
/hadoop/common/branches/fs-encryption/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/
hadoop-hdfs/src/contri...
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Mon Jul 21 21:44:50 2014
@@ -28,6 +28,7 @@ import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.EnumSet;
+import java.util.HashSet;
import java.util.List;
import javax.servlet.ServletContext;
@@ -84,6 +85,7 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
+import org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
@@ -113,11 +115,13 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
+import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
@@ -190,12 +194,26 @@ public class NamenodeWebHdfsMethods {
}
return np;
}
-
+
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
final String path, final HttpOpParam.Op op, final long openOffset,
- final long blocksize) throws IOException {
+ final long blocksize, final String excludeDatanodes) throws IOException {
final BlockManager bm = namenode.getNamesystem().getBlockManager();
+
+ HashSet<Node> excludes = new HashSet<Node>();
+ if (excludeDatanodes != null) {
+ for (String host : StringUtils
+ .getTrimmedStringCollection(excludeDatanodes)) {
+ int idx = host.indexOf(":");
+ if (idx != -1) {
+ excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
+ host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
+ } else {
+ excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
+ }
+ }
+ }
if (op == PutOpParam.Op.CREATE) {
//choose a datanode near to client
@@ -204,7 +222,7 @@ public class NamenodeWebHdfsMethods {
if (clientNode != null) {
final DatanodeStorageInfo[] storages = bm.getBlockPlacementPolicy()
.chooseTarget(path, 1, clientNode,
- new ArrayList<DatanodeStorageInfo>(), false, null, blocksize,
+ new ArrayList<DatanodeStorageInfo>(), false, excludes, blocksize,
// TODO: get storage type from the file
StorageType.DEFAULT);
if (storages.length > 0) {
@@ -233,7 +251,7 @@ public class NamenodeWebHdfsMethods {
final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
final int count = locations.locatedBlockCount();
if (count > 0) {
- return bestNode(locations.get(0).getLocations());
+ return bestNode(locations.get(0).getLocations(), excludes);
}
}
}
@@ -247,11 +265,14 @@ public class NamenodeWebHdfsMethods {
* sorted based on availability and network distances, thus it is sufficient
* to return the first element of the node here.
*/
- private static DatanodeInfo bestNode(DatanodeInfo[] nodes) throws IOException {
- if (nodes.length == 0 || nodes[0].isDecommissioned()) {
- throw new IOException("No active nodes contain this block");
+ private static DatanodeInfo bestNode(DatanodeInfo[] nodes,
+ HashSet<Node> excludes) throws IOException {
+ for (DatanodeInfo dn: nodes) {
+ if (false == dn.isDecommissioned() && false == excludes.contains(dn)) {
+ return dn;
+ }
}
- return nodes[0];
+ throw new IOException("No active nodes contain this block");
}
private Token<? extends TokenIdentifier> generateDelegationToken(
@@ -270,11 +291,12 @@ public class NamenodeWebHdfsMethods {
final UserGroupInformation ugi, final DelegationParam delegation,
final UserParam username, final DoAsParam doAsUser,
final String path, final HttpOpParam.Op op, final long openOffset,
- final long blocksize,
+ final long blocksize, final String excludeDatanodes,
final Param<?, ?>... parameters) throws URISyntaxException, IOException {
final DatanodeInfo dn;
try {
- dn = chooseDatanode(namenode, path, op, openOffset, blocksize);
+ dn = chooseDatanode(namenode, path, op, openOffset, blocksize,
+ excludeDatanodes);
} catch (InvalidTopologyException ite) {
throw new IOException("Failed to find datanode, suggest to check cluster health.", ite);
}
@@ -361,13 +383,15 @@ public class NamenodeWebHdfsMethods {
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName,
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
- final OldSnapshotNameParam oldSnapshotName
- )throws IOException, InterruptedException {
+ final OldSnapshotNameParam oldSnapshotName,
+ @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
+ final ExcludeDatanodesParam excludeDatanodes
+ ) throws IOException, InterruptedException {
return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
owner, group, permission, overwrite, bufferSize, replication,
blockSize, modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission, xattrName, xattrValue,
- xattrSetFlag, snapshotName, oldSnapshotName);
+ xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes);
}
/** Handle HTTP PUT request. */
@@ -423,14 +447,16 @@ public class NamenodeWebHdfsMethods {
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName,
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
- final OldSnapshotNameParam oldSnapshotName
+ final OldSnapshotNameParam oldSnapshotName,
+ @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
+ final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, destination, owner,
group, permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, delegationTokenArgument,
aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
- oldSnapshotName);
+ oldSnapshotName, excludeDatanodes);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@@ -441,7 +467,7 @@ public class NamenodeWebHdfsMethods {
permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission, xattrName, xattrValue,
- xattrSetFlag, snapshotName, oldSnapshotName);
+ xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes);
} finally {
reset();
}
@@ -474,7 +500,8 @@ public class NamenodeWebHdfsMethods {
final XAttrValueParam xattrValue,
final XAttrSetFlagParam xattrSetFlag,
final SnapshotNameParam snapshotName,
- final OldSnapshotNameParam oldSnapshotName
+ final OldSnapshotNameParam oldSnapshotName,
+ final ExcludeDatanodesParam exclDatanodes
) throws IOException, URISyntaxException {
final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
@@ -484,9 +511,10 @@ public class NamenodeWebHdfsMethods {
switch(op.getValue()) {
case CREATE:
{
- final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
- fullpath, op.getValue(), -1L, blockSize.getValue(conf),
- permission, overwrite, bufferSize, replication, blockSize);
+ final URI uri = redirectURI(namenode, ugi, delegation, username,
+ doAsUser, fullpath, op.getValue(), -1L, blockSize.getValue(conf),
+ exclDatanodes.getValue(), permission, overwrite, bufferSize,
+ replication, blockSize);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case MKDIRS:
@@ -619,9 +647,12 @@ public class NamenodeWebHdfsMethods {
@QueryParam(ConcatSourcesParam.NAME) @DefaultValue(ConcatSourcesParam.DEFAULT)
final ConcatSourcesParam concatSrcs,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
- final BufferSizeParam bufferSize
+ final BufferSizeParam bufferSize,
+ @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
+ final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
- return post(ugi, delegation, username, doAsUser, ROOT, op, concatSrcs, bufferSize);
+ return post(ugi, delegation, username, doAsUser, ROOT, op, concatSrcs,
+ bufferSize, excludeDatanodes);
}
/** Handle HTTP POST request. */
@@ -643,17 +674,21 @@ public class NamenodeWebHdfsMethods {
@QueryParam(ConcatSourcesParam.NAME) @DefaultValue(ConcatSourcesParam.DEFAULT)
final ConcatSourcesParam concatSrcs,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
- final BufferSizeParam bufferSize
+ final BufferSizeParam bufferSize,
+ @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
+ final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
- init(ugi, delegation, username, doAsUser, path, op, concatSrcs, bufferSize);
+ init(ugi, delegation, username, doAsUser, path, op, concatSrcs, bufferSize,
+ excludeDatanodes);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
try {
return post(ugi, delegation, username, doAsUser,
- path.getAbsolutePath(), op, concatSrcs, bufferSize);
+ path.getAbsolutePath(), op, concatSrcs, bufferSize,
+ excludeDatanodes);
} finally {
reset();
}
@@ -669,15 +704,17 @@ public class NamenodeWebHdfsMethods {
final String fullpath,
final PostOpParam op,
final ConcatSourcesParam concatSrcs,
- final BufferSizeParam bufferSize
+ final BufferSizeParam bufferSize,
+ final ExcludeDatanodesParam excludeDatanodes
) throws IOException, URISyntaxException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
switch(op.getValue()) {
case APPEND:
{
- final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
- fullpath, op.getValue(), -1L, -1L, bufferSize);
+ final URI uri = redirectURI(namenode, ugi, delegation, username,
+ doAsUser, fullpath, op.getValue(), -1L, -1L,
+ excludeDatanodes.getValue(), bufferSize);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case CONCAT:
@@ -715,10 +752,12 @@ public class NamenodeWebHdfsMethods {
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
final List<XAttrNameParam> xattrNames,
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
- final XAttrEncodingParam xattrEncoding
+ final XAttrEncodingParam xattrEncoding,
+ @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
+ final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
- renewer, bufferSize, xattrNames, xattrEncoding);
+ renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes);
}
/** Handle HTTP GET request. */
@@ -747,11 +786,13 @@ public class NamenodeWebHdfsMethods {
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
final List<XAttrNameParam> xattrNames,
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
- final XAttrEncodingParam xattrEncoding
+ final XAttrEncodingParam xattrEncoding,
+ @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
+ final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, offset, length,
- renewer, bufferSize, xattrEncoding);
+ renewer, bufferSize, xattrEncoding, excludeDatanodes);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@@ -759,7 +800,7 @@ public class NamenodeWebHdfsMethods {
try {
return get(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
- xattrNames, xattrEncoding);
+ xattrNames, xattrEncoding, excludeDatanodes);
} finally {
reset();
}
@@ -779,7 +820,8 @@ public class NamenodeWebHdfsMethods {
final RenewerParam renewer,
final BufferSizeParam bufferSize,
final List<XAttrNameParam> xattrNames,
- final XAttrEncodingParam xattrEncoding
+ final XAttrEncodingParam xattrEncoding,
+ final ExcludeDatanodesParam excludeDatanodes
) throws IOException, URISyntaxException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final NamenodeProtocols np = getRPCServer(namenode);
@@ -787,8 +829,9 @@ public class NamenodeWebHdfsMethods {
switch(op.getValue()) {
case OPEN:
{
- final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
- fullpath, op.getValue(), offset.getValue(), -1L, offset, length, bufferSize);
+ final URI uri = redirectURI(namenode, ugi, delegation, username,
+ doAsUser, fullpath, op.getValue(), offset.getValue(), -1L,
+ excludeDatanodes.getValue(), offset, length, bufferSize);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case GET_BLOCK_LOCATIONS:
@@ -824,7 +867,7 @@ public class NamenodeWebHdfsMethods {
case GETFILECHECKSUM:
{
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
- fullpath, op.getValue(), -1L, -1L);
+ fullpath, op.getValue(), -1L, -1L, null);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case GETDELEGATIONTOKEN:
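
The hunks above thread a new optional excludeDatanodes parameter through the
WebHDFS redirect path: chooseDatanode() resolves a comma-separated list of
datanodes (each entry either "host" or "host:xferPort") into a HashSet<Node>
of excludes, hands it to the block placement policy for CREATE, and bestNode()
now skips decommissioned or excluded replicas for OPEN and GETFILECHECKSUM
instead of blindly returning the first location. A minimal sketch of a client
request carrying the parameter, assuming the query key "excludedatanodes"
(from ExcludeDatanodesParam.NAME) and hypothetical host names:

  import java.net.URL;

  public class ExcludeDatanodesSketch {
    public static void main(String[] args) throws Exception {
      // Hypothetical NameNode and datanode addresses; entries may be "host"
      // or "host:xferPort", matching the parsing in chooseDatanode() above.
      URL open = new URL("http://nn.example.com:50070/webhdfs/v1/tmp/f1"
          + "?op=OPEN&excludedatanodes=dn1.example.com:50010,dn2.example.com");
      System.out.println(open);
    }
  }

The NameNode answering such a request redirects to the best replica that is
neither decommissioned nor on the exclude list.
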
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Mon Jul 21 21:44:50 2014
@@ -77,7 +77,7 @@ public class DFSck extends Configured im
private static final String USAGE = "Usage: DFSck <path> "
+ "[-list-corruptfileblocks | "
+ "[-move | -delete | -openforwrite] "
- + "[-files [-blocks [-locations | -racks]]]]\n"
+ + "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
+ "\t<path>\tstart checking from this path\n"
+ "\t-move\tmove corrupted files to /lost+found\n"
+ "\t-delete\tdelete corrupted files\n"
@@ -90,7 +90,8 @@ public class DFSck extends Configured im
+ "blocks and files they belong to\n"
+ "\t-blocks\tprint out block report\n"
+ "\t-locations\tprint out locations for every block\n"
- + "\t-racks\tprint out network topology for data-node locations\n\n"
+ + "\t-racks\tprint out network topology for data-node locations\n"
+ + "\t-showprogress\tshow progress in output. Default is OFF (no progress)\n\n"
+ "Please Note:\n"
+ "\t1. By default fsck ignores files opened for write, "
+ "use -openforwrite to report such files. They are usually "
@@ -270,6 +271,7 @@ public class DFSck extends Configured im
else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); }
else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
+ else if (args[idx].equals("-showprogress")) { url.append("&showprogress=1"); }
else if (args[idx].equals("-list-corruptfileblocks")) {
url.append("&listcorruptfileblocks=1");
doListCorruptFileBlocks = true;
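
The DFSck change is a straight pass-through: the new -showprogress flag is
documented in the usage string and mapped onto a showprogress=1 query
parameter for the NameNode's fsck servlet, e.g. "hdfs fsck / -files -blocks
-showprogress". A sketch of the query-string assembly, with an assumed base
URL (the real one is built from the caller's ugi and path):

  public class FsckUrlSketch {
    public static void main(String[] args) {
      // Assumed base query; DFSck appends one "&<option>=1" pair per flag.
      StringBuilder url = new StringBuilder("/fsck?ugi=alice&path=%2F");
      boolean showprogress = true; // set when -showprogress is passed
      if (showprogress) {
        url.append("&showprogress=1");
      }
      System.out.println(url); // /fsck?ugi=alice&path=%2F&showprogress=1
    }
  }
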
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Mon Jul 21 21:44:50 2014
@@ -158,7 +158,7 @@ public class WebHdfsFileSystem extends F
// getCanonicalUri() in order to handle the case where no port is
// specified in the URI
this.tokenServiceName = isLogicalUri ?
- HAUtil.buildTokenServiceForLogicalUri(uri)
+ HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
: SecurityUtil.buildTokenService(getCanonicalUri());
if (!isHA) {
@@ -448,6 +448,7 @@ public class WebHdfsFileSystem extends F
protected final HttpOpParam.Op op;
private final boolean redirected;
+ protected ExcludeDatanodesParam excludeDatanodes = new ExcludeDatanodesParam("");
private boolean checkRetry;
@@ -499,6 +500,10 @@ public class WebHdfsFileSystem extends F
* a DN such as open and checksum
*/
private HttpURLConnection connect(URL url) throws IOException {
+ //redirect hostname and port
+ String redirectHost = null;
+
+
// resolve redirects for a DN operation unless already resolved
if (op.getRedirect() && !redirected) {
final HttpOpParam.Op redirectOp =
@@ -511,11 +516,24 @@ public class WebHdfsFileSystem extends F
try {
validateResponse(redirectOp, conn, false);
url = new URL(conn.getHeaderField("Location"));
+ redirectHost = url.getHost() + ":" + url.getPort();
} finally {
conn.disconnect();
}
}
- return connect(op, url);
+ try {
+ return connect(op, url);
+ } catch (IOException ioe) {
+ if (redirectHost != null) {
+ if (excludeDatanodes.getValue() != null) {
+ excludeDatanodes = new ExcludeDatanodesParam(redirectHost + ","
+ + excludeDatanodes.getValue());
+ } else {
+ excludeDatanodes = new ExcludeDatanodesParam(redirectHost);
+ }
+ }
+ throw ioe;
+ }
}
private HttpURLConnection connect(final HttpOpParam.Op op, final URL url)
@@ -652,7 +670,14 @@ public class WebHdfsFileSystem extends F
@Override
protected URL getUrl() throws IOException {
- return toUrl(op, fspath, parameters);
+ if (excludeDatanodes.getValue() != null) {
+ Param<?, ?>[] tmpParam = new Param<?, ?>[parameters.length + 1];
+ System.arraycopy(parameters, 0, tmpParam, 0, parameters.length);
+ tmpParam[parameters.length] = excludeDatanodes;
+ return toUrl(op, fspath, tmpParam);
+ } else {
+ return toUrl(op, fspath, parameters);
+ }
}
}
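
On the client side, the runner now remembers the host:port it was redirected
to; if the follow-up datanode connection fails, that address is prepended to
excludeDatanodes before the IOException propagates, so the next retry asks the
NameNode for a different replica, and getUrl() appends the accumulated
parameter to the request. A standalone sketch of that copy-and-append step,
with strings standing in for Param<?, ?> objects:

  import java.util.Arrays;

  public class ParamAppendSketch {
    public static void main(String[] args) {
      // Mirrors the tmpParam logic in the getUrl() hunk above; the values
      // here are illustrative only.
      String[] parameters = {"op=OPEN", "offset=0"};
      String exclude = "excludedatanodes=dn1.example.com:50075";
      String[] withExclude = Arrays.copyOf(parameters, parameters.length + 1);
      withExclude[parameters.length] = exclude;
      System.out.println(Arrays.toString(withExclude));
    }
  }
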
Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1610851-1612402
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html Mon Jul 21 21:44:50 2014
@@ -24,6 +24,29 @@
<title>Browsing HDFS</title>
</head>
<body>
+
+ <header class="navbar navbar-inverse bs-docs-nav" role="banner">
+ <div class="container">
+ <div class="navbar-header">
+ <div class="navbar-brand">Hadoop</div>
+ </div>
+
+ <ul class="nav navbar-nav" id="ui-tabs">
+ <li><a href="dfshealth.html#tab-overview">Overview</a></li>
+ <li><a href="dfshealth.html#tab-datanode">Datanodes</a></li>
+ <li><a href="dfshealth.html#tab-snapshot">Snapshot</a></li>
+ <li><a href="dfshealth.html#tab-startup-progress">Startup Progress</a></li>
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">Utilities <b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="#">Browse the file system</a></li>
+ <li><a href="logs">Logs</a></li>
+ </ul>
+ </li>
+ </ul>
+ </div>
+ </header>
+
<div class="modal" id="file-info" tabindex="-1" role="dialog" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
@@ -73,6 +96,12 @@
</div>
<br />
<div id="panel"></div>
+
+ <div class="row">
+ <hr />
+ <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+ </div>
+
</div>
<script type="text/x-dust-template" id="tmpl-explorer">
@@ -126,7 +155,5 @@
</script><script type="text/javascript" src="/static/dfs-dust.js">
</script><script type="text/javascript" src="explorer.js">
</script>
- <hr />
- <p>Hadoop, 2014.</p>
</body>
</html>
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Mon Jul 21 21:44:50 2014
@@ -30,6 +30,8 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -79,6 +81,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
@@ -97,6 +100,8 @@ public class TestFileCreation {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
+ private static final String RPC_DETAILED_METRICS =
+ "RpcDetailedActivityForPort";
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
@@ -371,7 +376,7 @@ public class TestFileCreation {
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
-
+
UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
"testuser", new String[]{"testgroup"});
FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@@ -380,12 +385,16 @@ public class TestFileCreation {
return FileSystem.get(cluster.getConfiguration(0));
}
});
-
+
+ String metricsName = RPC_DETAILED_METRICS + cluster.getNameNodePort();
+
try {
Path p = new Path("/testfile");
FSDataOutputStream stm1 = fs.create(p);
stm1.write(1);
+ assertCounter("CreateNumOps", 1L, getMetrics(metricsName));
+
// Create file again without overwrite
try {
fs2.create(p, false);
@@ -394,7 +403,9 @@ public class TestFileCreation {
GenericTestUtils.assertExceptionContains("already being created by",
abce);
}
-
+ // NameNodeProxies' createNNProxyWithClientProtocol has 5 retries.
+ assertCounter("AlreadyBeingCreatedExceptionNumOps",
+ 6L, getMetrics(metricsName));
FSDataOutputStream stm2 = fs2.create(p, true);
stm2.write(2);
stm2.close();
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java Mon Jul 21 21:44:50 2014
@@ -25,14 +25,16 @@ import java.net.InetSocketAddress;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
@@ -76,16 +78,22 @@ public class TestIsMethodSupported {
@Test
public void testNamenodeProtocol() throws IOException {
- NamenodeProtocolTranslatorPB translator =
- (NamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(conf,
+ NamenodeProtocol np =
+ NameNodeProxies.createNonHAProxy(conf,
nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
- boolean exists = translator.isMethodSupported("rollEditLog");
+
+ boolean exists = RpcClientUtil.isMethodSupported(np,
+ NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+ RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog");
+
assertTrue(exists);
- exists = translator.isMethodSupported("bogusMethod");
+ exists = RpcClientUtil.isMethodSupported(np,
+ NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+ RPC.getProtocolVersion(NamenodeProtocolPB.class), "bogusMethod");
assertFalse(exists);
}
-
+
@Test
public void testDatanodeProtocol() throws IOException {
DatanodeProtocolClientSideTranslatorPB translator =
@@ -107,16 +115,18 @@ public class TestIsMethodSupported {
NetUtils.getDefaultSocketFactory(conf));
assertTrue(translator.isMethodSupported("refreshNamenodes"));
}
-
+
@Test
public void testClientNamenodeProtocol() throws IOException {
- ClientNamenodeProtocolTranslatorPB translator =
- (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
+ ClientProtocol cp =
+ NameNodeProxies.createNonHAProxy(
conf, nnAddress, ClientProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
- assertTrue(translator.isMethodSupported("mkdirs"));
+ RpcClientUtil.isMethodSupported(cp,
+ ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+ RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
}
-
+
@Test
public void tesJournalProtocol() throws IOException {
JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB)
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Mon Jul 21 21:44:50 2014
@@ -89,7 +89,8 @@ public class TestDelegationTokenForProxy
builder.append("127.0.1.1,");
builder.append(InetAddress.getLocalHost().getCanonicalHostName());
LOG.info("Local Ip addresses: " + builder.toString());
- conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName),
+ conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserIpConfKey(superUserShortName),
builder.toString());
}
@@ -101,7 +102,8 @@ public class TestDelegationTokenForProxy
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
config.setLong(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
- config.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER),
+ config.setStrings(DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserGroupConfKey(REAL_USER),
"group1");
config.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Mon Jul 21 21:44:50 2014
@@ -905,49 +905,46 @@ public class TestReplicationPolicy {
*/
@Test
public void testChooseReplicaToDelete() throws Exception {
- List<DatanodeDescriptor> replicaNodeList = new
- ArrayList<DatanodeDescriptor>();
- final Map<String, List<DatanodeDescriptor>> rackMap
- = new HashMap<String, List<DatanodeDescriptor>>();
+ List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
+ final Map<String, List<DatanodeStorageInfo>> rackMap
+ = new HashMap<String, List<DatanodeStorageInfo>>();
dataNodes[0].setRemaining(4*1024*1024);
- replicaNodeList.add(dataNodes[0]);
+ replicaList.add(storages[0]);
dataNodes[1].setRemaining(3*1024*1024);
- replicaNodeList.add(dataNodes[1]);
+ replicaList.add(storages[1]);
dataNodes[2].setRemaining(2*1024*1024);
- replicaNodeList.add(dataNodes[2]);
+ replicaList.add(storages[2]);
dataNodes[5].setRemaining(1*1024*1024);
- replicaNodeList.add(dataNodes[5]);
+ replicaList.add(storages[5]);
// Refresh the last update time for all the datanodes
for (int i = 0; i < dataNodes.length; i++) {
dataNodes[i].setLastUpdate(Time.now());
}
- List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
- List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
- replicator.splitNodesWithRack(
- replicaNodeList, rackMap, first, second);
- // dataNodes[0] and dataNodes[1] are in first set as their rack has two
- // replica nodes, while datanodes[2] and dataNodes[5] are in second set.
+ List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
+ List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
+ replicator.splitNodesWithRack(replicaList, rackMap, first, second);
+ // storages[0] and storages[1] are in first set as their rack has two
+ // replica nodes, while storages[2] and storages[5] are in the second set.
assertEquals(2, first.size());
assertEquals(2, second.size());
- DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
+ DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
null, null, (short)3, first, second);
- // Within first set, dataNodes[1] with less free space
- assertEquals(chosenNode, dataNodes[1]);
+ // Within first set, storages[1] with less free space
+ assertEquals(chosen, storages[1]);
- replicator.adjustSetsWithChosenReplica(
- rackMap, first, second, chosenNode);
+ replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(0, first.size());
assertEquals(3, second.size());
- // Within second set, dataNodes[5] with less free space
- chosenNode = replicator.chooseReplicaToDelete(
+ // Within second set, storages[5] with less free space
+ chosen = replicator.chooseReplicaToDelete(
null, null, (short)2, first, second);
- assertEquals(chosenNode, dataNodes[5]);
+ assertEquals(chosen, storages[5]);
}
/**
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java Mon Jul 21 21:44:50 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.test.PathUtils;
@@ -101,6 +102,7 @@ public class TestReplicationPolicyConsid
}
}
+ private final double EPSILON = 0.0001;
/**
* Tests that chooseTarget with considerLoad set to true correctly calculates
* load with decommissioned nodes.
@@ -109,14 +111,6 @@ public class TestReplicationPolicyConsid
public void testChooseTargetWithDecomNodes() throws IOException {
namenode.getNamesystem().writeLock();
try {
- // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
- // returns false
- for (int i = 0; i < 3; i++) {
- DatanodeInfo d = dnManager.getDatanodeByXferAddr(
- dnrList.get(i).getIpAddr(),
- dnrList.get(i).getXferPort());
- d.setDecommissioned();
- }
String blockPoolId = namenode.getNamesystem().getBlockPoolId();
dnManager.handleHeartbeat(dnrList.get(3),
BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
@@ -133,6 +127,20 @@ public class TestReplicationPolicyConsid
blockPoolId, dataNodes[5].getCacheCapacity(),
dataNodes[5].getCacheRemaining(),
4, 0, 0);
+ // total xceiver count reported in the above heartbeats
+ final int load = 2 + 4 + 4;
+
+ FSNamesystem fsn = namenode.getNamesystem();
+ assertEquals((double)load/6, fsn.getInServiceXceiverAverage(), EPSILON);
+
+ // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
+ // returns false
+ for (int i = 0; i < 3; i++) {
+ DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
+ dnManager.startDecommission(d);
+ d.setDecommissioned();
+ }
+ assertEquals((double)load/3, fsn.getInServiceXceiverAverage(), EPSILON);
// Call chooseTarget()
DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
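
The arithmetic behind the new assertions: the heartbeats above report xceiver
counts of 2, 4 and 4, so with all six datanodes in service the average is
(2 + 4 + 4) / 6; once three nodes are decommissioned, the same total is spread
over the three remaining in-service nodes, giving 10 / 3. A worked check:

  public class XceiverAverageSketch {
    public static void main(String[] args) {
      int load = 2 + 4 + 4;                  // counts from the heartbeats
      System.out.println((double) load / 6); // all 6 in service: ~1.6667
      System.out.println((double) load / 3); // after 3 decommissions: ~3.3333
    }
  }
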
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java Mon Jul 21 21:44:50 2014
@@ -591,51 +591,50 @@ public class TestReplicationPolicyWithNo
*/
@Test
public void testChooseReplicaToDelete() throws Exception {
- List<DatanodeDescriptor> replicaNodeList =
- new ArrayList<DatanodeDescriptor>();
- final Map<String, List<DatanodeDescriptor>> rackMap =
- new HashMap<String, List<DatanodeDescriptor>>();
+ List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
+ final Map<String, List<DatanodeStorageInfo>> rackMap
+ = new HashMap<String, List<DatanodeStorageInfo>>();
dataNodes[0].setRemaining(4*1024*1024);
- replicaNodeList.add(dataNodes[0]);
+ replicaList.add(storages[0]);
dataNodes[1].setRemaining(3*1024*1024);
- replicaNodeList.add(dataNodes[1]);
+ replicaList.add(storages[1]);
dataNodes[2].setRemaining(2*1024*1024);
- replicaNodeList.add(dataNodes[2]);
+ replicaList.add(storages[2]);
dataNodes[5].setRemaining(1*1024*1024);
- replicaNodeList.add(dataNodes[5]);
+ replicaList.add(storages[5]);
- List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
- List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
+ List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
+ List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
replicator.splitNodesWithRack(
- replicaNodeList, rackMap, first, second);
+ replicaList, rackMap, first, second);
assertEquals(3, first.size());
assertEquals(1, second.size());
- DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
+ DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
null, null, (short)3, first, second);
// Within first set {dataNodes[0], dataNodes[1], dataNodes[2]},
// dataNodes[0] and dataNodes[1] are in the same nodegroup,
// but dataNodes[1] is chosen as less free space
- assertEquals(chosenNode, dataNodes[1]);
+ assertEquals(chosen, storages[1]);
- replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
+ replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(2, first.size());
assertEquals(1, second.size());
// Within first set {dataNodes[0], dataNodes[2]}, dataNodes[2] is chosen
// as less free space
- chosenNode = replicator.chooseReplicaToDelete(
+ chosen = replicator.chooseReplicaToDelete(
null, null, (short)2, first, second);
- assertEquals(chosenNode, dataNodes[2]);
+ assertEquals(chosen, storages[2]);
- replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
+ replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(0, first.size());
assertEquals(2, second.size());
// Within second set, dataNodes[5] with less free space
- chosenNode = replicator.chooseReplicaToDelete(
+ chosen = replicator.chooseReplicaToDelete(
null, null, (short)1, first, second);
- assertEquals(chosenNode, dataNodes[5]);
+ assertEquals(chosen, storages[5]);
}
/**
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Mon Jul 21 21:44:50 2014
@@ -285,8 +285,10 @@ public class TestJspHelper {
String user = "TheNurse";
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- conf.set(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(realUser), "*");
- conf.set(DefaultImpersonationProvider.getProxySuperuserIpConfKey(realUser), "*");
+ conf.set(DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserGroupConfKey(realUser), "*");
+ conf.set(DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserIpConfKey(realUser), "*");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi;
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java Mon Jul 21 21:44:50 2014
@@ -18,9 +18,11 @@
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
+import static org.junit.Assert.*;
import java.io.File;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -28,12 +30,21 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;
@@ -153,4 +164,177 @@ public class TestNamenodeCapacityReport
if (cluster != null) {cluster.shutdown();}
}
}
+
+ private static final float EPSILON = 0.0001f;
+ @Test
+ public void testXceiverCount() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ // don't waste time retrying if close fails
+ conf.setInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 0);
+ MiniDFSCluster cluster = null;
+
+ final int nodes = 8;
+ final int fileCount = 5;
+ final short fileRepl = 3;
+
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(nodes).build();
+ cluster.waitActive();
+
+ final FSNamesystem namesystem = cluster.getNamesystem();
+ final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
+ List<DataNode> datanodes = cluster.getDataNodes();
+ final DistributedFileSystem fs = cluster.getFileSystem();
+
+ // trigger heartbeats in case not already sent
+ triggerHeartbeats(datanodes);
+
+ // check that all nodes are live and in service
+ int expectedTotalLoad = nodes; // xceiver server adds 1 to load
+ int expectedInServiceNodes = nodes;
+ int expectedInServiceLoad = nodes;
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceLoad,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+
+ // shutdown half the nodes and force a heartbeat check to ensure
+ // counts are accurate
+ for (int i=0; i < nodes/2; i++) {
+ DataNode dn = datanodes.get(i);
+ DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
+ dn.shutdown();
+ dnd.setLastUpdate(0L);
+ BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
+ expectedInServiceNodes--;
+ assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ }
+
+ // restart the nodes to verify that counts are correct after
+ // node re-registration
+ cluster.restartDataNodes();
+ cluster.waitActive();
+ datanodes = cluster.getDataNodes();
+ expectedInServiceNodes = nodes;
+ assertEquals(nodes, datanodes.size());
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceLoad,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+
+ // create streams and hsync to force datastreamers to start
+ DFSOutputStream[] streams = new DFSOutputStream[fileCount];
+ for (int i=0; i < fileCount; i++) {
+ streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i), fileRepl)
+ .getWrappedStream();
+ streams[i].write("1".getBytes());
+ streams[i].hsync();
+ // the load for writers is 2 because both the write xceiver & packet
+ // responder threads are counted in the load
+ expectedTotalLoad += 2*fileRepl;
+ expectedInServiceLoad += 2*fileRepl;
+ }
+ // force nodes to send load update
+ triggerHeartbeats(datanodes);
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes,
+ namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+
+ // decomm a few nodes, subtract their load from the expected load,
+ // trigger heartbeat to force load update
+ for (int i=0; i < fileRepl; i++) {
+ expectedInServiceNodes--;
+ DatanodeDescriptor dnd =
+ dnm.getDatanode(datanodes.get(i).getDatanodeId());
+ expectedInServiceLoad -= dnd.getXceiverCount();
+ dnm.startDecommission(dnd);
+ DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
+ Thread.sleep(100);
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes,
+ namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+ }
+
+ // check expected load while closing each stream. recalc expected
+ // load based on whether the nodes in the pipeline are decomm
+ for (int i=0; i < fileCount; i++) {
+ int decomm = 0;
+ for (DatanodeInfo dni : streams[i].getPipeline()) {
+ DatanodeDescriptor dnd = dnm.getDatanode(dni);
+ expectedTotalLoad -= 2;
+ if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
+ decomm++;
+ } else {
+ expectedInServiceLoad -= 2;
+ }
+ }
+ try {
+ streams[i].close();
+ } catch (IOException ioe) {
+ // nodes will go decommissioned even if there's a UC block whose
+ // other locations are decommissioned too. we'll ignore that
+ // bug for now
+ if (decomm < fileRepl) {
+ throw ioe;
+ }
+ }
+ triggerHeartbeats(datanodes);
+ // verify node count and loads
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes,
+ namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+ }
+
+ // shutdown each node, verify node counts based on decomm state
+ for (int i=0; i < nodes; i++) {
+ DataNode dn = datanodes.get(i);
+ dn.shutdown();
+ // force it to appear dead so live count decreases
+ DatanodeDescriptor dnDesc = dnm.getDatanode(dn.getDatanodeId());
+ dnDesc.setLastUpdate(0L);
+ BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
+ assertEquals(nodes-1-i, namesystem.getNumLiveDataNodes());
+ // first few nodes are already out of service
+ if (i >= fileRepl) {
+ expectedInServiceNodes--;
+ }
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+
+ // live nodes always report a load of 1; zero live nodes means load 0
+ double expectedXceiverAvg = (i == nodes-1) ? 0.0 : 1.0;
+ assertEquals((double)expectedXceiverAvg,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+ }
+
+ // final sanity check
+ assertEquals(0, namesystem.getNumLiveDataNodes());
+ assertEquals(0, namesystem.getNumDatanodesInService());
+ assertEquals(0.0, namesystem.getTotalLoad(), EPSILON);
+ assertEquals(0.0, namesystem.getInServiceXceiverAverage(), EPSILON);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ private void triggerHeartbeats(List<DataNode> datanodes)
+ throws IOException, InterruptedException {
+ for (DataNode dn : datanodes) {
+ DataNodeTestUtils.triggerHeartbeat(dn);
+ }
+ Thread.sleep(100);
+ }
}
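
The load bookkeeping in testXceiverCount follows the two rules stated in its
comments: every live datanode's xceiver server contributes 1 to the load, and
every replica of an open write pipeline contributes 2 (the write xceiver plus
the packet responder). With 8 nodes, 5 files and replication 3, the expected
total load once all streams are open is therefore 8 + 5 * 2 * 3 = 38. A worked
check of that accounting:

  public class ExpectedLoadSketch {
    public static void main(String[] args) {
      int nodes = 8, fileCount = 5, fileRepl = 3;
      int load = nodes;                 // one xceiver server per live DN
      load += fileCount * 2 * fileRepl; // writer + responder per replica
      System.out.println(load);         // 38
    }
  }
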
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java Mon Jul 21 21:44:50 2014
@@ -21,6 +21,8 @@ import static org.junit.Assert.assertEqu
import static org.junit.Assert.assertTrue;
import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
@@ -30,11 +32,15 @@ import org.apache.hadoop.hdfs.server.com
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
/**
* This class tests various upgrade cases from earlier versions to current
* version with and without clusterid.
*/
+@RunWith(value = Parameterized.class)
public class TestStartupOptionUpgrade {
private Configuration conf;
@@ -42,10 +48,21 @@ public class TestStartupOptionUpgrade {
private int layoutVersion;
NNStorage storage;
+ @Parameters
+ public static Collection<Object[]> startOption() {
+ Object[][] params = new Object[][] { { StartupOption.UPGRADE },
+ { StartupOption.UPGRADEONLY } };
+ return Arrays.asList(params);
+ }
+
+ public TestStartupOptionUpgrade(StartupOption startOption) {
+ super();
+ this.startOpt = startOption;
+ }
+
@Before
public void setUp() throws Exception {
conf = new HdfsConfiguration();
- startOpt = StartupOption.UPGRADE;
startOpt.setClusterId(null);
storage = new NNStorage(conf,
Collections.<URI>emptyList(),
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java Mon Jul 21 21:44:50 2014
@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.server.blo
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -585,15 +585,14 @@ public class TestDNFencing {
}
@Override
- public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
+ public DatanodeStorageInfo chooseReplicaToDelete(BlockCollection inode,
Block block, short replicationFactor,
- Collection<DatanodeDescriptor> first,
- Collection<DatanodeDescriptor> second) {
+ Collection<DatanodeStorageInfo> first,
+ Collection<DatanodeStorageInfo> second) {
- Collection<DatanodeDescriptor> chooseFrom =
- !first.isEmpty() ? first : second;
+ Collection<DatanodeStorageInfo> chooseFrom = !first.isEmpty() ? first : second;
- List<DatanodeDescriptor> l = Lists.newArrayList(chooseFrom);
+ List<DatanodeStorageInfo> l = Lists.newArrayList(chooseFrom);
return l.get(DFSUtil.getRandom().nextInt(l.size()));
}
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Mon Jul 21 21:44:50 2014
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.Distribute
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -299,7 +300,8 @@ public class TestDelegationTokensWithHA
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
URI haUri = new URI("hdfs://my-ha-uri/");
- token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
+ token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,
+ HdfsConstants.HDFS_URI_SCHEME));
ugi.addToken(token);
Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
@@ -355,7 +357,8 @@ public class TestDelegationTokensWithHA
@Test
public void testDFSGetCanonicalServiceName() throws Exception {
URI hAUri = HATestUtil.getLogicalUri(cluster);
- String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
+ String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
+ HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, dfs.getCanonicalServiceName());
final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
final Token<DelegationTokenIdentifier> token =
@@ -371,7 +374,8 @@ public class TestDelegationTokensWithHA
Configuration conf = dfs.getConf();
URI haUri = HATestUtil.getLogicalUri(cluster);
AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
- String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
+ String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+ HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, afs.getCanonicalServiceName());
Token<?> token = afs.getDelegationTokens(
UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
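
As the hunks above show, HAUtil.buildTokenServiceForLogicalUri now takes the URI scheme as an explicit argument instead of assuming hdfs://, and every HDFS call site passes HdfsConstants.HDFS_URI_SCHEME. A minimal sketch of the updated call pattern (the wrapper class and URI value are illustrative; the return type is inferred from the test's direct use in token.setService):

    import java.net.URI;
    import java.net.URISyntaxException;

    import org.apache.hadoop.hdfs.HAUtil;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;

    public class TokenServiceExample {
      // Binds a token to an HA logical URI; the scheme is now an explicit
      // argument rather than being hard-coded inside HAUtil.
      static void bindToLogicalUri(Token<?> token) throws URISyntaxException {
        URI logicalUri = new URI("hdfs://my-ha-uri/");
        Text service = HAUtil.buildTokenServiceForLogicalUri(
            logicalUri, HdfsConstants.HDFS_URI_SCHEME);
        token.setService(service);
      }
    }
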
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java Mon Jul 21 21:44:50 2014
@@ -92,7 +92,7 @@ public class TestWebHdfsDataLocality {
//The chosen datanode must have the same address as the client
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, PutOpParam.Op.CREATE, -1L, blocksize);
+ namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, null);
Assert.assertEquals(ipAddr, chosen.getIpAddr());
}
}
@@ -117,23 +117,104 @@ public class TestWebHdfsDataLocality {
{ //test GETFILECHECKSUM
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize);
+ namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, null);
Assert.assertEquals(expected, chosen);
}
{ //test OPEN
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, GetOpParam.Op.OPEN, 0, blocksize);
+ namenode, f, GetOpParam.Op.OPEN, 0, blocksize, null);
Assert.assertEquals(expected, chosen);
}
{ //test APPEND
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, PostOpParam.Op.APPEND, -1L, blocksize);
+ namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, null);
Assert.assertEquals(expected, chosen);
}
} finally {
cluster.shutdown();
}
}
+
+ @Test
+ public void testExcludeDataNodes() throws Exception {
+ final Configuration conf = WebHdfsTestUtil.createConf();
+ final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
+ final String[] hosts = {"DataNode1", "DataNode2", "DataNode3", "DataNode4", "DataNode5", "DataNode6"};
+ final int nDataNodes = hosts.length;
+ LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks)
+ + ", hosts=" + Arrays.asList(hosts));
+
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
+
+ try {
+ cluster.waitActive();
+
+ final DistributedFileSystem dfs = cluster.getFileSystem();
+ final NameNode namenode = cluster.getNameNode();
+ final DatanodeManager dm = namenode.getNamesystem().getBlockManager(
+ ).getDatanodeManager();
+ LOG.info("dm=" + dm);
+
+ final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+ final String f = "/foo";
+
+ //create a file with three replicas.
+ final Path p = new Path(f);
+ final FSDataOutputStream out = dfs.create(p, (short)3);
+ out.write(1);
+ out.close();
+
+ //get the replica locations.
+ final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
+ namenode, f, 0, 1);
+ final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
+ Assert.assertEquals(1, lb.size());
+ final DatanodeInfo[] locations = lb.get(0).getLocations();
+ Assert.assertEquals(3, locations.length);
+
+ //For GETFILECHECKSUM, OPEN and APPEND,
+ //the chosen datanode must be different from the excluded nodes.
+
+ StringBuffer sb = new StringBuffer();
+ for (int i = 0; i < 2; i++) {
+ sb.append(locations[i].getXferAddr());
+ { // test GETFILECHECKSUM
+ final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+ namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize,
+ sb.toString());
+ for (int j = 0; j <= i; j++) {
+ Assert.assertNotEquals(locations[j].getHostName(),
+ chosen.getHostName());
+ }
+ }
+
+ { // test OPEN
+ final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+ namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString());
+ for (int j = 0; j <= i; j++) {
+ Assert.assertNotEquals(locations[j].getHostName(),
+ chosen.getHostName());
+ }
+ }
+
+ { // test APPEND
+ final DatanodeInfo chosen = NamenodeWebHdfsMethods
+ .chooseDatanode(namenode, f, PostOpParam.Op.APPEND, -1L,
+ blocksize, sb.toString());
+ for (int j = 0; j <= i; j++) {
+ Assert.assertNotEquals(locations[j].getHostName(),
+ chosen.getHostName());
+ }
+ }
+
+ sb.append(",");
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
}
\ No newline at end of file
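
The new excludeDatanodes argument to chooseDatanode is a comma-separated list of datanode addresses, each either host:port (a transfer address) or a bare hostname; testExcludeDataNodes grows that list one replica holder at a time and verifies the chosen node is never in it. A standalone sketch of assembling such a list (addresses are placeholder values):

    public class ExcludeListExample {
      public static void main(String[] args) {
        // Transfer addresses of replica holders to exclude; placeholders.
        String[] xferAddrs = { "10.0.0.1:50010", "10.0.0.2:50010", "10.0.0.3:50010" };
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < xferAddrs.length - 1; i++) {
          if (i > 0) {
            sb.append(",");
          }
          sb.append(xferAddrs[i]);
          // Each pass hands a longer exclude list to chooseDatanode, as the
          // test above does with sb.toString().
          System.out.println("excludes: " + sb);
        }
      }
    }
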
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java Mon Jul 21 21:44:50 2014
@@ -21,6 +21,7 @@ import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import com.google.common.base.Charsets;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -46,6 +47,7 @@ public class TestDFSAdminWithHA {
private PrintStream originErr;
private static final String NSID = "ns1";
+ private static String newLine = System.getProperty("line.separator");
private void assertOutputMatches(String string) {
String errOutput = new String(out.toByteArray(), Charsets.UTF_8);
@@ -99,6 +101,14 @@ public class TestDFSAdminWithHA {
System.err.flush();
System.setOut(originOut);
System.setErr(originErr);
+ if (admin != null) {
+ admin.close();
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ out.reset();
+ err.reset();
}
@Test(timeout = 30000)
@@ -108,25 +118,25 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Leave safemode
exitCode = admin.run(new String[] {"-safemode", "leave"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -136,12 +146,12 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-saveNamespace"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Save namespace successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -151,17 +161,17 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "restoreFailedStorage is set to false for.*";
// Default is false
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to true for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to false for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -170,7 +180,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshNodes"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh nodes successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -179,7 +189,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Balancer bandwidth is set to 10 for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -189,7 +199,7 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Created metasave file dfs.meta in the log directory"
+ " of namenode.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -198,7 +208,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh service acl successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -207,7 +217,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh user to groups mapping successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -217,7 +227,7 @@ public class TestDFSAdminWithHA {
new String[] {"-refreshSuperUserGroupsConfiguration"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh super user groups configuration successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -226,6 +236,6 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshCallQueue"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh call queue successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
}
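
The \n-to-newLine change makes these output assertions platform-independent: the CLI writes lines that end with System.getProperty("line.separator"), which is "\r\n" on Windows, so a hard-coded "\n" in the expected regex fails there. A standalone sketch of the matching behavior (the output strings are illustrative):

    public class NewlineMatchExample {
      private static final String newLine = System.getProperty("line.separator");

      public static void main(String[] args) {
        // Simulated two-namenode CLI output, one line per namenode.
        String output = "Safe mode is ON in nn1" + newLine
            + "Safe mode is ON in nn2" + newLine;
        String message = "Safe mode is ON in.*";
        // '.' does not match line terminators, so each wildcard stops at the
        // end of its line and the literal separator is matched next.
        System.out.println(output.matches(
            message + newLine + message + newLine)); // prints true
      }
    }
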
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java?rev=1612403&r1=1612402&r2=1612403&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java Mon Jul 21 21:44:50 2014
@@ -151,8 +151,10 @@ public class TestRefreshUserMappings {
final String [] GROUP_NAMES2 = new String [] {"gr3" , "gr4"};
//keys in conf
- String userKeyGroups = DefaultImpersonationProvider.getProxySuperuserGroupConfKey(SUPER_USER);
- String userKeyHosts = DefaultImpersonationProvider.getProxySuperuserIpConfKey (SUPER_USER);
+ String userKeyGroups = DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserGroupConfKey(SUPER_USER);
+ String userKeyHosts = DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserIpConfKey(SUPER_USER);
config.set(userKeyGroups, "gr3,gr4,gr5"); // superuser can proxy for these groups
config.set(userKeyHosts,"127.0.0.1");
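
Here getProxySuperuserGroupConfKey and getProxySuperuserIpConfKey have moved from static methods to instance methods, and tests obtain an instance through DefaultImpersonationProvider.getTestProvider(). A minimal sketch of the updated configuration pattern (the superuser name and values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;

    public class ProxyConfExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        String superUser = "super";
        // Conf keys are now derived from a provider instance.
        conf.set(DefaultImpersonationProvider.getTestProvider()
            .getProxySuperuserGroupConfKey(superUser), "gr3,gr4,gr5");
        conf.set(DefaultImpersonationProvider.getTestProvider()
            .getProxySuperuserIpConfKey(superUser), "127.0.0.1");
      }
    }
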