Posted to hdfs-commits@hadoop.apache.org by cn...@apache.org on 2013/12/09 23:52:05 UTC
svn commit: r1549699 [2/2] - in
/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/net/
src/main/java/org/apache/hadoop/hdfs/qjournal/client/ s...
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Mon Dec 9 22:52:02 2013
@@ -17,12 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY;
import static org.apache.hadoop.util.ExitUtil.terminate;
import java.io.File;
@@ -30,6 +24,7 @@ import java.io.FilenameFilter;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
+import java.net.URL;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
@@ -69,6 +64,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.ipc.RemoteException;
@@ -77,7 +73,6 @@ import org.apache.hadoop.metrics2.source
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
@@ -111,7 +106,7 @@ public class SecondaryNameNode implement
private final long starttime = Time.now();
private volatile long lastCheckpointTime = 0;
- private String fsName;
+ private URL fsName;
private CheckpointStorage checkpointImage;
private NamenodeProtocol namenode;
@@ -119,8 +114,7 @@ public class SecondaryNameNode implement
private InetSocketAddress nameNodeAddr;
private volatile boolean shouldRun;
private HttpServer infoServer;
- private int infoPort;
- private String infoBindAddress;
+ private URL imageListenURL;
private Collection<URI> checkpointDirs;
private List<URI> checkpointEditsDirs;
@@ -208,8 +202,8 @@ public class SecondaryNameNode implement
public static InetSocketAddress getHttpAddress(Configuration conf) {
return NetUtils.createSocketAddr(conf.get(
- DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
- DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
+ DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+ DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
}
/**
@@ -219,17 +213,19 @@ public class SecondaryNameNode implement
private void initialize(final Configuration conf,
CommandLineOpts commandLineOpts) throws IOException {
final InetSocketAddress infoSocAddr = getHttpAddress(conf);
- infoBindAddress = infoSocAddr.getHostName();
+ final String infoBindAddress = infoSocAddr.getHostName();
UserGroupInformation.setConfiguration(conf);
if (UserGroupInformation.isSecurityEnabled()) {
- SecurityUtil.login(conf, DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
- DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
+ SecurityUtil.login(conf,
+ DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
+ DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
}
// initiate Java VM metrics
DefaultMetricsSystem.initialize("SecondaryNameNode");
JvmMetrics.create("SecondaryNameNode",
- conf.get(DFS_METRICS_SESSION_ID_KEY), DefaultMetricsSystem.instance());
-
+ conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
+ DefaultMetricsSystem.instance());
+
// Create connection to the namenode.
shouldRun = true;
nameNodeAddr = NameNode.getServiceAddress(conf, true);
@@ -254,19 +250,19 @@ public class SecondaryNameNode implement
// Initialize other scheduling parameters from the configuration
checkpointConf = new CheckpointConf(conf);
- // initialize the webserver for uploading files.
- int tmpInfoPort = infoSocAddr.getPort();
- URI httpEndpoint = URI.create("http://" + NetUtils.getHostPortString(infoSocAddr));
-
- infoServer = new HttpServer.Builder().setName("secondary")
- .addEndpoint(httpEndpoint)
- .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
- new AccessControlList(conf.get(DFS_ADMIN, " ")))
- .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
- .setUsernameConfKey(
- DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
- .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
- DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build();
+ final InetSocketAddress httpAddr = infoSocAddr;
+
+ final String httpsAddrString = conf.get(
+ DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
+ DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
+ InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
+
+ HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+ httpAddr, httpsAddr, "secondary",
+ DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
+ DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
+
+ infoServer = builder.build();
infoServer.setAttribute("secondary.name.node", this);
infoServer.setAttribute("name.system.image", checkpointImage);
@@ -276,14 +272,25 @@ public class SecondaryNameNode implement
infoServer.start();
LOG.info("Web server init done");
+ imageListenURL = new URL(DFSUtil.getHttpClientScheme(conf) + "://"
+ + NetUtils.getHostPortString(infoServer.getConnectorAddress(0)));
- // The web-server port can be ephemeral... ensure we have the correct info
- infoPort = infoServer.getConnectorAddress(0).getPort();
+ HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+ int connIdx = 0;
+ if (policy.isHttpEnabled()) {
+ InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
+ conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+ NetUtils.getHostPortString(httpAddress));
+ }
+
+ if (policy.isHttpsEnabled()) {
+ InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
+ conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
+ NetUtils.getHostPortString(httpsAddress));
+ }
- conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
- LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
- LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " +
- "(" + checkpointConf.getPeriod() / 60 + " min)");
+ LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs "
+ + "(" + checkpointConf.getPeriod() / 60 + " min)");
LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}
@@ -404,7 +411,7 @@ public class SecondaryNameNode implement
* @throws IOException
*/
static boolean downloadCheckpointFiles(
- final String nnHostPort,
+ final URL nnHostPort,
final FSImage dstImage,
final CheckpointSignature sig,
final RemoteEditLogManifest manifest
@@ -467,25 +474,25 @@ public class SecondaryNameNode implement
/**
* Returns the Jetty server that the Namenode is listening on.
*/
- private String getInfoServer() throws IOException {
+ private URL getInfoServer() throws IOException {
URI fsName = FileSystem.getDefaultUri(conf);
if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
throw new IOException("This is not a DFS");
}
- String configuredAddress = DFSUtil.getInfoServer(null, conf, false);
- String address = DFSUtil.substituteForWildcardAddress(configuredAddress,
- fsName.getHost());
- LOG.debug("Will connect to NameNode at HTTP address: " + address);
- return address;
+ final String scheme = DFSUtil.getHttpClientScheme(conf);
+ URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
+ scheme);
+ LOG.debug("Will connect to NameNode at " + address);
+ return address.toURL();
}
/**
* Return the host:port of where this SecondaryNameNode is listening
* for image transfers
*/
- private InetSocketAddress getImageListenAddress() {
- return new InetSocketAddress(infoBindAddress, infoPort);
+ private URL getImageListenAddress() {
+ return imageListenURL;
}
/**
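The SecondaryNameNode changes above replace host:port strings with java.net.URL throughout the checkpoint path: fsName, the image listen address, and the info server are now full URLs carrying the scheme selected by the HTTP policy. A minimal JDK-only sketch of how such an advertised URL is assembled from the chosen scheme and the bound connector address (class and method names here are illustrative, not Hadoop APIs):

    import java.net.InetSocketAddress;
    import java.net.MalformedURLException;
    import java.net.URL;

    class ImageListenUrlSketch {
      // Combine the scheme chosen by the HTTP policy (http or https) with the
      // address the web server actually bound to, which matters when the
      // configured port was 0 (ephemeral).
      static URL imageListenUrl(String scheme, InetSocketAddress bound)
          throws MalformedURLException {
        return new URL(scheme + "://" + bound.getHostName() + ":" + bound.getPort());
      }
    }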
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Mon Dec 9 22:52:02 2013
@@ -17,13 +17,18 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import java.io.*;
-import java.net.*;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.URL;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.List;
-import java.lang.Math;
import javax.servlet.ServletOutputStream;
import javax.servlet.ServletResponse;
@@ -41,14 +46,16 @@ import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
@@ -76,15 +83,15 @@ public class TransferFsImage {
private static final Log LOG = LogFactory.getLog(TransferFsImage.class);
- public static void downloadMostRecentImageToDirectory(String fsName,
+ public static void downloadMostRecentImageToDirectory(URL infoServer,
File dir) throws IOException {
String fileId = GetImageServlet.getParamStringForMostRecentImage();
- getFileClient(fsName, fileId, Lists.newArrayList(dir),
+ getFileClient(infoServer, fileId, Lists.newArrayList(dir),
null, false);
}
public static MD5Hash downloadImageToStorage(
- String fsName, long imageTxId, Storage dstStorage, boolean needDigest)
+ URL fsName, long imageTxId, Storage dstStorage, boolean needDigest)
throws IOException {
String fileid = GetImageServlet.getParamStringForImage(
imageTxId, dstStorage);
@@ -102,7 +109,7 @@ public class TransferFsImage {
return hash;
}
- static void downloadEditsToStorage(String fsName, RemoteEditLog log,
+ static void downloadEditsToStorage(URL fsName, RemoteEditLog log,
NNStorage dstStorage) throws IOException {
assert log.getStartTxId() > 0 && log.getEndTxId() > 0 :
"bad log: " + log;
@@ -156,17 +163,17 @@ public class TransferFsImage {
* Requests that the NameNode download an image from this node.
*
* @param fsName the http address for the remote NN
- * @param imageListenAddress the host/port where the local node is running an
+ * @param myNNAddress the host/port where the local node is running an
* HTTPServer hosting GetImageServlet
* @param storage the storage directory to transfer the image from
* @param txid the transaction ID of the image to be uploaded
*/
- public static void uploadImageFromStorage(String fsName,
- InetSocketAddress imageListenAddress,
+ public static void uploadImageFromStorage(URL fsName,
+ URL myNNAddress,
Storage storage, long txid) throws IOException {
String fileid = GetImageServlet.getParamStringToPutImage(
- txid, imageListenAddress, storage);
+ txid, myNNAddress, storage);
// this doesn't directly upload an image, but rather asks the NN
// to connect back to the 2NN to download the specified image.
try {
@@ -244,17 +251,11 @@ public class TransferFsImage {
* this storage object will be notified.
* @Return a digest of the received file if getChecksum is true
*/
- static MD5Hash getFileClient(String nnHostPort,
+ static MD5Hash getFileClient(URL infoServer,
String queryString, List<File> localPaths,
Storage dstStorage, boolean getChecksum) throws IOException {
-
- String str = HttpConfig.getSchemePrefix() + nnHostPort + "/getimage?" +
- queryString;
- LOG.info("Opening connection to " + str);
- //
- // open connection to remote server
- //
- URL url = new URL(str);
+ URL url = new URL(infoServer, "/getimage?" + queryString);
+ LOG.info("Opening connection to " + url);
return doGetUrl(url, localPaths, dstStorage, getChecksum);
}
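The rewritten getFileClient leans on java.net.URL's two-argument constructor: resolving the servlet path against a base URL keeps whatever scheme, host, and port the base carries, so the same code path now serves both HTTP and HTTPS without string prefixes. A small self-contained illustration (host and query string are made up):

    import java.net.MalformedURLException;
    import java.net.URL;

    class GetImageUrlSketch {
      public static void main(String[] args) throws MalformedURLException {
        URL infoServer = new URL("https://nn.example.com:50470");
        // A spec beginning with "/" replaces the path but preserves
        // scheme, host, and port of the base URL.
        URL url = new URL(infoServer, "/getimage?getimage=1&txid=latest");
        System.out.println(url);
        // -> https://nn.example.com:50470/getimage?getimage=1&txid=latest
      }
    }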
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Mon Dec 9 22:52:02 2013
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFS
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
+import java.net.URL;
import java.security.PrivilegedAction;
import java.util.Collection;
import java.util.List;
@@ -69,7 +70,7 @@ public class BootstrapStandby implements
private String nnId;
private String otherNNId;
- private String otherHttpAddr;
+ private URL otherHttpAddr;
private InetSocketAddress otherIpcAddr;
private Collection<URI> dirsToFormat;
private List<URI> editUrisToFormat;
@@ -179,6 +180,7 @@ public class BootstrapStandby implements
// Check with the user before blowing away data.
if (!Storage.confirmFormat(storage.dirIterable(null),
force, interactive)) {
+ storage.close();
return ERR_CODE_ALREADY_FORMATTED;
}
@@ -203,7 +205,7 @@ public class BootstrapStandby implements
// Download that checkpoint into our storage directories.
MD5Hash hash = TransferFsImage.downloadImageToStorage(
- otherHttpAddr.toString(), imageTxId,
+ otherHttpAddr, imageTxId,
storage, true);
image.saveDigestAndRenameCheckpointImage(imageTxId, hash);
return 0;
@@ -276,11 +278,10 @@ public class BootstrapStandby implements
"Could not determine valid IPC address for other NameNode (%s)" +
", got: %s", otherNNId, otherIpcAddr);
- otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
- otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
- otherIpcAddr.getHostName());
-
-
+ final String scheme = DFSUtil.getHttpClientScheme(conf);
+ otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
+ otherIpcAddr.getHostName(), otherNode, scheme).toURL();
+
dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
conf, false);
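BootstrapStandby previously fetched a configured host:port string and substituted the wildcard bind address by hand; DFSUtil.getInfoServerWithDefaultHost now folds that substitution into one call that also returns a scheme-qualified URI. A rough JDK-only sketch of the substitution, under the assumption that this is what the helper performs:

    import java.net.URI;
    import java.net.URISyntaxException;

    class InfoServerUriSketch {
      // If the configured web address is the wildcard 0.0.0.0, substitute
      // the host derived from the peer's IPC address.
      static URI withDefaultHost(String scheme, String configuredHost,
          int port, String defaultHost) throws URISyntaxException {
        String host = "0.0.0.0".equals(configuredHost) ? defaultHost : configuredHost;
        return new URI(scheme, null, host, port, null, null, null);
      }
    }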
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Mon Dec 9 22:52:02 2013
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.server.na
import static org.apache.hadoop.util.Time.now;
import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URL;
import java.security.PrivilegedAction;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
@@ -43,7 +44,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceCancelledException;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.hdfs.util.Canceler;
-import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
@@ -66,8 +66,8 @@ public class StandbyCheckpointer {
private long lastCheckpointTime;
private final CheckpointerThread thread;
private final ThreadFactory uploadThreadFactory;
- private String activeNNAddress;
- private InetSocketAddress myNNAddress;
+ private URL activeNNAddress;
+ private URL myNNAddress;
private Object cancelLock = new Object();
private Canceler canceler;
@@ -94,7 +94,7 @@ public class StandbyCheckpointer {
*/
private void setNameNodeAddresses(Configuration conf) throws IOException {
// Look up our own address.
- String myAddrString = getHttpAddress(conf);
+ myNNAddress = getHttpAddress(conf);
// Look up the active node's address
Configuration confForActive = HAUtil.getConfForOtherNode(conf);
@@ -103,32 +103,22 @@ public class StandbyCheckpointer {
// Sanity-check.
Preconditions.checkArgument(checkAddress(activeNNAddress),
"Bad address for active NN: %s", activeNNAddress);
- Preconditions.checkArgument(checkAddress(myAddrString),
- "Bad address for standby NN: %s", myAddrString);
- myNNAddress = NetUtils.createSocketAddr(myAddrString);
+ Preconditions.checkArgument(checkAddress(myNNAddress),
+ "Bad address for standby NN: %s", myNNAddress);
}
- private String getHttpAddress(Configuration conf) throws IOException {
- String configuredAddr = DFSUtil.getInfoServer(null, conf, false);
-
- // Use the hostname from the RPC address as a default, in case
- // the HTTP address is configured to 0.0.0.0.
- String hostnameFromRpc = NameNode.getServiceAddress(
- conf, true).getHostName();
- try {
- return DFSUtil.substituteForWildcardAddress(
- configuredAddr, hostnameFromRpc);
- } catch (IOException e) {
- throw new IllegalArgumentException(e);
- }
+ private URL getHttpAddress(Configuration conf) throws IOException {
+ final String scheme = DFSUtil.getHttpClientScheme(conf);
+ String defaultHost = NameNode.getServiceAddress(conf, true).getHostName();
+ URI addr = DFSUtil.getInfoServerWithDefaultHost(defaultHost, conf, scheme);
+ return addr.toURL();
}
/**
* Ensure that the given address is valid and has a port
* specified.
*/
- private boolean checkAddress(String addrStr) {
- InetSocketAddress addr = NetUtils.createSocketAddr(addrStr);
+ private static boolean checkAddress(URL addr) {
return addr.getPort() != 0;
}
@@ -344,7 +334,7 @@ public class StandbyCheckpointer {
}
@VisibleForTesting
- String getActiveNNAddress() {
+ URL getActiveNNAddress() {
return activeNNAddress;
}
}
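One subtlety of the URL-based checkAddress above: java.net.URL.getPort() returns -1 when the URL carries no explicit port, and -1 satisfies the != 0 test. A short demonstration of that JDK behavior:

    import java.net.MalformedURLException;
    import java.net.URL;

    class CheckAddressSketch {
      public static void main(String[] args) throws MalformedURLException {
        System.out.println(new URL("http://nn:50070").getPort()); // 50070
        System.out.println(new URL("http://nn").getPort());       // -1, which is still != 0
      }
    }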
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java Mon Dec 9 22:52:02 2013
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.server.nam
* The difference of an {@link INodeFile} between two snapshots.
*/
public class FileDiff extends
- AbstractINodeDiff<INodeFileWithSnapshot, INodeFileAttributes, FileDiff> {
+ AbstractINodeDiff<INodeFile, INodeFileAttributes, FileDiff> {
/** The file size at snapshot creation time. */
private final long fileSize;
@@ -56,11 +56,12 @@ public class FileDiff extends
}
@Override
- Quota.Counts combinePosteriorAndCollectBlocks(
- INodeFileWithSnapshot currentINode, FileDiff posterior,
- BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
- return currentINode.updateQuotaAndCollectBlocks(posterior, collectedBlocks,
- removedINodes);
+ Quota.Counts combinePosteriorAndCollectBlocks(INodeFile currentINode,
+ FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes) {
+ return currentINode.getFileWithSnapshotFeature()
+ .updateQuotaAndCollectBlocks(currentINode, posterior, collectedBlocks,
+ removedINodes);
}
@Override
@@ -84,9 +85,10 @@ public class FileDiff extends
}
@Override
- Quota.Counts destroyDiffAndCollectBlocks(INodeFileWithSnapshot currentINode,
+ Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
- return currentINode.updateQuotaAndCollectBlocks(this, collectedBlocks,
- removedINodes);
+ return currentINode.getFileWithSnapshotFeature()
+ .updateQuotaAndCollectBlocks(currentINode, this, collectedBlocks,
+ removedINodes);
}
}
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java Mon Dec 9 22:52:02 2013
@@ -17,19 +17,20 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
/** A list of FileDiffs for storing snapshot data. */
public class FileDiffList extends
- AbstractINodeDiffList<INodeFileWithSnapshot, INodeFileAttributes, FileDiff> {
+ AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
@Override
- FileDiff createDiff(Snapshot snapshot, INodeFileWithSnapshot file) {
+ FileDiff createDiff(Snapshot snapshot, INodeFile file) {
return new FileDiff(snapshot, file);
}
@Override
- INodeFileAttributes createSnapshotCopy(INodeFileWithSnapshot currentINode) {
+ INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
return new INodeFileAttributes.SnapshotCopy(currentINode);
}
}
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java Mon Dec 9 22:52:02 2013
@@ -34,9 +34,9 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
-import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.Content;
import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -432,8 +432,8 @@ public class INodeDirectorySnapshottable
parentPath.remove(parentPath.size() - 1);
}
}
- } else if (node.isFile() && node.asFile() instanceof INodeFileWithSnapshot) {
- INodeFileWithSnapshot file = (INodeFileWithSnapshot) node.asFile();
+ } else if (node.isFile() && node.asFile().isWithSnapshot()) {
+ INodeFile file = node.asFile();
Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.from
: diffReport.to;
Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.to
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Mon Dec 9 22:52:02 2013
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.Quota;
@@ -803,10 +804,9 @@ public class INodeDirectoryWithSnapshot
}
// For DstReference node, since the node is not in the created list of
// prior, we should treat it as regular file/dir
- } else if (topNode.isFile()
- && topNode.asFile() instanceof INodeFileWithSnapshot) {
- INodeFileWithSnapshot fs = (INodeFileWithSnapshot) topNode.asFile();
- counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior, fs,
+ } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
+ INodeFile file = topNode.asFile();
+ counts.add(file.getDiffs().deleteSnapshotDiff(post, prior, file,
collectedBlocks, removedINodes, countDiffChange));
} else if (topNode.isDirectory()) {
INodeDirectory dir = topNode.asDirectory();
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java Mon Dec 9 22:52:02 2013
@@ -97,8 +97,7 @@ public class SnapshotFSImageFormat {
public static void saveFileDiffList(final INodeFile file,
final DataOutput out) throws IOException {
- saveINodeDiffs(file instanceof INodeFileWithSnapshot?
- ((INodeFileWithSnapshot) file).getDiffs(): null, out, null);
+ saveINodeDiffs(file.getDiffs(), out, null);
}
public static FileDiffList loadFileDiffList(DataInput in,
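The snapshot changes in FileDiff, FileDiffList, INodeDirectorySnapshottable, INodeDirectoryWithSnapshot, and SnapshotFSImageFormat all follow one refactoring: the INodeFileWithSnapshot subclass gives way to a feature object reachable from any INodeFile (isWithSnapshot() / getFileWithSnapshotFeature()), so instanceof checks disappear. A toy, self-contained sketch of that subclass-to-feature pattern (names invented for illustration):

    import java.util.ArrayList;
    import java.util.List;

    class FeatureRefactorSketch {
      // Snapshot state is an optional feature attached to the node, not a
      // subclass; callers test for the feature instead of the type.
      static class SnapshotFeature {
        final List<String> diffs = new ArrayList<String>();
      }
      static class FileNode {
        private SnapshotFeature snapshotFeature; // null when not snapshotted
        boolean isWithSnapshot() { return snapshotFeature != null; }
        SnapshotFeature getSnapshotFeature() { return snapshotFeature; }
        void addSnapshotFeature() { snapshotFeature = new SnapshotFeature(); }
      }
    }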
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Mon Dec 9 22:52:02 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.tools;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collections;
@@ -47,9 +48,9 @@ import org.apache.hadoop.hdfs.NameNodePr
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.ipc.RPC;
@@ -547,8 +548,10 @@ public class DFSAdmin extends FsShell {
* @throws IOException
*/
public int fetchImage(final String[] argv, final int idx) throws IOException {
- final String infoServer = DFSUtil.getInfoServer(
- HAUtil.getAddressOfActive(getDFS()), getConf(), false);
+ Configuration conf = getConf();
+ final URL infoServer = DFSUtil.getInfoServer(
+ HAUtil.getAddressOfActive(getDFS()), conf,
+ DFSUtil.getHttpClientScheme(conf)).toURL();
SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Mon Dec 9 22:52:02 2013
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
+import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
@@ -37,7 +38,6 @@ import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.StringUtils;
@@ -227,7 +227,7 @@ public class DFSck extends Configured im
* @return Returns http address or null if failure.
* @throws IOException if we can't determine the active NN address
*/
- private String getCurrentNamenodeAddress() throws IOException {
+ private URI getCurrentNamenodeAddress() throws IOException {
//String nnAddress = null;
Configuration conf = getConf();
@@ -245,19 +245,21 @@ public class DFSck extends Configured im
return null;
}
- return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, false);
+ return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
+ DFSUtil.getHttpClientScheme(conf));
}
private int doWork(final String[] args) throws IOException {
- final StringBuilder url = new StringBuilder(HttpConfig.getSchemePrefix());
+ final StringBuilder url = new StringBuilder();
- String namenodeAddress = getCurrentNamenodeAddress();
+ URI namenodeAddress = getCurrentNamenodeAddress();
if (namenodeAddress == null) {
//Error message already output in {@link #getCurrentNamenodeAddress()}
System.err.println("DFSck exiting.");
return 0;
}
- url.append(namenodeAddress);
+
+ url.append(namenodeAddress.toString());
System.err.println("Connecting to namenode via " + url.toString());
url.append("/fsck?ugi=").append(ugi.getShortUserName());
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Mon Dec 9 22:52:02 2013
@@ -157,6 +157,8 @@ public class WebHdfsFileSystem extends F
) throws IOException {
super.initialize(uri, conf);
setConf(conf);
+ // Set the user pattern based on the configuration file.
+ UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
initializeTokenAspect();
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java Mon Dec 9 22:52:02 2013
@@ -17,8 +17,10 @@
*/
package org.apache.hadoop.hdfs.web.resources;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
import org.apache.hadoop.security.UserGroupInformation;
-
+import com.google.common.annotations.VisibleForTesting;
+
import java.text.MessageFormat;
import java.util.regex.Pattern;
@@ -29,8 +31,21 @@ public class UserParam extends StringPar
/** Default parameter value. */
public static final String DEFAULT = "";
- private static final Domain DOMAIN = new Domain(NAME,
- Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$"));
+ private static Domain domain = new Domain(NAME, Pattern.compile(DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+
+ @VisibleForTesting
+ public static Domain getUserPatternDomain() {
+ return domain;
+ }
+
+ @VisibleForTesting
+ public static void setUserPatternDomain(Domain dm) {
+ domain = dm;
+ }
+
+ public static void setUserPattern(String pattern) {
+ domain = new Domain(NAME, Pattern.compile(pattern));
+ }
private static String validateLength(String str) {
if (str == null) {
@@ -50,7 +65,7 @@ public class UserParam extends StringPar
* @param str a string representation of the parameter value.
*/
public UserParam(final String str) {
- super(DOMAIN, str == null || str.equals(DEFAULT)? null : validateLength(str));
+ super(domain, str == null || str.equals(DEFAULT)? null : validateLength(str));
}
/**
@@ -64,4 +79,4 @@ public class UserParam extends StringPar
public String getName() {
return NAME;
}
-}
\ No newline at end of file
+}
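The compiled default above is the same expression that dfs.webhdfs.user.provider.user.pattern now exposes for override. A quick JDK-only check of what the default accepts and rejects:

    import java.util.regex.Pattern;

    class UserPatternSketch {
      public static void main(String[] args) {
        Pattern p = Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$");
        System.out.println(p.matcher("alice").matches());     // true
        System.out.println(p.matcher("svc_user$").matches()); // true: one trailing $ is allowed
        System.out.println(p.matcher("123user").matches());   // false: may not start with a digit
      }
    }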
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1548386-1549698
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto Mon Dec 9 22:52:02 2013
@@ -142,7 +142,9 @@ message GetJournalStateRequestProto {
message GetJournalStateResponseProto {
required uint64 lastPromisedEpoch = 1;
+ // Deprecated by fromURL
required uint32 httpPort = 2;
+ optional string fromURL = 3;
}
/**
@@ -182,7 +184,9 @@ message GetEditLogManifestRequestProto {
message GetEditLogManifestResponseProto {
required RemoteEditLogManifestProto manifest = 1;
+ // Deprecated by fromURL
required uint32 httpPort = 2;
+ optional string fromURL = 3;
// TODO: we should add nsinfo somewhere
// to verify that it matches up with our expectation
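Because fromURL is optional in both responses, callers must stay compatible with JournalNodes that only send the deprecated httpPort. A protobuf-free sketch of the fallback a reader might apply (the plain-HTTP assumption for old nodes is mine):

    class JournalUrlFallbackSketch {
      // Prefer the new fromURL field; fall back to the deprecated httpPort,
      // assuming plain HTTP, when the peer predates the new field.
      static String journalUrl(boolean hasFromUrl, String fromUrl,
          int httpPort, String host) {
        return hasFromUrl ? fromUrl : "http://" + host + ":" + httpPort;
      }
    }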
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Mon Dec 9 22:52:02 2013
@@ -1593,4 +1593,12 @@
</description>
</property>
+<property>
+ <name>dfs.webhdfs.user.provider.user.pattern</name>
+ <value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
+ <description>
+ Valid pattern for user and group names for WebHDFS; it must be a valid Java regex.
+ </description>
+</property>
+
</configuration>
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1548386-1549698
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1548386-1549698
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1548386-1549698
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1548386-1549698
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Mon Dec 9 22:52:02 2013
@@ -19,7 +19,6 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
@@ -431,20 +430,22 @@ public class TestDFSUtil {
}
@Test
- public void testGetInfoServer() throws IOException {
+ public void testGetInfoServer() throws IOException, URISyntaxException {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
- String httpsport = DFSUtil.getInfoServer(null, conf, true);
- assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTPS_PORT_DEFAULT, httpsport);
-
- String httpport = DFSUtil.getInfoServer(null, conf, false);
- assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTP_PORT_DEFAULT, httpport);
-
- String httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
- "localhost", 8020), conf, false);
- assertEquals("localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT, httpAddress);
+ URI httpsport = DFSUtil.getInfoServer(null, conf, "https");
+ assertEquals(new URI("https", null, "0.0.0.0",
+ DFS_NAMENODE_HTTPS_PORT_DEFAULT, null, null, null), httpsport);
+
+ URI httpport = DFSUtil.getInfoServer(null, conf, "http");
+ assertEquals(new URI("http", null, "0.0.0.0",
+ DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
+
+ URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
+ "localhost", 8020), conf, "http");
+ assertEquals(
+ URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
+ httpAddress);
}
@Test
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java Mon Dec 9 22:52:02 2013
@@ -42,6 +42,10 @@ public class TestParallelShortCircuitRea
new File(sockDir.getDir(),
"TestParallelShortCircuitReadUnCached._PORT.sock").getAbsolutePath());
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
+ // Enabling data transfer encryption should have no effect when using
+ // short-circuit local reads. This is a regression test for HDFS-5353.
+ conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
+ conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
conf.setBoolean(DFSConfigKeys.
DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
conf.setBoolean(DFSConfigKeys.
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java Mon Dec 9 22:52:02 2013
@@ -140,6 +140,11 @@ public class TestPeerCache {
public int hashCode() {
return dnId.hashCode() ^ (hasDomain ? 1 : 0);
}
+
+ @Override
+ public boolean hasSecureChannel() {
+ return false;
+ }
}
@Test
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java Mon Dec 9 22:52:02 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.net.NetUtils;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
@@ -66,11 +67,21 @@ public class MiniJournalCluster {
}
}
+ private static final class JNInfo {
+ private JournalNode node;
+ private InetSocketAddress ipcAddr;
+ private String httpServerURI;
+
+ private JNInfo(JournalNode node) {
+ this.node = node;
+ this.ipcAddr = node.getBoundIpcAddress();
+ this.httpServerURI = node.getHttpServerURI();
+ }
+ }
+
private static final Log LOG = LogFactory.getLog(MiniJournalCluster.class);
private File baseDir;
- private JournalNode nodes[];
- private InetSocketAddress ipcAddrs[];
- private InetSocketAddress httpAddrs[];
+ private JNInfo nodes[];
private MiniJournalCluster(Builder b) throws IOException {
LOG.info("Starting MiniJournalCluster with " +
@@ -81,22 +92,19 @@ public class MiniJournalCluster {
} else {
this.baseDir = new File(MiniDFSCluster.getBaseDirectory());
}
-
- nodes = new JournalNode[b.numJournalNodes];
- ipcAddrs = new InetSocketAddress[b.numJournalNodes];
- httpAddrs = new InetSocketAddress[b.numJournalNodes];
+
+ nodes = new JNInfo[b.numJournalNodes];
+
for (int i = 0; i < b.numJournalNodes; i++) {
if (b.format) {
File dir = getStorageDir(i);
LOG.debug("Fully deleting JN directory " + dir);
FileUtil.fullyDelete(dir);
}
- nodes[i] = new JournalNode();
- nodes[i].setConf(createConfForNode(b, i));
- nodes[i].start();
-
- ipcAddrs[i] = nodes[i].getBoundIpcAddress();
- httpAddrs[i] = nodes[i].getBoundHttpAddress();
+ JournalNode jn = new JournalNode();
+ jn.setConf(createConfForNode(b, i));
+ jn.start();
+ nodes[i] = new JNInfo(jn);
}
}
@@ -106,8 +114,8 @@ public class MiniJournalCluster {
*/
public URI getQuorumJournalURI(String jid) {
List<String> addrs = Lists.newArrayList();
- for (InetSocketAddress addr : ipcAddrs) {
- addrs.add("127.0.0.1:" + addr.getPort());
+ for (JNInfo info : nodes) {
+ addrs.add("127.0.0.1:" + info.ipcAddr.getPort());
}
String addrsVal = Joiner.on(";").join(addrs);
LOG.debug("Setting logger addresses to: " + addrsVal);
@@ -122,8 +130,8 @@ public class MiniJournalCluster {
* Start the JournalNodes in the cluster.
*/
public void start() throws IOException {
- for (JournalNode jn : nodes) {
- jn.start();
+ for (JNInfo info : nodes) {
+ info.node.start();
}
}
@@ -133,12 +141,12 @@ public class MiniJournalCluster {
*/
public void shutdown() throws IOException {
boolean failed = false;
- for (JournalNode jn : nodes) {
+ for (JNInfo info : nodes) {
try {
- jn.stopAndJoin(0);
+ info.node.stopAndJoin(0);
} catch (Exception e) {
failed = true;
- LOG.warn("Unable to stop journal node " + jn, e);
+ LOG.warn("Unable to stop journal node " + info.node, e);
}
}
if (failed) {
@@ -150,8 +158,8 @@ public class MiniJournalCluster {
Configuration conf = new Configuration(b.conf);
File logDir = getStorageDir(idx);
conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, logDir.toString());
- conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "0.0.0.0:0");
- conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+ conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "localhost:0");
+ conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "localhost:0");
return conf;
}
@@ -164,23 +172,33 @@ public class MiniJournalCluster {
}
public JournalNode getJournalNode(int i) {
- return nodes[i];
+ return nodes[i].node;
}
public void restartJournalNode(int i) throws InterruptedException, IOException {
- Configuration conf = new Configuration(nodes[i].getConf());
- if (nodes[i].isStarted()) {
- nodes[i].stopAndJoin(0);
- }
-
- conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "127.0.0.1:" +
- ipcAddrs[i].getPort());
- conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "127.0.0.1:" +
- httpAddrs[i].getPort());
-
- nodes[i] = new JournalNode();
- nodes[i].setConf(conf);
- nodes[i].start();
+ JNInfo info = nodes[i];
+ JournalNode jn = info.node;
+ Configuration conf = new Configuration(jn.getConf());
+ if (jn.isStarted()) {
+ jn.stopAndJoin(0);
+ }
+
+ conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
+ NetUtils.getHostPortString(info.ipcAddr));
+
+ final String uri = info.httpServerURI;
+ if (uri.startsWith("http://")) {
+ conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
+ uri.substring(("http://".length())));
+ } else if (info.httpServerURI.startsWith("https://")) {
+ conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
+ uri.substring(("https://".length())));
+ }
+
+ JournalNode newJN = new JournalNode();
+ newJN.setConf(conf);
+ newJN.start();
+ info.node = newJN;
}
public int getQuorumSize() {
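The restart path above re-pins the JournalNode to its previous web endpoint by stripping the scheme prefix from the saved URI before writing it back into the address key. The same string handling in isolation:

    class SchemeStripSketch {
      public static void main(String[] args) {
        String uri = "https://localhost:48021"; // illustrative saved endpoint
        String hostPort = uri.startsWith("https://")
            ? uri.substring("https://".length())
            : uri.substring("http://".length());
        System.out.println(hostPort); // localhost:48021
      }
    }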
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java Mon Dec 9 22:52:02 2013
@@ -25,7 +25,6 @@ import static org.junit.Assert.fail;
import java.io.File;
import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
import java.net.URL;
import java.util.concurrent.ExecutionException;
@@ -163,10 +162,7 @@ public class TestJournalNode {
@Test(timeout=100000)
public void testHttpServer() throws Exception {
- InetSocketAddress addr = jn.getBoundHttpAddress();
- assertTrue(addr.getPort() > 0);
-
- String urlRoot = "http://localhost:" + addr.getPort();
+ String urlRoot = jn.getHttpServerURI();
// Check default servlets.
String pageContents = DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Mon Dec 9 22:52:02 2013
@@ -34,6 +34,7 @@ import java.io.RandomAccessFile;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.URI;
+import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
@@ -218,6 +218,7 @@ public class TestCheckpoint {
assertTrue("Removed directory wasn't what was expected",
listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
toString().indexOf("storageDirToCheck") != -1);
+ nnStorage.close();
}
/*
@@ -1947,8 +1948,9 @@ public class TestCheckpoint {
.format(true).build();
NamenodeProtocols nn = cluster.getNameNodeRpc();
- String fsName = NetUtils.getHostPortString(
- cluster.getNameNode().getHttpAddress());
+ URL fsName = DFSUtil.getInfoServer(
+ cluster.getNameNode().getServiceRpcAddress(), conf,
+ DFSUtil.getHttpClientScheme(conf)).toURL();
// Make a finalized log on the server side.
nn.rollEditLog();
@@ -1980,8 +1982,7 @@ public class TestCheckpoint {
}
try {
- InetSocketAddress fakeAddr = new InetSocketAddress(1);
- TransferFsImage.uploadImageFromStorage(fsName, fakeAddr, dstImage, 0);
+ TransferFsImage.uploadImageFromStorage(fsName, new URL("http://localhost:1234"), dstImage, 0);
fail("Storage info was not verified");
} catch (IOException ioe) {
String msg = StringUtils.stringifyException(ioe);
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java Mon Dec 9 22:52:02 2013
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.Distribute
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -239,7 +238,7 @@ public class TestSnapshotPathINodes {
// The last INode should be the INode for sub1
final INode last = nodesInPath.getLastINode();
assertEquals(last.getFullPathName(), sub1.toString());
- assertFalse(last instanceof INodeFileWithSnapshot);
+ assertFalse(last instanceof INodeFile);
String[] invalidPathComponent = {"invalidDir", "foo", ".snapshot", "bar"};
Path invalidPath = new Path(invalidPathComponent[0]);
@@ -287,7 +286,7 @@ public class TestSnapshotPathINodes {
// Check the INode for file1 (snapshot file)
final INode inode = inodes[inodes.length - 1];
assertEquals(file1.getName(), inode.getLocalName());
- assertEquals(INodeFileWithSnapshot.class, inode.getClass());
+ assertTrue(inode.asFile().isWithSnapshot());
}
// Check the INodes for path /TestSnapshot/sub1/file1
@@ -391,6 +390,8 @@ public class TestSnapshotPathINodes {
// The last INode should be associated with file1
assertEquals(inodes[components.length - 1].getFullPathName(),
file1.toString());
+ // record the modification time of the inode
+ final long modTime = inodes[inodes.length - 1].getModificationTime();
// Create a snapshot for the dir, and check the inodes for the path
// pointing to a snapshot file
@@ -414,10 +415,10 @@ public class TestSnapshotPathINodes {
// Check the INode for snapshot of file1
INode snapshotFileNode = ssInodes[ssInodes.length - 1];
assertEquals(snapshotFileNode.getLocalName(), file1.getName());
- assertTrue(snapshotFileNode instanceof INodeFileWithSnapshot);
+ assertTrue(snapshotFileNode.asFile().isWithSnapshot());
// The modification time of the snapshot INode should be the same as that
// of the original INode before the modification
- assertEquals(inodes[inodes.length - 1].getModificationTime(),
+ assertEquals(modTime,
snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshot()));
// Check the INode for /TestSnapshot/sub1/file1 again
@@ -432,7 +433,6 @@ public class TestSnapshotPathINodes {
final int last = components.length - 1;
assertEquals(newInodes[last].getFullPathName(), file1.toString());
// The modification time of the INode for file3 should have been changed
- Assert.assertFalse(inodes[last].getModificationTime()
- == newInodes[last].getModificationTime());
+ Assert.assertFalse(modTime == newInodes[last].getModificationTime());
}
}
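
The assertions above stop checking instanceof INodeFileWithSnapshot and instead ask the file itself via asFile().isWithSnapshot(), which suggests the snapshot subclass has been folded into INodeFile as an optional feature. One plausible shape of that refactor, sketched with simplified stand-in names rather than the real HDFS classes:

    // A subclass becomes a nullable feature on the base class, so callers
    // query a boolean instead of doing instanceof checks.
    class INodeFileSketch {
      private SnapshotFeature snapshotFeature;  // null until a snapshot exists

      boolean isWithSnapshot() {
        return snapshotFeature != null;
      }

      void addSnapshotFeature() {
        if (snapshotFeature == null) {
          snapshotFeature = new SnapshotFeature();
        }
      }

      static class SnapshotFeature {
        // per-snapshot file diffs would live here
      }
    }
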
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Mon Dec 9 22:52:02 2013
@@ -34,11 +34,11 @@ import javax.servlet.http.HttpServletReq
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.http.HttpServerFunctionalTest;
-import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
@@ -66,8 +66,9 @@ public class TestTransferFsImage {
new File("/xxxxx-does-not-exist/blah"));
try {
- String fsName = NetUtils.getHostPortString(
- cluster.getNameNode().getHttpAddress());
+ URL fsName = DFSUtil.getInfoServer(
+ cluster.getNameNode().getServiceRpcAddress(), conf,
+ DFSUtil.getHttpClientScheme(conf)).toURL();
String id = "getimage=1&txid=0";
TransferFsImage.getFileClient(fsName, id, localPath, mockStorage, false);
@@ -98,8 +99,10 @@ public class TestTransferFsImage {
);
try {
- String fsName = NetUtils.getHostPortString(
- cluster.getNameNode().getHttpAddress());
+ URL fsName = DFSUtil.getInfoServer(
+ cluster.getNameNode().getServiceRpcAddress(), conf,
+ DFSUtil.getHttpClientScheme(conf)).toURL();
+
String id = "getimage=1&txid=0";
TransferFsImage.getFileClient(fsName, id, localPaths, mockStorage, false);
@@ -123,7 +126,7 @@ public class TestTransferFsImage {
URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
TransferFsImage.timeout = 2000;
try {
- TransferFsImage.getFileClient(serverURL.getAuthority(), "txid=1", null,
+ TransferFsImage.getFileClient(serverURL, "txid=1", null,
null, false);
fail("TransferImage Should fail with timeout");
} catch (SocketTimeoutException e) {
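
getFileClient now takes the server URL directly rather than an authority string it would have to re-parse. A plain-JDK sketch of a URL-based client honoring the 2000 ms timeout the test sets (the "/getimage" path and query format are taken from the hunks above; the method itself is an assumption):

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class TransferSketch {
      static InputStream open(URL serverURL, String query) throws Exception {
        URL u = new URL(serverURL, "/getimage?" + query);
        HttpURLConnection conn = (HttpURLConnection) u.openConnection();
        conn.setConnectTimeout(2000); // fail fast on an unreachable server
        conn.setReadTimeout(2000);    // a stalled server raises SocketTimeoutException
        return conn.getInputStream();
      }
    }
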
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java Mon Dec 9 22:52:02 2013
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.URI;
+import java.net.URL;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
@@ -86,7 +87,8 @@ public class TestHAConfiguration {
// 0.0.0.0, it should substitute the address from the RPC configuration
// above.
StandbyCheckpointer checkpointer = new StandbyCheckpointer(conf, fsn);
- assertEquals("1.2.3.2:" + DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
+ assertEquals(new URL("http", "1.2.3.2",
+ DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, ""),
checkpointer.getActiveNNAddress());
}
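
Per the comment in the hunk, a checkpointer configured with a 0.0.0.0 HTTP address should substitute the host from the RPC configuration, and the expected value is now a URL rather than a host:port string. A sketch of that substitution under those assumptions (the helper name is hypothetical):

    import java.net.InetSocketAddress;
    import java.net.URL;

    public class CheckpointAddrSketch {
      static URL activeNNUrl(InetSocketAddress httpAddr,
                             InetSocketAddress rpcAddr) throws Exception {
        // 0.0.0.0 is a bind address, not a routable one: fall back to the
        // host the RPC configuration names.
        String host = httpAddr.getAddress() != null
            && httpAddr.getAddress().isAnyLocalAddress()
            ? rpcAddr.getHostString()
            : httpAddr.getHostString();
        return new URL("http", host, httpAddr.getPort(), "");
      }
    }
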
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java Mon Dec 9 22:52:02 2013
@@ -176,7 +176,7 @@ public class TestINodeFileUnderConstruct
dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
last = dirNode.getDiffs().getLast();
Snapshot s1 = last.snapshot;
- assertTrue(fileNode instanceof INodeFileWithSnapshot);
+ assertTrue(fileNode.isWithSnapshot());
assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
// 4. modify file --> append without closing stream --> take snapshot -->
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Mon Dec 9 22:52:02 2013
@@ -403,8 +403,7 @@ public class TestRenameWithSnapshots {
final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
"foo");
assertFalse(hdfs.exists(foo_s3));
- INodeFileWithSnapshot sfoo = (INodeFileWithSnapshot) fsdir.getINode(
- newfoo.toString()).asFile();
+ INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
assertEquals("s2", sfoo.getDiffs().getLastSnapshot().getRoot()
.getLocalName());
}
@@ -604,8 +603,7 @@ public class TestRenameWithSnapshots {
status = hdfs.getFileStatus(foo_s2);
assertEquals(REPL, status.getReplication());
- INodeFileWithSnapshot snode = (INodeFileWithSnapshot) fsdir.getINode(
- newfoo.toString()).asFile();
+ INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
assertEquals(1, snode.getDiffs().asList().size());
assertEquals("s2", snode.getDiffs().getLastSnapshot().getRoot()
.getLocalName());
@@ -763,8 +761,7 @@ public class TestRenameWithSnapshots {
.asDirectory();
assertEquals(1, foo.getDiffs().asList().size());
assertEquals("s1", foo.getLastSnapshot().getRoot().getLocalName());
- INodeFileWithSnapshot bar1 = (INodeFileWithSnapshot) fsdir.getINode4Write(
- bar1_dir1.toString()).asFile();
+ INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
assertEquals(1, bar1.getDiffs().asList().size());
assertEquals("s1", bar1.getDiffs().getLastSnapshot().getRoot()
.getLocalName());
@@ -774,7 +771,7 @@ public class TestRenameWithSnapshots {
INodeReference.WithCount barWithCount = (WithCount) barRef
.getReferredINode();
assertEquals(2, barWithCount.getReferenceCount());
- INodeFileWithSnapshot bar = (INodeFileWithSnapshot) barWithCount.asFile();
+ INodeFile bar = barWithCount.asFile();
assertEquals(1, bar.getDiffs().asList().size());
assertEquals("s1", bar.getDiffs().getLastSnapshot().getRoot()
.getLocalName());
@@ -984,8 +981,7 @@ public class TestRenameWithSnapshots {
assertEquals("s333", fooDiffs.get(2).snapshot.getRoot().getLocalName());
assertEquals("s22", fooDiffs.get(1).snapshot.getRoot().getLocalName());
assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
- INodeFileWithSnapshot bar1 = (INodeFileWithSnapshot) fsdir.getINode4Write(
- bar1_dir1.toString()).asFile();
+ INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
List<FileDiff> bar1Diffs = bar1.getDiffs().asList();
assertEquals(3, bar1Diffs.size());
assertEquals("s333", bar1Diffs.get(2).snapshot.getRoot().getLocalName());
@@ -997,7 +993,7 @@ public class TestRenameWithSnapshots {
INodeReference.WithCount barWithCount = (WithCount) barRef.getReferredINode();
// 5 references: s1, s22, s333, s2222, current tree of sdir1
assertEquals(5, barWithCount.getReferenceCount());
- INodeFileWithSnapshot bar = (INodeFileWithSnapshot) barWithCount.asFile();
+ INodeFile bar = barWithCount.asFile();
List<FileDiff> barDiffs = bar.getDiffs().asList();
assertEquals(4, barDiffs.size());
assertEquals("s2222", barDiffs.get(3).snapshot.getRoot().getLocalName());
@@ -1047,7 +1043,7 @@ public class TestRenameWithSnapshots {
barRef = fsdir.getINode(bar_s2222.toString()).asReference();
barWithCount = (WithCount) barRef.getReferredINode();
assertEquals(4, barWithCount.getReferenceCount());
- bar = (INodeFileWithSnapshot) barWithCount.asFile();
+ bar = barWithCount.asFile();
barDiffs = bar.getDiffs().asList();
assertEquals(4, barDiffs.size());
assertEquals("s2222", barDiffs.get(3).snapshot.getRoot().getLocalName());
@@ -1229,7 +1225,7 @@ public class TestRenameWithSnapshots {
fooRef = fsdir.getINode4Write(foo2.toString());
assertTrue(fooRef instanceof INodeReference.DstReference);
INodeFile fooNode = fooRef.asFile();
- assertTrue(fooNode instanceof INodeFileWithSnapshot);
+ assertTrue(fooNode.isWithSnapshot());
assertTrue(fooNode.isUnderConstruction());
} finally {
if (out != null) {
@@ -1240,7 +1236,7 @@ public class TestRenameWithSnapshots {
fooRef = fsdir.getINode4Write(foo2.toString());
assertTrue(fooRef instanceof INodeReference.DstReference);
INodeFile fooNode = fooRef.asFile();
- assertTrue(fooNode instanceof INodeFileWithSnapshot);
+ assertTrue(fooNode.isWithSnapshot());
assertFalse(fooNode.isUnderConstruction());
restartClusterAndCheckImage(true);
@@ -1715,8 +1711,7 @@ public class TestRenameWithSnapshots {
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
// bar was converted to a file with snapshot data while renaming
- INodeFileWithSnapshot barNode = (INodeFileWithSnapshot) fsdir
- .getINode4Write(bar.toString());
+ INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
assertSame(barNode, children.get(0));
assertSame(fooNode, barNode.getParent());
List<FileDiff> barDiffList = barNode.getDiffs().asList();
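
The assertions above lean on INodeReference.WithCount and getReferenceCount(): per the in-test comment, a renamed, snapshotted inode is held once per snapshot that can still reach it plus once for the live tree. A stripped-down sketch of that bookkeeping (all names are stand-ins for the HDFS classes):

    // Shared wrapper around a renamed inode; each snapshot that can still
    // reach the file holds one reference, plus one for the current tree.
    class WithCountSketch {
      private final Object referredINode;
      private int referenceCount;

      WithCountSketch(Object referredINode) {
        this.referredINode = referredINode;
      }

      void addReference()       { referenceCount++; }
      void removeReference()    { referenceCount--; }
      int getReferenceCount()   { return referenceCount; }
      Object getReferredINode() { return referredINode; }
    }
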
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java Mon Dec 9 22:52:02 2013
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
@@ -167,7 +168,8 @@ public class TestSnapshotBlocksMap {
Assert.assertSame(INodeFile.class, f1.getClass());
hdfs.setReplication(file1, (short)2);
f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
- Assert.assertSame(INodeFileWithSnapshot.class, f1.getClass());
+ assertTrue(f1.isWithSnapshot());
+ assertFalse(f1.isUnderConstruction());
}
// Check the block information for file0
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Mon Dec 9 22:52:02 2013
@@ -277,10 +277,10 @@ public class TestSnapshotDeletion {
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
- INodeFileWithSnapshot metaChangeFile2SCopy =
- (INodeFileWithSnapshot) children.get(0);
+ INodeFile metaChangeFile2SCopy = children.get(0).asFile();
assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
- assertEquals(INodeFileWithSnapshot.class, metaChangeFile2SCopy.getClass());
+ assertTrue(metaChangeFile2SCopy.isWithSnapshot());
+ assertFalse(metaChangeFile2SCopy.isUnderConstruction());
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
@@ -338,8 +338,9 @@ public class TestSnapshotDeletion {
INode child = children.get(0);
assertEquals(child.getLocalName(), metaChangeFile1.getName());
// check snapshot copy of metaChangeFile1
- assertEquals(INodeFileWithSnapshot.class, child.getClass());
- INodeFileWithSnapshot metaChangeFile1SCopy = (INodeFileWithSnapshot) child;
+ INodeFile metaChangeFile1SCopy = child.asFile();
+ assertTrue(metaChangeFile1SCopy.isWithSnapshot());
+ assertFalse(metaChangeFile1SCopy.isUnderConstruction());
assertEquals(REPLICATION_1,
metaChangeFile1SCopy.getFileReplication(null));
assertEquals(REPLICATION_1,
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java Mon Dec 9 22:52:02 2013
@@ -261,6 +261,34 @@ public class TestWebHDFS {
}
}
+ @Test(timeout=300000)
+ public void testNumericalUserName() throws Exception {
+ final Configuration conf = WebHdfsTestUtil.createConf();
+ conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
+ final MiniDFSCluster cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ try {
+ cluster.waitActive();
+ WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME)
+ .setPermission(new Path("/"),
+ new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
+
+ UserGroupInformation.createUserForTesting("123", new String[]{"my-group"})
+ .doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws IOException, URISyntaxException {
+ FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+ WebHdfsFileSystem.SCHEME);
+ Path d = new Path("/my-dir");
+ Assert.assertTrue(fs.mkdirs(d));
+ return null;
+ }
+ });
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
/**
* WebHdfs should be enabled by default after HDFS-5532
*
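
The new test relaxes the WebHDFS user-name pattern so purely numeric names such as "123" validate. The relaxed regex is copied from the hunk above; the default it replaces is written from memory and should be treated as an assumption. Checking both with plain java.util.regex:

    import java.util.regex.Pattern;

    public class UserPatternSketch {
      public static void main(String[] args) {
        // Assumed default: first character must be a letter or underscore.
        Pattern dflt = Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$");
        // Relaxed pattern from the test: digits allowed up front.
        Pattern relaxed = Pattern.compile("^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");

        System.out.println(dflt.matcher("123").matches());    // false
        System.out.println(relaxed.matcher("123").matches()); // true
      }
    }
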
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java?rev=1549699&r1=1549698&r2=1549699&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java Mon Dec 9 22:52:02 2013
@@ -285,4 +285,19 @@ public class TestParam {
Assert.assertEquals(expected, computed.getValue());
}
}
+
+ @Test
+ public void testUserNameOkAfterResettingPattern() {
+ UserParam.Domain oldDomain = UserParam.getUserPatternDomain();
+
+ String newPattern = "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$";
+ UserParam.setUserPattern(newPattern);
+
+ UserParam userParam = new UserParam("1x");
+ assertNotNull(userParam.getValue());
+ userParam = new UserParam("123");
+ assertNotNull(userParam.getValue());
+
+ UserParam.setUserPatternDomain(oldDomain);
+ }
}
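
testUserNameOkAfterResettingPattern mutates static state in UserParam, so it saves the old domain first and restores it at the end. A sketch of that save-and-restore idiom, here hardened with try/finally so the restore also happens on assertion failure (the holder type is a stand-in; only java.util.regex is real):

    import java.util.regex.Pattern;

    public class UserPatternReset {
      public static void main(String[] args) {
        Pattern oldPattern = PatternHolder.get();   // capture current domain
        try {
          PatternHolder.set(
              Pattern.compile("^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$"));
          assert PatternHolder.get().matcher("1x").matches();
          assert PatternHolder.get().matcher("123").matches();
        } finally {
          PatternHolder.set(oldPattern);            // never leak into other tests
        }
      }

      // Stand-in for the static pattern domain UserParam keeps.
      static class PatternHolder {
        private static Pattern p =
            Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$");
        static Pattern get() { return p; }
        static void set(Pattern np) { p = np; }
      }
    }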