Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2012/11/21 22:08:50 UTC
svn commit: r1412297 - in
/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common: ./
src/main/docs/ src/main/docs/src/documentation/content/xdocs/
src/main/java/ src/main/java/org/apache/hadoop/fs/ src/test/core/
src/test/java/org/apache...
Author: szetszwo
Date: Wed Nov 21 21:08:45 2012
New Revision: 1412297
URL: http://svn.apache.org/viewvc?rev=1412297&view=rev
Log:
Merge r1410998 through r1412282 from trunk.
Added:
hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java
- copied unchanged from r1412282, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java
Modified:
hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt (contents, props changed)
hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml
hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/core/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1412297&r1=1412296&r2=1412297&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt Wed Nov 21 21:08:45 2012
@@ -132,6 +132,12 @@ Trunk (Unreleased)
HADOOP-9004. Allow security unit tests to use external KDC. (Stephen Chu
via suresh)
+ HADOOP-6616. Improve documentation for rack awareness. (Adam Faris via
+ jghoman)
+
+ HADOOP-9075. FileContext#FSLinkResolver should be made static.
+ (Arpit Agarwal via suresh)
+
BUG FIXES
HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -371,6 +377,9 @@ Release 2.0.3-alpha - Unreleased
HADOOP-9035. Generalize setup of LoginContext (daryn via bobby)
+ HADOOP-9042. Add a test for umask in FileSystemContractBaseTest.
+ (Colin McCabe via eli)
+
OPTIMIZATIONS
HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@@ -435,6 +444,9 @@ Release 2.0.3-alpha - Unreleased
HADOOP-6607. Add different variants of non caching HTTP headers. (tucu)
+ HADOOP-9049. DelegationTokenRenewer needs to be Singleton and FileSystems
+ should register/deregister to/from. (Karthik Kambatla via tomwhite)
+
Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES
@@ -1137,6 +1149,9 @@ Release 0.23.6 - UNRELEASED
BUG FIXES
+ HADOOP-9072. Hadoop-Common-0.23-Build Fails to build in Jenkins
+ (Robert Parker via tgraves)
+
Release 0.23.5 - UNRELEASED
INCOMPATIBLE CHANGES
Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1410998-1412282
Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs:r1410998-1412282
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml?rev=1412297&r1=1412296&r2=1412297&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml Wed Nov 21 21:08:45 2012
@@ -1292,23 +1292,139 @@
<section>
<title>Hadoop Rack Awareness</title>
- <p>The HDFS and the Map/Reduce components are rack-aware.</p>
- <p>The <code>NameNode</code> and the <code>JobTracker</code> obtains the
- <code>rack id</code> of the slaves in the cluster by invoking an API
- <a href="ext:api/org/apache/hadoop/net/dnstoswitchmapping/resolve
- ">resolve</a> in an administrator configured
- module. The API resolves the slave's DNS name (also IP address) to a
- rack id. What module to use can be configured using the configuration
- item <code>net.topology.node.switch.mapping.impl</code>. The default
- implementation of the same runs a script/command configured using
- <code>net.topology.script.file.name</code>. If topology.script.file.name is
- not set, the rack id <code>/default-rack</code> is returned for any
- passed IP address. The additional configuration in the Map/Reduce
- part is <code>mapred.cache.task.levels</code> which determines the number
- of levels (in the network topology) of caches. So, for example, if it is
- the default value of 2, two levels of caches will be constructed -
- one for hosts (host -> task mapping) and another for racks
- (rack -> task mapping).
+ <p>
+ Both HDFS and Map/Reduce components are rack-aware. HDFS block placement will use rack
+ awareness for fault tolerance by placing one block replica on a different rack. This provides
+ data availability in the event of a network switch failure within the cluster. The jobtracker uses rack
+ awareness to reduce network transfers of HDFS data blocks by attempting to schedule tasks on datanodes holding a local
+ copy of the needed HDFS blocks. If the tasks cannot be scheduled on the datanodes
+ containing the needed blocks, they will be scheduled on the same rack where possible, which still reduces network transfers.
+ </p>
+ <p>The NameNode and the JobTracker obtain the rack id of the cluster slaves by invoking either
+ an external script or a Java class, as specified by the configuration files. Whether the
+ Java class or the external script is used for topology, its output must adhere to the Java
+ <a href="ext:api/org/apache/hadoop/net/dnstoswitchmapping/resolve">DNSToSwitchMapping</a>
+ interface. The interface expects a one-to-one correspondence to be maintained,
+ with the topology information in the format of '/myrack/myhost', where '/' is the topology
+ delimiter, 'myrack' is the rack identifier, and 'myhost' is the individual host. Assuming
+ a single /24 subnet per rack, one could use the format '/192.168.100.0/192.168.100.5' as a
+ unique rack-host topology mapping.
+ </p>
+ <p>
+ To use the Java class for topology mapping, the class name is specified by the
+ <code>'topology.node.switch.mapping.impl'</code> parameter in the configuration file.
+ An example, NetworkTopology.java, is included with the Hadoop distribution and can be customized
+ by the Hadoop administrator. If it is not included with your distribution, NetworkTopology.java can also be found in the Hadoop
+ <a href="http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java?view=markup">
+ subversion tree</a>. Using a Java class instead of an external script has a slight performance benefit in
+ that Hadoop doesn't need to fork an external process when a new slave node registers itself with the jobtracker or namenode.
+ As this class is only used during slave node registration, the performance benefit is limited.
+ </p>
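[As a rough sketch of what such a class might look like (the class name and the single-rack policy below are hypothetical, not code from this commit, and the exact interface methods can vary between Hadoop versions):

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.net.DNSToSwitchMapping;

// Hypothetical sketch of a custom topology class.
public class SingleRackMapping implements DNSToSwitchMapping {
  @Override
  public List<String> resolve(List<String> names) {
    // Return exactly one rack id per input name (the one-to-one
    // correspondence the interface expects); the daemon combines the
    // rack id with the host to form the '/myrack/myhost' topology path.
    List<String> racks = new ArrayList<String>(names.size());
    for (int i = 0; i < names.size(); i++) {
      racks.add("/rack1"); // assume everything is on one rack
    }
    return racks;
  }
}
]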
+ <p>
+ If an external script is used, it is specified with the
+ <code>topology.script.file.name</code> parameter in the configuration files. Unlike the Java
+ class, the external topology script is not included with the Hadoop distribution and must be provided by the
+ administrator. Hadoop will send multiple IP addresses to ARGV when forking the topology script. The
+ number of IP addresses sent to the topology script is controlled with <code>net.topology.script.number.args</code>
+ and defaults to 100. If <code>net.topology.script.number.args</code> was changed to 1, a topology script would
+ get forked for each IP submitted by datanodes and/or tasktrackers. Below are example topology scripts.
+ </p>
+ <section>
+ <title>Python example</title>
+ <source>
+ <code>
+ #!/usr/bin/python
+
+ # this script makes assumptions about the physical environment.
+ # 1) each rack is its own layer 3 network with a /24 subnet, which could be typical where each rack has its own
+ # switch with uplinks to a central core router.
+ #
+ #                  +-----------+
+ #                  |core router|
+ #                  +-----------+
+ #                 /             \
+ #      +-----------+           +-----------+
+ #      |rack switch|           |rack switch|
+ #      +-----------+           +-----------+
+ #      | data node |           | data node |
+ #      +-----------+           +-----------+
+ #      | data node |           | data node |
+ #      +-----------+           +-----------+
+ #
+ # 2) topology script gets list of IP's as input, calculates network address, and prints '/network_address/ip'.
+
+ import netaddr
+ import sys
+ sys.argv.pop(0) # discard name of topology script from argv list as we just want IP addresses
+
+ netmask = '255.255.255.0' # set netmask to what's being used in your environment. The example uses a /24
+
+ for ip in sys.argv:                          # loop over list of datanode IP's
+     address = '{0}/{1}'.format(ip, netmask)  # format address string so it looks like 'ip/netmask' to make netaddr work
+     try:
+         network_address = netaddr.IPNetwork(address).network  # calculate the network address
+         print "/{0}".format(network_address)                  # print the rack id as '/network_address'
+     except:
+         print "/rack-unknown"                # print catch-all value if unable to calculate network address
+
+ </code>
+ </source>
+ </section>
+
+ <section>
+ <title>Bash example</title>
+ <source>
+ <code>
+ #!/bin/bash
+ # Here's a bash example to show just how simple these scripts can be
+
+ # Assuming we have a flat network with everything on a single switch, we can fake a rack topology.
+ # This could occur in a lab environment where we have limited nodes, like 2-8 physical machines on an unmanaged switch.
+ # This may also apply to multiple virtual machines running on the same physical hardware.
+ # The number of machines isn't important; what matters is that we are faking a network topology when there isn't one.
+ #
+ #       +----------+          +--------+
+ #       |jobtracker|          |datanode|
+ #       +----------+          +--------+
+ #                 \           /
+ #   +--------+   +--------+   +--------+
+ #   |datanode|---| switch |---|datanode|
+ #   +--------+   +--------+   +--------+
+ #                 /           \
+ #       +--------+          +--------+
+ #       |datanode|          |namenode|
+ #       +--------+          +--------+
+ #
+ # With this network topology, we are treating each host as a rack. This is done by taking the last octet
+ # in the datanode's IP and prefixing it with the string '/rack-'. The advantage of doing this is that HDFS
+ # can create its 'off-rack' block copy.
+
+ # 1) 'echo $@' will echo all ARGV values to xargs.
+ # 2) 'xargs' will enforce that we print a single argv value per line.
+ # 3) 'awk' will split fields on dots and append the last field to the string '/rack-'. If awk
+ #    fails to split the address into four fields, it will still print '/rack-' followed by the last field's value.
+
+ echo $@ | xargs -n 1 | awk -F '.' '{print "/rack-"$NF}'
+
+
+ </code>
+ </source>
+ </section>
+
+
+ <p>
+ If <code>topology.script.file.name</code> or <code>topology.node.switch.mapping.impl</code> is
+ not set, the rack id '/default-rack' is returned for any passed IP address.
+ While this behavior appears desirable, it can cause issues with HDFS block replication: the
+ default behavior is to write one replicated block off rack, which is impossible when there is
+ only a single rack named '/default-rack'.
+ </p>
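[For reference, enabling a script-based mapping typically amounts to a single property in the configuration file; the script path below is only a placeholder:

<property>
  <name>topology.script.file.name</name>
  <value>/etc/hadoop/topology.py</value>
</property>
]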
+ <p>
+ An additional configuration setting is <code>mapred.cache.task.levels</code>, which determines
+ the number of levels (in the network topology) of caches. For example, at the
+ default value of 2, two levels of caches will be constructed - one for hosts
+ (host -> task mapping) and another for racks (rack -> task mapping), giving us our one-to-one
+ mapping of '/myrack/myhost'.
</p>
</section>
Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1410998-1412282
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java?rev=1412297&r1=1412296&r2=1412297&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java Wed Nov 21 21:08:45 2012
@@ -33,7 +33,7 @@ import org.apache.hadoop.util.Time;
* A daemon thread that waits for the next file system to renew.
*/
@InterfaceAudience.Private
-public class DelegationTokenRenewer<T extends FileSystem & DelegationTokenRenewer.Renewable>
+public class DelegationTokenRenewer
extends Thread {
/** The renewable interface used by the renewer. */
public interface Renewable {
@@ -93,7 +93,7 @@ public class DelegationTokenRenewer<T ex
* @param newTime the new time
*/
private void updateRenewalTime() {
- renewalTime = RENEW_CYCLE + Time.now();
+ renewalTime = renewCycle + Time.now();
}
/**
@@ -134,34 +134,69 @@ public class DelegationTokenRenewer<T ex
}
/** Wait for 95% of a day between renewals */
- private static final int RENEW_CYCLE = 24 * 60 * 60 * 950;
+ private static final int RENEW_CYCLE = 24 * 60 * 60 * 950;
- private DelayQueue<RenewAction<T>> queue = new DelayQueue<RenewAction<T>>();
+ @InterfaceAudience.Private
+ protected static int renewCycle = RENEW_CYCLE;
- public DelegationTokenRenewer(final Class<T> clazz) {
+ /** Queue to maintain the RenewActions to be processed by the {@link #run()} */
+ private volatile DelayQueue<RenewAction<?>> queue = new DelayQueue<RenewAction<?>>();
+
+ /**
+ * Create the singleton instance. However, the thread can be started lazily in
+ * {@link #addRenewAction(FileSystem)}
+ */
+ private static DelegationTokenRenewer INSTANCE = null;
+
+ private DelegationTokenRenewer(final Class<? extends FileSystem> clazz) {
super(clazz.getSimpleName() + "-" + DelegationTokenRenewer.class.getSimpleName());
setDaemon(true);
}
+ public static synchronized DelegationTokenRenewer getInstance() {
+ if (INSTANCE == null) {
+ INSTANCE = new DelegationTokenRenewer(FileSystem.class);
+ }
+ return INSTANCE;
+ }
+
/** Add a renew action to the queue. */
- public void addRenewAction(final T fs) {
+ public synchronized <T extends FileSystem & Renewable> void addRenewAction(final T fs) {
queue.add(new RenewAction<T>(fs));
+ if (!isAlive()) {
+ start();
+ }
}
+ /** Remove the associated renew action from the queue */
+ public synchronized <T extends FileSystem & Renewable> void removeRenewAction(
+ final T fs) {
+ for (RenewAction<?> action : queue) {
+ if (action.weakFs.get() == fs) {
+ queue.remove(action);
+ return;
+ }
+ }
+ }
+
+ @SuppressWarnings("static-access")
@Override
public void run() {
for(;;) {
- RenewAction<T> action = null;
+ RenewAction<?> action = null;
try {
- action = queue.take();
- if (action.renew()) {
- action.updateRenewalTime();
- queue.add(action);
+ synchronized (this) {
+ action = queue.take();
+ if (action.renew()) {
+ action.updateRenewalTime();
+ queue.add(action);
+ }
}
} catch (InterruptedException ie) {
return;
} catch (Exception ie) {
- T.LOG.warn("Failed to renew token, action=" + action, ie);
+ action.weakFs.get().LOG.warn("Failed to renew token, action=" + action,
+ ie);
}
}
}
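[As a usage sketch of the new singleton API above (based only on what this diff shows; MyTokenFileSystem and its wiring are hypothetical), a file system implementing Renewable would now register on initialize and deregister on close:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FileSystem;

// Hypothetical sketch - MyTokenFileSystem is not part of this commit.
public abstract class MyTokenFileSystem extends FileSystem
    implements DelegationTokenRenewer.Renewable {

  @Override
  public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);
    // Register with the shared renewer; per the diff, the renewer
    // thread is started lazily on the first addRenewAction call.
    DelegationTokenRenewer.getInstance().addRenewAction(this);
  }

  @Override
  public void close() throws IOException {
    // Deregister so the singleton stops renewing this instance's token.
    DelegationTokenRenewer.getInstance().removeRenewAction(this);
    super.close();
  }
}
]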
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java?rev=1412297&r1=1412296&r2=1412297&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java Wed Nov 21 21:08:45 2012
@@ -1119,7 +1119,7 @@ public final class FileContext {
* @param target The symlink's absolute target
* @return Fully qualified version of the target.
*/
- private Path qualifySymlinkTarget(final AbstractFileSystem pathFS,
+ private static Path qualifySymlinkTarget(final AbstractFileSystem pathFS,
Path pathWithLink, Path target) {
// NB: makeQualified uses the target's scheme and authority, if
// specified, and the scheme and authority of pathFS, if not.
@@ -2321,7 +2321,7 @@ public final class FileContext {
* Class used to perform an operation on and resolve symlinks in a
* path. The operation may potentially span multiple file systems.
*/
- protected abstract class FSLinkResolver<T> {
+ protected static abstract class FSLinkResolver<T> {
// The maximum number of symbolic link components in a path
private static final int MAX_PATH_LINKS = 32;
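[For context on the FSLinkResolver change (HADOOP-9075): a non-static inner class carries an implicit reference to its enclosing instance, which a static nested class does not. A minimal illustration, with hypothetical names:

// Illustration only - Outer/Inner/Nested are hypothetical names.
public class Outer {
  class Inner { }          // non-static: holds a hidden Outer.this reference
  static class Nested { }  // static: no implicit reference to an Outer

  public static void main(String[] args) {
    Nested n = new Nested();            // no enclosing instance needed
    Inner i = new Outer().new Inner();  // requires an enclosing Outer
    System.out.println(n + " " + i);
  }
}
]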
Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1410998-1412282
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java?rev=1412297&r1=1412296&r2=1412297&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java Wed Nov 21 21:08:45 2012
@@ -23,11 +23,13 @@ import java.io.IOException;
import junit.framework.TestCase;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
/**
* <p>
@@ -43,7 +45,7 @@ import org.apache.hadoop.fs.Path;
* </p>
*/
public abstract class FileSystemContractBaseTest extends TestCase {
-
+ protected final static String TEST_UMASK = "062";
protected FileSystem fs;
protected byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
{
@@ -151,7 +153,26 @@ public abstract class FileSystemContract
assertFalse(fs.exists(testDeepSubDir));
}
-
+
+ public void testMkdirsWithUmask() throws Exception {
+ if (fs.getScheme().equals("s3") || fs.getScheme().equals("s3n")) {
+ // skip permission tests for S3FileSystem until HDFS-1333 is fixed.
+ return;
+ }
+ Configuration conf = fs.getConf();
+ String oldUmask = conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
+ try {
+ conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
+ final Path dir = new Path("/test/newDir");
+ assertTrue(fs.mkdirs(dir, new FsPermission((short)0777)));
+ FileStatus status = fs.getFileStatus(dir);
+ assertTrue(status.isDirectory());
+ assertEquals((short)0715, status.getPermission().toShort());
+ } finally {
+ conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask);
+ }
+ }
+
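[The expected mode in the added test follows from standard umask arithmetic - the effective permission is the requested mode with the umask bits cleared. A quick standalone check, illustrative only and not part of the commit:

// Illustration of the test's expected value.
public class UmaskCheck {
  public static void main(String[] args) {
    int requested = 0777;               // mode passed to fs.mkdirs in the test
    int umask = 0062;                   // TEST_UMASK ("062")
    int effective = requested & ~umask; // umask bits are cleared from the mode
    // Prints 715: umask 062 removes group read/write and other write.
    System.out.println(Integer.toOctalString(effective));
  }
}
]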
public void testGetFileStatusThrowsExceptionForNonExistentFile()
throws Exception {
try {