Posted to common-commits@hadoop.apache.org by to...@apache.org on 2012/05/12 22:52:42 UTC

svn commit: r1337645 [2/2] - in /hadoop/common/branches/HDFS-3042/hadoop-common-project: hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/ hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/ hadoop-auth...

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java Sat May 12 20:52:34 2012
@@ -18,10 +18,15 @@
 
 package org.apache.hadoop.security.token;
 
+import com.google.common.collect.Maps;
+
+import java.io.ByteArrayInputStream;
 import java.io.DataInput;
+import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Map;
 import java.util.ServiceLoader;
 
 import org.apache.commons.codec.binary.Base64;
@@ -37,6 +42,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.ReflectionUtils;
 
 /**
  * The client-side form of the token.
@@ -45,6 +51,9 @@ import org.apache.hadoop.io.WritableUtil
 @InterfaceStability.Evolving
 public class Token<T extends TokenIdentifier> implements Writable {
   public static final Log LOG = LogFactory.getLog(Token.class);
+  
+  private static Map<Text, Class<? extends TokenIdentifier>> tokenKindMap;
+  
   private byte[] identifier;
   private byte[] password;
   private Text kind;
@@ -100,13 +109,49 @@ public class Token<T extends TokenIdenti
   }
 
   /**
-   * Get the token identifier
-   * @return the token identifier
+   * Get the token identifier's byte representation
+   * @return the token identifier's byte representation
    */
   public byte[] getIdentifier() {
     return identifier;
   }
   
+  private static synchronized Class<? extends TokenIdentifier>
+      getClassForIdentifier(Text kind) {
+    if (tokenKindMap == null) {
+      tokenKindMap = Maps.newHashMap();
+      for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
+        tokenKindMap.put(id.getKind(), id.getClass());
+      }
+    }
+    Class<? extends TokenIdentifier> cls = tokenKindMap.get(kind);
+    if (cls == null) {
+      LOG.warn("Cannot find class for token kind " + kind);
+       return null;
+    }
+    return cls;
+  }
+  
+  /**
+   * Get the token identifier object, or null if it could not be constructed
+   * (because the class could not be loaded, for example).
+   * @return the token identifier, or null
+   * @throws IOException 
+   */
+  @SuppressWarnings("unchecked")
+  public T decodeIdentifier() throws IOException {
+    Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
+    if (cls == null) {
+      return null;
+    }
+    TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
+    ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
+    DataInputStream in = new DataInputStream(buf);  
+    tokenIdentifier.readFields(in);
+    in.close();
+    return (T) tokenIdentifier;
+  }
+  
   /**
    * Get the token password/secret
    * @return the token password/secret
@@ -260,16 +305,31 @@ public class Token<T extends TokenIdenti
       buffer.append(num);
     }
   }
+  
+  private void identifierToString(StringBuilder buffer) {
+    T id = null;
+    try {
+      id = decodeIdentifier();
+    } catch (IOException e) {
+      // handle in the finally block
+    } finally {
+      if (id != null) {
+        buffer.append("(").append(id).append(")");
+      } else {
+        addBinaryBuffer(buffer, identifier);
+      }
+    }
+  }
 
   @Override
   public String toString() {
     StringBuilder buffer = new StringBuilder();
-    buffer.append("Ident: ");
-    addBinaryBuffer(buffer, identifier);
-    buffer.append(", Kind: ");
+    buffer.append("Kind: ");
     buffer.append(kind.toString());
     buffer.append(", Service: ");
     buffer.append(service.toString());
+    buffer.append(", Ident: ");
+    identifierToString(buffer);
     return buffer.toString();
   }
   

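As a usage note for the new Token#decodeIdentifier() API above, here is a minimal, illustrative sketch (the helper class and the display wording are assumptions, not part of this commit):

import java.io.IOException;

import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

// Hypothetical caller: decode a token's identifier for display, falling back
// gracefully when no TokenIdentifier implementation is registered for the kind.
public class TokenDescriber {
  static String describe(Token<? extends TokenIdentifier> token) {
    try {
      TokenIdentifier id = token.decodeIdentifier();
      if (id != null) {
        return "Kind: " + token.getKind() + ", Ident: (" + id + ")";
      }
    } catch (IOException e) {
      // could not deserialize the identifier bytes; fall through
    }
    return "Kind: " + token.getKind() + ", Ident: <undecodable>";
  }
}
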
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java Sat May 12 20:52:34 2012
@@ -22,12 +22,21 @@ import java.util.regex.Pattern;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
+import com.google.common.collect.ComparisonChain;
+
 @InterfaceAudience.Private
 public abstract class VersionUtil {
   
   private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)");
 
   /**
+   * Suffix added by maven for nightly builds and other snapshot releases.
+   * These releases are considered to precede the non-SNAPSHOT version
+   * with the same version number.
+   */
+  private static final String SNAPSHOT_SUFFIX = "-SNAPSHOT";
+
+  /**
    * This function splits the two versions on &quot;.&quot; and performs a
    * naturally-ordered comparison of the resulting components. For example, the
    * version string "0.3" is considered to precede "0.20", despite the fact that
@@ -48,6 +57,11 @@ public abstract class VersionUtil {
    * between the two versions, then the version with fewer components is
    * considered to precede the version with more components.
    * 
+   * In addition to the above rules, there is one special case: maven SNAPSHOT
+   * releases are considered to precede a non-SNAPSHOT release with an
+   * otherwise identical version number. For example, 2.0-SNAPSHOT precedes
+   * 2.0.
+   * 
    * This function returns a negative integer if version1 precedes version2, a
    * positive integer if version2 precedes version1, and 0 if and only if the
    * two versions' components are identical in value and cardinality.
@@ -61,6 +75,11 @@ public abstract class VersionUtil {
    *         versions are equal.
    */
   public static int compareVersions(String version1, String version2) {
+    boolean isSnapshot1 = version1.endsWith(SNAPSHOT_SUFFIX);
+    boolean isSnapshot2 = version2.endsWith(SNAPSHOT_SUFFIX);
+    version1 = stripSnapshotSuffix(version1);
+    version2 = stripSnapshotSuffix(version2);
+    
     String[] version1Parts = version1.split("\\.");
     String[] version2Parts = version2.split("\\.");
     
@@ -87,9 +106,21 @@ public abstract class VersionUtil {
         return component1.length() - component2.length();
       }
     }
-    return version1Parts.length - version2Parts.length;
+    
+    return ComparisonChain.start()
+      .compare(version1Parts.length, version2Parts.length)
+      .compare(isSnapshot2, isSnapshot1)
+      .result();
   }
   
+  private static String stripSnapshotSuffix(String version) {
+    if (version.endsWith(SNAPSHOT_SUFFIX)) {
+      return version.substring(0, version.length() - SNAPSHOT_SUFFIX.length());
+    } else {
+      return version;
+    }
+  }
+
   private static boolean isNumeric(String s) {
     try {
       Integer.parseInt(s);

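To make the new SNAPSHOT ordering concrete, a small illustrative check (the wrapper class is an assumption; the expected signs follow from the javadoc above and the TestVersionUtil changes below):

import org.apache.hadoop.util.VersionUtil;

// Hypothetical sanity check of the SNAPSHOT comparison rules; run with -ea.
public class VersionCompareExample {
  public static void main(String[] args) {
    // A SNAPSHOT precedes the release with the same version number...
    assert VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0") < 0;
    // ...but still follows earlier releases.
    assert VersionUtil.compareVersions("2.0.0-SNAPSHOT", "1.0.0") > 0;
    // Two identical SNAPSHOT versions compare equal.
    assert VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT") == 0;
  }
}
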
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4 (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4 Sat May 12 20:52:34 2012
@@ -1,4 +1,4 @@
-# AC_COMPUTE_NEEDED_DSO(LIBRARY, PREPROC_SYMBOL)
+# AC_COMPUTE_NEEDED_DSO(LIBRARY, TEST_PROGRAM, PREPROC_SYMBOL)
 # --------------------------------------------------
 # Compute the 'actual' dynamic-library used 
 # for LIBRARY and set it to PREPROC_SYMBOL
@@ -6,7 +6,7 @@ AC_DEFUN([AC_COMPUTE_NEEDED_DSO],
 [
 AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1,
   [
-  echo 'int main(int argc, char **argv){return 0;}' > conftest.c
+  echo '$2' > conftest.c
   if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then
     dnl Try objdump and ldd in that order to get the dynamic library
     if test ! -z "`which objdump | grep -v 'no objdump'`"; then
@@ -24,5 +24,5 @@ AC_CACHE_CHECK([Checking for the 'actual
   rm -f conftest*
   ]
 )
-AC_DEFINE_UNQUOTED($2, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
+AC_DEFINE_UNQUOTED($3, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
 ])# AC_COMPUTE_NEEDED_DSO

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/configure.ac
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/configure.ac?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/configure.ac (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/configure.ac Sat May 12 20:52:34 2012
@@ -87,10 +87,20 @@ CPPFLAGS=$cppflags_bak
 AC_SUBST([JNI_CPPFLAGS])
 
 dnl Check for zlib headers
-AC_CHECK_HEADERS([zlib.h zconf.h], AC_COMPUTE_NEEDED_DSO(z,HADOOP_ZLIB_LIBRARY), AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
+AC_CHECK_HEADERS([zlib.h zconf.h],
+  AC_COMPUTE_NEEDED_DSO(z,
+    [#include "zlib.h"
+    int main(int argc, char **argv){zlibVersion();return 0;}],
+    HADOOP_ZLIB_LIBRARY),
+  AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
 
 dnl Check for snappy headers
-AC_CHECK_HEADERS([snappy-c.h], AC_COMPUTE_NEEDED_DSO(snappy,HADOOP_SNAPPY_LIBRARY), AC_MSG_WARN(Snappy headers were not found... building without snappy.))
+AC_CHECK_HEADERS([snappy-c.h],
+  AC_COMPUTE_NEEDED_DSO(snappy,
+    [#include "snappy-c.h"
+    int main(int argc, char **argv){snappy_compress(0,0,0,0);return 0;}],
+    HADOOP_SNAPPY_LIBRARY),
+  AC_MSG_WARN(Snappy headers were not found... building without snappy.))
 
 dnl Check for headers needed by the native Group resolution implementation
 AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c Sat May 12 20:52:34 2012
@@ -70,7 +70,7 @@ Java_org_apache_hadoop_security_JniBased
 
   // set the name of the group for subsequent calls to getnetgrent
   // note that we want to end group lokup regardless whether setnetgrent
-  // was successfull or not (as long as it was called we need to call
+  // was successful or not (as long as it was called we need to call
   // endnetgrent)
   setnetgrentCalledFlag = 1;
   if(setnetgrent(cgroup) == 1) {

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh Sat May 12 20:52:34 2012
@@ -48,10 +48,10 @@ done
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS"
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
 export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
 
 # The ZKFC does not need a large heap, and keeping it small avoids
 # any potential for long GC pauses

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml Sat May 12 20:52:34 2012
@@ -129,13 +129,6 @@
   </property>
 
   <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-
-  </property>
-
-  <property>
     <name>dfs.datanode.kerberos.principal</name>
     <value>dn/_HOST@${local.realm}</value>
     <description>

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties Sat May 12 20:52:34 2012
@@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPatt
 #
 #Security appender
 #
-hadoop.security.logger=INFO,console
+hadoop.security.logger=INFO,NullAppender
 hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
@@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-M
 #
 # hdfs audit logging
 #
-hdfs.audit.logger=INFO,console
+hdfs.audit.logger=INFO,NullAppender
 hdfs.audit.log.maxfilesize=256MB
 hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=$
 #
 # mapred audit logging
 #
-mapred.audit.logger=INFO,console
+mapred.audit.logger=INFO,NullAppender
 mapred.audit.log.maxfilesize=256MB
 mapred.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto Sat May 12 20:52:34 2012
@@ -19,7 +19,6 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "RpcPayloadHeaderProtos";
 option java_generate_equals_and_hash = true;
 
-
 /**
  * This is the rpc payload header. It is sent with every rpc call.
  * 
@@ -34,8 +33,6 @@ option java_generate_equals_and_hash = t
  *
  */
 
-
-
 /**
  * RpcKind determine the rpcEngine and the serialization of the rpc payload
  */
@@ -54,5 +51,27 @@ enum RpcPayloadOperationProto {
 message RpcPayloadHeaderProto { // the header for the RpcRequest
   optional RpcKindProto rpcKind = 1;
   optional RpcPayloadOperationProto rpcOp = 2;
-  optional uint32 callId = 3; // each rpc has a callId that is also used in response
+  required uint32 callId = 3; // each rpc has a callId that is also used in response
+}
+
+enum RpcStatusProto {
+ SUCCESS = 0;  // RPC succeeded
+ ERROR = 1;    // RPC Failed
+ FATAL = 2;    // Fatal error - connection is closed
+}
+
+/**
+ * Rpc Response Header
+ *    - If successful then the Response follows after this header
+ *        - length (4 byte int), followed by the response
+ *    - If error or fatal - the exception info follow
+ *        - length (4 byte int) Class name of exception - UTF-8 string
+ *        - length (4 byte int) Stacktrace - UTF-8 string
+ *        - if the strings are null then the length is -1
+ * In case of a fatal error the response contains the server side's IPC version
+ */
+message RpcResponseHeaderProto {
+  required uint32 callId = 1; // callId used in Request
+  required RpcStatusProto status = 2;
+  optional uint32 serverIpcVersionNum = 3; // in case of a fatal IPC error
 }

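For illustration, the new response header can be built and parsed with the generated protobuf classes roughly as below. The Java package is truncated in the hunk header above, so the import package is an assumption; the outer class name comes from java_outer_classname, and the byte-array round trip here is only an example, not the framed wire format described in the comment:

// Assumed generated classes (package assumed; outer class per java_outer_classname).
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;

public class RpcResponseHeaderExample {
  public static void main(String[] args) throws Exception {
    RpcResponseHeaderProto header = RpcResponseHeaderProto.newBuilder()
        .setCallId(42)                        // must echo the request's callId
        .setStatus(RpcStatusProto.SUCCESS)
        .build();

    byte[] bytes = header.toByteArray();
    RpcResponseHeaderProto parsed = RpcResponseHeaderProto.parseFrom(bytes);
    assert parsed.getCallId() == 42 && parsed.getStatus() == RpcStatusProto.SUCCESS;
  }
}
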
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml Sat May 12 20:52:34 2012
@@ -63,6 +63,15 @@
 </property>
 
 <property>
+  <name>hadoop.security.instrumentation.requires.admin</name>
+  <value>false</value>
+  <description>
+    Indicates if administrator ACLs are required to access
+    instrumentation servlets (JMX, METRICS, CONF, STACKS).
+  </description>
+</property>
+
+<property>
   <name>hadoop.security.authentication</name>
   <value>simple</value>
   <description>Possible values are simple (no authentication), and kerberos

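A minimal sketch of turning the new switch on from code rather than core-site.xml (the wrapper class is an assumption; the two configuration keys are the ones exercised by TestHttpServer further down):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

// Hypothetical helper: build a Configuration that restricts the
// instrumentation servlets (/jmx, /metrics, /conf, /stacks) to admins.
public class InstrumentationAclExample {
  public static Configuration secureInstrumentationConf() {
    Configuration conf = new Configuration();
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    conf.setBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
    return conf;
  }
}
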
Propchange: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1333291-1337618

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java Sat May 12 20:52:34 2012
@@ -18,11 +18,14 @@
 package org.apache.hadoop.fs;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
 
 import java.io.*;
 
 import static org.junit.Assert.*;
+
 import org.junit.Before;
 import org.junit.Test;
 
@@ -233,4 +236,16 @@ public class TestLocalFileSystem {
     assertTrue("Did not delete file", fs.delete(file1));
     assertTrue("Did not delete non-empty dir", fs.delete(dir1));
   }
+  
+  @Test
+  public void testStatistics() throws Exception {
+    FileSystem.getLocal(new Configuration());
+    int fileSchemeCount = 0;
+    for (Statistics stats : FileSystem.getAllStatistics()) {
+      if (stats.getScheme().equals("file")) {
+        fileSchemeCount++;
+      }
+    }
+    assertEquals(1, fileSchemeCount);
+  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java Sat May 12 20:52:34 2012
@@ -71,11 +71,8 @@ public class ViewFileSystemBaseTest {
 
   @Before
   public void setUp() throws Exception {
-    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
-    // In case previous test was killed before cleanup
-    fsTarget.delete(targetTestRoot, true);
+    initializeTargetTestRoot();
     
-    fsTarget.mkdirs(targetTestRoot);
     // Make  user and data dirs - we creates links to them in the mount table
     fsTarget.mkdirs(new Path(targetTestRoot,"user"));
     fsTarget.mkdirs(new Path(targetTestRoot,"data"));
@@ -99,7 +96,16 @@ public class ViewFileSystemBaseTest {
     fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
   }
   
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+    // In case previous test was killed before cleanup
+    fsTarget.delete(targetTestRoot, true);
+    
+    fsTarget.mkdirs(targetTestRoot);
+  }
+  
   void setupMountPoints() {
+    ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
     ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri());
@@ -121,7 +127,7 @@ public class ViewFileSystemBaseTest {
   }
   
   int getExpectedMountPoints() {
-    return 7;
+    return 8;
   }
   
   /**
@@ -166,7 +172,7 @@ public class ViewFileSystemBaseTest {
         }
       }
     }
-    Assert.assertEquals(expectedTokenCount / 2, delTokens.size());
+    Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens.size());
   }
 
   int getExpectedDelegationTokenCountWithCredentials() {
@@ -309,6 +315,16 @@ public class ViewFileSystemBaseTest {
     Assert.assertTrue("Renamed dest should  exist as dir in target",
         fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
     
+    // Make a directory under a directory that's mounted from the root of another FS
+    fsView.mkdirs(new Path("/targetRoot/dirFoo"));
+    Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
+    boolean dirFooPresent = false;
+    for (FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) {
+      if (fileStatus.getPath().getName().equals("dirFoo")) {
+        dirFooPresent = true;
+      }
+    }
+    Assert.assertTrue(dirFooPresent);
   }
   
   // rename across mount points that point to same target also fail 
@@ -418,7 +434,7 @@ public class ViewFileSystemBaseTest {
   }
   
   int getExpectedDirPaths() {
-    return 6;
+    return 7;
   }
   
   @Test

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java Sat May 12 20:52:34 2012
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.AbstractFile
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.FileContextTestHelper.fileType;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
@@ -77,12 +78,8 @@ public class ViewFsBaseTest {
 
   @Before
   public void setUp() throws Exception {
-
-    targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
-    // In case previous test was killed before cleanup
-    fcTarget.delete(targetTestRoot, true);
+    initializeTargetTestRoot();
     
-    fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
     // Make  user and data dirs - we creates links to them in the mount table
     fcTarget.mkdir(new Path(targetTestRoot,"user"),
         FileContext.DEFAULT_PERM, true);
@@ -100,6 +97,7 @@ public class ViewFsBaseTest {
     
     // Set up the defaultMT in the config with our mount point links
     conf = new Configuration();
+    ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
     ConfigUtil.addLink(conf, "/user",
         new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/user2",
@@ -118,6 +116,14 @@ public class ViewFsBaseTest {
     fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
     // Also try viewfs://default/    - note authority is name of mount table
   }
+  
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
+    // In case previous test was killed before cleanup
+    fcTarget.delete(targetTestRoot, true);
+    
+    fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
+  }
 
   @After
   public void tearDown() throws Exception {
@@ -128,7 +134,11 @@ public class ViewFsBaseTest {
   public void testGetMountPoints() {
     ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem();
     MountPoint[] mountPoints = viewfs.getMountPoints();
-    Assert.assertEquals(7, mountPoints.length); 
+    Assert.assertEquals(8, mountPoints.length);
+  }
+  
+  int getExpectedDelegationTokenCount() {
+    return 0;
   }
   
   /**
@@ -140,7 +150,7 @@ public class ViewFsBaseTest {
   public void testGetDelegationTokens() throws IOException {
     List<Token<?>> delTokens = 
         fcView.getDelegationTokens(new Path("/"), "sanjay");
-    Assert.assertEquals(0, delTokens.size()); 
+    Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
   }
 
   
@@ -281,6 +291,19 @@ public class ViewFsBaseTest {
     Assert.assertTrue("Renamed dest should  exist as dir in target",
         isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
     
+    // Make a directory under a directory that's mounted from the root of another FS
+    fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
+    Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
+    boolean dirFooPresent = false;
+    RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path(
+        "/targetRoot/"));
+    while (dirContents.hasNext()) {
+      FileStatus fileStatus = dirContents.next();
+      if (fileStatus.getPath().getName().equals("dirFoo")) {
+        dirFooPresent = true;
+      }
+    }
+    Assert.assertTrue(dirFooPresent);
   }
   
   // rename across mount points that point to same target also fail 
@@ -358,7 +381,7 @@ public class ViewFsBaseTest {
     
     FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
     FileStatus fs;
-    Assert.assertEquals(6, dirPaths.length);
+    Assert.assertEquals(7, dirPaths.length);
     fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java Sat May 12 20:52:34 2012
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.http;
 
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
 
@@ -70,6 +71,12 @@ public class HttpServerFunctionalTest ex
     return createServer(TEST, conf);
   }
 
+  public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl)
+      throws IOException {
+    prepareTestWebapp();
+    return createServer(TEST, conf, adminsAcl);
+  }
+
   /**
    * Create but do not start the test webapp server. The test webapp dir is
    * prepared/checked in advance.
@@ -132,6 +139,11 @@ public class HttpServerFunctionalTest ex
       throws IOException {
     return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
   }
+
+  public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
+      throws IOException {
+    return new HttpServer(webapp, "0.0.0.0", 0, true, conf, adminsAcl);
+  }
   /**
    * Create an HttpServer instance for the given webapp
    * @param webapp the webapp to work with

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java Sat May 12 20:52:34 2012
@@ -60,7 +60,6 @@ import org.apache.hadoop.security.author
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.Mock;
 import org.mockito.Mockito;
 import org.mortbay.util.ajax.JSON;
 
@@ -360,6 +359,8 @@ public class TestHttpServer extends Http
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
         true);
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+        true);
     conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
         DummyFilterInitializer.class.getName());
 
@@ -468,6 +469,26 @@ public class TestHttpServer extends Http
 
   }
 
+  @Test
+  public void testRequiresAuthorizationAccess() throws Exception {
+    Configuration conf = new Configuration();
+    ServletContext context = Mockito.mock(ServletContext.class);
+    Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+    //requires admin access to instrumentation, FALSE by default
+    Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response));
+
+    //requires admin access to instrumentation, TRUE
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
+    AccessControlList acls = Mockito.mock(AccessControlList.class);
+    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response));
+  }
+
   @Test public void testBindAddress() throws Exception {
     checkBindAddress("0.0.0.0", 0, false).stop();
     // hang onto this one for a bit more testing

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java Sat May 12 20:52:34 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.io;
 
 import junit.framework.TestCase;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
 import java.util.Random;
@@ -107,7 +108,6 @@ public class TestText extends TestCase {
     }
   }
   
-  
   public void testIO() throws Exception {
     DataOutputBuffer out = new DataOutputBuffer();
     DataInputBuffer in = new DataInputBuffer();
@@ -136,6 +136,40 @@ public class TestText extends TestCase {
       assertTrue(before.equals(after2));
     }
   }
+  
+  public void doTestLimitedIO(String str, int strLen) throws IOException {
+    DataOutputBuffer out = new DataOutputBuffer();
+    DataInputBuffer in = new DataInputBuffer();
+
+    out.reset();
+    try {
+      Text.writeString(out, str, strLen);
+      fail("expected writeString to fail when told to write a string " +
+          "that was too long!  The string was '" + str + "'");
+    } catch (IOException e) {
+    }
+    Text.writeString(out, str, strLen + 1);
+
+    // test that it reads correctly
+    in.reset(out.getData(), out.getLength());
+    in.mark(strLen);
+    String after;
+    try {
+      after = Text.readString(in, strLen);
+      fail("expected readString to fail when told to read a string " +
+          "that was too long!  The string was '" + str + "'");
+    } catch (IOException e) {
+    }
+    in.reset();
+    after = Text.readString(in, strLen + 1);
+    assertTrue(str.equals(after));
+  }
+  
+  public void testLimitedIO() throws Exception {
+    doTestLimitedIO("abcd", 4);
+    doTestLimitedIO("", 0);
+    doTestLimitedIO("1", 1);
+  }
 
   public void testCompare() throws Exception {
     DataOutputBuffer out1 = new DataOutputBuffer();
@@ -192,16 +226,6 @@ public class TestText extends TestCase {
     assertTrue(text.find("\u20ac", 5)==11);
   }
 
-  public void testClear() {
-	Text text = new Text();
-	assertEquals("", text.toString());
-	assertEquals(0, text.getBytes().length);
-	text = new Text("abcd\u20acbdcd\u20ac");
-	text.clear();
-	assertEquals("", text.toString());
-	assertEquals(0, text.getBytes().length);
-  }
-
   public void testFindAfterUpdatingContents() throws Exception {
     Text text = new Text("abcd");
     text.set("a".getBytes());

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java Sat May 12 20:52:34 2012
@@ -323,6 +323,29 @@ public class TestRPC {
   }
 
   @Test
+  public void testProxyAddress() throws Exception {
+    Server server = RPC.getServer(TestProtocol.class,
+                                  new TestImpl(), ADDRESS, 0, conf);
+    TestProtocol proxy = null;
+    
+    try {
+      server.start();
+      InetSocketAddress addr = NetUtils.getConnectAddress(server);
+
+      // create a client
+      proxy = (TestProtocol)RPC.getProxy(
+          TestProtocol.class, TestProtocol.versionID, addr, conf);
+      
+      assertEquals(addr, RPC.getServerAddress(proxy));
+    } finally {
+      server.stop();
+      if (proxy != null) {
+        RPC.stopProxy(proxy);
+      }
+    }
+  }
+
+  @Test
   public void testSlowRpc() throws Exception {
     System.out.println("Testing Slow RPC");
     // create a server with two handlers

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java Sat May 12 20:52:34 2012
@@ -25,11 +25,16 @@ import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.NetworkInterface;
+import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.SocketException;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.UnknownHostException;
+import java.util.Arrays;
 import java.util.Enumeration;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import junit.framework.AssertionFailedError;
 
@@ -37,7 +42,9 @@ import org.apache.commons.lang.StringUti
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.NetUtilsTestResolver;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -51,6 +58,13 @@ public class TestNetUtils {
   private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT);
 
   /**
+   * Some slop around expected times when making sure timeouts behave
+   * as expected. We assume that they will be accurate to within
+   * this threshold.
+   */
+  static final long TIME_FUDGE_MILLIS = 200;
+
+  /**
    * Test that we can't accidentally connect back to the connecting socket due
    * to a quirk in the TCP spec.
    *
@@ -81,6 +95,79 @@ public class TestNetUtils {
     }
   }
   
+  @Test
+  public void testSocketReadTimeoutWithChannel() throws Exception {
+    doSocketReadTimeoutTest(true);
+  }
+  
+  @Test
+  public void testSocketReadTimeoutWithoutChannel() throws Exception {
+    doSocketReadTimeoutTest(false);
+  }
+
+  
+  private void doSocketReadTimeoutTest(boolean withChannel)
+      throws IOException {
+    // Binding a ServerSocket is enough to accept connections.
+    // Rely on the backlog to accept for us.
+    ServerSocket ss = new ServerSocket(0);
+    
+    Socket s;
+    if (withChannel) {
+      s = NetUtils.getDefaultSocketFactory(new Configuration())
+          .createSocket();
+      Assume.assumeNotNull(s.getChannel());
+    } else {
+      s = new Socket();
+      assertNull(s.getChannel());
+    }
+    
+    SocketInputWrapper stm = null;
+    try {
+      NetUtils.connect(s, ss.getLocalSocketAddress(), 1000);
+
+      stm = NetUtils.getInputStream(s, 1000);
+      assertReadTimeout(stm, 1000);
+
+      // Change timeout, make sure it applies.
+      stm.setTimeout(1);
+      assertReadTimeout(stm, 1);
+      
+      // If there is a channel, then setting the socket timeout
+      // should not matter. If there is not a channel, it will
+      // take effect.
+      s.setSoTimeout(1000);
+      if (withChannel) {
+        assertReadTimeout(stm, 1);
+      } else {
+        assertReadTimeout(stm, 1000);        
+      }
+    } finally {
+      IOUtils.closeStream(stm);
+      IOUtils.closeSocket(s);
+      ss.close();
+    }
+  }
+  
+  private void assertReadTimeout(SocketInputWrapper stm, int timeoutMillis)
+      throws IOException {
+    long st = System.nanoTime();
+    try {
+      stm.read();
+      fail("Didn't time out");
+    } catch (SocketTimeoutException ste) {
+      assertTimeSince(st, timeoutMillis);
+    }
+  }
+
+  private void assertTimeSince(long startNanos, int expectedMillis) {
+    long durationNano = System.nanoTime() - startNanos;
+    long millis = TimeUnit.MILLISECONDS.convert(
+        durationNano, TimeUnit.NANOSECONDS);
+    assertTrue("Expected " + expectedMillis + "ms, but took " + millis,
+        Math.abs(millis - expectedMillis) < TIME_FUDGE_MILLIS);
+  }
+  
   /**
    * Test for {
    * @throws UnknownHostException @link NetUtils#getLocalInetAddress(String)
@@ -512,6 +599,26 @@ public class TestNetUtils {
     assertEquals("scheme://host.a.b/path", uri.toString());
   }
   
+  /** 
+   * Test for {@link NetUtils#normalizeHostNames}
+   */
+  @Test
+  public void testNormalizeHostName() {	
+    List<String> hosts = Arrays.asList(new String[] {"127.0.0.1",
+        "localhost", "3w.org", "UnknownHost"});
+    List<String> normalizedHosts = NetUtils.normalizeHostNames(hosts);
+    // when ipaddress is normalized, same address is expected in return
+    assertEquals(normalizedHosts.get(0), hosts.get(0));
+    // for normalizing a resolvable hostname, resolved ipaddress is expected in return
+    assertFalse(normalizedHosts.get(1).equals(hosts.get(1)));
+    assertEquals(normalizedHosts.get(1), hosts.get(0));
+    // HADOOP-8372: when normalizing a valid resolvable hostname that starts
+    // with a numeric character, its IP address is expected to be returned
+    assertFalse(normalizedHosts.get(2).equals(hosts.get(2)));
+    // return the same hostname after normalizing an unresolvable hostname.
+    assertEquals(normalizedHosts.get(3), hosts.get(3));
+  }
+  
   @Test
   public void testGetHostNameOfIP() {
     assertNull(NetUtils.getHostNameOfIP(null));

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java Sat May 12 20:52:34 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.net;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InterruptedIOException;
 import java.io.OutputStream;
 import java.net.SocketTimeoutException;
 import java.nio.channels.Pipe;
@@ -26,8 +27,13 @@ import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.MultithreadedTestUtil;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 /**
  * This tests timout out from SocketInputStream and
@@ -36,14 +42,17 @@ import junit.framework.TestCase;
  * Normal read and write using these streams are tested by pretty much
  * every DFS unit test.
  */
-public class TestSocketIOWithTimeout extends TestCase {
+public class TestSocketIOWithTimeout {
 
   static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class);
   
   private static int TIMEOUT = 1*1000; 
   private static String TEST_STRING = "1234567890";
+
+  private MultithreadedTestUtil.TestContext ctx = new TestContext();
   
-  private void doIO(InputStream in, OutputStream out) throws IOException {
+  private void doIO(InputStream in, OutputStream out,
+      int expectedTimeout) throws IOException {
     /* Keep on writing or reading until we get SocketTimeoutException.
      * It expects this exception to occur within 100 millis of TIMEOUT.
      */
@@ -61,34 +70,15 @@ public class TestSocketIOWithTimeout ext
         long diff = System.currentTimeMillis() - start;
         LOG.info("Got SocketTimeoutException as expected after " + 
                  diff + " millis : " + e.getMessage());
-        assertTrue(Math.abs(TIMEOUT - diff) <= 200);
+        assertTrue(Math.abs(expectedTimeout - diff) <=
+          TestNetUtils.TIME_FUDGE_MILLIS);
         break;
       }
     }
   }
   
-  /**
-   * Just reads one byte from the input stream.
-   */
-  static class ReadRunnable implements Runnable {
-    private InputStream in;
-
-    public ReadRunnable(InputStream in) {
-      this.in = in;
-    }
-    public void run() {
-      try {
-        in.read();
-      } catch (IOException e) {
-        LOG.info("Got expection while reading as expected : " + 
-                 e.getMessage());
-        return;
-      }
-      assertTrue(false);
-    }
-  }
-  
-  public void testSocketIOWithTimeout() throws IOException {
+  @Test
+  public void testSocketIOWithTimeout() throws Exception {
     
     // first open pipe:
     Pipe pipe = Pipe.open();
@@ -96,7 +86,7 @@ public class TestSocketIOWithTimeout ext
     Pipe.SinkChannel sink = pipe.sink();
     
     try {
-      InputStream in = new SocketInputStream(source, TIMEOUT);
+      final InputStream in = new SocketInputStream(source, TIMEOUT);
       OutputStream out = new SocketOutputStream(sink, TIMEOUT);
       
       byte[] writeBytes = TEST_STRING.getBytes();
@@ -105,37 +95,62 @@ public class TestSocketIOWithTimeout ext
       
       out.write(writeBytes);
       out.write(byteWithHighBit);
-      doIO(null, out);
+      doIO(null, out, TIMEOUT);
       
       in.read(readBytes);
       assertTrue(Arrays.equals(writeBytes, readBytes));
       assertEquals(byteWithHighBit & 0xff, in.read());
-      doIO(in, null);
+      doIO(in, null, TIMEOUT);
+      
+      // Change timeout on the read side.
+      ((SocketInputStream)in).setTimeout(TIMEOUT * 2);
+      doIO(in, null, TIMEOUT * 2);
+      
       
       /*
        * Verify that it handles interrupted threads properly.
-       * Use a large timeout and expect the thread to return quickly.
+       * Use a large timeout and expect the thread to return quickly
+       * upon interruption.
        */
-      in = new SocketInputStream(source, 0);
-      Thread thread = new Thread(new ReadRunnable(in));
-      thread.start();
-      
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException ignored) {}
-      
+      ((SocketInputStream)in).setTimeout(0);
+      TestingThread thread = new TestingThread(ctx) {
+        @Override
+        public void doWork() throws Exception {
+          try {
+            in.read();
+            fail("Did not fail with interrupt");
+          } catch (InterruptedIOException ste) {
+            LOG.info("Got exception while reading as expected : " + 
+                ste.getMessage());
+          }
+        }
+      };
+      ctx.addThread(thread);
+      ctx.startThreads();
+      // If the thread is interrupted before it calls read()
+      // then it throws ClosedByInterruptException due to
+      // some Java quirk. Waiting for it to call read()
+      // gets it into select(), so we get the expected
+      // InterruptedIOException.
+      Thread.sleep(1000);
       thread.interrupt();
+      ctx.stop();
+
+      //make sure the channels are still open
+      assertTrue(source.isOpen());
+      assertTrue(sink.isOpen());
       
+      // Nevertheless, the output stream is closed, because
+      // a partial write may have succeeded (see comment in
+      // SocketOutputStream#write(byte[]), int, int)
       try {
-        thread.join();
-      } catch (InterruptedException e) {
-        throw new IOException("Unexpected InterruptedException : " + e);
+        out.write(1);
+        fail("Did not throw");
+      } catch (IOException ioe) {
+        GenericTestUtils.assertExceptionContains(
+            "stream is closed", ioe);
       }
       
-      //make sure the channels are still open
-      assertTrue(source.isOpen());
-      assertTrue(sink.isOpen());
-
       out.close();
       assertFalse(sink.isOpen());
       

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java Sat May 12 20:52:34 2012
@@ -41,7 +41,7 @@ public class TestTableMapping {
   public void setUp() throws IOException {
     mappingFile = File.createTempFile(getClass().getSimpleName(), ".txt");
     Files.write("a.b.c /rack1\n" +
-                "1.2.3\t/rack2\n", mappingFile, Charsets.UTF_8);
+                "1.2.3.4\t/rack2\n", mappingFile, Charsets.UTF_8);
     mappingFile.deleteOnExit();
   }
 
@@ -55,7 +55,7 @@ public class TestTableMapping {
 
     List<String> names = new ArrayList<String>();
     names.add("a.b.c");
-    names.add("1.2.3");
+    names.add("1.2.3.4");
 
     List<String> result = mapping.resolve(names);
     assertEquals(names.size(), result.size());
@@ -73,7 +73,7 @@ public class TestTableMapping {
 
     List<String> names = new ArrayList<String>();
     names.add("a.b.c");
-    names.add("1.2.3");
+    names.add("1.2.3.4");
 
     List<String> result1 = mapping.resolve(names);
     assertEquals(names.size(), result1.size());
@@ -96,7 +96,7 @@ public class TestTableMapping {
 
     List<String> names = new ArrayList<String>();
     names.add("a.b.c");
-    names.add("1.2.3");
+    names.add("1.2.3.4");
 
     List<String> result = mapping.resolve(names);
     assertEquals(names.size(), result.size());
@@ -114,7 +114,7 @@ public class TestTableMapping {
 
     List<String> names = new ArrayList<String>();
     names.add("a.b.c");
-    names.add("1.2.3");
+    names.add("1.2.3.4");
 
     List<String> result = mapping.resolve(names);
     assertEquals(names.size(), result.size());
@@ -134,7 +134,7 @@ public class TestTableMapping {
 
     List<String> names = new ArrayList<String>();
     names.add("a.b.c");
-    names.add("1.2.3");
+    names.add("1.2.3.4");
 
     List<String> result = mapping.resolve(names);
     assertEquals(names.size(), result.size());

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java Sat May 12 20:52:34 2012
@@ -18,11 +18,15 @@
 
 package org.apache.hadoop.security.token;
 
+import static junit.framework.Assert.assertEquals;
+
 import java.io.*;
 import java.util.Arrays;
 
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
 
 import junit.framework.TestCase;
 
@@ -94,5 +98,20 @@ public class TestToken extends TestCase 
       checkUrlSafe(encode);
     }
   }
+  
+  public void testDecodeIdentifier() throws IOException {
+    TestDelegationTokenSecretManager secretManager =
+      new TestDelegationTokenSecretManager(0, 0, 0, 0);
+    secretManager.startThreads();
+    TestDelegationTokenIdentifier id = new TestDelegationTokenIdentifier(
+        new Text("owner"), new Text("renewer"), new Text("realUser"));
+    
+    Token<TestDelegationTokenIdentifier> token =
+      new Token<TestDelegationTokenIdentifier>(id, secretManager);
+    TokenIdentifier idCopy = token.decodeIdentifier();
+    
+    assertNotSame(id, idCopy);
+    assertEquals(id, idCopy);
+  }
 
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java Sat May 12 20:52:34 2012
@@ -19,7 +19,6 @@ package org.apache.hadoop.util;
 
 import static org.junit.Assert.*;
 
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 public class TestVersionUtil {
@@ -30,6 +29,8 @@ public class TestVersionUtil {
     assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0"));
     assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a"));
     assertEquals(0, VersionUtil.compareVersions("1", "1"));
+    assertEquals(0, VersionUtil.compareVersions(
+        "2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"));
     
     // Assert that lower versions are lower, and higher versions are higher.
     assertExpectedValues("1", "2.0.0");
@@ -52,6 +53,13 @@ public class TestVersionUtil {
     assertExpectedValues("1.0.0a2", "1.0.0a10");
     assertExpectedValues("1.0", "1.a");
     assertExpectedValues("1.0", "1.a0");
+    
+    // Snapshot builds precede their eventual releases.
+    assertExpectedValues("1.0-SNAPSHOT", "1.0");
+    assertExpectedValues("1.0", "1.0.0-SNAPSHOT");
+    assertExpectedValues("1.0.0-SNAPSHOT", "1.0.0");
+    assertExpectedValues("1.0.0", "1.0.1-SNAPSHOT");
+    assertExpectedValues("1.0.1-SNAPSHOT", "1.0.1");
   }
   
   private static void assertExpectedValues(String lower, String higher) {