Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2014/02/08 20:05:16 UTC

svn commit: r1566100 - in /hadoop/common/branches/HDFS-4685/hadoop-common-project: hadoop-common/ hadoop-common/src/ hadoop-common/src/main/docs/ hadoop-common/src/main/java/ hadoop-common/src/main/java/org/apache/hadoop/http/ hadoop-common/src/main/ja...

Author: cnauroth
Date: Sat Feb  8 19:05:12 2014
New Revision: 1566100

URL: http://svn.apache.org/r1566100
Log:
Merge trunk to HDFS-4685.

Added:
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
      - copied unchanged from r1566041, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
      - copied unchanged from r1566041, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java
      - copied unchanged from r1566041, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.c
      - copied unchanged from r1566041, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.c
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocketWatcher.c
      - copied unchanged from r1566041, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocketWatcher.c
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
      - copied unchanged from r1566041, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
      - copied unchanged from r1566041, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
      - copied unchanged from r1566041, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
Modified:
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/pom.xml
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/CMakeLists.txt
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/docs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/core/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
    hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/CHANGES.txt Sat Feb  8 19:05:12 2014
@@ -113,6 +113,11 @@ Trunk (Unreleased)
 
     HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)
 
+    HADOOP-10244. TestKeyShell improperly tests the results of delete (Larry
+    McCay via omalley)
+
+    HADOOP-10325. Improve jenkins javadoc warnings from test-patch.sh (cmccabe)
+
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -313,6 +318,15 @@ Release 2.4.0 - UNRELEASED
 
     HADOOP-10320. Javadoc in InterfaceStability.java lacks final </ul>.
     (René Nyffenegger via cnauroth)
+    
+    HADOOP-10085. CompositeService should allow adding services while being 
+    inited. (Steve Loughran via kasha)
+
+    HADOOP-10327. Trunk windows build broken after HDFS-5746.
+    (Vinay via cnauroth)
+
+    HADOOP-10330. TestFrameDecoder fails if it cannot bind port 12345.
+    (Arpit Agarwal)
 
 Release 2.3.0 - UNRELEASED
 
@@ -685,6 +699,8 @@ Release 2.3.0 - UNRELEASED
 
     HADOOP-10311. Cleanup vendor names from the code base. (tucu)
 
+    HADOOP-10273. Fix 'mvn site'. (Arpit Agarwal)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1563326-1566041

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/pom.xml?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/pom.xml (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/pom.xml Sat Feb  8 19:05:12 2014
@@ -543,6 +543,7 @@
                     <javahClassName>org.apache.hadoop.io.compress.bzip2.Bzip2Decompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsMapping</javahClassName>
                     <javahClassName>org.apache.hadoop.io.nativeio.NativeIO</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory</javahClassName>
                     <javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyCompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
@@ -550,6 +551,7 @@
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
                     <javahClassName>org.apache.hadoop.net.unix.DomainSocket</javahClassName>
+                    <javahClassName>org.apache.hadoop.net.unix.DomainSocketWatcher</javahClassName>
                   </javahClassNames>
                   <javahOutputDirectory>${project.build.directory}/native/javah</javahOutputDirectory>
                 </configuration>

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/CMakeLists.txt?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/CMakeLists.txt (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/CMakeLists.txt Sat Feb  8 19:05:12 2014
@@ -178,7 +178,9 @@ add_dual_library(hadoop
     ${D}/io/nativeio/NativeIO.c
     ${D}/io/nativeio/errno_enum.c
     ${D}/io/nativeio/file_descriptor.c
+    ${D}/io/nativeio/SharedFileDescriptorFactory.c
     ${D}/net/unix/DomainSocket.c
+    ${D}/net/unix/DomainSocketWatcher.c
     ${D}/security/JniBasedUnixGroupsMapping.c
     ${D}/security/JniBasedUnixGroupsNetgroupMapping.c
     ${D}/security/hadoop_group_info.c

Propchange: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/docs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs:r1563326-1566041

Propchange: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1563326-1566041

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java Sat Feb  8 19:05:12 2014
@@ -34,13 +34,14 @@ public class HttpConfig {
     HTTPS_ONLY,
     HTTP_AND_HTTPS;
 
+    private static final Policy[] VALUES = values();
     public static Policy fromString(String value) {
-      if (HTTPS_ONLY.name().equalsIgnoreCase(value)) {
-        return HTTPS_ONLY;
-      } else if (HTTP_AND_HTTPS.name().equalsIgnoreCase(value)) {
-        return HTTP_AND_HTTPS;
+      for (Policy p : VALUES) {
+        if (p.name().equalsIgnoreCase(value)) {
+          return p;
+        }
       }
-      return HTTP_ONLY;
+      return null;
     }
 
     public boolean isHttpEnabled() {
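
The fromString() rewrite above has two effects: values() is cached in VALUES so each lookup avoids a fresh array allocation, and an unrecognized string now returns null instead of silently falling back to HTTP_ONLY. That pushes a null check onto callers; a minimal sketch, assuming a Configuration named conf (the key and error handling are illustrative, not part of this commit):

    HttpConfig.Policy policy = HttpConfig.Policy.fromString(
        conf.get("dfs.http.policy", "HTTP_ONLY"));
    if (policy == null) {
      // Fail fast on a bad value rather than silently serving plain HTTP.
      throw new IllegalArgumentException(
          "Unrecognized value for dfs.http.policy");
    }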

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java Sat Feb  8 19:05:12 2014
@@ -487,6 +487,16 @@ public class NativeIO {
       new ConcurrentHashMap<Integer, CachedName>();
 
     private enum IdCache { USER, GROUP }
+
+    public final static int MMAP_PROT_READ = 0x1; 
+    public final static int MMAP_PROT_WRITE = 0x2; 
+    public final static int MMAP_PROT_EXEC = 0x4; 
+
+    public static native long mmap(FileDescriptor fd, int prot,
+        boolean shared, long length) throws IOException;
+
+    public static native void munmap(long addr, long length)
+        throws IOException;
   }
 
   private static boolean workaroundNonThreadSafePasswdCalls = false;
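
The new POSIX.mmap()/munmap() natives and the MMAP_PROT_* constants expose raw memory mapping to Java callers. A hedged sketch of driving them, assuming a POSIX platform and an open file (on Windows both natives throw IOException):

    FileInputStream fis = new FileInputStream("/path/to/file");
    try {
      FileDescriptor fd = fis.getFD();
      long length = fis.getChannel().size();
      // Map the whole file read-only and shared.
      long addr = NativeIO.POSIX.mmap(fd, NativeIO.POSIX.MMAP_PROT_READ,
          true, length);
      try {
        // ... hand addr to native code or an Unsafe-based reader ...
      } finally {
        NativeIO.POSIX.munmap(addr, length);
      }
    } finally {
      fis.close();
    }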

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java Sat Feb  8 19:05:12 2014
@@ -151,6 +151,13 @@ public class RetryPolicies {
         delayMillis, maxDelayBase);
   }
   
+  public static final RetryPolicy failoverOnNetworkException(
+      RetryPolicy fallbackPolicy, int maxFailovers, int maxRetries,
+      long delayMillis, long maxDelayBase) {
+    return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers,
+        maxRetries, delayMillis, maxDelayBase);
+  }
+  
   static class TryOnceThenFail implements RetryPolicy {
     @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
@@ -516,18 +523,25 @@ public class RetryPolicies {
     
     private RetryPolicy fallbackPolicy;
     private int maxFailovers;
+    private int maxRetries;
     private long delayMillis;
     private long maxDelayBase;
     
     public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
         int maxFailovers) {
-      this(fallbackPolicy, maxFailovers, 0, 0);
+      this(fallbackPolicy, maxFailovers, 0, 0, 0);
     }
     
     public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
         int maxFailovers, long delayMillis, long maxDelayBase) {
+      this(fallbackPolicy, maxFailovers, 0, delayMillis, maxDelayBase);
+    }
+    
+    public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
+        int maxFailovers, int maxRetries, long delayMillis, long maxDelayBase) {
       this.fallbackPolicy = fallbackPolicy;
       this.maxFailovers = maxFailovers;
+      this.maxRetries = maxRetries;
       this.delayMillis = delayMillis;
       this.maxDelayBase = maxDelayBase;
     }
@@ -549,6 +563,10 @@ public class RetryPolicies {
             "failovers (" + failovers + ") exceeded maximum allowed ("
             + maxFailovers + ")");
       }
+      if (retries - failovers > maxRetries) {
+        return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "retries ("
+            + retries + ") exceeded maximum allowed (" + maxRetries + ")");
+      }
       
       if (e instanceof ConnectException ||
           e instanceof NoRouteToHostException ||
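
The new maxRetries parameter bounds plain retries independently of failovers: shouldRetry() now fails once retries minus failovers exceeds maxRetries. A sketch of constructing such a policy through the added factory method (the numbers are illustrative):

    RetryPolicy policy = RetryPolicies.failoverOnNetworkException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, // fallback for non-network errors
        15,     // maxFailovers
        10,     // maxRetries beyond failovers
        500,    // delayMillis
        15000); // maxDelayBase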

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java Sat Feb  8 19:05:12 2014
@@ -451,6 +451,14 @@ public abstract class Server {
   }
 
   /**
+   * Refresh the service authorization ACL for the service handled by this server
+   * using the specified Configuration.
+   */
+  public void refreshServiceAclWithConfigration(Configuration conf,
+      PolicyProvider provider) {
+    serviceAuthorizationManager.refreshWithConfiguration(conf, provider);
+  }
+  /**
    * Returns a handle to the serviceAuthorizationManager (required in tests)
    * @return instance of ServiceAuthorizationManager for this server
    */
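
refreshServiceAclWithConfigration() (spelled as committed) delegates to ServiceAuthorizationManager.refreshWithConfiguration(), so ACLs can be pushed from an in-memory Configuration instead of re-reading a policy file from disk. A hedged sketch, assuming a running server and a PolicyProvider; the ACL key shown is illustrative:

    Configuration aclConf = new Configuration();
    aclConf.set("security.client.protocol.acl", "alice,bob ops");
    server.refreshServiceAclWithConfigration(aclConf, provider);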

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java Sat Feb  8 19:05:12 2014
@@ -24,17 +24,15 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.net.SocketException;
-import java.nio.channels.AsynchronousCloseException;
 import java.nio.channels.ClosedChannelException;
 import java.nio.channels.ReadableByteChannel;
 import java.nio.ByteBuffer;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.lang.SystemUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.CloseableReferenceCount;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -132,104 +130,14 @@ public class DomainSocket implements Clo
   }
 
   /**
-   * Tracks the reference count of the file descriptor, and also whether it is
-   * open or closed.
+   * The socket reference count and closed bit.
    */
-  private static class Status {
-    /**
-     * Bit mask representing a closed domain socket. 
-     */
-    private static final int STATUS_CLOSED_MASK = 1 << 30;
-    
-    /**
-     * Status bits
-     * 
-     * Bit 30: 0 = DomainSocket open, 1 = DomainSocket closed
-     * Bits 29 to 0: the reference count.
-     */
-    private final AtomicInteger bits = new AtomicInteger(0);
-
-    Status() { }
-
-    /**
-     * Increment the reference count of the underlying file descriptor.
-     *
-     * @throws ClosedChannelException      If the file descriptor is closed.
-     */
-    void reference() throws ClosedChannelException {
-      int curBits = bits.incrementAndGet();
-      if ((curBits & STATUS_CLOSED_MASK) != 0) {
-        bits.decrementAndGet();
-        throw new ClosedChannelException();
-      }
-    }
-
-    /**
-     * Decrement the reference count of the underlying file descriptor.
-     *
-     * @param checkClosed        Whether to throw an exception if the file
-     *                           descriptor is closed.
-     *
-     * @throws AsynchronousCloseException  If the file descriptor is closed and
-     *                                     checkClosed is set.
-     */
-    void unreference(boolean checkClosed) throws AsynchronousCloseException {
-      int newCount = bits.decrementAndGet();
-      assert (newCount & ~STATUS_CLOSED_MASK) >= 0;
-      if (checkClosed && ((newCount & STATUS_CLOSED_MASK) != 0)) {
-        throw new AsynchronousCloseException();
-      }
-    }
-
-    /**
-     * Return true if the file descriptor is currently open.
-     * 
-     * @return                 True if the file descriptor is currently open.
-     */
-    boolean isOpen() {
-      return ((bits.get() & STATUS_CLOSED_MASK) == 0);
-    }
-
-    /**
-     * Mark the file descriptor as closed.
-     *
-     * Once the file descriptor is closed, it cannot be reopened.
-     *
-     * @return                         The current reference count.
-     * @throws ClosedChannelException  If someone else closes the file 
-     *                                 descriptor before we do.
-     */
-    int setClosed() throws ClosedChannelException {
-      while (true) {
-        int curBits = bits.get();
-        if ((curBits & STATUS_CLOSED_MASK) != 0) {
-          throw new ClosedChannelException();
-        }
-        if (bits.compareAndSet(curBits, curBits | STATUS_CLOSED_MASK)) {
-          return curBits & (~STATUS_CLOSED_MASK);
-        }
-      }
-    }
-
-    /**
-     * Get the current reference count.
-     *
-     * @return                 The current reference count.
-     */
-    int getReferenceCount() {
-      return bits.get() & (~STATUS_CLOSED_MASK);
-    }
-  }
-
-  /**
-   * The socket status.
-   */
-  private final Status status;
+  final CloseableReferenceCount refCount;
 
   /**
    * The file descriptor associated with this UNIX domain socket.
    */
-  private final int fd;
+  final int fd;
 
   /**
    * The path associated with this UNIX domain socket.
@@ -252,13 +160,21 @@ public class DomainSocket implements Clo
   private final DomainChannel channel = new DomainChannel();
 
   private DomainSocket(String path, int fd) {
-    this.status = new Status();
+    this.refCount = new CloseableReferenceCount();
     this.fd = fd;
     this.path = path;
   }
 
   private static native int bind0(String path) throws IOException;
 
+  private void unreference(boolean checkClosed) throws ClosedChannelException {
+    if (checkClosed) {
+      refCount.unreferenceCheckClosed();
+    } else {
+      refCount.unreference();
+    }
+  }
+
   /**
    * Create a new DomainSocket listening on the given path.
    *
@@ -308,14 +224,14 @@ public class DomainSocket implements Clo
    * @throws SocketTimeoutException       If the accept timed out.
    */
   public DomainSocket accept() throws IOException {
-    status.reference();
+    refCount.reference();
     boolean exc = true;
     try {
       DomainSocket ret = new DomainSocket(path, accept0(fd));
       exc = false;
       return ret;
     } finally {
-      status.unreference(exc);
+      unreference(exc);
     }
   }
 
@@ -335,14 +251,14 @@ public class DomainSocket implements Clo
     return new DomainSocket(path, fd);
   }
 
- /**
-  * Return true if the file descriptor is currently open.
-  *
-  * @return                 True if the file descriptor is currently open.
-  */
- public boolean isOpen() {
-   return status.isOpen();
- }
+  /**
+   * Return true if the file descriptor is currently open.
+   *
+   * @return                 True if the file descriptor is currently open.
+   */
+  public boolean isOpen() {
+    return refCount.isOpen();
+  }
 
   /**
    * @return                 The socket path.
@@ -381,20 +297,20 @@ public class DomainSocket implements Clo
       throws IOException;
 
   public void setAttribute(int type, int size) throws IOException {
-    status.reference();
+    refCount.reference();
     boolean exc = true;
     try {
       setAttribute0(fd, type, size);
       exc = false;
     } finally {
-      status.unreference(exc);
+      unreference(exc);
     }
   }
 
   private native int getAttribute0(int fd, int type) throws IOException;
 
   public int getAttribute(int type) throws IOException {
-    status.reference();
+    refCount.reference();
     int attribute;
     boolean exc = true;
     try {
@@ -402,7 +318,7 @@ public class DomainSocket implements Clo
       exc = false;
       return attribute;
     } finally {
-      status.unreference(exc);
+      unreference(exc);
     }
   }
 
@@ -419,9 +335,9 @@ public class DomainSocket implements Clo
   @Override
   public void close() throws IOException {
     // Set the closed bit on this DomainSocket
-    int refCount;
+    int count;
     try {
-      refCount = status.setClosed();
+      count = refCount.setClosed();
     } catch (ClosedChannelException e) {
       // Someone else already closed the DomainSocket.
       return;
@@ -429,7 +345,7 @@ public class DomainSocket implements Clo
     // Wait for all references to go away
     boolean didShutdown = false;
     boolean interrupted = false;
-    while (refCount > 0) {
+    while (count > 0) {
       if (!didShutdown) {
         try {
           // Calling shutdown on the socket will interrupt blocking system
@@ -446,7 +362,7 @@ public class DomainSocket implements Clo
       } catch (InterruptedException e) {
         interrupted = true;
       }
-      refCount = status.getReferenceCount();
+      count = refCount.getReferenceCount();
     }
 
     // At this point, nobody has a reference to the file descriptor, 
@@ -478,13 +394,13 @@ public class DomainSocket implements Clo
    */
   public void sendFileDescriptors(FileDescriptor descriptors[],
       byte jbuf[], int offset, int length) throws IOException {
-    status.reference();
+    refCount.reference();
     boolean exc = true;
     try {
       sendFileDescriptors0(fd, descriptors, jbuf, offset, length);
       exc = false;
     } finally {
-      status.unreference(exc);
+      unreference(exc);
     }
   }
 
@@ -515,14 +431,14 @@ public class DomainSocket implements Clo
    */
   public int receiveFileDescriptors(FileDescriptor[] descriptors,
       byte jbuf[], int offset, int length) throws IOException {
-    status.reference();
+    refCount.reference();
     boolean exc = true;
     try {
       int nBytes = receiveFileDescriptors0(fd, descriptors, jbuf, offset, length);
       exc = false;
       return nBytes;
     } finally {
-      status.unreference(exc);
+      unreference(exc);
     }
   }
 
@@ -539,7 +455,7 @@ public class DomainSocket implements Clo
     for (int i = 0; i < streams.length; i++) {
       streams[i] = null;
     }
-    status.reference();
+    refCount.reference();
     try {
       int ret = receiveFileDescriptors0(fd, descriptors, buf, offset, length);
       for (int i = 0, j = 0; i < descriptors.length; i++) {
@@ -569,7 +485,7 @@ public class DomainSocket implements Clo
           }
         }
       }
-      status.unreference(!success);
+      unreference(!success);
     }
   }
 
@@ -593,7 +509,7 @@ public class DomainSocket implements Clo
   public class DomainInputStream extends InputStream {
     @Override
     public int read() throws IOException {
-      status.reference();
+      refCount.reference();
       boolean exc = true;
       try {
         byte b[] = new byte[1];
@@ -601,33 +517,33 @@ public class DomainSocket implements Clo
         exc = false;
         return (ret >= 0) ? b[0] : -1;
       } finally {
-        status.unreference(exc);
+        unreference(exc);
       }
     }
     
     @Override
     public int read(byte b[], int off, int len) throws IOException {
-      status.reference();
+      refCount.reference();
       boolean exc = true;
       try {
         int nRead = DomainSocket.readArray0(DomainSocket.this.fd, b, off, len);
         exc = false;
         return nRead;
       } finally {
-        status.unreference(exc);
+        unreference(exc);
       }
     }
 
     @Override
     public int available() throws IOException {
-      status.reference();
+      refCount.reference();
       boolean exc = true;
       try {
         int nAvailable = DomainSocket.available0(DomainSocket.this.fd);
         exc = false;
         return nAvailable;
       } finally {
-        status.unreference(exc);
+        unreference(exc);
       }
     }
 
@@ -649,7 +565,7 @@ public class DomainSocket implements Clo
 
     @Override
     public void write(int val) throws IOException {
-      status.reference();
+      refCount.reference();
       boolean exc = true;
       try {
         byte b[] = new byte[1];
@@ -657,19 +573,19 @@ public class DomainSocket implements Clo
         DomainSocket.writeArray0(DomainSocket.this.fd, b, 0, 1);
         exc = false;
       } finally {
-        status.unreference(exc);
+        unreference(exc);
       }
     }
 
     @Override
     public void write(byte[] b, int off, int len) throws IOException {
-      status.reference();
-        boolean exc = true;
+      refCount.reference();
+      boolean exc = true;
       try {
         DomainSocket.writeArray0(DomainSocket.this.fd, b, off, len);
         exc = false;
       } finally {
-        status.unreference(exc);
+        unreference(exc);
       }
     }
   }
@@ -688,7 +604,7 @@ public class DomainSocket implements Clo
 
     @Override
     public int read(ByteBuffer dst) throws IOException {
-      status.reference();
+      refCount.reference();
       boolean exc = true;
       try {
         int nread = 0;
@@ -710,7 +626,7 @@ public class DomainSocket implements Clo
         exc = false;
         return nread;
       } finally {
-        status.unreference(exc);
+        unreference(exc);
       }
     }
   }
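
The hand-rolled Status class is replaced by the shared CloseableReferenceCount utility, with the private unreference(boolean) helper choosing between its two release methods. Every native call in the file keeps the same guard shape, sketched here (doNativeCall stands in for the real natives):

    refCount.reference();   // throws ClosedChannelException if already closed
    boolean exc = true;
    try {
      doNativeCall(fd);     // hypothetical native operation
      exc = false;
    } finally {
      unreference(exc);     // checks the closed bit only if the call failed
    }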

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java Sat Feb  8 19:05:12 2014
@@ -30,6 +30,8 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 @InterfaceAudience.Private
 public class ProxyUsers {
 
@@ -177,4 +179,13 @@ public class ProxyUsers {
       (list.contains("*"));
   }
 
+  @VisibleForTesting
+  public static Map<String, Collection<String>> getProxyGroups() {
+    return proxyGroups;
+  }
+
+  @VisibleForTesting
+  public static Map<String, Collection<String>> getProxyHosts() {
+    return proxyHosts;
+  }
 }
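
The @VisibleForTesting accessors expose the parsed proxy maps so tests can assert on them directly. A hedged sketch (the proxyuser key is illustrative; the maps are keyed by the full configuration key):

    Configuration conf = new Configuration();
    conf.set("hadoop.proxyuser.oozie.groups", "users");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    assertTrue(ProxyUsers.getProxyGroups()
        .containsKey("hadoop.proxyuser.oozie.groups"));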

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java Sat Feb  8 19:05:12 2014
@@ -33,6 +33,8 @@ import org.apache.hadoop.security.Kerber
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * An authorization manager which handles service-level authorization
  * for incoming service requests.
@@ -120,19 +122,23 @@ public class ServiceAuthorizationManager
     // Make a copy of the original config, and load the policy file
     Configuration policyConf = new Configuration(conf);
     policyConf.addResource(policyFile);
-    
+    refreshWithConfiguration(policyConf, provider);
+  }
+
+  public synchronized void refreshWithConfiguration(Configuration conf,
+      PolicyProvider provider) {
     final Map<Class<?>, AccessControlList> newAcls =
-      new IdentityHashMap<Class<?>, AccessControlList>();
+        new IdentityHashMap<Class<?>, AccessControlList>();
 
     // Parse the config file
     Service[] services = provider.getServices();
     if (services != null) {
       for (Service service : services) {
-        AccessControlList acl = 
-          new AccessControlList(
-              policyConf.get(service.getServiceKey(), 
-                             AccessControlList.WILDCARD_ACL_VALUE)
-              );
+        AccessControlList acl =
+            new AccessControlList(
+                conf.get(service.getServiceKey(),
+                    AccessControlList.WILDCARD_ACL_VALUE)
+            );
         newAcls.put(service.getProtocol(), acl);
       }
     }
@@ -141,8 +147,13 @@ public class ServiceAuthorizationManager
     protocolToAcl = newAcls;
   }
 
-  // Package-protected for use in tests.
-  Set<Class<?>> getProtocolsWithAcls() {
+  @VisibleForTesting
+  public Set<Class<?>> getProtocolsWithAcls() {
     return protocolToAcl.keySet();
   }
+
+  @VisibleForTesting
+  public AccessControlList getProtocolsAcls(Class<?> className) {
+    return protocolToAcl.get(className);
+  }
 }
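
refreshWithConfiguration() factors the ACL-map rebuild out of refresh(), so authorization can be reloaded without a policy file on disk, and the widened accessors let tests inspect the result. A minimal sketch, where TestProtocol and the key are hypothetical:

    Configuration conf = new Configuration();
    conf.set("security.test.protocol.acl", "*");
    manager.refreshWithConfiguration(conf, provider);
    AccessControlList acl = manager.getProtocolsAcls(TestProtocol.class);
    assertEquals("*", acl.getAclString());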

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java Sat Feb  8 19:05:12 2014
@@ -19,7 +19,6 @@
 package org.apache.hadoop.service;
 
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -54,13 +53,13 @@ public class CompositeService extends Ab
   }
 
   /**
-   * Get an unmodifiable list of services
+   * Get a cloned list of services
    * @return a list of child services at the time of invocation -
    * added services will not be picked up.
    */
   public List<Service> getServices() {
     synchronized (serviceList) {
-      return Collections.unmodifiableList(serviceList);
+      return new ArrayList<Service>(serviceList);
     }
   }
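
Returning a copy rather than an unmodifiable view means getServices() hands back a stable snapshot: a child that adds a sibling service while the composite is still being inited (the HADOOP-10085 case) no longer breaks an iteration already in flight. A sketch of the consuming side:

    // Services added after this call are simply not visited, instead of
    // triggering a ConcurrentModificationException mid-loop.
    for (Service s : composite.getServices()) {
      LOG.info("child service: " + s.getName());
    }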
 

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c Sat Feb  8 19:05:12 2014
@@ -18,6 +18,7 @@
 
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_nativeio_NativeIO.h"
+#include "org_apache_hadoop_io_nativeio_NativeIO_POSIX.h"
 
 #ifdef UNIX
 #include <assert.h>
@@ -49,6 +50,10 @@
 #include "file_descriptor.h"
 #include "errno_enum.h"
 
+#define MMAP_PROT_READ org_apache_hadoop_io_nativeio_NativeIO_POSIX_MMAP_PROT_READ
+#define MMAP_PROT_WRITE org_apache_hadoop_io_nativeio_NativeIO_POSIX_MMAP_PROT_WRITE
+#define MMAP_PROT_EXEC org_apache_hadoop_io_nativeio_NativeIO_POSIX_MMAP_PROT_EXEC
+
 // the NativeIO$POSIX$Stat inner class and its constructor
 static jclass stat_clazz;
 static jmethodID stat_ctor;
@@ -661,6 +666,55 @@ cleanup:
 #endif
 }
 
+JNIEXPORT jlong JNICALL 
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mmap(
+  JNIEnv *env, jclass clazz, jobject jfd, jint jprot,
+  jboolean jshared, jlong length)
+{
+#ifdef UNIX
+  void *addr = 0;
+  int prot, flags, fd;
+  
+  prot = ((jprot & MMAP_PROT_READ) ? PROT_READ : 0) |
+         ((jprot & MMAP_PROT_WRITE) ? PROT_WRITE : 0) |
+         ((jprot & MMAP_PROT_EXEC) ? PROT_EXEC : 0);
+  flags = (jshared == JNI_TRUE) ? MAP_SHARED : MAP_PRIVATE;
+  fd = fd_get(env, jfd);
+  addr = mmap(NULL, length, prot, flags, fd, 0);
+  if (addr == MAP_FAILED) {
+    throw_ioe(env, errno);
+  }
+  return (jlong)(intptr_t)addr;
+#endif  //   UNIX
+
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.mmap() is not supported on Windows");
+  return NULL;
+#endif
+}
+
+JNIEXPORT void JNICALL 
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munmap(
+  JNIEnv *env, jclass clazz, jlong jaddr, jlong length)
+{
+#ifdef UNIX
+  void *addr;
+
+  addr = (void*)(intptr_t)jaddr;
+  if (munmap(addr, length) < 0) {
+    throw_ioe(env, errno);
+  }
+#endif  //   UNIX
+
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.munmap() is not supported on Windows");
+  return NULL;
+#endif
+}
+
+
 /*
  * static native String getGroupName(int gid);
  *
@@ -1012,4 +1066,3 @@ JNIEnv *env, jclass clazz)
 /**
  * vim: sw=2: ts=2: et:
  */
-

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm Sat Feb  8 19:05:12 2014
@@ -352,7 +352,8 @@ Configuration for <<<conf/core-site.xml>
 | | | This value is deprecated. Use dfs.http.policy |
 *-------------------------+-------------------------+------------------------+
 | <<<dfs.http.policy>>> | <HTTP_ONLY> or <HTTPS_ONLY> or <HTTP_AND_HTTPS> | |
-| | | HTTPS_ONLY turns off http access |
+| | | HTTPS_ONLY turns off http access. This option takes precedence over |
+| | | the deprecated configuration dfs.https.enable and hadoop.ssl.enabled. |
 *-------------------------+-------------------------+------------------------+
 | <<<dfs.namenode.https-address>>> | <nn_host_fqdn:50470> | |
 *-------------------------+-------------------------+------------------------+

Propchange: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1563326-1566041

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java Sat Feb  8 19:05:12 2014
@@ -41,7 +41,7 @@ public class TestKeyShell {
   
   @Test
   public void testKeySuccessfulKeyLifecycle() throws Exception {
-    outContent.flush();
+    outContent.reset();
     String[] args1 = {"create", "key1", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     int rc = 0;
@@ -52,14 +52,14 @@ public class TestKeyShell {
     assertTrue(outContent.toString().contains("key1 has been successfully " +
     		"created."));
 
-    outContent.flush();
+    outContent.reset();
     String[] args2 = {"list", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args2);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("key1"));
 
-    outContent.flush();
+    outContent.reset();
     String[] args3 = {"roll", "key1", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args3);
@@ -67,7 +67,7 @@ public class TestKeyShell {
     assertTrue(outContent.toString().contains("key1 has been successfully " +
     		"rolled."));
 
-    outContent.flush();
+    outContent.reset();
     String[] args4 = {"delete", "key1", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args4);
@@ -75,12 +75,12 @@ public class TestKeyShell {
     assertTrue(outContent.toString().contains("key1 has been successfully " +
     		"deleted."));
 
-    outContent.flush();
+    outContent.reset();
     String[] args5 = {"list", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args5);
     assertEquals(0, rc);
-    assertTrue(outContent.toString().contains("key1"));
+    assertFalse(outContent.toString(), outContent.toString().contains("key1"));
   }
   
   @Test
@@ -165,7 +165,7 @@ public class TestKeyShell {
     assertTrue(outContent.toString().contains("key1 has been successfully " +
     		"created."));
 
-    outContent.flush();
+    outContent.reset();
     String[] args2 = {"delete", "key1", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args2);
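
The flush()-to-reset() swap is the substance of HADOOP-10244: flush() is a no-op on a ByteArrayOutputStream, so captured output accumulated across commands and the final contains("key1") check could match stale text from earlier in the test; reset() genuinely empties the buffer, which is what lets the last list assertion flip to assertFalse. In miniature:

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    out.write("key1".getBytes());
    out.flush();                  // no-op: the buffer still holds "key1"
    out.reset();                  // the buffer is now actually empty
    assertEquals("", out.toString());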

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java Sat Feb  8 19:05:12 2014
@@ -50,14 +50,6 @@ public class IdUserGroup {
   private BiMap<Integer, String> gidNameMap = HashBiMap.create();
 
   private long lastUpdateTime = 0; // Last time maps were updated
-
-  static public class DuplicateNameOrIdException extends IOException {
-    private static final long serialVersionUID = 1L;
-
-    public DuplicateNameOrIdException(String msg) {
-      super(msg);
-    }
-  }
   
   public IdUserGroup() throws IOException {
     updateMaps();
@@ -80,7 +72,8 @@ public class IdUserGroup {
     }
   }
 
-  private static final String DUPLICATE_NAME_ID_DEBUG_INFO = "NFS gateway can't start with duplicate name or id on the host system.\n"
+  private static final String DUPLICATE_NAME_ID_DEBUG_INFO =
+      "NFS gateway could have problem starting with duplicate name or id on the host system.\n"
       + "This is because HDFS (non-kerberos cluster) uses name as the only way to identify a user or group.\n"
       + "The host system with duplicated user/group name or id might work fine most of the time by itself.\n"
       + "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n"
@@ -88,6 +81,16 @@ public class IdUserGroup {
       + "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systms,\n"
       + "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS.";
   
+  private static void reportDuplicateEntry(final String header,
+      final Integer key, final String value,
+      final Integer ekey, final String evalue) {    
+      LOG.warn("\n" + header + String.format(
+          "new entry (%d, %s), existing entry: (%d, %s).\n%s\n%s",
+          key, value, ekey, evalue,
+          "The new entry is to be ignored for the following reason.",
+          DUPLICATE_NAME_ID_DEBUG_INFO));
+  }
+      
   /**
    * Get the whole list of users and groups and save them in the maps.
    * @throws IOException 
@@ -108,22 +111,27 @@ public class IdUserGroup {
         }
         LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
         // HDFS can't differentiate duplicate names with simple authentication
-        Integer key = Integer.valueOf(nameId[1]);
-        String value = nameId[0];
+        final Integer key = Integer.valueOf(nameId[1]);
+        final String value = nameId[0];        
         if (map.containsKey(key)) {
-          LOG.error(String.format(
-              "Got duplicate id:(%d, %s), existing entry: (%d, %s).\n%s", key,
-              value, key, map.get(key), DUPLICATE_NAME_ID_DEBUG_INFO));
-          throw new DuplicateNameOrIdException("Got duplicate id.");
+          final String prevValue = map.get(key);
+          if (value.equals(prevValue)) {
+            // silently ignore equivalent entries
+            continue;
+          }
+          reportDuplicateEntry(
+              "Got multiple names associated with the same id: ",
+              key, value, key, prevValue);           
+          continue;
         }
-        if (map.containsValue(nameId[0])) {
-          LOG.error(String.format(
-              "Got duplicate name:(%d, %s), existing entry: (%d, %s) \n%s",
-              key, value, map.inverse().get(value), value,
-              DUPLICATE_NAME_ID_DEBUG_INFO));
-          throw new DuplicateNameOrIdException("Got duplicate name");
+        if (map.containsValue(value)) {
+          final Integer prevKey = map.inverse().get(value);
+          reportDuplicateEntry(
+              "Got multiple ids associated with the same name: ",
+              key, value, prevKey, value);
+          continue;
         }
-        map.put(Integer.valueOf(nameId[1]), nameId[0]);
+        map.put(key, value);
       }
       LOG.info("Updated " + mapName + " map size:" + map.size());
       

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java Sat Feb  8 19:05:12 2014
@@ -17,11 +17,10 @@
  */
 package org.apache.hadoop.nfs.nfs3;
 
-import static org.junit.Assert.fail;
-
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 
-import org.apache.hadoop.nfs.nfs3.IdUserGroup.DuplicateNameOrIdException;
 import org.junit.Test;
 
 import com.google.common.collect.BiMap;
@@ -33,24 +32,36 @@ public class TestIdUserGroup {
   public void testDuplicates() throws IOException {
     String GET_ALL_USERS_CMD = "echo \"root:x:0:0:root:/root:/bin/bash\n"
         + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
-        + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\""
+        + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+        + "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+        + "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+        + "bin:x:2:2:bin:/bin:/bin/sh\n"
+        + "bin:x:1:1:bin:/bin:/sbin/nologin\n"
+        + "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"
+        + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
         + " | cut -d: -f1,3";
     String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
-        + "mapred:x:497\n" + "mapred2:x:497\"" + " | cut -d: -f1,3";
+        + "mapred:x:497\n"
+        + "mapred2:x:497\n"
+        + "mapred:x:498\n" 
+        + "mapred3:x:498\"" 
+        + " | cut -d: -f1,3";
     // Maps for id to name map
     BiMap<Integer, String> uMap = HashBiMap.create();
     BiMap<Integer, String> gMap = HashBiMap.create();
 
-    try {
-      IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
-      fail("didn't detect the duplicate name");
-    } catch (DuplicateNameOrIdException e) {
-    }
+    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
+    assertTrue(uMap.size() == 5);
+    assertEquals(uMap.get(0), "root");
+    assertEquals(uMap.get(11501), "hdfs");
+    assertEquals(uMap.get(11502), "hdfs2");
+    assertEquals(uMap.get(2), "bin");
+    assertEquals(uMap.get(1), "daemon");
     
-    try {
-      IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
-      fail("didn't detect the duplicate id");
-    } catch (DuplicateNameOrIdException e) {
-    }
+    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
+    assertTrue(gMap.size() == 3);
+    assertEquals(gMap.get(11501), "hdfs");
+    assertEquals(gMap.get(497), "mapred");
+    assertEquals(gMap.get(498), "mapred3");    
   }
 }
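
The rewritten assertions trace the new duplicate-handling rules in IdUserGroup: an identical (id, name) pair is skipped silently; a second name for an existing id, or a second id for an existing name, is logged via reportDuplicateEntry() and dropped, so the first entry wins. Walking the user data above:

    // root:0                    -> added (0, "root")
    // hdfs:11501, hdfs:11502    -> keeps (11501, "hdfs"); 11502 dropped,
    //                              second id for the same name
    // hdfs1:11501               -> dropped, second name for id 11501
    // hdfs2:11502               -> added: 11502 is free and "hdfs2" is new
    // bin:2, bin:1              -> keeps (2, "bin"); 1 dropped for "bin"
    // daemon:1, daemon:2        -> keeps (1, "daemon"); 2 dropped
    // final map: 5 entries, matching the assertions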

Modified: hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java?rev=1566100&r1=1566099&r2=1566100&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java Sat Feb  8 19:05:12 2014
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertTrue;
 
 import java.nio.ByteBuffer;
+import java.util.Random;
 
 import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
@@ -31,17 +32,17 @@ import org.jboss.netty.buffer.ByteBuffer
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
 import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelException;
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestFrameDecoder {
 
-  private static int port = 12345; // some random server port
   private static int resultSize;
 
-  static void testRequest(XDR request) {
-    SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", port, request,
+  static void testRequest(XDR request, int serverPort) {
+    SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", serverPort, request,
         true);
     tcpClient.run();
   }
@@ -148,10 +149,25 @@ public class TestFrameDecoder {
   @Test
   public void testFrames() {
 
-    RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
-        "localhost", port, 100000, 1, 2);
-    SimpleTcpServer tcpServer = new SimpleTcpServer(port, program, 1);
-    tcpServer.run();
+    Random rand = new Random();
+    int serverPort = 30000 + rand.nextInt(10000);
+    int retries = 10;    // A few retries in case initial choice is in use.
+
+    while (true) {
+      try {
+        RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
+            "localhost", serverPort, 100000, 1, 2);
+        SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
+        tcpServer.run();
+        break;          // Successfully bound a port, break out.
+      } catch (ChannelException ce) {
+        if (retries-- > 0) {
+          serverPort += rand.nextInt(20); // Port in use? Try another.
+        } else {
+          throw ce;     // Out of retries.
+        }
+      }
+    }
 
     XDR xdrOut = createGetportMount();
     int headerSize = xdrOut.size();
@@ -161,7 +177,7 @@ public class TestFrameDecoder {
     int requestSize = xdrOut.size() - headerSize;
 
     // Send the request to the server
-    testRequest(xdrOut);
+    testRequest(xdrOut, serverPort);
 
     // Verify the server got the request with right size
     assertEquals(requestSize, resultSize);
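
The fix for HADOOP-10330 replaces the hard-coded port 12345 with a randomized bind-and-retry loop. The same idiom, generalized (startServer() is hypothetical; note that rand.nextInt(20) can return 0, so this sketch adds 1 to guarantee the candidate port actually changes):

    Random rand = new Random();
    int port = 30000 + rand.nextInt(10000);
    for (int retries = 10; ; retries--) {
      try {
        startServer(port);               // hypothetical: throws on bind failure
        break;                           // bound successfully
      } catch (ChannelException ce) {
        if (retries <= 0) {
          throw ce;                      // out of retries
        }
        port += rand.nextInt(20) + 1;    // port in use? skip ahead, try again
      }
    }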