Posted to common-commits@hadoop.apache.org by cm...@apache.org on 2014/08/20 01:50:11 UTC

svn commit: r1619012 [14/14] - in /hadoop/common/branches/HADOOP-10388/hadoop-common-project: ./ hadoop-auth/ hadoop-auth/dev-support/ hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/ hadoop-auth/src/main/java/org/apache/hado...

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java Tue Aug 19 23:49:39 2014
@@ -18,12 +18,19 @@
 package org.apache.hadoop.nfs.nfs3;
 
 import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.BiMap;
@@ -43,11 +50,15 @@ public class IdUserGroup {
   static final String MAC_GET_ALL_USERS_CMD = "dscl . -list /Users UniqueID";
   static final String MAC_GET_ALL_GROUPS_CMD = "dscl . -list /Groups PrimaryGroupID";
 
-  // Do update every 15 minutes by default
-  final static long TIMEOUT_DEFAULT = 15 * 60 * 1000; // ms
-  final static long TIMEOUT_MIN = 1 * 60 * 1000; // ms
+  private final File staticMappingFile;
+
+  // Used for parsing the static mapping file.
+  private static final Pattern EMPTY_LINE = Pattern.compile("^\\s*$");
+  private static final Pattern COMMENT_LINE = Pattern.compile("^\\s*#.*$");
+  private static final Pattern MAPPING_LINE =
+      Pattern.compile("^(uid|gid)\\s+(\\d+)\\s+(\\d+)\\s*(#.*)?$");
+
   final private long timeout;
-  final static String NFS_USERUPDATE_MILLY = "hadoop.nfs.userupdate.milly";
   
   // Maps for id to name map. Guarded by this object monitor lock
   private BiMap<Integer, String> uidNameMap = HashBiMap.create();
@@ -55,21 +66,23 @@ public class IdUserGroup {
 
   private long lastUpdateTime = 0; // Last time maps were updated
   
-  public IdUserGroup() throws IOException {
-    timeout = TIMEOUT_DEFAULT;
-    updateMaps();
-  }
-  
   public IdUserGroup(Configuration conf) throws IOException {
-    long updateTime = conf.getLong(NFS_USERUPDATE_MILLY, TIMEOUT_DEFAULT);
+    long updateTime = conf.getLong(
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT);
     // Minimal interval is 1 minute
-    if (updateTime < TIMEOUT_MIN) {
+    if (updateTime < Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN) {
       LOG.info("User configured user account update time is less"
           + " than 1 minute. Use 1 minute instead.");
-      timeout = TIMEOUT_MIN;
+      timeout = Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN;
     } else {
       timeout = updateTime;
     }
+    
+    String staticFilePath = conf.get(Nfs3Constant.NFS_STATIC_MAPPING_FILE_KEY,
+        Nfs3Constant.NFS_STATIC_MAPPING_FILE_DEFAULT);
+    staticMappingFile = new File(staticFilePath);
+    
     updateMaps();
   }
 
@@ -79,7 +92,7 @@ public class IdUserGroup {
   }
   
   synchronized private boolean isExpired() {
-    return lastUpdateTime - System.currentTimeMillis() > timeout;
+    return Time.monotonicNow() - lastUpdateTime > timeout;
   }
 
   // If can't update the maps, will keep using the old ones
@@ -113,14 +126,31 @@ public class IdUserGroup {
           "The new entry is to be ignored for the following reason.",
           DUPLICATE_NAME_ID_DEBUG_INFO));
   }
-      
+
+  /**
+   * uid and gid are defined as uint32 in Linux. Some systems create,
+   * intentionally or not, <nfsnobody, 4294967294> style <name, id>
+   * mappings, where 4294967294 is 2**32-2 as an unsigned int32. See, e.g.,
+   *   https://bugzilla.redhat.com/show_bug.cgi?id=511876.
+   * Because user and group ids are treated as Integer (signed int32)
+   * here, the number 4294967294 is out of range. The solution is to
+   * convert uint32 to int32, mapping out-of-range IDs to the negative
+   * side of Integer, e.g. 4294967294 maps to -2 and 4294967295 maps to -1.
+   */
+  private static Integer parseId(final String idStr) {
+    Long longVal = Long.parseLong(idStr);
+    int intVal = longVal.intValue();
+    return Integer.valueOf(intVal);
+  }
+  
   /**
    * Get the whole list of users and groups and save them in the maps.
    * @throws IOException 
    */
   @VisibleForTesting
   public static void updateMapInternal(BiMap<Integer, String> map, String mapName,
-      String command, String regex) throws IOException  {
+      String command, String regex, Map<Integer, Integer> staticMapping)
+      throws IOException  {
     BufferedReader br = null;
     try {
       Process process = Runtime.getRuntime().exec(
@@ -134,8 +164,8 @@ public class IdUserGroup {
         }
         LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
         // HDFS can't differentiate duplicate names with simple authentication
-        final Integer key = Integer.valueOf(nameId[1]);
-        final String value = nameId[0];        
+        final Integer key = staticMapping.get(parseId(nameId[1]));
+        final String value = nameId[0];
         if (map.containsKey(key)) {
           final String prevValue = map.get(key);
           if (value.equals(prevValue)) {
@@ -156,7 +186,7 @@ public class IdUserGroup {
         }
         map.put(key, value);
       }
-      LOG.info("Updated " + mapName + " map size:" + map.size());
+      LOG.info("Updated " + mapName + " map size: " + map.size());
       
     } catch (IOException e) {
       LOG.error("Can't update " + mapName + " map");
@@ -182,19 +212,114 @@ public class IdUserGroup {
           + " 'nobody' will be used for any user and group.");
       return;
     }
+    
+    StaticMapping staticMapping = new StaticMapping(
+        new HashMap<Integer, Integer>(), new HashMap<Integer, Integer>());
+    if (staticMappingFile.exists()) {
+      LOG.info("Using '" + staticMappingFile + "' for static UID/GID mapping...");
+      staticMapping = parseStaticMap(staticMappingFile);
+    } else {
+      LOG.info("Not doing static UID/GID mapping because '" + staticMappingFile
+          + "' does not exist.");
+    }
 
     if (OS.startsWith("Linux")) {
-      updateMapInternal(uMap, "user", LINUX_GET_ALL_USERS_CMD, ":");
-      updateMapInternal(gMap, "group", LINUX_GET_ALL_GROUPS_CMD, ":");
+      updateMapInternal(uMap, "user", LINUX_GET_ALL_USERS_CMD, ":",
+          staticMapping.uidMapping);
+      updateMapInternal(gMap, "group", LINUX_GET_ALL_GROUPS_CMD, ":",
+          staticMapping.gidMapping);
     } else {
       // Mac
-      updateMapInternal(uMap, "user", MAC_GET_ALL_USERS_CMD, "\\s+");
-      updateMapInternal(gMap, "group", MAC_GET_ALL_GROUPS_CMD, "\\s+");
+      updateMapInternal(uMap, "user", MAC_GET_ALL_USERS_CMD, "\\s+",
+          staticMapping.uidMapping);
+      updateMapInternal(gMap, "group", MAC_GET_ALL_GROUPS_CMD, "\\s+",
+          staticMapping.gidMapping);
     }
 
     uidNameMap = uMap;
     gidNameMap = gMap;
-    lastUpdateTime = System.currentTimeMillis();
+    lastUpdateTime = Time.monotonicNow();
+  }
+  
+  @SuppressWarnings("serial")
+  static final class PassThroughMap<K> extends HashMap<K, K> {
+    
+    public PassThroughMap() {
+      this(new HashMap<K, K>());
+    }
+    
+    public PassThroughMap(Map<K, K> mapping) {
+      super();
+      for (Map.Entry<K, K> entry : mapping.entrySet()) {
+        super.put(entry.getKey(), entry.getValue());
+      }
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public K get(Object key) {
+      if (super.containsKey(key)) {
+        return super.get(key);
+      } else {
+        return (K) key;
+      }
+    }
+  }
+  
+  @VisibleForTesting
+  static final class StaticMapping {
+    final Map<Integer, Integer> uidMapping;
+    final Map<Integer, Integer> gidMapping;
+    
+    public StaticMapping(Map<Integer, Integer> uidMapping,
+        Map<Integer, Integer> gidMapping) {
+      this.uidMapping = new PassThroughMap<Integer>(uidMapping);
+      this.gidMapping = new PassThroughMap<Integer>(gidMapping);
+    }
+  }
+  
+  static StaticMapping parseStaticMap(File staticMapFile)
+      throws IOException {
+    
+    Map<Integer, Integer> uidMapping = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> gidMapping = new HashMap<Integer, Integer>();
+    
+    BufferedReader in = new BufferedReader(new InputStreamReader(
+        new FileInputStream(staticMapFile)));
+    
+    try {
+      String line = null;
+      while ((line = in.readLine()) != null) {
+        // Skip entirely empty and comment lines.
+        if (EMPTY_LINE.matcher(line).matches() ||
+            COMMENT_LINE.matcher(line).matches()) {
+          continue;
+        }
+        
+        Matcher lineMatcher = MAPPING_LINE.matcher(line);
+        if (!lineMatcher.matches()) {
+          LOG.warn("Could not parse line '" + line + "'. Lines should be of " +
+              "the form '[uid|gid] [remote id] [local id]'. Blank lines and " +
+              "everything following a '#' on a line will be ignored.");
+          continue;
+        }
+        
+        // The regex match above guarantees these groups parse cleanly,
+        // so no further error checking is needed here.
+        String firstComponent = lineMatcher.group(1);
+        int remoteId = Integer.parseInt(lineMatcher.group(2));
+        int localId = Integer.parseInt(lineMatcher.group(3));
+        if (firstComponent.equals("uid")) {
+          uidMapping.put(localId, remoteId);
+        } else {
+          gidMapping.put(localId, remoteId);
+        }
+      }
+    } finally {
+      in.close();
+    }
+    
+    return new StaticMapping(uidMapping, gidMapping);
   }
 
   synchronized public int getUid(String user) throws IOException {

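The parseId() helper above leans on Java's two's-complement narrowing: the ID string is parsed as a long and cast to int, so 4294967294 becomes -2 and 4294967295 becomes -1. A minimal standalone sketch of that conversion (the class and method names here are illustrative, not part of the patch):

    public class Uint32NarrowingDemo {
      // Parse an unsigned 32-bit ID string into Java's signed int range,
      // as IdUserGroup.parseId() does: the (int) cast performs
      // two's-complement narrowing of the long value.
      static int toSignedId(String idStr) {
        return (int) Long.parseLong(idStr);
      }

      public static void main(String[] args) {
        System.out.println(toSignedId("4294967294")); // -2
        System.out.println(toSignedId("4294967295")); // -1
        System.out.println(toSignedId("2147483648")); // -2147483648
        System.out.println(toSignedId("1000"));       // 1000 (unchanged)
      }
    }
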
Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java Tue Aug 19 23:49:39 2014
@@ -25,6 +25,8 @@ import org.apache.hadoop.oncrpc.SimpleTc
 import org.apache.hadoop.portmap.PortmapMapping;
 import org.apache.hadoop.util.ShutdownHookManager;
 
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
 /**
  * Nfs server. Supports NFS v3 using {@link RpcProgram}.
  * Currently Mountd program is also started inside this class.
@@ -33,38 +35,40 @@ import org.apache.hadoop.util.ShutdownHo
 public abstract class Nfs3Base {
   public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
   private final RpcProgram rpcProgram;
-  private final int nfsPort;
   private int nfsBoundPort; // Will set after server starts
-    
+
   public RpcProgram getRpcProgram() {
     return rpcProgram;
   }
 
   protected Nfs3Base(RpcProgram rpcProgram, Configuration conf) {
     this.rpcProgram = rpcProgram;
-    this.nfsPort = conf.getInt(Nfs3Constant.NFS3_SERVER_PORT,
-        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT);
-    LOG.info("NFS server port set to: " + nfsPort);
+    LOG.info("NFS server port set to: " + rpcProgram.getPort());
   }
 
   public void start(boolean register) {
     startTCPServer(); // Start TCP server
-    
+
     if (register) {
       ShutdownHookManager.get().addShutdownHook(new Unregister(),
           SHUTDOWN_HOOK_PRIORITY);
-      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
+      try {
+        rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
+      } catch (Throwable e) {
+        LOG.fatal("Failed to start the server. Cause:", e);
+        terminate(1, e);
+      }
     }
   }
 
   private void startTCPServer() {
-    SimpleTcpServer tcpServer = new SimpleTcpServer(nfsPort,
+    SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
         rpcProgram, 0);
     rpcProgram.startDaemons();
     tcpServer.run();
     nfsBoundPort = tcpServer.getBoundPort();
   }
-  
+
   /**
    * Priority of the nfsd shutdown hook.
    */

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java Tue Aug 19 23:49:39 2014
@@ -25,10 +25,6 @@ public class Nfs3Constant {
   // The local rpcbind/portmapper port.
   public final static int SUN_RPCBIND = 111;
 
-  // The IP port number for NFS.
-  public final static String NFS3_SERVER_PORT = "nfs3.server.port";
-  public final static int NFS3_SERVER_PORT_DEFAULT = 2049;
-
   // The RPC program number for NFS.
   public final static int PROGRAM = 100003;
 
@@ -191,36 +187,22 @@ public class Nfs3Constant {
   public final static int CREATE_GUARDED = 1;
   public final static int CREATE_EXCLUSIVE = 2;
   
-  public static final String EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
-  /** Allowed hosts for nfs exports */
-  public static final String EXPORTS_ALLOWED_HOSTS_KEY = "dfs.nfs.exports.allowed.hosts";
-  public static final String EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
   /** Size for nfs exports cache */
-  public static final String EXPORTS_CACHE_SIZE_KEY = "dfs.nfs.exports.cache.size";
-  public static final int EXPORTS_CACHE_SIZE_DEFAULT = 512;
+  public static final String NFS_EXPORTS_CACHE_SIZE_KEY = "nfs.exports.cache.size";
+  public static final int NFS_EXPORTS_CACHE_SIZE_DEFAULT = 512;
   /** Expiration time for nfs exports cache entry */
-  public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "dfs.nfs.exports.cache.expirytime.millis";
-  public static final long EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
+  public static final String NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "nfs.exports.cache.expirytime.millis";
+  public static final long NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
 
-  public static final String FILE_DUMP_DIR_KEY = "dfs.nfs3.dump.dir";
-  public static final String FILE_DUMP_DIR_DEFAULT = "/tmp/.hdfs-nfs";
-  public static final String ENABLE_FILE_DUMP_KEY = "dfs.nfs3.enableDump";
-  public static final boolean ENABLE_FILE_DUMP_DEFAULT = true;
-  public static final String MAX_READ_TRANSFER_SIZE_KEY = "dfs.nfs.rtmax";
-  public static final int MAX_READ_TRANSFER_SIZE_DEFAULT = 1024 * 1024;
-  public static final String MAX_WRITE_TRANSFER_SIZE_KEY = "dfs.nfs.wtmax";
-  public static final int MAX_WRITE_TRANSFER_SIZE_DEFAULT = 1024 * 1024;
-  public static final String MAX_READDIR_TRANSFER_SIZE_KEY = "dfs.nfs.dtmax";
-  public static final int MAX_READDIR_TRANSFER_SIZE_DEFAULT = 64 * 1024;
-  public static final String MAX_OPEN_FILES = "dfs.nfs3.max.open.files";
-  public static final int MAX_OPEN_FILES_DEFAULT = 256;
-  public static final String OUTPUT_STREAM_TIMEOUT = "dfs.nfs3.stream.timeout";
-  public static final long OUTPUT_STREAM_TIMEOUT_DEFAULT = 10 * 60 * 1000; // 10 minutes
-  public static final long OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT = 10 * 1000; //10 seconds
+  /** Do user/group update every 15 minutes by default, minimum 1 minute */
+  public final static String NFS_USERGROUP_UPDATE_MILLIS_KEY = "nfs.usergroup.update.millis";
+  public final static long NFS_USERGROUP_UPDATE_MILLIS_DEFAULT = 15 * 60 * 1000; // ms
+  final static long NFS_USERGROUP_UPDATE_MILLIS_MIN = 1 * 60 * 1000; // ms
   
   public final static String UNKNOWN_USER = "nobody";
   public final static String UNKNOWN_GROUP = "nobody";
   
-  public final static String EXPORT_POINT = "dfs.nfs3.export.point";
-  public final static String EXPORT_POINT_DEFAULT = "/";
+  // Used for finding the configured static mapping file.
+  public static final String NFS_STATIC_MAPPING_FILE_KEY = "nfs.static.mapping.file";
+  public static final String NFS_STATIC_MAPPING_FILE_DEFAULT = "/etc/nfs.map";
 }
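
The two new keys above wire up the static mapping feature: nfs.static.mapping.file points at a file (default /etc/nfs.map) whose lines follow the '[uid|gid] [remote id] [local id]' format that parseStaticMap() accepts, with blank lines and '#' comments ignored. A hedged example of such a file, and of configuring it programmatically (the IDs and class name are illustrative):

    # /etc/nfs.map -- remote-to-local ID mapping
    uid 10 100    # remote UID 10 corresponds to local UID 100
    gid 10 200    # remote GID 10 corresponds to local GID 200
    uid 11 201    # trailing comments are permitted

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.nfs.nfs3.IdUserGroup;
    import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

    public class StaticMapConfigDemo {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Point the NFS gateway at the mapping file shown above.
        conf.set(Nfs3Constant.NFS_STATIC_MAPPING_FILE_KEY, "/etc/nfs.map");
        // Refresh the system user/group maps every 15 minutes (the default).
        conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
            15 * 60 * 1000L);
        IdUserGroup iug = new IdUserGroup(conf); // parses the map on construction
      }
    }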

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java Tue Aug 19 23:49:39 2014
@@ -17,12 +17,9 @@
  */
 package org.apache.hadoop.nfs.nfs3;
 
-import java.net.InetAddress;
-
 import org.apache.hadoop.nfs.nfs3.response.NFS3Response;
+import org.apache.hadoop.oncrpc.RpcInfo;
 import org.apache.hadoop.oncrpc.XDR;
-import org.apache.hadoop.oncrpc.security.SecurityHandler;
-import org.jboss.netty.channel.Channel;
 
 /**
  * RPC procedures as defined in RFC 1813.
@@ -33,70 +30,65 @@ public interface Nfs3Interface {
   public NFS3Response nullProcedure();
 
   /** GETATTR: Get file attributes */
-  public NFS3Response getattr(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response getattr(XDR xdr, RpcInfo info);
 
   /** SETATTR: Set file attributes */
-  public NFS3Response setattr(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response setattr(XDR xdr, RpcInfo info);
 
   /** LOOKUP: Lookup filename */
-  public NFS3Response lookup(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response lookup(XDR xdr, RpcInfo info);
 
   /** ACCESS: Check access permission */
-  public NFS3Response access(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response access(XDR xdr, RpcInfo info);
+
+  /** READLINK: Read from symbolic link */
+  public NFS3Response readlink(XDR xdr, RpcInfo info);
 
   /** READ: Read from file */
-  public NFS3Response read(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response read(XDR xdr, RpcInfo info);
 
   /** WRITE: Write to file */
-  public NFS3Response write(XDR xdr, Channel channel, int xid,
-      SecurityHandler securityHandler, InetAddress client);
+  public NFS3Response write(XDR xdr, RpcInfo info);
 
   /** CREATE: Create a file */
-  public NFS3Response create(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response create(XDR xdr, RpcInfo info);
 
   /** MKDIR: Create a directory */
-  public NFS3Response mkdir(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response mkdir(XDR xdr, RpcInfo info);
+
+  /** SYMLINK: Create a symbolic link */
+  public NFS3Response symlink(XDR xdr, RpcInfo info);
+
+  /** MKNOD: Create a special device */
+  public NFS3Response mknod(XDR xdr, RpcInfo info);
 
   /** REMOVE: Remove a file */
-  public NFS3Response remove(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response remove(XDR xdr, RpcInfo info);
 
   /** RMDIR: Remove a directory */
-  public NFS3Response rmdir(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response rmdir(XDR xdr, RpcInfo info);
 
   /** RENAME: Rename a file or directory */
-  public NFS3Response rename(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response rename(XDR xdr, RpcInfo info);
 
-  /** SYMLINK: Create a symbolic link */
-  public NFS3Response symlink(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  /** LINK: Create a link to an object */
+  public NFS3Response link(XDR xdr, RpcInfo info);
 
   /** READDIR: Read from directory */
-  public NFS3Response readdir(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response readdir(XDR xdr, RpcInfo info);
 
+  /** READDIRPLUS: Extended read from directory */
+  public NFS3Response readdirplus(XDR xdr, RpcInfo info);
+  
   /** FSSTAT: Get dynamic file system information */
-  public NFS3Response fsstat(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response fsstat(XDR xdr, RpcInfo info);
 
   /** FSINFO: Get static file system information */
-  public NFS3Response fsinfo(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response fsinfo(XDR xdr, RpcInfo info);
 
   /** PATHCONF: Retrieve POSIX information */
-  public NFS3Response pathconf(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response pathconf(XDR xdr, RpcInfo info);
 
   /** COMMIT: Commit cached data on a server to stable storage */
-  public NFS3Response commit(XDR xdr, Channel channel, int xid,
-      SecurityHandler securityHandler, InetAddress client);
+  public NFS3Response commit(XDR xdr, RpcInfo info);
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java Tue Aug 19 23:49:39 2014
@@ -46,10 +46,12 @@ public class ACCESS3Response extends NFS
   @Override
   public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
     super.writeHeaderAndResponse(out, xid, verifier);
-    out.writeBoolean(true);
-    postOpAttr.serialize(out);
     if (this.getStatus() == Nfs3Status.NFS3_OK) {
+      out.writeBoolean(true);
+      postOpAttr.serialize(out);
       out.writeInt(access);
+    } else {
+      out.writeBoolean(false);
     }
     return out;
   }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java Tue Aug 19 23:49:39 2014
@@ -18,11 +18,15 @@
 package org.apache.hadoop.oncrpc;
 
 import java.io.IOException;
+import java.net.DatagramSocket;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
 import org.apache.hadoop.oncrpc.security.Verifier;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.portmap.PortmapMapping;
 import org.apache.hadoop.portmap.PortmapRequest;
 import org.jboss.netty.buffer.ChannelBuffer;
@@ -36,7 +40,7 @@ import org.jboss.netty.channel.SimpleCha
  * and implement {@link #handleInternal} to handle the requests received.
  */
 public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
-  private static final Log LOG = LogFactory.getLog(RpcProgram.class);
+  static final Log LOG = LogFactory.getLog(RpcProgram.class);
   public static final int RPCB_PORT = 111;
   private final String program;
   private final String host;
@@ -44,6 +48,13 @@ public abstract class RpcProgram extends
   private final int progNumber;
   private final int lowProgVersion;
   private final int highProgVersion;
+  protected final boolean allowInsecurePorts;
+  
+  /**
+   * If not null, this will be used as the socket to use to connect to the
+   * system portmap daemon when registering this RPC server program.
+   */
+  private final DatagramSocket registrationSocket;
   
   /**
    * Constructor
@@ -54,15 +65,24 @@ public abstract class RpcProgram extends
    * @param progNumber program number as defined in RFC 1050
    * @param lowProgVersion lowest version of the specification supported
    * @param highProgVersion highest version of the specification supported
+   * @param registrationSocket if not null, use this socket to
+   *        register with portmap daemon
+   * @param allowInsecurePorts true to allow client connections from
+   *        unprivileged ports, false otherwise
    */
   protected RpcProgram(String program, String host, int port, int progNumber,
-      int lowProgVersion, int highProgVersion) {
+      int lowProgVersion, int highProgVersion,
+      DatagramSocket registrationSocket, boolean allowInsecurePorts) {
     this.program = program;
     this.host = host;
     this.port = port;
     this.progNumber = progNumber;
     this.lowProgVersion = lowProgVersion;
     this.highProgVersion = highProgVersion;
+    this.registrationSocket = registrationSocket;
+    this.allowInsecurePorts = allowInsecurePorts;
+    LOG.info("Will " + (allowInsecurePorts ? "" : "not ") + "accept client "
+        + "connections from unprivileged ports");
   }
 
   /**
@@ -105,14 +125,14 @@ public abstract class RpcProgram extends
   protected void register(PortmapMapping mapEntry, boolean set) {
     XDR mappingRequest = PortmapRequest.create(mapEntry, set);
     SimpleUdpClient registrationClient = new SimpleUdpClient(host, RPCB_PORT,
-        mappingRequest);
+        mappingRequest, registrationSocket);
     try {
       registrationClient.run();
     } catch (IOException e) {
       String request = set ? "Registration" : "Unregistration";
       LOG.error(request + " failure with " + host + ":" + port
           + ", portmap entry: " + mapEntry);
-      throw new RuntimeException(request + " failure");
+      throw new RuntimeException(request + " failure", e);
     }
   }
 
@@ -124,43 +144,80 @@ public abstract class RpcProgram extends
       throws Exception {
     RpcInfo info = (RpcInfo) e.getMessage();
     RpcCall call = (RpcCall) info.header();
+    
+    SocketAddress remoteAddress = info.remoteAddress();
     if (LOG.isTraceEnabled()) {
       LOG.trace(program + " procedure #" + call.getProcedure());
     }
     
     if (this.progNumber != call.getProgram()) {
       LOG.warn("Invalid RPC call program " + call.getProgram());
-      RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
-          AcceptState.PROG_UNAVAIL, Verifier.VERIFIER_NONE);
-
-      XDR out = new XDR();
-      reply.write(out);
-      ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
-          .buffer());
-      RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
-      RpcUtil.sendRpcResponse(ctx, rsp);
+      sendAcceptedReply(call, remoteAddress, AcceptState.PROG_UNAVAIL, ctx);
       return;
     }
 
     int ver = call.getVersion();
     if (ver < lowProgVersion || ver > highProgVersion) {
       LOG.warn("Invalid RPC call version " + ver);
-      RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
-          AcceptState.PROG_MISMATCH, Verifier.VERIFIER_NONE);
-
-      XDR out = new XDR();
-      reply.write(out);
-      out.writeInt(lowProgVersion);
-      out.writeInt(highProgVersion);
-      ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
-          .buffer());
-      RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
-      RpcUtil.sendRpcResponse(ctx, rsp);
+      sendAcceptedReply(call, remoteAddress, AcceptState.PROG_MISMATCH, ctx);
       return;
     }
     
     handleInternal(ctx, info);
   }
+  
+  public boolean doPortMonitoring(SocketAddress remoteAddress) {
+    if (!allowInsecurePorts) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Will not allow connections from unprivileged ports. "
+            + "Checking for valid client port...");
+      }
+
+      if (remoteAddress instanceof InetSocketAddress) {
+        InetSocketAddress inetRemoteAddress = (InetSocketAddress) remoteAddress;
+        if (inetRemoteAddress.getPort() > 1023) {
+          LOG.warn("Connection attempted from '" + inetRemoteAddress + "' "
+              + "which is an unprivileged port. Rejecting connection.");
+          return false;
+        }
+      } else {
+        LOG.warn("Could not determine remote port of socket address '"
+            + remoteAddress + "'. Rejecting connection.");
+        return false;
+      }
+    }
+    return true;
+  }
+  
+  private void sendAcceptedReply(RpcCall call, SocketAddress remoteAddress,
+      AcceptState acceptState, ChannelHandlerContext ctx) {
+    RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
+        acceptState, Verifier.VERIFIER_NONE);
+
+    XDR out = new XDR();
+    reply.write(out);
+    if (acceptState == AcceptState.PROG_MISMATCH) {
+      out.writeInt(lowProgVersion);
+      out.writeInt(highProgVersion);
+    }
+    ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
+        .buffer());
+    RpcResponse rsp = new RpcResponse(b, remoteAddress);
+    RpcUtil.sendRpcResponse(ctx, rsp);
+  }
+  
+  protected static void sendRejectedReply(RpcCall call,
+      SocketAddress remoteAddress, ChannelHandlerContext ctx) {
+    XDR out = new XDR();
+    RpcDeniedReply reply = new RpcDeniedReply(call.getXid(),
+        RpcReply.ReplyState.MSG_DENIED,
+        RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
+    reply.write(out);
+    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
+        .buffer());
+    RpcResponse rsp = new RpcResponse(buf, remoteAddress);
+    RpcUtil.sendRpcResponse(ctx, rsp);
+  }
 
   protected abstract void handleInternal(ChannelHandlerContext ctx, RpcInfo info);
   

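doPortMonitoring() above encodes the traditional NFS "secure ports" heuristic: on Unix only root may bind ports below 1024, so a TCP source port of 1023 or lower suggests a privileged peer, and anything higher is rejected when insecure ports are disallowed. A standalone sketch of the same check (illustrative names, not the patch's class):

    import java.net.InetSocketAddress;
    import java.net.SocketAddress;

    public class PrivilegedPortCheck {
      // Returns true if the remote peer connected from a privileged
      // (root-only) port, mirroring the test in doPortMonitoring().
      static boolean isPrivileged(SocketAddress remote) {
        if (remote instanceof InetSocketAddress) {
          return ((InetSocketAddress) remote).getPort() <= 1023;
        }
        return false; // Unknown address type: treat as unprivileged.
      }

      public static void main(String[] args) {
        System.out.println(isPrivileged(new InetSocketAddress("10.0.0.1", 1021)));  // true
        System.out.println(isPrivileged(new InetSocketAddress("10.0.0.1", 40000))); // false
      }
    }
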
Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java Tue Aug 19 23:49:39 2014
@@ -27,43 +27,57 @@ import java.util.Arrays;
  * A simple UDP based RPC client which just sends one request to a server.
  */
 public class SimpleUdpClient {
+  
   protected final String host;
   protected final int port;
   protected final XDR request;
   protected final boolean oneShot;
+  protected final DatagramSocket clientSocket;
 
-  public SimpleUdpClient(String host, int port, XDR request) {
-    this(host, port, request, true);
+  public SimpleUdpClient(String host, int port, XDR request,
+      DatagramSocket clientSocket) {
+    this(host, port, request, true, clientSocket);
   }
 
-  public SimpleUdpClient(String host, int port, XDR request, Boolean oneShot) {
+  public SimpleUdpClient(String host, int port, XDR request, Boolean oneShot,
+      DatagramSocket clientSocket) {
     this.host = host;
     this.port = port;
     this.request = request;
     this.oneShot = oneShot;
+    this.clientSocket = clientSocket;
   }
 
   public void run() throws IOException {
-    DatagramSocket clientSocket = new DatagramSocket();
     InetAddress IPAddress = InetAddress.getByName(host);
     byte[] sendData = request.getBytes();
     byte[] receiveData = new byte[65535];
-
-    DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length,
-        IPAddress, port);
-    clientSocket.send(sendPacket);
-    DatagramPacket receivePacket = new DatagramPacket(receiveData,
-        receiveData.length);
-    clientSocket.receive(receivePacket);
-
-    // Check reply status
-    XDR xdr = new XDR(Arrays.copyOfRange(receiveData, 0,
-        receivePacket.getLength()));
-    RpcReply reply = RpcReply.read(xdr);
-    if (reply.getState() != RpcReply.ReplyState.MSG_ACCEPTED) {
-      throw new IOException("Request failed: " + reply.getState());
+    // Use the provided socket if there is one, else just make a new one.
+    DatagramSocket socket = this.clientSocket == null ?
+        new DatagramSocket() : this.clientSocket;
+
+    try {
+      DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length,
+          IPAddress, port);
+      socket.send(sendPacket);
+      socket.setSoTimeout(500);
+      DatagramPacket receivePacket = new DatagramPacket(receiveData,
+          receiveData.length);
+      socket.receive(receivePacket);
+  
+      // Check reply status
+      XDR xdr = new XDR(Arrays.copyOfRange(receiveData, 0,
+          receivePacket.getLength()));
+      RpcReply reply = RpcReply.read(xdr);
+      if (reply.getState() != RpcReply.ReplyState.MSG_ACCEPTED) {
+        throw new IOException("Request failed: " + reply.getState());
+      }
+    } finally {
+      // If the client socket was passed in to this UDP client, it's on the
+      // caller of this UDP client to close that socket.
+      if (this.clientSocket == null) {
+        socket.close();
+      }
     }
-
-    clientSocket.close();
   }
 }

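The finally block above follows a borrow-versus-own convention: SimpleUdpClient closes the DatagramSocket only when it created the socket itself, and leaves a caller-supplied socket (such as the registrationSocket handed down from RpcProgram) for the caller to close. A hedged sketch of that pattern in isolation (names are illustrative):

    import java.io.Closeable;
    import java.io.IOException;

    public class BorrowOrOwnDemo {
      // Close the resource only if this method created it; a non-null
      // argument is borrowed and remains the caller's responsibility.
      static void useResource(Closeable provided) throws IOException {
        Closeable resource = (provided != null) ? provided : open();
        try {
          // ... work with the resource ...
        } finally {
          if (provided == null) {
            resource.close(); // We own it, so we close it.
          }
        }
      }

      static Closeable open() {
        return () -> { }; // Placeholder resource for illustration.
      }
    }
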
Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java Tue Aug 19 23:49:39 2014
@@ -58,6 +58,10 @@ public class CredentialsSys extends Cred
     return mUID;
   }
 
+  public int[] getAuxGIDs() {
+    return mAuxGIDs;
+  }
+
   public void setGID(int gid) {
     this.mGID = gid;
   }
@@ -65,7 +69,7 @@ public class CredentialsSys extends Cred
   public void setUID(int uid) {
     this.mUID = uid;
   }
-  
+
   public void setStamp(int stamp) {
     this.mStamp = stamp;
   }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java Tue Aug 19 23:49:39 2014
@@ -60,4 +60,9 @@ public abstract class SecurityHandler {
   public int getGid() {
     throw new UnsupportedOperationException();
   }
+
+  /** Used by AUTH_SYS */
+  public int[] getAuxGids() {
+    throw new UnsupportedOperationException();
+  }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java Tue Aug 19 23:49:39 2014
@@ -56,4 +56,9 @@ public class SysSecurityHandler extends 
   public int getGid() {
     return mCredentialsSys.getGID();
   }
+
+  @Override
+  public int[] getAuxGids() {
+    return mCredentialsSys.getAuxGIDs();
+  }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java Tue Aug 19 23:49:39 2014
@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.nfs;
 
-import junit.framework.Assert;
-
-import org.apache.hadoop.nfs.AccessPrivilege;
-import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.junit.Assert;
 import org.junit.Test;
 
 public class TestNfsExports {
@@ -32,9 +29,9 @@ public class TestNfsExports {
   private final String hostname2 = "a.b.org";
   
   private static final long ExpirationPeriod = 
-      Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000;
+      Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000;
   
-  private static final int CacheSize = Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT;
+  private static final int CacheSize = Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT;
   private static final long NanosPerMillis = 1000000;
 
   @Test
@@ -197,4 +194,16 @@ public class TestNfsExports {
     } while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
     Assert.assertEquals(AccessPrivilege.NONE, ap);
   }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testInvalidHost() {
+    NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
+        "foo#bar");
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testInvalidSeparator() {
+    NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
+        "foo ro : bar rw");
+  }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsTime.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsTime.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsTime.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsTime.java Tue Aug 19 23:49:39 2014
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.nfs;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.hadoop.nfs.NfsTime;
 import org.apache.hadoop.oncrpc.XDR;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestFileHandle.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestFileHandle.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestFileHandle.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestFileHandle.java Tue Aug 19 23:49:39 2014
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.nfs.nfs3;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java Tue Aug 19 23:49:39 2014
@@ -19,15 +19,97 @@ package org.apache.hadoop.nfs.nfs3;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.nfs.nfs3.IdUserGroup.PassThroughMap;
+import org.apache.hadoop.nfs.nfs3.IdUserGroup.StaticMapping;
 import org.junit.Test;
 
 import com.google.common.collect.BiMap;
 import com.google.common.collect.HashBiMap;
 
 public class TestIdUserGroup {
+  
+  private static final Map<Integer, Integer> EMPTY_PASS_THROUGH_MAP =
+      new PassThroughMap<Integer>();
+  
+  @Test
+  public void testStaticMapParsing() throws IOException {
+    File tempStaticMapFile = File.createTempFile("nfs-", ".map");
+    final String staticMapFileContents =
+        "uid 10 100\n" +
+        "gid 10 200\n" +
+        "uid 11 201 # comment at the end of a line\n" +
+        "uid 12 301\n" +
+        "# Comment at the beginning of a line\n" +
+        "    # Comment that starts late in the line\n" +
+        "uid 10000 10001# line without whitespace before comment\n" +
+        "uid 13 302\n" +
+        "gid\t11\t201\n" + // Tabs instead of spaces.
+        "\n" + // Entirely empty line.
+        "gid 12 202";
+    OutputStream out = new FileOutputStream(tempStaticMapFile);
+    out.write(staticMapFileContents.getBytes());
+    out.close();
+    StaticMapping parsedMap = IdUserGroup.parseStaticMap(tempStaticMapFile);
+    
+    assertEquals(10, (int)parsedMap.uidMapping.get(100));
+    assertEquals(11, (int)parsedMap.uidMapping.get(201));
+    assertEquals(12, (int)parsedMap.uidMapping.get(301));
+    assertEquals(13, (int)parsedMap.uidMapping.get(302));
+    assertEquals(10, (int)parsedMap.gidMapping.get(200));
+    assertEquals(11, (int)parsedMap.gidMapping.get(201));
+    assertEquals(12, (int)parsedMap.gidMapping.get(202));
+    assertEquals(10000, (int)parsedMap.uidMapping.get(10001));
+    // Ensure pass-through of unmapped IDs works.
+    assertEquals(1000, (int)parsedMap.uidMapping.get(1000));
+  }
+  
+  @Test
+  public void testStaticMapping() throws IOException {
+    Map<Integer, Integer> uidStaticMap = new PassThroughMap<Integer>();
+    Map<Integer, Integer> gidStaticMap = new PassThroughMap<Integer>();
+    
+    uidStaticMap.put(11501, 10);
+    gidStaticMap.put(497, 200);
+    
+    // Maps for id to name map
+    BiMap<Integer, String> uMap = HashBiMap.create();
+    BiMap<Integer, String> gMap = HashBiMap.create();
+    
+    String GET_ALL_USERS_CMD =
+        "echo \"atm:x:1000:1000:Aaron T. Myers,,,:/home/atm:/bin/bash\n"
+        + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\""
+        + " | cut -d: -f1,3";
+    
+    String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
+        + "mapred:x:497\n"
+        + "mapred2:x:498\""
+        + " | cut -d: -f1,3";
+
+    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
+        uidStaticMap);
+    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
+        gidStaticMap);
+    
+    assertEquals("hdfs", uMap.get(10));
+    assertEquals(10, (int)uMap.inverse().get("hdfs"));
+    assertEquals("atm", uMap.get(1000));
+    assertEquals(1000, (int)uMap.inverse().get("atm"));
+    
+    assertEquals("hdfs", gMap.get(11501));
+    assertEquals(11501, (int)gMap.inverse().get("hdfs"));
+    assertEquals("mapred", gMap.get(200));
+    assertEquals(200, (int)gMap.inverse().get("mapred"));
+    assertEquals("mapred2", gMap.get(498));
+    assertEquals(498, (int)gMap.inverse().get("mapred2"));
+  }
 
   @Test
   public void testDuplicates() throws IOException {
@@ -51,15 +133,17 @@ public class TestIdUserGroup {
     BiMap<Integer, String> uMap = HashBiMap.create();
     BiMap<Integer, String> gMap = HashBiMap.create();
 
-    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
-    assertTrue(uMap.size() == 5);
+    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
+        EMPTY_PASS_THROUGH_MAP);
+    assertEquals(5, uMap.size());
     assertEquals("root", uMap.get(0));
     assertEquals("hdfs", uMap.get(11501));
     assertEquals("hdfs2",uMap.get(11502));
     assertEquals("bin", uMap.get(2));
     assertEquals("daemon", uMap.get(1));
 
-    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
+    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
+        EMPTY_PASS_THROUGH_MAP);
     assertTrue(gMap.size() == 3);
     assertEquals("hdfs",gMap.get(11501));
     assertEquals("mapred", gMap.get(497));
@@ -67,18 +151,67 @@ public class TestIdUserGroup {
   }
 
   @Test
+  public void testIdOutOfIntegerRange() throws IOException {
+    String GET_ALL_USERS_CMD = "echo \""
+        + "nfsnobody:x:4294967294:4294967294:Anonymous NFS User:/var/lib/nfs:/sbin/nologin\n"
+        + "nfsnobody1:x:4294967295:4294967295:Anonymous NFS User:/var/lib/nfs1:/sbin/nologin\n"
+        + "maxint:x:2147483647:2147483647:Grid Distributed File System:/home/maxint:/bin/bash\n"
+        + "minint:x:2147483648:2147483648:Grid Distributed File System:/home/minint:/bin/bash\n"
+        + "archivebackup:*:1031:4294967294:Archive Backup:/home/users/archivebackup:/bin/sh\n"
+        + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+        + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
+        + " | cut -d: -f1,3";
+    String GET_ALL_GROUPS_CMD = "echo \""
+        + "hdfs:*:11501:hrt_hdfs\n"
+        + "rpcuser:*:29:\n"
+        + "nfsnobody:*:4294967294:\n"
+        + "nfsnobody1:*:4294967295:\n"
+        + "maxint:*:2147483647:\n"
+        + "minint:*:2147483648:\n"
+        + "mapred3:x:498\"" 
+        + " | cut -d: -f1,3";
+    // Maps for id to name map
+    BiMap<Integer, String> uMap = HashBiMap.create();
+    BiMap<Integer, String> gMap = HashBiMap.create();
+
+    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
+        EMPTY_PASS_THROUGH_MAP);
+    assertTrue(uMap.size() == 7);
+    assertEquals("nfsnobody", uMap.get(-2));
+    assertEquals("nfsnobody1", uMap.get(-1));
+    assertEquals("maxint", uMap.get(2147483647));
+    assertEquals("minint", uMap.get(-2147483648));
+    assertEquals("archivebackup", uMap.get(1031));
+    assertEquals("hdfs",uMap.get(11501));
+    assertEquals("daemon", uMap.get(2));
+
+    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
+        EMPTY_PASS_THROUGH_MAP);
+    assertTrue(gMap.size() == 7);
+    assertEquals("hdfs",gMap.get(11501));
+    assertEquals("rpcuser", gMap.get(29));
+    assertEquals("nfsnobody", gMap.get(-2));
+    assertEquals("nfsnobody1", gMap.get(-1));
+    assertEquals("maxint", gMap.get(2147483647));
+    assertEquals("minint", gMap.get(-2147483648));
+    assertEquals("mapred3", gMap.get(498));
+  }
+
+  @Test
   public void testUserUpdateSetting() throws IOException {
-    IdUserGroup iug = new IdUserGroup();
-    assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_DEFAULT);
+    IdUserGroup iug = new IdUserGroup(new Configuration());
+    assertEquals(iug.getTimeout(),
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT);
 
     Configuration conf = new Configuration();
-    conf.setLong(IdUserGroup.NFS_USERUPDATE_MILLY, 0);
+    conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY, 0);
     iug = new IdUserGroup(conf);
-    assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_MIN);
+    assertEquals(iug.getTimeout(), Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN);
 
-    conf.setLong(IdUserGroup.NFS_USERUPDATE_MILLY,
-        IdUserGroup.TIMEOUT_DEFAULT * 2);
+    conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2);
     iug = new IdUserGroup(conf);
-    assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_DEFAULT * 2);
+    assertEquals(iug.getTimeout(),
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2);
   }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java Tue Aug 19 23:49:39 2014
@@ -28,6 +28,8 @@ import java.util.Random;
 import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
+import org.apache.log4j.Level;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -38,10 +40,16 @@ import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestFrameDecoder {
+  
+  static {
+    ((Log4JLogger) RpcProgram.LOG).getLogger().setLevel(Level.ALL);
+  }
 
   private static int resultSize;
 
   static void testRequest(XDR request, int serverPort) {
+    // Reset resultSize so as to avoid interference from other tests in this class.
+    resultSize = 0;
     SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", serverPort, request,
         true);
     tcpClient.run();
@@ -50,12 +58,26 @@ public class TestFrameDecoder {
   static class TestRpcProgram extends RpcProgram {
 
     protected TestRpcProgram(String program, String host, int port,
-        int progNumber, int lowProgVersion, int highProgVersion) {
-      super(program, host, port, progNumber, lowProgVersion, highProgVersion);
+        int progNumber, int lowProgVersion, int highProgVersion,
+        boolean allowInsecurePorts) {
+      super(program, host, port, progNumber, lowProgVersion, highProgVersion,
+          null, allowInsecurePorts);
     }
 
     @Override
     protected void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
+      // This is just like what's done in RpcProgramMountd#handleInternal and
+      // RpcProgramNfs3#handleInternal.
+      RpcCall rpcCall = (RpcCall) info.header();
+      final int procedure = rpcCall.getProcedure();
+      if (procedure != 0) {
+        boolean portMonitorSuccess = doPortMonitoring(info.remoteAddress());
+        if (!portMonitorSuccess) {
+          sendRejectedReply(rpcCall, info.remoteAddress(), ctx);
+          return;
+        }
+      }
+      
       resultSize = info.data().readableBytes();
       RpcAcceptedReply reply = RpcAcceptedReply.getAcceptInstance(1234,
           new VerifierNone());
@@ -148,7 +170,55 @@ public class TestFrameDecoder {
 
   @Test
   public void testFrames() {
+    int serverPort = startRpcServer(true);
+
+    XDR xdrOut = createGetportMount();
+    int headerSize = xdrOut.size();
+    int bufsize = 2 * 1024 * 1024;
+    byte[] buffer = new byte[bufsize];
+    xdrOut.writeFixedOpaque(buffer);
+    int requestSize = xdrOut.size() - headerSize;
+
+    // Send the request to the server
+    testRequest(xdrOut, serverPort);
+
+    // Verify the server got the request with right size
+    assertEquals(requestSize, resultSize);
+  }
+  
+  @Test
+  public void testUnprivilegedPort() {
+    // Don't allow connections from unprivileged ports. Since this test is
+    // presumably not run by root, the client will connect from one.
+    int serverPort = startRpcServer(false);
+
+    XDR xdrOut = createGetportMount();
+    int bufsize = 2 * 1024 * 1024;
+    byte[] buffer = new byte[bufsize];
+    xdrOut.writeFixedOpaque(buffer);
+
+    // Send the request to the server
+    testRequest(xdrOut, serverPort);
 
+    // Verify the server rejected the request.
+    assertEquals(0, resultSize);
+    
+    // Ensure that the NULL procedure does in fact succeed.
+    xdrOut = new XDR();
+    createPortmapXDRheader(xdrOut, 0);
+    int headerSize = xdrOut.size();
+    buffer = new byte[bufsize];
+    xdrOut.writeFixedOpaque(buffer);
+    int requestSize = xdrOut.size() - headerSize;
+    
+    // Send the request to the server
+    testRequest(xdrOut, serverPort);
+
+    // Verify the server did not reject the request.
+    assertEquals(requestSize, resultSize);
+  }
+  
+  private static int startRpcServer(boolean allowInsecurePorts) {
     Random rand = new Random();
     int serverPort = 30000 + rand.nextInt(10000);
     int retries = 10;    // A few retries in case initial choice is in use.
@@ -156,7 +226,7 @@ public class TestFrameDecoder {
     while (true) {
       try {
         RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
-            "localhost", serverPort, 100000, 1, 2);
+            "localhost", serverPort, 100000, 1, 2, allowInsecurePorts);
         SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
         tcpServer.run();
         break;          // Successfully bound a port, break out.
@@ -168,19 +238,7 @@ public class TestFrameDecoder {
         }
       }
     }
-
-    XDR xdrOut = createGetportMount();
-    int headerSize = xdrOut.size();
-    int bufsize = 2 * 1024 * 1024;
-    byte[] buffer = new byte[bufsize];
-    xdrOut.writeFixedOpaque(buffer);
-    int requestSize = xdrOut.size() - headerSize;
-
-    // Send the request to the server
-    testRequest(xdrOut, serverPort);
-
-    // Verify the server got the request with right size
-    assertEquals(requestSize, resultSize);
+    return serverPort;
   }
 
   static void createPortmapXDRheader(XDR xdr_out, int procedure) {

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java Tue Aug 19 23:49:39 2014
@@ -25,7 +25,7 @@ import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.util.Map;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.hadoop.oncrpc.RpcCall;
 import org.apache.hadoop.oncrpc.XDR;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-common-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-common-project/pom.xml?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-common-project/pom.xml (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-common-project/pom.xml Tue Aug 19 23:49:39 2014
@@ -37,6 +37,7 @@
     <module>hadoop-annotations</module>
     <module>hadoop-nfs</module>
     <module>hadoop-minikdc</module>
+    <module>hadoop-kms</module>
   </modules>
 
   <build>