Posted to common-commits@hadoop.apache.org by ki...@apache.org on 2014/04/21 20:28:54 UTC

svn commit: r1588950 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: ./ src/main/native/src/org/apache/hadoop/security/

Author: kihwal
Date: Mon Apr 21 18:28:53 2014
New Revision: 1588950

URL: http://svn.apache.org/r1588950
Log:
svn merge -c 1588949 merging from trunk to branch-2 to fix: HADOOP-10522. JniBasedUnixGroupMapping mishandles errors. Contributed by Kihwal Lee.

Modified:
    hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
    hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_group_info.c
    hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_user_info.c
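
Background for the log message above and the diffs below: the re-entrant POSIX lookup
functions getpwnam_r() and getgrgid_r() report errors through their return value rather
than through the global errno, and a return of ERANGE means only that the caller-supplied
buffer was too small. The old code consulted errno and retried without bound; the patch
switches to checking the return value and caps the retries. The standalone sketch of that
calling convention below is illustrative only and is not part of this commit; the user name
"nobody" and the initial buffer size are arbitrary.

#include <errno.h>
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
  struct passwd pwd, *result = NULL;
  size_t buf_sz = 64;                  /* deliberately small to exercise ERANGE */
  char *buf = malloc(buf_sz);
  int ret;

  if (!buf)
    return 1;
  for (;;) {
    /* The error, if any, is the return value; errno is not reliably set. */
    ret = getpwnam_r("nobody", &pwd, buf, buf_sz, &result);
    if (ret == 0)
      break;                           /* success, or no such user (result == NULL) */
    if (ret == ERANGE) {               /* buffer too small: grow and retry */
      char *nbuf = realloc(buf, buf_sz * 2);
      if (!nbuf) { free(buf); return 1; }
      buf = nbuf;
      buf_sz *= 2;
      continue;
    }
    if (ret == EINTR)                  /* interrupted by a signal: retry */
      continue;
    fprintf(stderr, "lookup failed: %s\n", strerror(ret));
    free(buf);
    return 1;
  }
  if (result)
    printf("uid=%u gid=%u\n", (unsigned)result->pw_uid, (unsigned)result->pw_gid);
  else
    printf("no such user\n");
  free(buf);
  return 0;
}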

Modified: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1588950&r1=1588949&r2=1588950&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt Mon Apr 21 18:28:53 2014
@@ -100,6 +100,8 @@ Release 2.4.1 - UNRELEASED
     HADOOP-10490. TestMapFile and TestBloomMapFile leak file descriptors.
     (cnauroth)
 
+    HADOOP-10522. JniBasedUnixGroupMapping mishandles errors. (kihwal)
+
 Release 2.4.0 - 2014-04-07 
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c?rev=1588950&r1=1588949&r2=1588950&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c (original)
+++ hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c Mon Apr 21 18:28:53 2014
@@ -120,10 +120,18 @@ Java_org_apache_hadoop_security_JniBased
     goto done;
   }
   ret = hadoop_user_info_fetch(uinfo, username);
-  if (ret == ENOENT) {
-    jgroups = (*env)->NewObjectArray(env, 0, g_string_clazz, NULL);
+  if (ret) {
+    if (ret == ENOENT) {
+      jgroups = (*env)->NewObjectArray(env, 0, g_string_clazz, NULL);
+    } else { // handle other errors
+      char buf[128];
+      snprintf(buf, sizeof(buf), "getgrouplist: error looking up user. %d (%s)",
+               ret, terror(ret));
+      THROW(env, "java/lang/RuntimeException", buf);
+    }
     goto done;
   }
+
   ginfo = hadoop_group_info_alloc();
   if (!ginfo) {
     THROW(env, "java/lang/OutOfMemoryError", NULL);
@@ -135,7 +143,7 @@ Java_org_apache_hadoop_security_JniBased
       THROW(env, "java/lang/OutOfMemoryError", NULL);
     } else {
       char buf[128];
-      snprintf(buf, sizeof(buf), "getgrouplist error %d (%s)",
+      snprintf(buf, sizeof(buf), "getgrouplist: error looking up groups. %d (%s)",
                ret, terror(ret));
       THROW(env, "java/lang/RuntimeException", buf);
     }

Modified: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_group_info.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_group_info.c?rev=1588950&r1=1588949&r2=1588950&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_group_info.c (original)
+++ hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_group_info.c Mon Apr 21 18:28:53 2014
@@ -27,6 +27,8 @@
 #include <string.h>
 #include <unistd.h>
 
+#define MAX_GROUP_LOOKUP_TRIES 5
+
 struct hadoop_group_info *hadoop_group_info_alloc(void)
 {
   struct hadoop_group_info *ginfo;
@@ -84,31 +86,48 @@ static int getgrgid_error_translate(int 
 int hadoop_group_info_fetch(struct hadoop_group_info *ginfo, gid_t gid)
 {
   struct group *group;
-  int err;
+  int ret, i; 
   size_t buf_sz;
   char *nbuf;
 
   hadoop_group_info_clear(ginfo);
-  for (;;) {
-    do {
-      group = NULL;
-      err = getgrgid_r(gid, &ginfo->group, ginfo->buf,
-                         ginfo->buf_sz, &group);
-    } while ((!group) && (err == EINTR));
-    if (group) {
-      return 0;
-    }
-    if (err != ERANGE) {
-      return getgrgid_error_translate(errno);
+  for (i = 0, ret = 0; i < MAX_GROUP_LOOKUP_TRIES; i++) {
+    // If the previous call returned ERANGE, increase the buffer size
+    if (ret == ERANGE) {
+      buf_sz = ginfo->buf_sz * 2;
+      nbuf = realloc(ginfo->buf, buf_sz);
+      if (!nbuf) {
+        return ENOMEM;
+      }
+      ginfo->buf = nbuf;
+      ginfo->buf_sz = buf_sz;
     }
-    buf_sz = ginfo->buf_sz * 2;
-    nbuf = realloc(ginfo->buf, buf_sz);
-    if (!nbuf) {
-      return ENOMEM;
+
+    // The following call returns the error number directly. Reading the
+    // global errno without locking is not thread-safe.
+    group = NULL;
+    ret = getgrgid_r(gid, &ginfo->group, ginfo->buf,
+                       ginfo->buf_sz, &group);
+    switch(ret) {
+      case 0:
+        if (!group) {
+          // The underlying library likely has a bug.
+          return EIO;
+        }
+        return 0;
+      case EINTR:
+      case ERANGE:
+        // Retry on these errors.
+        // EINTR: a signal was handled and this thread was allowed to continue.
+        // ERANGE: the buffer was not big enough.
+        break;
+      default:
+        // Lookup failed.
+        return getgrgid_error_translate(ret);
     }
-    ginfo->buf = nbuf;
-    ginfo->buf_sz = buf_sz;
   }
+  // Did not succeed after the retries. Return the last error.
+  return getgrgid_error_translate(ret);
 }
 
 #ifdef GROUP_TESTING
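
The hunk above replaces the unbounded grow-and-retry loop with one capped at
MAX_GROUP_LOOKUP_TRIES attempts, and branches on the value returned by getgrgid_r()
instead of the global errno. Below is a minimal standalone sketch of the same bounded-retry
pattern, outside the hadoop_group_info structure, for readers who want to try it in
isolation; the gid of 0 and the cap of 5 attempts are arbitrary choices that mirror the
patch, and the sketch is not part of this commit.

#include <errno.h>
#include <grp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TRIES 5                 /* mirrors MAX_GROUP_LOOKUP_TRIES above */

/* Look up a group by gid with a bounded number of retries.
 * Returns 0 on success, ENOENT if the group does not exist,
 * or the last error number otherwise. */
static int lookup_group(gid_t gid)
{
  struct group grp, *result;
  size_t buf_sz = 64;               /* small on purpose to exercise ERANGE */
  char *buf = malloc(buf_sz);
  int ret = 0, i;

  if (!buf)
    return ENOMEM;
  for (i = 0; i < MAX_TRIES; i++) {
    if (ret == ERANGE) {            /* previous attempt: buffer too small */
      char *nbuf = realloc(buf, buf_sz * 2);
      if (!nbuf) { free(buf); return ENOMEM; }
      buf = nbuf;
      buf_sz *= 2;
    }
    result = NULL;
    ret = getgrgid_r(gid, &grp, buf, buf_sz, &result);  /* error is the return value */
    if (ret == 0) {
      int out = result ? 0 : ENOENT;    /* 0 with a NULL result: no such group */
      if (result)
        printf("gid %u -> %s\n", (unsigned)gid, grp.gr_name);
      free(buf);
      return out;
    }
    if (ret != EINTR && ret != ERANGE) {  /* not retryable: give up */
      free(buf);
      return ret;
    }
  }
  free(buf);
  return ret;                       /* still failing after MAX_TRIES attempts */
}

int main(void)
{
  int ret = lookup_group(0);
  if (ret)
    fprintf(stderr, "group lookup failed: %s\n", strerror(ret));
  return ret ? 1 : 0;
}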

Modified: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_user_info.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_user_info.c?rev=1588950&r1=1588949&r2=1588950&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_user_info.c (original)
+++ hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/hadoop_user_info.c Mon Apr 21 18:28:53 2014
@@ -28,6 +28,7 @@
 #include <unistd.h>
 
 #define INITIAL_GIDS_SIZE 32
+#define MAX_USER_LOOKUP_TRIES 5
 
 struct hadoop_user_info *hadoop_user_info_alloc(void)
 {
@@ -95,31 +96,48 @@ int hadoop_user_info_fetch(struct hadoop
                            const char *username)
 {
   struct passwd *pwd;
-  int err;
+  int ret, i;
   size_t buf_sz;
   char *nbuf;
 
   hadoop_user_info_clear(uinfo);
-  for (;;) {
-    do {
-      pwd = NULL;
-      err = getpwnam_r(username, &uinfo->pwd, uinfo->buf,
-                         uinfo->buf_sz, &pwd);
-    } while ((!pwd) && (errno == EINTR));
-    if (pwd) {
-      return 0;
-    }
-    if (err != ERANGE) {
-      return getpwnam_error_translate(errno);
+  for (i = 0, ret = 0; i < MAX_USER_LOOKUP_TRIES; i++) {
+    // If the previous call returned ERANGE, increase the buffer size
+    if (ret == ERANGE) {
+      buf_sz = uinfo->buf_sz * 2;
+      nbuf = realloc(uinfo->buf, buf_sz);
+      if (!nbuf) {
+        return ENOMEM;
+      }
+      uinfo->buf = nbuf;
+      uinfo->buf_sz = buf_sz;
     }
-    buf_sz = uinfo->buf_sz * 2;
-    nbuf = realloc(uinfo->buf, buf_sz);
-    if (!nbuf) {
-      return ENOMEM;
+
+    // The following call returns the error number directly. Reading the
+    // global errno without locking is not thread-safe.
+    pwd = NULL;
+    ret = getpwnam_r(username, &uinfo->pwd, uinfo->buf,
+                         uinfo->buf_sz, &pwd);
+    switch(ret) {
+      case 0:
+        if (!pwd) {
+          // The underlying library likely has a bug.
+          return EIO;
+        }
+        return 0;
+      case EINTR:
+      case ERANGE:
+        // Retry on these errors.
+        // EINTR: a signal was handled and this thread was allowed to continue.
+        // ERANGE: the buffer was not big enough.
+        break;
+      default:
+        // Lookup failed.
+        return getpwnam_error_translate(ret);
     }
-    uinfo->buf = nbuf;
-    uinfo->buf_sz = buf_sz;
   }
+  // Did not succeed after the retries. Return the last error.
+  return getpwnam_error_translate(ret);
 }
 
 static int put_primary_gid_first(struct hadoop_user_info *uinfo)