You are viewing a plain text version of this content; the hyperlink to the canonical version was lost in conversion. The canonical copy is available in the original HTML rendering of this mailing-list archive.
Posted to common-commits@hadoop.apache.org by to...@apache.org on 2019/03/26 23:34:33 UTC
[hadoop] branch trunk updated: HDFS-14348: Fix JNI exception
handling issues in libhdfs
This is an automated email from the ASF dual-hosted git repository.
todd pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new fe29b39 HDFS-14348: Fix JNI exception handling issues in libhdfs
fe29b39 is described below
commit fe29b3901be1b06db92379c7b7fac4954253e6e2
Author: Sahil Takiar <st...@cloudera.com>
AuthorDate: Thu Mar 21 20:53:01 2019 -0700
HDFS-14348: Fix JNI exception handling issues in libhdfs
This closes #600
Signed-off-by: Todd Lipcon <to...@apache.org>
---
.../src/main/native/libhdfs/hdfs.c | 55 +++++++++----
.../src/main/native/libhdfs/jni_helper.c | 8 +-
.../native/libhdfs/os/posix/thread_local_storage.c | 94 ++++++++++++++--------
3 files changed, 108 insertions(+), 49 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index 41caffd..ec0ad4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -2491,6 +2491,8 @@ int hadoopRzOptionsSetByteBufferPool(
JNIEnv *env;
jthrowable jthr;
jobject byteBufferPool = NULL;
+ jobject globalByteBufferPool = NULL;
+ int ret;
env = getJNIEnv();
if (!env) {
@@ -2507,15 +2509,37 @@ int hadoopRzOptionsSetByteBufferPool(
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hadoopRzOptionsSetByteBufferPool(className=%s): ", className);
- errno = EINVAL;
- return -1;
+ ret = EINVAL;
+ goto done;
}
- }
- if (opts->byteBufferPool) {
- // Delete any previous ByteBufferPool we had.
+ // Only set opts->byteBufferPool if creating a global reference is
+ // successful
+ globalByteBufferPool = (*env)->NewGlobalRef(env, byteBufferPool);
+ if (!globalByteBufferPool) {
+ printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ "hadoopRzOptionsSetByteBufferPool(className=%s): ",
+ className);
+ ret = EINVAL;
+ goto done;
+ }
+ // Delete any previous ByteBufferPool we had before setting a new one.
+ if (opts->byteBufferPool) {
+ (*env)->DeleteGlobalRef(env, opts->byteBufferPool);
+ }
+ opts->byteBufferPool = globalByteBufferPool;
+ } else if (opts->byteBufferPool) {
+ // If the specified className is NULL, delete any previous
+ // ByteBufferPool we had.
(*env)->DeleteGlobalRef(env, opts->byteBufferPool);
+ opts->byteBufferPool = NULL;
+ }
+ ret = 0;
+done:
+ destroyLocalReference(env, byteBufferPool);
+ if (ret) {
+ errno = ret;
+ return -1;
}
- opts->byteBufferPool = (*env)->NewGlobalRef(env, byteBufferPool);
return 0;
}
@@ -2570,8 +2594,7 @@ static jthrowable hadoopRzOptionsGetEnumSet(JNIEnv *env,
} else {
jclass clazz = (*env)->FindClass(env, READ_OPTION);
if (!clazz) {
- jthr = newRuntimeError(env, "failed "
- "to find class for %s", READ_OPTION);
+ jthr = getPendingExceptionAndClear(env);
goto done;
}
jthr = invokeMethod(env, &jVal, STATIC, NULL,
@@ -2697,6 +2720,7 @@ static int translateZCRException(JNIEnv *env, jthrowable exc)
}
if (!strcmp(className, "java.lang.UnsupportedOperationException")) {
ret = EPROTONOSUPPORT;
+ destroyLocalReference(env, exc);
goto done;
}
ret = printExceptionAndFree(env, exc, PRINT_EXC_ALL,
@@ -2896,8 +2920,9 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
for (i = 0; i < jNumFileBlocks; ++i) {
jFileBlock =
(*env)->GetObjectArrayElement(env, jBlockLocations, i);
- if (!jFileBlock) {
- ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr || !jFileBlock) {
+ ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
"GetObjectArrayElement(%d)", path, start, length, i);
goto done;
@@ -2930,8 +2955,9 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
//Now parse each hostname
for (j = 0; j < jNumBlockHosts; ++j) {
jHost = (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
- if (!jHost) {
- ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr || !jHost) {
+ ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"): "
"NewByteArray", path, start, length);
goto done;
@@ -3419,8 +3445,9 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char *path, int *numEntries)
//Save path information in pathList
for (i=0; i < jPathListSize; ++i) {
tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
- if (!tmpStat) {
- ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr || !tmpStat) {
+ ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsListDirectory(%s): GetObjectArrayElement(%d out of %d)",
path, i, jPathListSize);
goto done;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
index c45d598..1d4b405 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
@@ -328,6 +328,11 @@ jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
goto done;
}
str = (*env)->CallObjectMethod(env, cls, mid);
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr) {
+ (*env)->ExceptionClear(env);
+ goto done;
+ }
if (str == NULL) {
jthr = getPendingExceptionAndClear(env);
goto done;
@@ -644,8 +649,7 @@ jthrowable fetchEnumInstance(JNIEnv *env, const char *className,
clazz = (*env)->FindClass(env, className);
if (!clazz) {
- return newRuntimeError(env, "fetchEnum(%s, %s): failed to find class.",
- className, valueName);
+ return getPendingExceptionAndClear(env);
}
if (snprintf(prettyClass, sizeof(prettyClass), "L%s;", className)
>= sizeof(prettyClass)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
index 22e2fcf..39f76dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
@@ -23,6 +23,9 @@
#include <pthread.h>
#include <stdio.h>
+#include "exception.h"
+#include "jni_helper.h"
+
#define UNKNOWN "UNKNOWN"
#define MAXTHRID 256
@@ -46,6 +49,7 @@ void hdfsThreadDestructor(void *v)
struct ThreadLocalState *state = (struct ThreadLocalState*)v;
JNIEnv *env = state->env;;
jint ret;
+ jthrowable jthr;
char thr_name[MAXTHRID];
/* Detach the current thread from the JVM */
@@ -55,12 +59,20 @@ void hdfsThreadDestructor(void *v)
if (ret != 0) {
fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
ret);
- (*env)->ExceptionDescribe(env);
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr) {
+ (*env)->ExceptionDescribe(env);
+ (*env)->ExceptionClear(env);
+ }
} else {
ret = (*vm)->DetachCurrentThread(vm);
if (ret != JNI_OK) {
- (*env)->ExceptionDescribe(env);
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr) {
+ (*env)->ExceptionDescribe(env);
+ (*env)->ExceptionClear(env);
+ }
get_current_thread_id(env, thr_name, MAXTHRID);
fprintf(stderr, "hdfsThreadDestructor: Unable to detach thread %s "
@@ -78,44 +90,60 @@ void hdfsThreadDestructor(void *v)
}
static void get_current_thread_id(JNIEnv* env, char* id, int max) {
- jclass cls;
- jmethodID mth;
- jobject thr;
- jstring thr_name;
+ jvalue jVal;
+ jobject thr = NULL;
+ jstring thr_name = NULL;
jlong thr_id = 0;
+ jthrowable jthr = NULL;
const char *thr_name_str;
- cls = (*env)->FindClass(env, "java/lang/Thread");
- mth = (*env)->GetStaticMethodID(env, cls, "currentThread",
- "()Ljava/lang/Thread;");
- thr = (*env)->CallStaticObjectMethod(env, cls, mth);
-
- if (thr != NULL) {
- mth = (*env)->GetMethodID(env, cls, "getId", "()J");
- thr_id = (*env)->CallLongMethod(env, thr, mth);
- (*env)->ExceptionDescribe(env);
-
- mth = (*env)->GetMethodID(env, cls, "toString", "()Ljava/lang/String;");
- thr_name = (jstring)(*env)->CallObjectMethod(env, thr, mth);
+ jthr = invokeMethod(env, &jVal, STATIC, NULL, "java/lang/Thread",
+ "currentThread", "()Ljava/lang/Thread;");
+ if (jthr) {
+ snprintf(id, max, "%s", UNKNOWN);
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "get_current_thread_id: Thread#currentThread failed: ");
+ goto done;
+ }
+ thr = jVal.l;
- if (thr_name != NULL) {
- thr_name_str = (*env)->GetStringUTFChars(env, thr_name, NULL);
+ jthr = invokeMethod(env, &jVal, INSTANCE, thr, "java/lang/Thread",
+ "getId", "()J");
+ if (jthr) {
+ snprintf(id, max, "%s", UNKNOWN);
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "get_current_thread_id: Thread#getId failed: ");
+ goto done;
+ }
+ thr_id = jVal.j;
+
+ jthr = invokeMethod(env, &jVal, INSTANCE, thr, "java/lang/Thread",
+ "toString", "()Ljava/lang/String;");
+ if (jthr) {
+ snprintf(id, max, "%s:%ld", UNKNOWN, thr_id);
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "get_current_thread_id: Thread#toString failed: ");
+ goto done;
+ }
+ thr_name = jVal.l;
+
+ thr_name_str = (*env)->GetStringUTFChars(env, thr_name, NULL);
+ if (!thr_name_str) {
+ printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ "get_current_thread_id: GetStringUTFChars failed: ");
+ snprintf(id, max, "%s:%ld", UNKNOWN, thr_id);
+ goto done;
+ }
- // Treating the jlong as a long *should* be safe
- snprintf(id, max, "%s:%ld", thr_name_str, thr_id);
+ // Treating the jlong as a long *should* be safe
+ snprintf(id, max, "%s:%ld", thr_name_str, thr_id);
- // Release the char*
- (*env)->ReleaseStringUTFChars(env, thr_name, thr_name_str);
- } else {
- (*env)->ExceptionDescribe(env);
+ // Release the char*
+ (*env)->ReleaseStringUTFChars(env, thr_name, thr_name_str);
- // Treating the jlong as a long *should* be safe
- snprintf(id, max, "%s:%ld", UNKNOWN, thr_id);
- }
- } else {
- (*env)->ExceptionDescribe(env);
- snprintf(id, max, "%s", UNKNOWN);
- }
+done:
+ destroyLocalReference(env, thr);
+ destroyLocalReference(env, thr_name);
// Make sure the id is null terminated in case we overflow the max length
id[max - 1] = '\0';
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org