Posted to common-commits@hadoop.apache.org by om...@apache.org on 2008/08/18 23:47:45 UTC
svn commit: r686873 - in /hadoop/core/trunk: CHANGES.txt src/c++/libhdfs/hdfs.c src/c++/libhdfs/hdfsJniHelper.c src/c++/libhdfs/hdfsJniHelper.h
Author: omalley
Date: Mon Aug 18 14:47:44 2008
New Revision: 686873
URL: http://svn.apache.org/viewvc?rev=686873&view=rev
Log:
HADOOP-3549. Give more meaningful errno's in libhdfs. In particular,
EACCES is returned for permission problems. (Ben Slusky via omalley)
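With this change a libhdfs caller can tell permission failures apart from generic internal errors by inspecting errno. A minimal sketch of the caller-visible effect (the connection parameters and the path are illustrative; only the errno check is the point):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include "hdfs.h"

    int main(void) {
        hdfsFS fs = hdfsConnect("default", 0);   /* use the configured default FS */
        if (fs == NULL)
            return 1;

        /* Open a file the user may lack permission to read. */
        hdfsFile f = hdfsOpenFile(fs, "/protected/data.txt", O_RDONLY, 0, 0, 0);
        if (f == NULL) {
            if (errno == EACCES)
                fprintf(stderr, "permission denied\n"); /* now distinguishable */
            else
                fprintf(stderr, "open failed, errno=%d\n", errno);
        } else {
            hdfsCloseFile(fs, f);
        }
        hdfsDisconnect(fs);
        return 0;
    }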
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/c++/libhdfs/hdfs.c
hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.c
hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.h
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=686873&r1=686872&r2=686873&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Aug 18 14:47:44 2008
@@ -40,6 +40,9 @@
HADOOP-3664. Remove the deprecated method InputFormat.validateInput,
which is no longer needed. (tomwhite via omalley)
+ HADOOP-3549. Give more meaningful errno's in libhdfs. In particular,
+ EACCES is returned for permission problems. (Ben Slusky via omalley)
+
NEW FEATURES
HADOOP-3341. Allow streaming jobs to specify the field separator for map
Modified: hadoop/core/trunk/src/c++/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/hdfs.c?rev=686873&r1=686872&r2=686873&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/hdfs.c (original)
+++ hadoop/core/trunk/src/c++/libhdfs/hdfs.c Mon Aug 18 14:47:44 2008
@@ -80,7 +80,7 @@
//Construct the org.apache.hadoop.fs.Path object
jobject jPath =
- constructNewObjectOfClass(env, "org/apache/hadoop/fs/Path",
+ constructNewObjectOfClass(env, NULL, "org/apache/hadoop/fs/Path",
"(Ljava/lang/String;)V", jPathString);
if (jPath == NULL) {
fprintf(stderr, "Can't construct instance of class "
@@ -96,9 +96,53 @@
}
+/**
+ * Helper function to translate an exception into a meaningful errno value.
+ * @param exc: The exception.
+ * @param env: The JNIEnv Pointer.
+ * @param method: The name of the method that threw the exception. This
+ * may be a format string to be used in conjunction with additional arguments.
+ * @return Returns a meaningful errno value if possible, or EINTERNAL if not.
+ */
+static int errnoFromException(jthrowable exc, JNIEnv *env,
+ const char *method, ...)
+{
+ va_list ap;
+ int errnum = 0;
+ char *excClass = NULL;
+
+ if (exc == NULL)
+ goto default_error;
+
+ excClass = classNameOfObject((jobject) exc, env);
+ if (!strcmp(excClass, "org.apache.hadoop.fs.permission."
+ "AccessControlException")) {
+ errnum = EACCES;
+ goto done;
+ }
+
+ //TODO: interpret more exceptions; maybe examine exc.getMessage()
+
+default_error:
+ //Can't tell what went wrong, so just punt
+ (*env)->ExceptionDescribe(env);
+ fprintf(stderr, "Call to ");
+ va_start(ap, method);
+ vfprintf(stderr, method, ap);
+ va_end(ap);
+ fprintf(stderr, " failed!\n");
+ errnum = EINTERNAL;
+done:
+ (*env)->ExceptionClear(env);
+
+ if (excClass != NULL)
+ free(excClass);
+
+ return errnum;
+}
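The TODO above marks where further exception-to-errno mappings would go. A hypothetical extension following the same pattern (the FileNotFoundException mapping below is an illustration, not part of this commit):

    if (!strcmp(excClass, "java.io.FileNotFoundException")) {
        errnum = ENOENT;   /* assumed mapping; not in this change */
        goto done;
    }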
hdfsFS hdfsConnect(const char* host, tPort port)
{
@@ -112,6 +156,7 @@
jobject jURI = NULL;
jstring jURIString = NULL;
jvalue jVal;
+ jthrowable jExc = NULL;
char *cURI = 0;
jobject gFsRef = NULL;
@@ -121,7 +166,7 @@
//Create the org.apache.hadoop.conf.Configuration object
jConfiguration =
- constructNewObjectOfClass(env, HADOOP_CONF, "()V");
+ constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
if (jConfiguration == NULL) {
fprintf(stderr, "Can't construct instance of class "
@@ -133,27 +178,25 @@
//Check what type of FileSystem the caller wants...
if (host == NULL) {
// fs = FileSystem::getLocal(conf);
- if (invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "getLocal",
+ if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "getLocal",
JMETHOD1(JPARAM(HADOOP_CONF),
JPARAM(HADOOP_LOCALFS)),
jConfiguration) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::getLocal failed!\n");
- errno = EINTERNAL;
- goto done;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::getLocal");
+ goto done;
}
jFS = jVal.l;
}
else if (!strcmp(host, "default") && port == 0) {
//fs = FileSystem::get(conf);
- if (invokeMethod(env, &jVal, STATIC, NULL,
+ if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
HADOOP_FS, "get",
JMETHOD1(JPARAM(HADOOP_CONF),
JPARAM(HADOOP_FS)),
jConfiguration) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::get failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::get");
goto done;
}
jFS = jVal.l;
@@ -164,23 +207,21 @@
sprintf(cURI, "hdfs://%s:%d", host, (int)(port));
jURIString = (*env)->NewStringUTF(env, cURI);
- if (invokeMethod(env, &jVal, STATIC, NULL, JAVA_NET_URI,
+ if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, JAVA_NET_URI,
"create", "(Ljava/lang/String;)Ljava/net/URI;",
jURIString) != 0) {
- fprintf(stderr, "Call to java.net.URI::create failed!\n");
- errno = EINTERNAL;
- goto done;
+ errno = errnoFromException(jExc, env, "java.net.URI::create");
+ goto done;
}
jURI = jVal.l;
- if (invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "get",
+ if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
JMETHOD2(JPARAM(JAVA_NET_URI),
JPARAM(HADOOP_CONF), JPARAM(HADOOP_FS)),
jURI, jConfiguration) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::get(URI, Configuration) failed!\n");
- errno = EINTERNAL;
- goto done;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::get(URI, Configuration)");
+ goto done;
}
jFS = jVal.l;
@@ -217,16 +258,18 @@
//Parameters
jobject jFS = (jobject)fs;
+ //Caught exception
+ jthrowable jExc = NULL;
+
//Sanity check
if (fs == NULL) {
errno = EBADF;
return -1;
}
- if (invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
+ if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
"close", "()V") != 0) {
- fprintf(stderr, "Call to FileSystem::close failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "FileSystem::close");
return -1;
}
@@ -278,11 +321,11 @@
/* Get the Configuration object from the FileSystem object */
jvalue jVal;
jobject jConfiguration = NULL;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
"getConf", JMETHOD1("", JPARAM(HADOOP_CONF))) != 0) {
- fprintf(stderr, "Failed to get configuration object from "
- "filesystem\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "get configuration object "
+ "from filesystem");
destroyLocalReference(env, jPath);
return NULL;
}
@@ -298,12 +341,11 @@
//bufferSize
if (!bufferSize) {
- if (invokeMethod(env, &jVal, INSTANCE, jConfiguration,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
jStrBufferSize, 4096) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.conf."
- "Configuration::getInt failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+ "Configuration::getInt");
goto done;
}
jBufferSize = jVal.i;
@@ -313,12 +355,11 @@
//replication
if (!replication) {
- if (invokeMethod(env, &jVal, INSTANCE, jConfiguration,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
jStrReplication, 1) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.conf."
- "Configuration::getInt failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+ "Configuration::getInt");
goto done;
}
jReplication = jVal.i;
@@ -326,12 +367,12 @@
//blockSize
if (!blockSize) {
- if (invokeMethod(env, &jVal, INSTANCE, jConfiguration,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
jStrBlockSize, 67108864)) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::%s(%s) failed!\n", method, signature);
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+ "Configuration::getLong");
goto done;
}
jBlockSize = jVal.j;
@@ -342,22 +383,22 @@
FSDataOutputStream references jobject jStream */
if ((flags & O_WRONLY) == 0) {
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
method, signature, jPath, jBufferSize)) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::%s(%s) failed!\n", method, signature);
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::%s(%s)", method,
+ signature);
goto done;
}
}
else {
jboolean jOverWrite = 1;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
method, signature, jPath, jOverWrite,
jBufferSize, jReplication, jBlockSize)) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::%s(%s) failed!\n", method, signature);
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::%s(%s)", method,
+ signature);
goto done;
}
}
@@ -397,6 +438,9 @@
//Parameters
jobject jStream = (jobject)(file ? file->file : NULL);
+ //Caught exception
+ jthrowable jExc = NULL;
+
//Sanity check
if (!file || file->type == UNINITIALIZED) {
errno = EBADF;
@@ -407,10 +451,9 @@
const char* interface = (file->type == INPUT) ?
HADOOP_ISTRM : HADOOP_OSTRM;
- if (invokeMethod(env, NULL, INSTANCE, jStream, interface,
+ if (invokeMethod(env, NULL, &jExc, INSTANCE, jStream, interface,
"close", "()V") != 0) {
- fprintf(stderr, "Call to %s::close failed!\n", interface);
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "%s::close", interface);
return -1;
}
@@ -428,18 +471,18 @@
JNIEnv *env = getJNIEnv();
jobject jPath = constructNewObjectOfPath(env, path);
jvalue jVal;
+ jthrowable jExc = NULL;
jobject jFS = (jobject)fs;
if (jPath == NULL) {
return -1;
}
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
"exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
jPath) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::exists failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::exists");
return -1;
}
@@ -463,6 +506,7 @@
jbyteArray jbRarray;
jint noReadBytes = 0;
jvalue jVal;
+ jthrowable jExc = NULL;
//Sanity check
if (!f || f->type == UNINITIALIZED) {
@@ -479,11 +523,10 @@
//Read the requisite bytes
jbRarray = (*env)->NewByteArray(env, length);
- if (invokeMethod(env, &jVal, INSTANCE, jInputStream, HADOOP_ISTRM,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
"read", "([B)I", jbRarray) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FSDataInputStream::read failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FSDataInputStream::read");
noReadBytes = -1;
}
else {
@@ -520,6 +563,7 @@
jbyteArray jbRarray;
jint noReadBytes = 0;
jvalue jVal;
+ jthrowable jExc = NULL;
//Sanity check
if (!f || f->type == UNINITIALIZED) {
@@ -536,11 +580,10 @@
//Read the requisite bytes
jbRarray = (*env)->NewByteArray(env, length);
- if (invokeMethod(env, &jVal, INSTANCE, jInputStream, HADOOP_ISTRM,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
"read", "(J[BII)I", position, jbRarray, 0, length) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FSDataInputStream::read failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FSDataInputStream::read");
noReadBytes = -1;
}
else {
@@ -573,6 +616,9 @@
jobject jOutputStream = (jobject)(f ? f->file : 0);
jbyteArray jbWarray;
+ //Caught exception
+ jthrowable jExc = NULL;
+
//Sanity check
if (!f || f->type == UNINITIALIZED) {
errno = EBADF;
@@ -596,12 +642,11 @@
//Write the requisite bytes into the file
jbWarray = (*env)->NewByteArray(env, length);
(*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
- if (invokeMethod(env, NULL, INSTANCE, jOutputStream,
+ if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
HADOOP_OSTRM, "write",
"([B)V", jbWarray) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FSDataOutputStream::write failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FSDataOutputStream::write");
length = -1;
}
destroyLocalReference(env, jbWarray);
@@ -625,17 +670,19 @@
//Parameters
jobject jInputStream = (jobject)(f ? f->file : 0);
+ //Caught exception
+ jthrowable jExc = NULL;
+
//Sanity check
if (!f || f->type != INPUT) {
errno = EBADF;
return -1;
}
- if (invokeMethod(env, NULL, INSTANCE, jInputStream, HADOOP_ISTRM,
+ if (invokeMethod(env, NULL, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
"seek", "(J)V", desiredPos) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FSDataInputStream::seek failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FSDataInputStream::seek");
return -1;
}
@@ -666,11 +713,11 @@
jlong currentPos = -1;
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jStream,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStream,
interface, "getPos", "()J") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FSDataInputStream::getPos failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "%s::getPos", interface);
return -1;
}
currentPos = jVal.j;
@@ -691,17 +738,19 @@
//Parameters
jobject jOutputStream = (jobject)(f ? f->file : 0);
+ //Caught exception
+ jthrowable jExc = NULL;
+
//Sanity check
if (!f || f->type != OUTPUT) {
errno = EBADF;
return -1;
}
- if (invokeMethod(env, NULL, INSTANCE, jOutputStream,
+ if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
HADOOP_OSTRM, "flush", "()V") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FSDataInputStream::flush failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FSDataOutputStream::flush");
return -1;
}
@@ -721,6 +770,9 @@
//Parameters
jobject jInputStream = (jobject)(f ? f->file : 0);
+ //Caught exception
+ jthrowable jExc = NULL;
+
//Sanity check
if (!f || f->type != INPUT) {
errno = EBADF;
@@ -729,11 +781,10 @@
jint available = -1;
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jInputStream,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream,
HADOOP_ISTRM, "available", "()I") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FSDataInputStream::available failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FSDataInputStream::available");
return -1;
}
available = jVal.i;
@@ -773,7 +824,7 @@
//Create the org.apache.hadoop.conf.Configuration object
jobject jConfiguration =
- constructNewObjectOfClass(env, HADOOP_CONF, "()V");
+ constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
if (jConfiguration == NULL) {
fprintf(stderr, "Can't construct instance of class "
"org.apache.hadoop.conf.Configuration\n");
@@ -786,14 +837,14 @@
//FileUtil::copy
jboolean deleteSource = 0; //Only copy
jvalue jVal;
- if (invokeMethod(env, &jVal, STATIC,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, STATIC,
NULL, "org/apache/hadoop/fs/FileUtil", "copy",
"(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
jConfiguration) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileUtil::copy failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileUtil::copy");
retval = -1;
goto done;
}
@@ -841,7 +892,7 @@
//Create the org.apache.hadoop.conf.Configuration object
jobject jConfiguration =
- constructNewObjectOfClass(env, HADOOP_CONF, "()V");
+ constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
if (jConfiguration == NULL) {
fprintf(stderr, "Can't construct instance of class "
"org.apache.hadoop.conf.Configuration\n");
@@ -854,14 +905,14 @@
//FileUtil::copy
jboolean deleteSource = 1; //Delete src after copy
jvalue jVal;
- if (invokeMethod(env, &jVal, STATIC, NULL,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
"org/apache/hadoop/fs/FileUtil", "copy",
"(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
jConfiguration) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileUtil::copy(move) failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileUtil::copy(move)");
retval = -1;
goto done;
}
@@ -897,12 +948,12 @@
//Delete the file
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
"delete", "(Lorg/apache/hadoop/fs/Path;)Z",
jPath) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::delete failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::delete");
return -1;
}
@@ -943,12 +994,12 @@
//Rename the file
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS, "rename",
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, "rename",
JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
jOldPath, jNewPath) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::rename failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::rename");
return -1;
}
@@ -973,25 +1024,25 @@
jobject jFS = (jobject)fs;
jobject jPath = NULL;
jvalue jVal;
+ jthrowable jExc = NULL;
//FileSystem::getWorkingDirectory()
- if (invokeMethod(env, &jVal, INSTANCE, jFS,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
HADOOP_FS, "getWorkingDirectory",
"()Lorg/apache/hadoop/fs/Path;") != 0 ||
jVal.l == NULL) {
- fprintf(stderr, "Call to FileSystem::getWorkingDirectory failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "FileSystem::"
+ "getWorkingDirectory");
return NULL;
}
jPath = jVal.l;
//Path::toString()
jstring jPathString;
- if (invokeMethod(env, &jVal, INSTANCE, jPath,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath,
"org/apache/hadoop/fs/Path", "toString",
"()Ljava/lang/String;") != 0) {
- fprintf(stderr, "Call to Path::toString failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "Path::toString");
destroyLocalReference(env, jPath);
return NULL;
}
@@ -1024,6 +1075,7 @@
jobject jFS = (jobject)fs;
int retval = 0;
+ jthrowable jExc = NULL;
//Create an object of org.apache.hadoop.fs.Path
jobject jPath = constructNewObjectOfPath(env, path);
@@ -1032,11 +1084,11 @@
}
//FileSystem::setWorkingDirectory()
- if (invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
+ if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
"setWorkingDirectory",
"(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) {
- fprintf(stderr, "Call to FileSystem::setWorkingDirectory failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "FileSystem::"
+ "setWorkingDirectory");
retval = -1;
}
@@ -1067,12 +1119,12 @@
//Create the directory
jvalue jVal;
jVal.z = 0;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
"mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
jPath) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs.FileSystem::"
- "mkdirs failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::mkdirs");
goto done;
}
@@ -1103,12 +1155,12 @@
//Create the directory
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
"setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
jPath, replication) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs.FileSystem::"
- "setReplication failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::setReplication");
goto done;
}
@@ -1143,14 +1195,14 @@
char*** blockHosts = NULL;
jobjectArray jBlockLocations;
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jFS,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
HADOOP_FS, "getFileBlockLocations",
"(Lorg/apache/hadoop/fs/Path;JJ)"
"[Lorg/apache/hadoop/fs/BlockLocation;",
jPath, start, length) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::getFileBlockLocations failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::getFileBlockLocations");
destroyLocalReference(env, jPath);
return NULL;
}
@@ -1179,12 +1231,11 @@
jvalue jVal;
jobjectArray jFileBlockHosts;
- if (invokeMethod(env, &jVal, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
"getHosts", "()[Ljava/lang/String;") ||
jVal.l == NULL) {
- fprintf(stderr, "Call to org.apache.hadoop.fs.BlockLocation::"
- "getHosts failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "BlockLocation::getHosts");
destroyLocalReference(env, jPath);
destroyLocalReference(env, jBlockLocations);
return NULL;
@@ -1260,11 +1311,11 @@
//FileSystem::getDefaultBlockSize()
tOffset blockSize = -1;
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
"getDefaultBlockSize", "()J") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::getDefaultBlockSize failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::getDefaultBlockSize");
return -1;
}
blockSize = jVal.j;
@@ -1293,11 +1344,11 @@
//FileSystem::getRawCapacity()
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_DFS,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
"getRawCapacity", "()J") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::getRawCapacity failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::getRawCapacity");
return -1;
}
@@ -1325,11 +1376,11 @@
//FileSystem::getRawUsed()
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_DFS,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
"getRawUsed", "()J") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::getRawUsed failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::getRawUsed");
return -1;
}
@@ -1342,71 +1393,65 @@
getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
{
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jStat,
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "isDir", "()Z") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileStatus::isDir failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileStatus::isDir");
return -1;
}
fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
- if (invokeMethod(env, &jVal, INSTANCE, jStat,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "getReplication", "()S") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileStatus::getReplication failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileStatus::getReplication");
return -1;
}
fileInfo->mReplication = jVal.s;
- if (invokeMethod(env, &jVal, INSTANCE, jStat,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "getBlockSize", "()J") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileStatus::getBlockSize failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileStatus::getBlockSize");
return -1;
}
fileInfo->mBlockSize = jVal.j;
- if (invokeMethod(env, &jVal, INSTANCE, jStat,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "getModificationTime", "()J") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileStatus::getModificationTime failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileStatus::getModificationTime");
return -1;
}
fileInfo->mLastMod = (tTime) (jVal.j / 1000);
if (fileInfo->mKind == kObjectKindFile) {
- if (invokeMethod(env, &jVal, INSTANCE, jStat,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "getLen", "()J") != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileStatus::getLen failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileStatus::getLen");
return -1;
}
fileInfo->mSize = jVal.j;
}
jobject jPath;
- if (invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
"getPath", "()Lorg/apache/hadoop/fs/Path;") ||
jVal.l == NULL) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileStatus::getPath failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileStatus::getPath");
return -1;
}
jPath = jVal.l;
jstring jPathName;
const char *cPathName;
- if (invokeMethod(env, &jVal, INSTANCE, jPath, HADOOP_PATH,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath, HADOOP_PATH,
"toString", "()Ljava/lang/String;")) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "Path::toString failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "Path::toString");
destroyLocalReference(env, jPath);
return -1;
}
@@ -1431,13 +1476,13 @@
jobject jStat;
jvalue jVal;
+ jthrowable jExc = NULL;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
"exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
jPath) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::exists failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::exists");
return -1;
}
@@ -1446,12 +1491,11 @@
return -1;
}
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
"getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)),
jPath) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::getFileStatus failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::getFileStatus");
return -1;
}
jStat = jVal.l;
@@ -1485,12 +1529,12 @@
jobjectArray jPathList = NULL;
jvalue jVal;
- if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_DFS, "listStatus",
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS, "listStatus",
JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
jPath) != 0) {
- fprintf(stderr, "Call to org.apache.hadoop.fs."
- "FileSystem::listStatus failed!\n");
- errno = EINTERNAL;
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::listStatus");
destroyLocalReference(env, jPath);
return NULL;
}
@@ -1517,7 +1561,6 @@
for (i=0; i < jPathListSize; ++i) {
tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
if (getFileInfoFromStat(env, tmpStat, &pathList[i])) {
- errno = EINTERNAL;
hdfsFreeFileInfo(pathList, jPathListSize);
destroyLocalReference(env, tmpStat);
pathList = NULL;
Modified: hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.c
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/hdfsJniHelper.c?rev=686873&r1=686872&r2=686873&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.c (original)
+++ hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.c Mon Aug 18 14:47:44 2008
@@ -15,6 +15,7 @@
*/
#include <string.h>
+#include <error.h>
#include "hdfsJniHelper.h"
static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
@@ -45,14 +46,6 @@
#define MAX_HASH_TABLE_ELEM 4096
-#define CHECK_EXCEPTION_IN_METH_INVOC \
- if ((*env)->ExceptionCheck(env)) {\
- (*env)->ExceptionDescribe(env);\
- va_end(args);\
- return -1;\
- }\
-
-
static void validateMethodType(MethType methType)
{
if (methType != STATIC && methType != INSTANCE) {
@@ -120,13 +113,14 @@
-int invokeMethod(JNIEnv *env, RetVal *retval, MethType methType,
+int invokeMethod(JNIEnv *env, RetVal *retval, Exc *exc, MethType methType,
jobject instObj, const char *className,
const char *methName, const char *methSignature, ...)
{
va_list args;
jclass cls;
jmethodID mid;
+ jthrowable jthr;
const char *str;
char returnType;
@@ -152,7 +146,6 @@
else if (methType == INSTANCE) {
jobj = (*env)->CallObjectMethodV(env, instObj, mid, args);
}
- CHECK_EXCEPTION_IN_METH_INVOC
retval->l = jobj;
}
else if (returnType == VOID) {
@@ -162,7 +155,6 @@
else if (methType == INSTANCE) {
(*env)->CallVoidMethodV(env, instObj, mid, args);
}
- CHECK_EXCEPTION_IN_METH_INVOC
}
else if (returnType == JBOOLEAN) {
jboolean jbool = 0;
@@ -172,7 +164,6 @@
else if (methType == INSTANCE) {
jbool = (*env)->CallBooleanMethodV(env, instObj, mid, args);
}
- CHECK_EXCEPTION_IN_METH_INVOC
retval->z = jbool;
}
else if (returnType == JSHORT) {
@@ -183,7 +174,6 @@
else if (methType == INSTANCE) {
js = (*env)->CallShortMethodV(env, instObj, mid, args);
}
- CHECK_EXCEPTION_IN_METH_INVOC
retval->s = js;
}
else if (returnType == JLONG) {
@@ -194,7 +184,6 @@
else if (methType == INSTANCE) {
jl = (*env)->CallLongMethodV(env, instObj, mid, args);
}
- CHECK_EXCEPTION_IN_METH_INVOC
retval->j = jl;
}
else if (returnType == JINT) {
@@ -205,21 +194,30 @@
else if (methType == INSTANCE) {
ji = (*env)->CallIntMethodV(env, instObj, mid, args);
}
- CHECK_EXCEPTION_IN_METH_INVOC
retval->i = ji;
}
va_end(args);
+
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr != NULL) {
+ if (exc != NULL)
+ *exc = jthr;
+ else
+ (*env)->ExceptionDescribe(env);
+ return -1;
+ }
return 0;
}
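Callers now choose between two behaviors: pass a jthrowable out-parameter to capture and translate the exception, or pass NULL to keep the old describe-and-return behavior. A condensed sketch of both styles, assuming the surrounding declarations and macros from hdfs.c:

    jvalue jVal;
    jthrowable jExc = NULL;

    /* Capture the exception and translate it into an errno. */
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "exists", "(Lorg/apache/hadoop/fs/Path;)Z", jPath) != 0) {
        errno = errnoFromException(jExc, env, "FileSystem::exists");
    }

    /* Pass NULL: invokeMethod prints the stack trace itself. */
    (void)invokeMethod(env, NULL, NULL, INSTANCE, jStream, HADOOP_OSTRM,
                       "flush", "()V");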
-jobject constructNewObjectOfClass(JNIEnv *env, const char *className,
+jobject constructNewObjectOfClass(JNIEnv *env, Exc *exc, const char *className,
const char *ctorSignature, ...)
{
va_list args;
jclass cls;
jmethodID mid;
jobject jobj;
+ jthrowable jthr;
cls = globalClassReference(className, env);
mid = methodIdFromClass(className, "<init>", ctorSignature,
@@ -231,8 +229,12 @@
va_start(args, ctorSignature);
jobj = (*env)->NewObjectV(env, cls, mid, args);
va_end(args);
- if ((*env)->ExceptionCheck(env)) {
- (*env)->ExceptionDescribe(env);
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr != NULL) {
+ if (exc != NULL)
+ *exc = jthr;
+ else
+ (*env)->ExceptionDescribe(env);
}
return jobj;
}
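The constructor helper follows the same convention. Within this commit hdfs.c still passes NULL for exc at its call sites; capturing the exception instead is a possible caller pattern, not something the diff does:

    jthrowable jExc = NULL;
    jobject jPath = constructNewObjectOfClass(env, &jExc,
            "org/apache/hadoop/fs/Path",
            "(Ljava/lang/String;)V", jPathString);
    if (jPath == NULL) {
        /* errnoFromException is the static helper added to hdfs.c above */
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs.Path(String)");
    }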
@@ -281,6 +283,45 @@
}
+char *classNameOfObject(jobject jobj, JNIEnv *env) {
+ jclass cls, clsClass;
+ jmethodID mid;
+ jstring str;
+ const char *cstr;
+ char *newstr;
+
+ cls = (*env)->GetObjectClass(env, jobj);
+ if (cls == NULL) {
+ (*env)->ExceptionDescribe(env);
+ exit(1);
+ }
+ clsClass = (*env)->FindClass(env, "java/lang/Class");
+ if (clsClass == NULL) {
+ (*env)->ExceptionDescribe(env);
+ exit(1);
+ }
+ mid = (*env)->GetMethodID(env, clsClass, "getName", "()Ljava/lang/String;");
+ if (mid == NULL) {
+ (*env)->ExceptionDescribe(env);
+ exit(1);
+ }
+ str = (*env)->CallObjectMethod(env, cls, mid);
+ if (str == NULL) {
+ (*env)->ExceptionDescribe(env);
+ exit(1);
+ }
+
+ cstr = (*env)->GetStringUTFChars(env, str, NULL);
+ newstr = strdup(cstr);
+ (*env)->ReleaseStringUTFChars(env, str, cstr);
+ if (newstr == NULL) {
+ perror("classNameOfObject: strdup");
+ exit(1);
+ }
+ return newstr;
+}
+
+
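The returned name is heap-allocated with strdup, so the caller owns it; errnoFromException above frees it once the comparison is done. A standalone usage sketch (the object and class name are illustrative):

    char *name = classNameOfObject(obj, env);  /* e.g. "java.io.IOException" */
    if (!strcmp(name, "java.io.IOException")) {
        /* ... handle I/O errors ... */
    }
    free(name);   /* caller must free; see the header comment below */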
/**
Modified: hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.h
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/hdfsJniHelper.h?rev=686873&r1=686872&r2=686873&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.h (original)
+++ hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.h Mon Aug 18 14:47:44 2008
@@ -45,6 +45,9 @@
*/
typedef jvalue RetVal;
+/** Used for returning the exception after invoking a method */
+typedef jthrowable Exc;
+
/** invokeMethod: Invoke a Static or Instance method.
* className: Name of the class where the method can be found
* methName: Name of the method
@@ -60,9 +63,9 @@
* exc: If the methods throws any exception, this will contain the reference
* Arguments (the method arguments) must be passed after methSignature
* RETURNS: -1 on error and 0 on success. If -1 is returned, exc will have
- a valid exception reference.
+ a valid exception reference, and the result stored at retval is undefined.
*/
-int invokeMethod(JNIEnv *env, RetVal *retval, MethType methType,
+int invokeMethod(JNIEnv *env, RetVal *retval, Exc *exc, MethType methType,
jobject instObj, const char *className, const char *methName,
const char *methSignature, ...);
@@ -73,7 +76,7 @@
* exc: If the ctor throws any exception, this will contain the reference
* Arguments to the ctor must be passed after ctorSignature
*/
-jobject constructNewObjectOfClass(JNIEnv *env, const char *className,
+jobject constructNewObjectOfClass(JNIEnv *env, Exc *exc, const char *className,
const char *ctorSignature, ...);
jmethodID methodIdFromClass(const char *className, const char *methName,
@@ -82,6 +85,14 @@
jclass globalClassReference(const char *className, JNIEnv *env);
+/** classNameOfObject: Get an object's class name.
+ * @param jobj: The object.
+ * @param env: The JNIEnv pointer.
+ * @return Returns a pointer to a string containing the class name. This string
+ * must be freed by the caller.
+ */
+char *classNameOfObject(jobject jobj, JNIEnv *env);
+
/** getJNIEnv: A helper function to get the JNIEnv* for the given thread.
* If no JVM exists, then one will be created. JVM command line arguments
* are obtained from the LIBHDFS_OPTS environment variable.