You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2009/03/11 19:58:06 UTC
svn commit: r752579 - in /hadoop/core/branches/branch-0.19: CHANGES.txt
src/c++/libhdfs/Makefile src/c++/libhdfs/hdfs.c
src/c++/libhdfs/hdfsJniHelper.c src/c++/libhdfs/tests/conf/hadoop-site.xml
Author: dhruba
Date: Wed Mar 11 18:58:05 2009
New Revision: 752579
URL: http://svn.apache.org/viewvc?rev=752579&view=rev
Log:
HADOOP-5333. libhdfs supports appending to files. (dhruba)
Modified:
hadoop/core/branches/branch-0.19/CHANGES.txt
hadoop/core/branches/branch-0.19/src/c++/libhdfs/Makefile
hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.c
hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfsJniHelper.c
hadoop/core/branches/branch-0.19/src/c++/libhdfs/tests/conf/hadoop-site.xml
Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=752579&r1=752578&r2=752579&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Wed Mar 11 18:58:05 2009
@@ -58,6 +58,8 @@
HADOOP-5332. Appending to files is not allowed (by default) unless
dfs.support.append is set to true. (dhruba)
+ HADOOP-5333. libhdfs supports appending to files. (dhruba)
+
Release 0.19.1 - 2009-02-23
INCOMPATIBLE CHANGES
Modified: hadoop/core/branches/branch-0.19/src/c++/libhdfs/Makefile
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/c%2B%2B/libhdfs/Makefile?rev=752579&r1=752578&r2=752579&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/c++/libhdfs/Makefile (original)
+++ hadoop/core/branches/branch-0.19/src/c++/libhdfs/Makefile Wed Mar 11 18:58:05 2009
@@ -25,10 +25,11 @@
CC = gcc
LD = gcc
+OS_ARCH=amd64
CFLAGS = -g -Wall -O2 -fPIC
-LDFLAGS = -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm -shared -m32 -Wl,-x
+LDFLAGS = -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm -shared -m64 -Wl,-x
PLATFORM = $(shell echo $$OS_NAME | tr [A-Z] [a-z])
-CPPFLAGS = -m32 -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/$(PLATFORM)
+CPPFLAGS = -m64 -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/$(PLATFORM)
LIB_NAME = hdfs
SO_NAME = lib$(LIB_NAME).so
Modified: hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/c%2B%2B/libhdfs/hdfs.c?rev=752579&r1=752578&r2=752579&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.c (original)
+++ hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfs.c Wed Mar 11 18:58:05 2009
@@ -393,7 +393,6 @@
FSData{Input|Output}Stream f{is|os} = fs.create(f);
return f{is|os};
*/
-
/* Get the JNIEnv* corresponding to current thread */
JNIEnv* env = getJNIEnv();
@@ -504,20 +503,17 @@
signature);
goto done;
}
+ } else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
// WRITE/APPEND?
- else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
- if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
- method, signature, jPath)) {
- errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
- "FileSystem::%s(%s)", method,
- signature);
- goto done;
- }
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
+ method, signature, jPath)) {
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+ "FileSystem::%s(%s)", method,
+ signature);
+ goto done;
}
-
- }
- // WRITE/CREATE
- else {
+ } else {
+ // WRITE/CREATE
jboolean jOverWrite = 1;
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
method, signature, jPath, jOverWrite,
Modified: hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfsJniHelper.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/c%2B%2B/libhdfs/hdfsJniHelper.c?rev=752579&r1=752578&r2=752579&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfsJniHelper.c (original)
+++ hadoop/core/branches/branch-0.19/src/c++/libhdfs/hdfsJniHelper.c Wed Mar 11 18:58:05 2009
@@ -19,10 +19,13 @@
#include "hdfsJniHelper.h"
static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int hashTableInited = 0;
#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
+#define LOCK_JVM_MUTEX() pthread_mutex_lock(&jvmMutex)
+#define UNLOCK_JVM_MUTEX() pthread_mutex_unlock(&jvmMutex)
/** The Native return types that methods could return */
@@ -391,9 +394,14 @@
jint rv = 0;
jint noVMs = 0;
+ // Only the first thread should create the JVM. The other threads should
+ // just use the JVM created by the first thread.
+ LOCK_JVM_MUTEX();
+
rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
if (rv != 0) {
fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
+ UNLOCK_JVM_MUTEX();
return NULL;
}
@@ -402,6 +410,7 @@
char *hadoopClassPath = getenv("CLASSPATH");
if (hadoopClassPath == NULL) {
fprintf(stderr, "Environment variable CLASSPATH not set!\n");
+ UNLOCK_JVM_MUTEX();
return NULL;
}
char *hadoopClassPathVMArg = "-Djava.class.path=";
@@ -447,6 +456,7 @@
if (rv != 0) {
fprintf(stderr, "Call to JNI_CreateJavaVM failed "
"with error: %d\n", rv);
+ UNLOCK_JVM_MUTEX();
return NULL;
}
@@ -459,9 +469,11 @@
if (rv != 0) {
fprintf(stderr, "Call to AttachCurrentThread "
"failed with error: %d\n", rv);
+ UNLOCK_JVM_MUTEX();
return NULL;
}
}
+ UNLOCK_JVM_MUTEX();
return env;
}
Modified: hadoop/core/branches/branch-0.19/src/c++/libhdfs/tests/conf/hadoop-site.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/c%2B%2B/libhdfs/tests/conf/hadoop-site.xml?rev=752579&r1=752578&r2=752579&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/c++/libhdfs/tests/conf/hadoop-site.xml (original)
+++ hadoop/core/branches/branch-0.19/src/c++/libhdfs/tests/conf/hadoop-site.xml Wed Mar 11 18:58:05 2009
@@ -37,4 +37,40 @@
</description>
</property>
+<property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50012</value>
+ <description>
+ The address the datanode server will listen on.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50079</value>
+ <description>
+ The datanode http server address and port.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.datanode.ipc.address</name>
+ <value>0.0.0.0:50022</value>
+ <description>
+ The datanode ipc server address and port.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.http.address</name>
+ <value>0.0.0.0:50072</value>
+ <description>
+ The address and the base port where the dfs namenode web ui will listen on.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
+
</configuration>