You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2009/03/11 19:46:57 UTC
svn commit: r752568 - in /hadoop/core/branches/branch-0.20: CHANGES.txt
src/c++/libhdfs/hdfs.c src/c++/libhdfs/hdfsJniHelper.c
src/c++/libhdfs/tests/conf/hdfs-site.xml
src/c++/libhdfs/tests/test-libhdfs.sh
Author: dhruba
Date: Wed Mar 11 18:46:57 2009
New Revision: 752568
URL: http://svn.apache.org/viewvc?rev=752568&view=rev
Log:
HADOOP-5333. libhdfs supports appending to files. (dhruba)
Modified:
hadoop/core/branches/branch-0.20/CHANGES.txt (contents, props changed)
hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.c
hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfsJniHelper.c
hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/conf/hdfs-site.xml
hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/test-libhdfs.sh
Modified: hadoop/core/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/CHANGES.txt?rev=752568&r1=752567&r2=752568&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.20/CHANGES.txt Wed Mar 11 18:46:57 2009
@@ -722,6 +722,8 @@
HADOOP-5332. Appending to files is not allowed (by default) unless
dfs.support.append is set to true. (dhruba)
+
+ HADOOP-5333. libhdfs supports appending to files. (dhruba)
Release 0.19.1 - Unreleased
Propchange: hadoop/core/branches/branch-0.20/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Mar 11 18:46:57 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.18/CHANGES.txt:727226
/hadoop/core/branches/branch-0.19/CHANGES.txt:713112
-/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296,743745,743816,743892,744894,745180,745268,746010,746193,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318,749863,750533,752073,752514
+/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296,743745,743816,743892,744894,745180,745268,746010,746193,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318,749863,750533,752073,752514,752555
Modified: hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/c%2B%2B/libhdfs/hdfs.c?rev=752568&r1=752567&r2=752568&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.c (original)
+++ hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.c Wed Mar 11 18:46:57 2009
@@ -393,7 +393,6 @@
FSData{Input|Output}Stream f{is|os} = fs.create(f);
return f{is|os};
*/
-
/* Get the JNIEnv* corresponding to current thread */
JNIEnv* env = getJNIEnv();
@@ -504,20 +503,17 @@
signature);
goto done;
}
+ } else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
// WRITE/APPEND?
- else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
- if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
- method, signature, jPath)) {
- errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
- "FileSystem::%s(%s)", method,
- signature);
- goto done;
- }
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
+ method, signature, jPath)) {
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+ "FileSystem::%s(%s)", method,
+ signature);
+ goto done;
}
-
- }
- // WRITE/CREATE
- else {
+ } else {
+ // WRITE/CREATE
jboolean jOverWrite = 1;
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
method, signature, jPath, jOverWrite,
Modified: hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfsJniHelper.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/c%2B%2B/libhdfs/hdfsJniHelper.c?rev=752568&r1=752567&r2=752568&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfsJniHelper.c (original)
+++ hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfsJniHelper.c Wed Mar 11 18:46:57 2009
@@ -19,10 +19,13 @@
#include "hdfsJniHelper.h"
static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int hashTableInited = 0;
#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
+#define LOCK_JVM_MUTEX() pthread_mutex_lock(&jvmMutex)
+#define UNLOCK_JVM_MUTEX() pthread_mutex_unlock(&jvmMutex)
/** The Native return types that methods could return */
@@ -391,9 +394,14 @@
jint rv = 0;
jint noVMs = 0;
+ // Only the first thread should create the JVM. The other threads should
+ // just use the JVM created by the first thread.
+ LOCK_JVM_MUTEX();
+
rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
if (rv != 0) {
fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
+ UNLOCK_JVM_MUTEX();
return NULL;
}
@@ -402,6 +410,7 @@
char *hadoopClassPath = getenv("CLASSPATH");
if (hadoopClassPath == NULL) {
fprintf(stderr, "Environment variable CLASSPATH not set!\n");
+ UNLOCK_JVM_MUTEX();
return NULL;
}
char *hadoopClassPathVMArg = "-Djava.class.path=";
@@ -447,6 +456,7 @@
if (rv != 0) {
fprintf(stderr, "Call to JNI_CreateJavaVM failed "
"with error: %d\n", rv);
+ UNLOCK_JVM_MUTEX();
return NULL;
}
@@ -459,9 +469,11 @@
if (rv != 0) {
fprintf(stderr, "Call to AttachCurrentThread "
"failed with error: %d\n", rv);
+ UNLOCK_JVM_MUTEX();
return NULL;
}
}
+ UNLOCK_JVM_MUTEX();
return env;
}
Modified: hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/conf/hdfs-site.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/c%2B%2B/libhdfs/tests/conf/hdfs-site.xml?rev=752568&r1=752567&r2=752568&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/conf/hdfs-site.xml (original)
+++ hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/conf/hdfs-site.xml Wed Mar 11 18:46:57 2009
@@ -21,4 +21,40 @@
</description>
</property>
+<property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50012</value>
+ <description>
+ The address where the datanode server will listen to.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50079</value>
+ <description>
+ The datanode http server address and port.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.datanode.ipc.address</name>
+ <value>0.0.0.0:50022</value>
+ <description>
+ The datanode ipc server address and port.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.http.address</name>
+ <value>0.0.0.0:50072</value>
+ <description>
+ The address and the base port where the dfs namenode web ui will listen on.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
+
</configuration>
Modified: hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/test-libhdfs.sh
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/c%2B%2B/libhdfs/tests/test-libhdfs.sh?rev=752568&r1=752567&r2=752568&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/test-libhdfs.sh (original)
+++ hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/test-libhdfs.sh Wed Mar 11 18:46:57 2009
@@ -117,6 +117,7 @@
echo Y | $HADOOP_BIN_DIR/hadoop namenode -format &&
$HADOOP_BIN_DIR/hadoop-daemon.sh start namenode && sleep 2 &&
$HADOOP_BIN_DIR/hadoop-daemon.sh start datanode && sleep 2 &&
+sleep 20
echo CLASSPATH=$HADOOP_CONF_DIR:$CLASSPATH LD_PRELOAD="$LIBHDFS_INSTALL_DIR/libhdfs.so:$LIB_JVM_DIR/libjvm.so" $LIBHDFS_BUILD_DIR/$HDFS_TEST &&
CLASSPATH=$HADOOP_CONF_DIR:$CLASSPATH LD_PRELOAD="$LIB_JVM_DIR/libjvm.so:$LIBHDFS_INSTALL_DIR/libhdfs.so:" $LIBHDFS_BUILD_DIR/$HDFS_TEST
BUILD_STATUS=$?