Posted to hdfs-commits@hadoop.apache.org by cn...@apache.org on 2014/08/08 18:26:46 UTC
svn commit: r1616814 [1/2] - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/
src/main/native/fuse-dfs/ src/main/native/libhdfs/
src/main/native/libhdfs/common/ src/main/native/libhdfs/os/
src/main/native/libhdfs/os/posix/ src/main/nati...
Author: cnauroth
Date: Fri Aug 8 16:26:45 2014
New Revision: 1616814
URL: http://svn.apache.org/r1616814
Log:
HDFS-573. Porting libhdfs to Windows. Contributed by Chris Nauroth.
Added:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Aug 8 16:26:45 2014
@@ -382,6 +382,8 @@ Release 2.6.0 - UNRELEASED
HDFS-6772. Get DN storages out of blockContentsStale state faster after
NN restarts. (Ming Ma via Arpit Agarwal)
+ HDFS-573. Porting libhdfs to Windows. (cnauroth)
+
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml Fri Aug 8 16:26:45 2014
@@ -360,16 +360,97 @@ http://maven.apache.org/xsd/maven-4.0.0.
<profiles>
<profile>
- <id>windows</id>
+ <id>native-win</id>
<activation>
<activeByDefault>false</activeByDefault>
<os>
<family>windows</family>
</os>
</activation>
- <properties>
- <windows.build>true</windows.build>
- </properties>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-enforcer-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>enforce-os</id>
+ <goals>
+ <goal>enforce</goal>
+ </goals>
+ <configuration>
+ <rules>
+ <requireOS>
+ <family>windows</family>
+ <message>native-win build only supported on Windows</message>
+ </requireOS>
+ </rules>
+ <fail>true</fail>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>make</id>
+ <phase>compile</phase>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ <configuration>
+ <target>
+ <mkdir dir="${project.build.directory}/native"/>
+ <exec executable="cmake" dir="${project.build.directory}/native"
+ failonerror="true">
+ <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G 'Visual Studio 10 Win64'"/>
+ </exec>
+ <exec executable="msbuild" dir="${project.build.directory}/native"
+ failonerror="true">
+ <arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=Release"/>
+ </exec>
+ <!-- Copy for inclusion in distribution. -->
+ <copy todir="${project.build.directory}/bin">
+ <fileset dir="${project.build.directory}/native/target/bin/Release"/>
+ </copy>
+ </target>
+ </configuration>
+ </execution>
+ <execution>
+ <id>native_tests</id>
+ <phase>test</phase>
+ <goals><goal>run</goal></goals>
+ <configuration>
+ <skip>${skipTests}</skip>
+ <target>
+ <property name="compile_classpath" refid="maven.compile.classpath"/>
+ <property name="test_classpath" refid="maven.test.classpath"/>
+ <macrodef name="run-test">
+ <attribute name="test"/>
+ <sequential>
+ <echo message="Running @{test}"/>
+ <exec executable="${project.build.directory}/native/Release/@{test}" failonerror="true" dir="${project.build.directory}/native/">
+ <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
+ <!-- HADOOP_HOME required to find winutils. -->
+ <env key="HADOOP_HOME" value="${hadoop.common.build.dir}"/>
+ <!-- Make sure hadoop.dll and jvm.dll are on PATH. -->
+ <env key="PATH" value="${env.PATH};${hadoop.common.build.dir}/bin;${java.home}/jre/bin/server;${java.home}/bin/server"/>
+ </exec>
+ <echo message="Finished @{test}"/>
+ </sequential>
+ </macrodef>
+ <run-test test="test_libhdfs_threaded"/>
+ <echo message="Skipping test_libhdfs_zerocopy"/>
+ <run-test test="test_native_mini_dfs"/>
+ </target>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
</profile>
<profile>
<id>native</id>
@@ -407,21 +488,25 @@ http://maven.apache.org/xsd/maven-4.0.0.
<phase>test</phase>
<goals><goal>run</goal></goals>
<configuration>
+ <skip>${skipTests}</skip>
<target>
<property name="compile_classpath" refid="maven.compile.classpath"/>
<property name="test_classpath" refid="maven.test.classpath"/>
- <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
- <arg value="-c"/>
- <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/>
- <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
- <env key="SKIPTESTS" value="${skipTests}"/>
- </exec>
- <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
- <arg value="-c"/>
- <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_native_mini_dfs"/>
- <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
- <env key="SKIPTESTS" value="${skipTests}"/>
- </exec>
+ <macrodef name="run-test">
+ <attribute name="test"/>
+ <sequential>
+ <echo message="Running @{test}"/>
+ <exec executable="${project.build.directory}/native/@{test}" failonerror="true" dir="${project.build.directory}/native/">
+ <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
+ <!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
+ <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
+ </exec>
+ <echo message="Finished @{test}"/>
+ </sequential>
+ </macrodef>
+ <run-test test="test_libhdfs_threaded"/>
+ <run-test test="test_libhdfs_zerocopy"/>
+ <run-test test="test_native_mini_dfs"/>
</target>
</configuration>
</execution>
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt Fri Aug 8 16:26:45 2014
@@ -76,9 +76,39 @@ if (NOT GENERATED_JAVAH)
MESSAGE(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH")
endif (NOT GENERATED_JAVAH)
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
+if (WIN32)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /O2")
+
+ # Set warning level 4.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4")
+
+ # Skip "unreferenced formal parameter".
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4100")
+
+ # Skip "conditional expression is constant".
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127")
+
+ # Skip deprecated POSIX function warnings.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_NONSTDC_NO_DEPRECATE")
+
+ # Skip CRT non-secure function warnings. If we can convert usage of
+ # strerror, getenv and ctime to their secure CRT equivalents, then we can
+ # re-enable the CRT non-secure function warnings.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_SECURE_NO_WARNINGS")
+
+ # Omit unneeded headers.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
+
+ set(OS_DIR main/native/libhdfs/os/windows)
+ set(OUT_DIR target/bin)
+else (WIN32)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
+ set(OS_DIR main/native/libhdfs/os/posix)
+ set(OS_LINK_LIBRARIES pthread)
+ set(OUT_DIR target/usr/local/lib)
+endif (WIN32)
include_directories(
${GENERATED_JAVAH}
@@ -87,6 +117,7 @@ include_directories(
${JNI_INCLUDE_DIRS}
main/native
main/native/libhdfs
+ ${OS_DIR}
)
set(_FUSE_DFS_VERSION 0.1.0)
@@ -96,6 +127,9 @@ add_dual_library(hdfs
main/native/libhdfs/exception.c
main/native/libhdfs/jni_helper.c
main/native/libhdfs/hdfs.c
+ main/native/libhdfs/common/htable.c
+ ${OS_DIR}/mutexes.c
+ ${OS_DIR}/thread_local_storage.c
)
if (NEED_LINK_DL)
set(LIB_DL dl)
@@ -104,17 +138,14 @@ endif(NEED_LINK_DL)
target_link_dual_libraries(hdfs
${JAVA_JVM_LIBRARY}
${LIB_DL}
- pthread
+ ${OS_LINK_LIBRARIES}
)
-dual_output_directory(hdfs target/usr/local/lib)
+
+dual_output_directory(hdfs ${OUT_DIR})
set(LIBHDFS_VERSION "0.0.0")
set_target_properties(hdfs PROPERTIES
SOVERSION ${LIBHDFS_VERSION})
-add_library(posix_util
- main/native/util/posix_util.c
-)
-
add_executable(test_libhdfs_ops
main/native/libhdfs/test/test_libhdfs_ops.c
)
@@ -156,11 +187,12 @@ target_link_libraries(test_native_mini_d
add_executable(test_libhdfs_threaded
main/native/libhdfs/expect.c
main/native/libhdfs/test_libhdfs_threaded.c
+ ${OS_DIR}/thread.c
)
target_link_libraries(test_libhdfs_threaded
hdfs
native_mini_dfs
- pthread
+ ${OS_LINK_LIBRARIES}
)
add_executable(test_libhdfs_zerocopy
@@ -170,17 +202,21 @@ add_executable(test_libhdfs_zerocopy
target_link_libraries(test_libhdfs_zerocopy
hdfs
native_mini_dfs
- pthread
+ ${OS_LINK_LIBRARIES}
)
-add_executable(test_libhdfs_vecsum
- main/native/libhdfs/test/vecsum.c
-)
-target_link_libraries(test_libhdfs_vecsum
- hdfs
- pthread
- rt
-)
+# Skip vecsum on Windows. This could be made to work in the future by
+# introducing an abstraction layer over the sys/mman.h functions.
+if (NOT WIN32)
+ add_executable(test_libhdfs_vecsum
+ main/native/libhdfs/test/vecsum.c
+ )
+ target_link_libraries(test_libhdfs_vecsum
+ hdfs
+ pthread
+ rt
+ )
+endif(NOT WIN32)
IF(REQUIRE_LIBWEBHDFS)
add_subdirectory(contrib/libwebhdfs)
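The _CRT_SECURE_NO_WARNINGS define above silences MSVC warning C4996 for calls such as strerror, getenv and ctime. A minimal sketch of the trade-off the comment describes, using strerror_s (the MSVC / C11 Annex K variant) as the secure equivalent; illustrative only, not part of the patch:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static void show_error(int err)
    {
        /* Classic CRT call; MSVC flags this with C4996 unless
         * _CRT_SECURE_NO_WARNINGS is defined. */
        fprintf(stderr, "%s\n", strerror(err));

    #ifdef _WIN32
        {
            /* Secure equivalent: the caller supplies the buffer. */
            char buf[128];
            strerror_s(buf, sizeof(buf), err);
            fprintf(stderr, "%s\n", buf);
        }
    #endif
    }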
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt Fri Aug 8 16:26:45 2014
@@ -37,6 +37,10 @@ ELSE (${CMAKE_SYSTEM_NAME} MATCHES "Linu
ENDIF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
IF(FUSE_FOUND)
+ add_library(posix_util
+ ../util/posix_util.c
+ )
+
add_executable(fuse_dfs
fuse_dfs.c
fuse_options.c
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c Fri Aug 8 16:26:45 2014
@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/htable.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+struct htable_pair {
+ void *key;
+ void *val;
+};
+
+/**
+ * A hash table which uses linear probing.
+ */
+struct htable {
+ uint32_t capacity;
+ uint32_t used;
+ htable_hash_fn_t hash_fun;
+ htable_eq_fn_t eq_fun;
+ struct htable_pair *elem;
+};
+
+/**
+ * An internal function for inserting a value into the hash table.
+ *
+ * Note: this function assumes that you have made enough space in the table.
+ *
+ * @param nelem The element array to insert into.
+ * @param capacity The capacity of the hash table.
+ * @param hash_fun The hash function to use.
+ * @param key The key to insert.
+ * @param val The value to insert.
+ */
+static void htable_insert_internal(struct htable_pair *nelem,
+ uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
+ void *val)
+{
+ uint32_t i;
+
+ i = hash_fun(key, capacity);
+ while (1) {
+ if (!nelem[i].key) {
+ nelem[i].key = key;
+ nelem[i].val = val;
+ return;
+ }
+ i++;
+ if (i == capacity) {
+ i = 0;
+ }
+ }
+}
+
+static int htable_realloc(struct htable *htable, uint32_t new_capacity)
+{
+ struct htable_pair *nelem;
+ uint32_t i, old_capacity = htable->capacity;
+ htable_hash_fn_t hash_fun = htable->hash_fun;
+
+ nelem = calloc(new_capacity, sizeof(struct htable_pair));
+ if (!nelem) {
+ return ENOMEM;
+ }
+ for (i = 0; i < old_capacity; i++) {
+ struct htable_pair *pair = htable->elem + i;
+ htable_insert_internal(nelem, new_capacity, hash_fun,
+ pair->key, pair->val);
+ }
+ free(htable->elem);
+ htable->elem = nelem;
+ htable->capacity = new_capacity;
+ return 0;
+}
+
+struct htable *htable_alloc(uint32_t size,
+ htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
+{
+ struct htable *htable;
+
+ htable = calloc(1, sizeof(*htable));
+ if (!htable) {
+ return NULL;
+ }
+ size = (size + 1) >> 1;
+ size = size << 1;
+ if (size < HTABLE_MIN_SIZE) {
+ size = HTABLE_MIN_SIZE;
+ }
+ htable->hash_fun = hash_fun;
+ htable->eq_fun = eq_fun;
+ htable->used = 0;
+ if (htable_realloc(htable, size)) {
+ free(htable);
+ return NULL;
+ }
+ return htable;
+}
+
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
+{
+ uint32_t i;
+
+ for (i = 0; i != htable->capacity; ++i) {
+ struct htable_pair *elem = htable->elem + i;
+ if (elem->key) {
+ fun(ctx, elem->key, elem->val);
+ }
+ }
+}
+
+void htable_free(struct htable *htable)
+{
+ if (htable) {
+ free(htable->elem);
+ free(htable);
+ }
+}
+
+int htable_put(struct htable *htable, void *key, void *val)
+{
+ int ret;
+ uint32_t nused;
+
+ // NULL is not a valid key value.
+ // This helps us implement htable_get_internal efficiently, since we know
+ // that we can stop when we encounter the first NULL key.
+ if (!key) {
+ return EINVAL;
+ }
+ // NULL is not a valid value. Otherwise the results of htable_get would
+ // be confusing (does a NULL return mean entry not found, or that the
+ // entry was found and was NULL?)
+ if (!val) {
+ return EINVAL;
+ }
+ // Re-hash if we have used more than half of the hash table
+ nused = htable->used + 1;
+ if (nused >= (htable->capacity / 2)) {
+ ret = htable_realloc(htable, htable->capacity * 2);
+ if (ret)
+ return ret;
+ }
+ htable_insert_internal(htable->elem, htable->capacity,
+ htable->hash_fun, key, val);
+ htable->used++;
+ return 0;
+}
+
+static int htable_get_internal(const struct htable *htable,
+ const void *key, uint32_t *out)
+{
+ uint32_t start_idx, idx;
+
+ start_idx = htable->hash_fun(key, htable->capacity);
+ idx = start_idx;
+ while (1) {
+ struct htable_pair *pair = htable->elem + idx;
+ if (!pair->key) {
+ // We always maintain the invariant that the entries corresponding
+ // to a given key are stored in a contiguous block, not separated
+ // by any NULLs. So if we encounter a NULL, our search is over.
+ return ENOENT;
+ } else if (htable->eq_fun(pair->key, key)) {
+ *out = idx;
+ return 0;
+ }
+ idx++;
+ if (idx == htable->capacity) {
+ idx = 0;
+ }
+ if (idx == start_idx) {
+ return ENOENT;
+ }
+ }
+}
+
+void *htable_get(const struct htable *htable, const void *key)
+{
+ uint32_t idx;
+
+ if (htable_get_internal(htable, key, &idx)) {
+ return NULL;
+ }
+ return htable->elem[idx].val;
+}
+
+void htable_pop(struct htable *htable, const void *key,
+ void **found_key, void **found_val)
+{
+ uint32_t hole, i;
+ const void *nkey;
+
+ if (htable_get_internal(htable, key, &hole)) {
+ *found_key = NULL;
+ *found_val = NULL;
+ return;
+ }
+ i = hole;
+ htable->used--;
+ // We need to maintain the compactness invariant used in
+ // htable_get_internal. This invariant specifies that the entries for any
+ // given key are never separated by NULLs (although they may be separated
+ // by entries for other keys.)
+ while (1) {
+ i++;
+ if (i == htable->capacity) {
+ i = 0;
+ }
+ nkey = htable->elem[i].key;
+ if (!nkey) {
+ *found_key = htable->elem[hole].key;
+ *found_val = htable->elem[hole].val;
+ htable->elem[hole].key = NULL;
+ htable->elem[hole].val = NULL;
+ return;
+ } else if (htable->eq_fun(key, nkey)) {
+ htable->elem[hole].key = htable->elem[i].key;
+ htable->elem[hole].val = htable->elem[i].val;
+ hole = i;
+ }
+ }
+}
+
+uint32_t htable_used(const struct htable *htable)
+{
+ return htable->used;
+}
+
+uint32_t htable_capacity(const struct htable *htable)
+{
+ return htable->capacity;
+}
+
+uint32_t ht_hash_string(const void *str, uint32_t max)
+{
+ const char *s = str;
+ uint32_t hash = 0;
+
+ while (*s) {
+ hash = (hash * 31) + *s;
+ s++;
+ }
+ return hash % max;
+}
+
+int ht_compare_string(const void *a, const void *b)
+{
+ return strcmp(a, b) == 0;
+}
+
+// vim: ts=4:sw=4:tw=79:et
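For reference, a minimal usage sketch of the new table; not part of the commit, and it assumes compilation alongside htable.c with main/native/libhdfs on the include path:

    #include "common/htable.h"

    #include <inttypes.h>
    #include <stdio.h>

    static void print_pair(void *ctx, void *key, void *val)
    {
        (void)ctx;
        printf("%s -> %s\n", (const char *)key, (const char *)val);
    }

    int main(void)
    {
        struct htable *ht;
        void *fkey, *fval;

        ht = htable_alloc(4, ht_hash_string, ht_compare_string);
        if (!ht)
            return 1;
        /* Keys and values must be non-NULL; the table stores the
         * pointers themselves, it does not copy the strings. */
        if (htable_put(ht, "fs", "FileSystem") ||
                htable_put(ht, "conf", "Configuration")) {
            htable_free(ht);
            return 1;
        }
        printf("conf -> %s\n", (const char *)htable_get(ht, "conf"));
        htable_visit(ht, print_pair, NULL);
        /* Remove "fs", preserving the probing compactness invariant. */
        htable_pop(ht, "fs", &fkey, &fval);
        printf("used=%" PRIu32 " capacity=%" PRIu32 "\n",
               htable_used(ht), htable_capacity(ht));
        htable_free(ht);
        return 0;
    }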
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h Fri Aug 8 16:26:45 2014
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_COMMON_HASH_TABLE
+#define HADOOP_CORE_COMMON_HASH_TABLE
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#define HTABLE_MIN_SIZE 4
+
+struct htable;
+
+/**
+ * An HTable hash function.
+ *
+ * @param key The key.
+ * @param capacity The total capacity.
+ *
+ * @return The hash slot. Must be less than the capacity.
+ */
+typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
+
+/**
+ * An HTable equality function. Compares two keys.
+ *
+ * @param a First key.
+ * @param b Second key.
+ *
+ * @return nonzero if the keys are equal.
+ */
+typedef int (*htable_eq_fn_t)(const void *a, const void *b);
+
+/**
+ * Allocate a new hash table.
+ *
+ * @param capacity The minimum suggested starting capacity.
+ * @param hash_fun The hash function to use in this hash table.
+ * @param eq_fun The equals function to use in this hash table.
+ *
+ * @return The new hash table on success; NULL on OOM.
+ */
+struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
+ htable_eq_fn_t eq_fun);
+
+typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
+
+/**
+ * Visit all of the entries in the hash table.
+ *
+ * @param htable The hash table.
+ * @param fun The callback function to invoke on each key and value.
+ * @param ctx Context pointer to pass to the callback.
+ */
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
+
+/**
+ * Free the hash table.
+ *
+ * It is up to the calling code to ensure that the keys and values inside the
+ * table are de-allocated, if that is necessary.
+ *
+ * @param htable The hash table.
+ */
+void htable_free(struct htable *htable);
+
+/**
+ * Add an entry to the hash table.
+ *
+ * @param htable The hash table.
+ * @param key The key to add. This cannot be NULL.
+ * @param val The value to add. This cannot be NULL.
+ *
+ * @return 0 on success;
+ * EEXIST if the value already exists in the table;
+ * ENOMEM if there is not enough memory to add the element;
+ * EFBIG if the hash table has too many entries to fit in 32
+ * bits.
+ */
+int htable_put(struct htable *htable, void *key, void *val);
+
+/**
+ * Get an entry from the hash table.
+ *
+ * @param htable The hash table.
+ * @param key The key to find.
+ *
+ * @return NULL if there is no such entry; the entry otherwise.
+ */
+void *htable_get(const struct htable *htable, const void *key);
+
+/**
+ * Get an entry from the hash table and remove it.
+ *
+ * @param htable The hash table.
+ * @param key The key for the entry to find and remove.
+ * @param found_key (out param) NULL if the entry was not found; the found key
+ * otherwise.
+ * @param found_val (out param) NULL if the entry was not found; the found
+ * value otherwise.
+ */
+void htable_pop(struct htable *htable, const void *key,
+ void **found_key, void **found_val);
+
+/**
+ * Get the number of entries used in the hash table.
+ *
+ * @param htable The hash table.
+ *
+ * @return The number of entries used in the hash table.
+ */
+uint32_t htable_used(const struct htable *htable);
+
+/**
+ * Get the capacity of the hash table.
+ *
+ * @param htable The hash table.
+ *
+ * @return The capacity of the hash table.
+ */
+uint32_t htable_capacity(const struct htable *htable);
+
+/**
+ * Hash a string.
+ *
+ * @param str The string.
+ * @param max Maximum hash value (exclusive).
+ *
+ * @return A number less than max.
+ */
+uint32_t ht_hash_string(const void *str, uint32_t max);
+
+/**
+ * Compare two strings.
+ *
+ * @param a The first string.
+ * @param b The second string.
+ *
+ * @return 1 if the strings are identical; 0 otherwise.
+ */
+int ht_compare_string(const void *a, const void *b);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et
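The two typedefs above are the extension points. A hypothetical pointer-identity key scheme, to show the contracts each callback must honor (a slot strictly less than capacity; nonzero on equality):

    #include "common/htable.h"

    #include <stdint.h>

    /* Matches htable_hash_fn_t: must return a slot < capacity. */
    static uint32_t hash_ptr(const void *key, uint32_t capacity)
    {
        return (uint32_t)(((uintptr_t)key >> 3) % capacity);
    }

    /* Matches htable_eq_fn_t: nonzero means equal. */
    static int eq_ptr(const void *a, const void *b)
    {
        return a == b;
    }

    /* struct htable *ht = htable_alloc(16, hash_ptr, eq_ptr); */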
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c Fri Aug 8 16:26:45 2014
@@ -19,8 +19,8 @@
#include "exception.h"
#include "hdfs.h"
#include "jni_helper.h"
+#include "platform.h"
-#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -35,54 +35,54 @@ struct ExceptionInfo {
static const struct ExceptionInfo gExceptionInfo[] = {
{
- .name = "java.io.FileNotFoundException",
- .noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND,
- .excErrno = ENOENT,
+ "java.io.FileNotFoundException",
+ NOPRINT_EXC_FILE_NOT_FOUND,
+ ENOENT,
},
{
- .name = "org.apache.hadoop.security.AccessControlException",
- .noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL,
- .excErrno = EACCES,
+ "org.apache.hadoop.security.AccessControlException",
+ NOPRINT_EXC_ACCESS_CONTROL,
+ EACCES,
},
{
- .name = "org.apache.hadoop.fs.UnresolvedLinkException",
- .noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK,
- .excErrno = ENOLINK,
+ "org.apache.hadoop.fs.UnresolvedLinkException",
+ NOPRINT_EXC_UNRESOLVED_LINK,
+ ENOLINK,
},
{
- .name = "org.apache.hadoop.fs.ParentNotDirectoryException",
- .noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY,
- .excErrno = ENOTDIR,
+ "org.apache.hadoop.fs.ParentNotDirectoryException",
+ NOPRINT_EXC_PARENT_NOT_DIRECTORY,
+ ENOTDIR,
},
{
- .name = "java.lang.IllegalArgumentException",
- .noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT,
- .excErrno = EINVAL,
+ "java.lang.IllegalArgumentException",
+ NOPRINT_EXC_ILLEGAL_ARGUMENT,
+ EINVAL,
},
{
- .name = "java.lang.OutOfMemoryError",
- .noPrintFlag = 0,
- .excErrno = ENOMEM,
+ "java.lang.OutOfMemoryError",
+ 0,
+ ENOMEM,
},
{
- .name = "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
- .noPrintFlag = 0,
- .excErrno = EROFS,
+ "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
+ 0,
+ EROFS,
},
{
- .name = "org.apache.hadoop.fs.FileAlreadyExistsException",
- .noPrintFlag = 0,
- .excErrno = EEXIST,
+ "org.apache.hadoop.fs.FileAlreadyExistsException",
+ 0,
+ EEXIST,
},
{
- .name = "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
- .noPrintFlag = 0,
- .excErrno = EDQUOT,
+ "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
+ 0,
+ EDQUOT,
},
{
- .name = "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
- .noPrintFlag = 0,
- .excErrno = ESTALE,
+ "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
+ 0,
+ ESTALE,
},
};
@@ -113,6 +113,7 @@ int printExceptionAndFreeV(JNIEnv *env,
jstring jStr = NULL;
jvalue jVal;
jthrowable jthr;
+ const char *stackTrace;
jthr = classNameOfObject(exc, env, &className);
if (jthr) {
@@ -148,7 +149,7 @@ int printExceptionAndFreeV(JNIEnv *env,
destroyLocalReference(env, jthr);
} else {
jStr = jVal.l;
- const char *stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
+ stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
if (!stackTrace) {
fprintf(stderr, "(unable to get stack trace for %s exception: "
"GetStringUTFChars error.)\n", className);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h Fri Aug 8 16:26:45 2014
@@ -34,13 +34,14 @@
* usually not what you want.)
*/
+#include "platform.h"
+
#include <jni.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <search.h>
-#include <pthread.h>
#include <errno.h>
/**
@@ -109,7 +110,7 @@ int printExceptionAndFreeV(JNIEnv *env,
* object.
*/
int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
- const char *fmt, ...) __attribute__((format(printf, 4, 5)));
+ const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(4, 5);
/**
* Print out information about the pending exception and free it.
@@ -124,7 +125,7 @@ int printExceptionAndFree(JNIEnv *env, j
* object.
*/
int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
- const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+ const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(3, 4);
/**
* Get a local reference to the pending exception and clear it.
@@ -150,6 +151,7 @@ jthrowable getPendingExceptionAndClear(J
* @return A local reference to a RuntimeError
*/
jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
- __attribute__((format(printf, 2, 3)));
+ TYPE_CHECKED_PRINTF_FORMAT(2, 3);
+#undef TYPE_CHECKED_PRINTF_FORMAT
#endif
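platform.h itself is added elsewhere in this commit, but the intent of TYPE_CHECKED_PRINTF_FORMAT is visible here: keep GCC's compile-time printf format checking and expand to nothing under MSVC. A plausible shape, offered as an assumption rather than the committed definition:

    /* Assumed per-platform definition, not taken from this commit. */
    #ifdef _WIN32
    #define TYPE_CHECKED_PRINTF_FORMAT(fmtArg, varArgs)
    #else
    #define TYPE_CHECKED_PRINTF_FORMAT(fmtArg, varArgs) \
        __attribute__((format(printf, fmtArg, varArgs)))
    #endif

The #undef at the end of the header keeps the macro from leaking into every file that includes exception.h.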
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c Fri Aug 8 16:26:45 2014
@@ -49,18 +49,18 @@ int expectFileStats(hdfsFile file,
stats->totalShortCircuitBytesRead,
stats->totalZeroCopyBytesRead);
if (expectedTotalBytesRead != UINT64_MAX) {
- EXPECT_INT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
+ EXPECT_UINT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
}
if (expectedTotalLocalBytesRead != UINT64_MAX) {
- EXPECT_INT64_EQ(expectedTotalLocalBytesRead,
+ EXPECT_UINT64_EQ(expectedTotalLocalBytesRead,
stats->totalLocalBytesRead);
}
if (expectedTotalShortCircuitBytesRead != UINT64_MAX) {
- EXPECT_INT64_EQ(expectedTotalShortCircuitBytesRead,
+ EXPECT_UINT64_EQ(expectedTotalShortCircuitBytesRead,
stats->totalShortCircuitBytesRead);
}
if (expectedTotalZeroCopyBytesRead != UINT64_MAX) {
- EXPECT_INT64_EQ(expectedTotalZeroCopyBytesRead,
+ EXPECT_UINT64_EQ(expectedTotalZeroCopyBytesRead,
stats->totalZeroCopyBytesRead);
}
hdfsFileFreeReadStatistics(stats);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h Fri Aug 8 16:26:45 2014
@@ -126,6 +126,18 @@ struct hdfsFile_internal;
} \
} while (0);
+#define EXPECT_UINT64_EQ(x, y) \
+ do { \
+ uint64_t __my_ret__ = y; \
+ int __my_errno__ = errno; \
+ if (__my_ret__ != (x)) { \
+ fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+ "value %"PRIu64" (errno: %d): expected %"PRIu64"\n", \
+ __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+ return -1; \
+ } \
+ } while (0);
+
#define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \
ret = expr; \
if (!ret) \
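EXPECT_UINT64_EQ mirrors the existing EXPECT_INT64_EQ but keeps the comparison unsigned, which is what the UINT64_MAX sentinel checks in expect.c need. A hypothetical test helper showing the calling convention (the macro returns -1 from the enclosing function on mismatch):

    #include "expect.h"

    #include <errno.h>    /* errno, read inside the macro */
    #include <inttypes.h> /* PRIu64, used inside the macro */
    #include <stdint.h>

    static int checkReadStats(uint64_t actualBytesRead)
    {
        EXPECT_UINT64_EQ(UINT64_C(1024), actualBytesRead);
        return 0; /* reached only if the expectation held */
    }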
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c Fri Aug 8 16:26:45 2014
@@ -19,7 +19,9 @@
#include "exception.h"
#include "hdfs.h"
#include "jni_helper.h"
+#include "platform.h"
+#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
@@ -63,9 +65,9 @@ static void hdfsFreeFileInfoEntry(hdfsFi
*/
enum hdfsStreamType
{
- UNINITIALIZED = 0,
- INPUT = 1,
- OUTPUT = 2,
+ HDFS_STREAM_UNINITIALIZED = 0,
+ HDFS_STREAM_INPUT = 1,
+ HDFS_STREAM_OUTPUT = 2,
};
/**
@@ -79,7 +81,7 @@ struct hdfsFile_internal {
int hdfsFileIsOpenForRead(hdfsFile file)
{
- return (file->type == INPUT);
+ return (file->type == HDFS_STREAM_INPUT);
}
int hdfsFileGetReadStatistics(hdfsFile file,
@@ -96,7 +98,7 @@ int hdfsFileGetReadStatistics(hdfsFile f
errno = EINTERNAL;
return -1;
}
- if (file->type != INPUT) {
+ if (file->type != HDFS_STREAM_INPUT) {
ret = EINVAL;
goto done;
}
@@ -180,7 +182,7 @@ void hdfsFileFreeReadStatistics(struct h
int hdfsFileIsOpenForWrite(hdfsFile file)
{
- return (file->type == OUTPUT);
+ return (file->type == HDFS_STREAM_OUTPUT);
}
int hdfsFileUsesDirectRead(hdfsFile file)
@@ -441,7 +443,7 @@ void hdfsBuilderSetKerbTicketCachePath(s
bld->kerbTicketCachePath = kerbTicketCachePath;
}
-hdfsFS hdfsConnect(const char* host, tPort port)
+hdfsFS hdfsConnect(const char *host, tPort port)
{
struct hdfsBuilder *bld = hdfsNewBuilder();
if (!bld)
@@ -452,7 +454,7 @@ hdfsFS hdfsConnect(const char* host, tPo
}
/** Always return a new FileSystem handle */
-hdfsFS hdfsConnectNewInstance(const char* host, tPort port)
+hdfsFS hdfsConnectNewInstance(const char *host, tPort port)
{
struct hdfsBuilder *bld = hdfsNewBuilder();
if (!bld)
@@ -463,7 +465,7 @@ hdfsFS hdfsConnectNewInstance(const char
return hdfsBuilderConnect(bld);
}
-hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user)
+hdfsFS hdfsConnectAsUser(const char *host, tPort port, const char *user)
{
struct hdfsBuilder *bld = hdfsNewBuilder();
if (!bld)
@@ -475,7 +477,7 @@ hdfsFS hdfsConnectAsUser(const char* hos
}
/** Always return a new FileSystem handle */
-hdfsFS hdfsConnectAsUserNewInstance(const char* host, tPort port,
+hdfsFS hdfsConnectAsUserNewInstance(const char *host, tPort port,
const char *user)
{
struct hdfsBuilder *bld = hdfsNewBuilder();
@@ -518,7 +520,7 @@ static int calcEffectiveURI(struct hdfsB
if (bld->port == 0) {
suffix[0] = '\0';
} else {
- lastColon = rindex(bld->nn, ':');
+ lastColon = strrchr(bld->nn, ':');
if (lastColon && (strspn(lastColon + 1, "0123456789") ==
strlen(lastColon + 1))) {
fprintf(stderr, "port %d was given, but URI '%s' already "
@@ -737,6 +739,8 @@ int hdfsDisconnect(hdfsFS fs)
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
int ret;
+ jobject jFS;
+ jthrowable jthr;
if (env == NULL) {
errno = EINTERNAL;
@@ -744,7 +748,7 @@ int hdfsDisconnect(hdfsFS fs)
}
//Parameters
- jobject jFS = (jobject)fs;
+ jFS = (jobject)fs;
//Sanity check
if (fs == NULL) {
@@ -752,7 +756,7 @@ int hdfsDisconnect(hdfsFS fs)
return -1;
}
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
+ jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
"close", "()V");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -792,7 +796,7 @@ static jthrowable getDefaultBlockSize(JN
return NULL;
}
-hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
+hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags,
int bufferSize, short replication, tSize blockSize)
{
/*
@@ -801,15 +805,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
FSData{Input|Output}Stream f{is|os} = fs.create(f);
return f{is|os};
*/
- /* Get the JNIEnv* corresponding to current thread */
- JNIEnv* env = getJNIEnv();
int accmode = flags & O_ACCMODE;
-
- if (env == NULL) {
- errno = EINTERNAL;
- return NULL;
- }
-
jstring jStrBufferSize = NULL, jStrReplication = NULL;
jobject jConfiguration = NULL, jPath = NULL, jFile = NULL;
jobject jFS = (jobject)fs;
@@ -817,6 +813,20 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
jvalue jVal;
hdfsFile file = NULL;
int ret;
+ jint jBufferSize = bufferSize;
+ jshort jReplication = replication;
+
+ /* The hadoop java api/signature */
+ const char *method = NULL;
+ const char *signature = NULL;
+
+ /* Get the JNIEnv* corresponding to current thread */
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return NULL;
+ }
+
if (accmode == O_RDONLY || accmode == O_WRONLY) {
/* yay */
@@ -834,10 +844,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
}
- /* The hadoop java api/signature */
- const char* method = NULL;
- const char* signature = NULL;
-
if (accmode == O_RDONLY) {
method = "open";
signature = JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM));
@@ -867,8 +873,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
}
jConfiguration = jVal.l;
- jint jBufferSize = bufferSize;
- jshort jReplication = replication;
jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
if (!jStrBufferSize) {
ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, "OOM");
@@ -905,7 +909,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
path);
goto done;
}
- jReplication = jVal.i;
+ jReplication = (jshort)jVal.i;
}
}
@@ -955,7 +959,8 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
"hdfsOpenFile(%s): NewGlobalRef", path);
goto done;
}
- file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
+ file->type = (((flags & O_WRONLY) == 0) ? HDFS_STREAM_INPUT :
+ HDFS_STREAM_OUTPUT);
file->flags = 0;
if ((flags & O_WRONLY) == 0) {
@@ -998,31 +1003,33 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile fi
// JAVA EQUIVALENT:
// file.close
+ //The interface whose 'close' method is to be called
+ const char *interface;
+ const char *interfaceShortName;
+
+ //Caught exception
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
-
if (env == NULL) {
errno = EINTERNAL;
return -1;
}
- //Caught exception
- jthrowable jthr;
-
//Sanity check
- if (!file || file->type == UNINITIALIZED) {
+ if (!file || file->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
- //The interface whose 'close' method to be called
- const char* interface = (file->type == INPUT) ?
+ interface = (file->type == HDFS_STREAM_INPUT) ?
HADOOP_ISTRM : HADOOP_OSTRM;
jthr = invokeMethod(env, NULL, INSTANCE, file->file, interface,
"close", "()V");
if (jthr) {
- const char *interfaceShortName = (file->type == INPUT) ?
+ interfaceShortName = (file->type == HDFS_STREAM_INPUT) ?
"FSDataInputStream" : "FSDataOutputStream";
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"%s#close", interfaceShortName);
@@ -1044,15 +1051,15 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile fi
int hdfsExists(hdfsFS fs, const char *path)
{
JNIEnv *env = getJNIEnv();
- if (env == NULL) {
- errno = EINTERNAL;
- return -1;
- }
-
jobject jPath;
jvalue jVal;
jobject jFS = (jobject)fs;
jthrowable jthr;
+
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return -1;
+ }
if (path == NULL) {
errno = EINVAL;
@@ -1088,13 +1095,13 @@ static int readPrepare(JNIEnv* env, hdfs
*jInputStream = (jobject)(f ? f->file : NULL);
//Sanity check
- if (!f || f->type == UNINITIALIZED) {
+ if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
//Error checking... make sure that this file is 'readable'
- if (f->type != INPUT) {
+ if (f->type != HDFS_STREAM_INPUT) {
fprintf(stderr, "Cannot read from a non-InputStream object!\n");
errno = EINVAL;
return -1;
@@ -1105,6 +1112,13 @@ static int readPrepare(JNIEnv* env, hdfs
tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
{
+ jobject jInputStream;
+ jbyteArray jbRarray;
+ jint noReadBytes = length;
+ jvalue jVal;
+ jthrowable jthr;
+ JNIEnv* env;
+
if (length == 0) {
return 0;
} else if (length < 0) {
@@ -1120,23 +1134,17 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, vo
// fis.read(bR);
//Get the JNIEnv* corresponding to current thread
- JNIEnv* env = getJNIEnv();
+ env = getJNIEnv();
if (env == NULL) {
errno = EINTERNAL;
return -1;
}
//Parameters
- jobject jInputStream;
if (readPrepare(env, fs, f, &jInputStream) == -1) {
return -1;
}
- jbyteArray jbRarray;
- jint noReadBytes = length;
- jvalue jVal;
- jthrowable jthr;
-
//Read the requisite bytes
jbRarray = (*env)->NewByteArray(env, length);
if (!jbRarray) {
@@ -1179,6 +1187,11 @@ tSize readDirect(hdfsFS fs, hdfsFile f,
// ByteBuffer bbuffer = ByteBuffer.allocateDirect(length) // wraps C buffer
// fis.read(bbuffer);
+ jobject jInputStream;
+ jvalue jVal;
+ jthrowable jthr;
+ jobject bb;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1186,16 +1199,12 @@ tSize readDirect(hdfsFS fs, hdfsFile f,
return -1;
}
- jobject jInputStream;
if (readPrepare(env, fs, f, &jInputStream) == -1) {
return -1;
}
- jvalue jVal;
- jthrowable jthr;
-
//Read the requisite bytes
- jobject bb = (*env)->NewDirectByteBuffer(env, buffer, length);
+ bb = (*env)->NewDirectByteBuffer(env, buffer, length);
if (bb == NULL) {
errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
"readDirect: NewDirectByteBuffer");
@@ -1227,7 +1236,7 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, t
errno = EINVAL;
return -1;
}
- if (!f || f->type == UNINITIALIZED) {
+ if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
@@ -1239,7 +1248,7 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, t
}
//Error checking... make sure that this file is 'readable'
- if (f->type != INPUT) {
+ if (f->type != HDFS_STREAM_INPUT) {
fprintf(stderr, "Cannot read from a non-InputStream object!\n");
errno = EINVAL;
return -1;
@@ -1287,6 +1296,10 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, c
// byte b[] = str.getBytes();
// fso.write(b);
+ jobject jOutputStream;
+ jbyteArray jbWarray;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1295,14 +1308,12 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, c
}
//Sanity check
- if (!f || f->type == UNINITIALIZED) {
+ if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
- jobject jOutputStream = f->file;
- jbyteArray jbWarray;
- jthrowable jthr;
+ jOutputStream = f->file;
if (length < 0) {
errno = EINVAL;
@@ -1310,7 +1321,7 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, c
}
//Error checking... make sure that this file is 'writable'
- if (f->type != OUTPUT) {
+ if (f->type != HDFS_STREAM_OUTPUT) {
fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
errno = EINVAL;
return -1;
@@ -1355,6 +1366,9 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOff
// JAVA EQUIVALENT
// fis.seek(pos);
+ jobject jInputStream;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1363,13 +1377,13 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOff
}
//Sanity check
- if (!f || f->type != INPUT) {
+ if (!f || f->type != HDFS_STREAM_INPUT) {
errno = EBADF;
return -1;
}
- jobject jInputStream = f->file;
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jInputStream,
+ jInputStream = f->file;
+ jthr = invokeMethod(env, NULL, INSTANCE, jInputStream,
HADOOP_ISTRM, "seek", "(J)V", desiredPos);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1387,6 +1401,11 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile f)
// JAVA EQUIVALENT
// pos = f.getPos();
+ jobject jStream;
+ const char *interface;
+ jvalue jVal;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1395,22 +1414,21 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type == UNINITIALIZED) {
+ if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
//Parameters
- jobject jStream = f->file;
- const char* interface = (f->type == INPUT) ?
+ jStream = f->file;
+ interface = (f->type == HDFS_STREAM_INPUT) ?
HADOOP_ISTRM : HADOOP_OSTRM;
- jvalue jVal;
- jthrowable jthr = invokeMethod(env, &jVal, INSTANCE, jStream,
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStream,
interface, "getPos", "()J");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsTell: %s#getPos",
- ((f->type == INPUT) ? "FSDataInputStream" :
+ ((f->type == HDFS_STREAM_INPUT) ? "FSDataInputStream" :
"FSDataOutputStream"));
return -1;
}
@@ -1422,6 +1440,8 @@ int hdfsFlush(hdfsFS fs, hdfsFile f)
// JAVA EQUIVALENT
// fos.flush();
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1430,11 +1450,11 @@ int hdfsFlush(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type != OUTPUT) {
+ if (!f || f->type != HDFS_STREAM_OUTPUT) {
errno = EBADF;
return -1;
}
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, f->file,
+ jthr = invokeMethod(env, NULL, INSTANCE, f->file,
HADOOP_OSTRM, "flush", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1446,6 +1466,9 @@ int hdfsFlush(hdfsFS fs, hdfsFile f)
int hdfsHFlush(hdfsFS fs, hdfsFile f)
{
+ jobject jOutputStream;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1454,13 +1477,13 @@ int hdfsHFlush(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type != OUTPUT) {
+ if (!f || f->type != HDFS_STREAM_OUTPUT) {
errno = EBADF;
return -1;
}
- jobject jOutputStream = f->file;
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
+ jOutputStream = f->file;
+ jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
HADOOP_OSTRM, "hflush", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1472,6 +1495,9 @@ int hdfsHFlush(hdfsFS fs, hdfsFile f)
int hdfsHSync(hdfsFS fs, hdfsFile f)
{
+ jobject jOutputStream;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1480,13 +1506,13 @@ int hdfsHSync(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type != OUTPUT) {
+ if (!f || f->type != HDFS_STREAM_OUTPUT) {
errno = EBADF;
return -1;
}
- jobject jOutputStream = f->file;
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
+ jOutputStream = f->file;
+ jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
HADOOP_OSTRM, "hsync", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1501,6 +1527,10 @@ int hdfsAvailable(hdfsFS fs, hdfsFile f)
// JAVA EQUIVALENT
// fis.available();
+ jobject jInputStream;
+ jvalue jVal;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1509,15 +1539,14 @@ int hdfsAvailable(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type != INPUT) {
+ if (!f || f->type != HDFS_STREAM_INPUT) {
errno = EBADF;
return -1;
}
//Parameters
- jobject jInputStream = f->file;
- jvalue jVal;
- jthrowable jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
+ jInputStream = f->file;
+ jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
HADOOP_ISTRM, "available", "()I");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1527,20 +1556,13 @@ int hdfsAvailable(hdfsFS fs, hdfsFile f)
return jVal.i;
}
-static int hdfsCopyImpl(hdfsFS srcFS, const char* src, hdfsFS dstFS,
- const char* dst, jboolean deleteSource)
+static int hdfsCopyImpl(hdfsFS srcFS, const char *src, hdfsFS dstFS,
+ const char *dst, jboolean deleteSource)
{
//JAVA EQUIVALENT
// FileUtil#copy(srcFS, srcPath, dstFS, dstPath,
// deleteSource = false, conf)
- //Get the JNIEnv* corresponding to current thread
- JNIEnv* env = getJNIEnv();
- if (env == NULL) {
- errno = EINTERNAL;
- return -1;
- }
-
//Parameters
jobject jSrcFS = (jobject)srcFS;
jobject jDstFS = (jobject)dstFS;
@@ -1549,6 +1571,13 @@ static int hdfsCopyImpl(hdfsFS srcFS, co
jvalue jVal;
int ret;
+ //Get the JNIEnv* corresponding to current thread
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return -1;
+ }
+
jthr = constructNewObjectOfPath(env, src, &jSrcPath);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1603,22 +1632,28 @@ done:
return 0;
}
-int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+int hdfsCopy(hdfsFS srcFS, const char *src, hdfsFS dstFS, const char *dst)
{
return hdfsCopyImpl(srcFS, src, dstFS, dst, 0);
}
-int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+int hdfsMove(hdfsFS srcFS, const char *src, hdfsFS dstFS, const char *dst)
{
return hdfsCopyImpl(srcFS, src, dstFS, dst, 1);
}
-int hdfsDelete(hdfsFS fs, const char* path, int recursive)
+int hdfsDelete(hdfsFS fs, const char *path, int recursive)
{
// JAVA EQUIVALENT:
// Path p = new Path(path);
// bool retval = fs.delete(p, recursive);
+ jobject jFS = (jobject)fs;
+ jthrowable jthr;
+ jobject jPath;
+ jvalue jVal;
+ jboolean jRecursive;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1626,18 +1661,13 @@ int hdfsDelete(hdfsFS fs, const char* pa
return -1;
}
- jobject jFS = (jobject)fs;
- jthrowable jthr;
- jobject jPath;
- jvalue jVal;
-
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsDelete(path=%s): constructNewObjectOfPath", path);
return -1;
}
- jboolean jRecursive = recursive ? JNI_TRUE : JNI_FALSE;
+ jRecursive = recursive ? JNI_TRUE : JNI_FALSE;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"delete", "(Lorg/apache/hadoop/fs/Path;Z)Z",
jPath, jRecursive);
@@ -1657,13 +1687,19 @@ int hdfsDelete(hdfsFS fs, const char* pa
-int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
+int hdfsRename(hdfsFS fs, const char *oldPath, const char *newPath)
{
// JAVA EQUIVALENT:
// Path old = new Path(oldPath);
// Path new = new Path(newPath);
// fs.rename(old, new);
+ jobject jFS = (jobject)fs;
+ jthrowable jthr;
+ jobject jOldPath = NULL, jNewPath = NULL;
+ int ret = -1;
+ jvalue jVal;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1671,12 +1707,6 @@ int hdfsRename(hdfsFS fs, const char* ol
return -1;
}
- jobject jFS = (jobject)fs;
- jthrowable jthr;
- jobject jOldPath = NULL, jNewPath = NULL;
- int ret = -1;
- jvalue jVal;
-
jthr = constructNewObjectOfPath(env, oldPath, &jOldPath );
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1721,13 +1751,6 @@ char* hdfsGetWorkingDirectory(hdfsFS fs,
// Path p = fs.getWorkingDirectory();
// return p.toString()
- //Get the JNIEnv* corresponding to current thread
- JNIEnv* env = getJNIEnv();
- if (env == NULL) {
- errno = EINTERNAL;
- return NULL;
- }
-
jobject jPath = NULL;
jstring jPathString = NULL;
jobject jFS = (jobject)fs;
@@ -1736,6 +1759,13 @@ char* hdfsGetWorkingDirectory(hdfsFS fs,
int ret;
const char *jPathChars = NULL;
+ //Get the JNIEnv* corresponding to current thread
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return NULL;
+ }
+
//FileSystem#getWorkingDirectory()
jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
HADOOP_FS, "getWorkingDirectory",
@@ -1794,11 +1824,15 @@ done:
-int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
+int hdfsSetWorkingDirectory(hdfsFS fs, const char *path)
{
// JAVA EQUIVALENT:
// fs.setWorkingDirectory(Path(path));
+ jobject jFS = (jobject)fs;
+ jthrowable jthr;
+ jobject jPath;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1806,10 +1840,6 @@ int hdfsSetWorkingDirectory(hdfsFS fs, c
return -1;
}
- jobject jFS = (jobject)fs;
- jthrowable jthr;
- jobject jPath;
-
//Create an object of org.apache.hadoop.fs.Path
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
@@ -1835,11 +1865,16 @@ int hdfsSetWorkingDirectory(hdfsFS fs, c
-int hdfsCreateDirectory(hdfsFS fs, const char* path)
+int hdfsCreateDirectory(hdfsFS fs, const char *path)
{
// JAVA EQUIVALENT:
// fs.mkdirs(new Path(path));
+ jobject jFS = (jobject)fs;
+ jobject jPath;
+ jthrowable jthr;
+ jvalue jVal;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1847,10 +1882,6 @@ int hdfsCreateDirectory(hdfsFS fs, const
return -1;
}
- jobject jFS = (jobject)fs;
- jobject jPath;
- jthrowable jthr;
-
//Create an object of org.apache.hadoop.fs.Path
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
@@ -1860,7 +1891,6 @@ int hdfsCreateDirectory(hdfsFS fs, const
}
//Create the directory
- jvalue jVal;
jVal.z = 0;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
@@ -1886,11 +1916,16 @@ int hdfsCreateDirectory(hdfsFS fs, const
}
-int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
+int hdfsSetReplication(hdfsFS fs, const char *path, int16_t replication)
{
// JAVA EQUIVALENT:
// fs.setReplication(new Path(path), replication);
+ jobject jFS = (jobject)fs;
+ jthrowable jthr;
+ jobject jPath;
+ jvalue jVal;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1898,11 +1933,7 @@ int hdfsSetReplication(hdfsFS fs, const
return -1;
}
- jobject jFS = (jobject)fs;
- jthrowable jthr;
-
//Create an object of org.apache.hadoop.fs.Path
- jobject jPath;
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1911,7 +1942,6 @@ int hdfsSetReplication(hdfsFS fs, const
}
//Set the replication
- jvalue jVal;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
jPath, replication);
@@ -1932,11 +1962,17 @@ int hdfsSetReplication(hdfsFS fs, const
return 0;
}
-int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
+int hdfsChown(hdfsFS fs, const char *path, const char *owner, const char *group)
{
// JAVA EQUIVALENT:
// fs.setOwner(path, owner, group)
+ jobject jFS = (jobject)fs;
+ jobject jPath = NULL;
+ jstring jOwner = NULL, jGroup = NULL;
+ jthrowable jthr;
+ int ret;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1948,12 +1984,6 @@ int hdfsChown(hdfsFS fs, const char* pat
return 0;
}
- jobject jFS = (jobject)fs;
- jobject jPath = NULL;
- jstring jOwner = NULL, jGroup = NULL;
- jthrowable jthr;
- int ret;
-
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -2001,12 +2031,17 @@ done:
return 0;
}
-int hdfsChmod(hdfsFS fs, const char* path, short mode)
+int hdfsChmod(hdfsFS fs, const char *path, short mode)
{
int ret;
// JAVA EQUIVALENT:
// fs.setPermission(path, FsPermission)
+ jthrowable jthr;
+ jobject jPath = NULL, jPermObj = NULL;
+ jobject jFS = (jobject)fs;
+ jshort jmode = mode;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -2014,12 +2049,7 @@ int hdfsChmod(hdfsFS fs, const char* pat
return -1;
}
- jthrowable jthr;
- jobject jPath = NULL, jPermObj = NULL;
- jobject jFS = (jobject)fs;
-
// construct jPerm = FsPermission.createImmutable(short mode);
- jshort jmode = mode;
jthr = constructNewObjectOfClass(env, &jPermObj,
HADOOP_FSPERM,"(S)V",jmode);
if (jthr) {
@@ -2061,11 +2091,16 @@ done:
return 0;
}
-int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
+int hdfsUtime(hdfsFS fs, const char *path, tTime mtime, tTime atime)
{
// JAVA EQUIVALENT:
// fs.setTimes(src, mtime, atime)
+
jthrowable jthr;
+ jobject jFS = (jobject)fs;
+ jobject jPath;
+ static const tTime NO_CHANGE = -1;
+ jlong jmtime, jatime;
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
@@ -2074,10 +2109,7 @@ int hdfsUtime(hdfsFS fs, const char* pat
return -1;
}
- jobject jFS = (jobject)fs;
-
//Create an object of org.apache.hadoop.fs.Path
- jobject jPath;
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -2085,9 +2117,8 @@ int hdfsUtime(hdfsFS fs, const char* pat
return -1;
}
- const tTime NO_CHANGE = -1;
- jlong jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
- jlong jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
+ jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
+ jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
"setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
@@ -2397,7 +2428,7 @@ struct hadoopRzBuffer* hadoopReadZero(hd
errno = EINTERNAL;
return NULL;
}
- if (file->type != INPUT) {
+ if (file->type != HDFS_STREAM_INPUT) {
fputs("Cannot read from a non-InputStream object!\n", stderr);
ret = EINVAL;
goto done;
@@ -2495,10 +2526,12 @@ void hadoopRzBufferFree(hdfsFile file, s
}
char***
-hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
+hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
{
// JAVA EQUIVALENT:
// fs.getFileBlockLocations(new Path(path), start, length);
+
+ jobject jFS = (jobject)fs;
jthrowable jthr;
jobject jPath = NULL;
jobject jFileStatus = NULL;
@@ -2508,6 +2541,9 @@ hdfsGetHosts(hdfsFS fs, const char* path
char*** blockHosts = NULL;
int i, j, ret;
jsize jNumFileBlocks = 0;
+ jobject jFileBlock;
+ jsize jNumBlockHosts;
+ const char *hostName;
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
@@ -2516,8 +2552,6 @@ hdfsGetHosts(hdfsFS fs, const char* path
return NULL;
}
- jobject jFS = (jobject)fs;
-
//Create an object of org.apache.hadoop.fs.Path
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
@@ -2567,7 +2601,7 @@ hdfsGetHosts(hdfsFS fs, const char* path
//Now parse each block to get hostnames
for (i = 0; i < jNumFileBlocks; ++i) {
- jobject jFileBlock =
+ jFileBlock =
(*env)->GetObjectArrayElement(env, jBlockLocations, i);
if (!jFileBlock) {
ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
@@ -2593,7 +2627,7 @@ hdfsGetHosts(hdfsFS fs, const char* path
goto done;
}
//Figure out the number of hosts in jFileBlockHosts, and allocate the memory
- jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
+ jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
blockHosts[i] = calloc(jNumBlockHosts + 1, sizeof(char*));
if (!blockHosts[i]) {
ret = ENOMEM;
@@ -2601,7 +2635,6 @@ hdfsGetHosts(hdfsFS fs, const char* path
}
//Now parse each hostname
- const char *hostName;
for (j = 0; j < jNumBlockHosts; ++j) {
jHost = (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
if (!jHost) {
@@ -2669,6 +2702,10 @@ tOffset hdfsGetDefaultBlockSize(hdfsFS f
// JAVA EQUIVALENT:
// fs.getDefaultBlockSize();
+ jobject jFS = (jobject)fs;
+ jvalue jVal;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -2676,11 +2713,7 @@ tOffset hdfsGetDefaultBlockSize(hdfsFS f
return -1;
}
- jobject jFS = (jobject)fs;
-
//FileSystem#getDefaultBlockSize()
- jvalue jVal;
- jthrowable jthr;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"getDefaultBlockSize", "()J");
if (jthr) {
@@ -2732,6 +2765,11 @@ tOffset hdfsGetCapacity(hdfsFS fs)
// FsStatus fss = fs.getStatus();
// return fss.getCapacity();
+ jobject jFS = (jobject)fs;
+ jvalue jVal;
+ jthrowable jthr;
+ jobject fss;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -2739,11 +2777,7 @@ tOffset hdfsGetCapacity(hdfsFS fs)
return -1;
}
- jobject jFS = (jobject)fs;
-
//FileSystem#getStatus
- jvalue jVal;
- jthrowable jthr;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
if (jthr) {
@@ -2751,7 +2785,7 @@ tOffset hdfsGetCapacity(hdfsFS fs)
"hdfsGetCapacity: FileSystem#getStatus");
return -1;
}
- jobject fss = (jobject)jVal.l;
+ fss = (jobject)jVal.l;
jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
"getCapacity", "()J");
destroyLocalReference(env, fss);
@@ -2771,6 +2805,11 @@ tOffset hdfsGetUsed(hdfsFS fs)
// FsStatus fss = fs.getStatus();
// return fss.getUsed();
+ jobject jFS = (jobject)fs;
+ jvalue jVal;
+ jthrowable jthr;
+ jobject fss;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -2778,11 +2817,7 @@ tOffset hdfsGetUsed(hdfsFS fs)
return -1;
}
- jobject jFS = (jobject)fs;
-
//FileSystem#getStatus
- jvalue jVal;
- jthrowable jthr;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
if (jthr) {
@@ -2790,7 +2825,7 @@ tOffset hdfsGetUsed(hdfsFS fs)
"hdfsGetUsed: FileSystem#getStatus");
return -1;
}
- jobject fss = (jobject)jVal.l;
+ fss = (jobject)jVal.l;
jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
"getUsed", "()J");
destroyLocalReference(env, fss);
@@ -2814,6 +2849,9 @@ getFileInfoFromStat(JNIEnv *env, jobject
jstring jUserName = NULL;
jstring jGroupName = NULL;
jobject jPermission = NULL;
+ const char *cPathName;
+ const char *cUserName;
+ const char *cGroupName;
jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
HADOOP_STAT, "isDir", "()Z");
@@ -2869,7 +2907,7 @@ getFileInfoFromStat(JNIEnv *env, jobject
if (jthr)
goto done;
jPathName = jVal.l;
- const char *cPathName =
+ cPathName =
(const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL));
if (!cPathName) {
jthr = getPendingExceptionAndClear(env);
@@ -2882,7 +2920,7 @@ getFileInfoFromStat(JNIEnv *env, jobject
if (jthr)
goto done;
jUserName = jVal.l;
- const char* cUserName =
+ cUserName =
(const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL));
if (!cUserName) {
jthr = getPendingExceptionAndClear(env);
@@ -2891,7 +2929,6 @@ getFileInfoFromStat(JNIEnv *env, jobject
fileInfo->mOwner = strdup(cUserName);
(*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
- const char* cGroupName;
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
"getGroup", "()Ljava/lang/String;");
if (jthr)
@@ -2978,13 +3015,15 @@ getFileInfo(JNIEnv *env, jobject jFS, jo
-hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
+hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char *path, int *numEntries)
{
// JAVA EQUIVALENT:
// Path p(path);
// Path[] pathList = fs.listPaths(p)
// foreach path in pathList
// getFileInfo(path)
+
+ jobject jFS = (jobject)fs;
jthrowable jthr;
jobject jPath = NULL;
hdfsFileInfo *pathList = NULL;
@@ -2992,6 +3031,8 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS f
jvalue jVal;
jsize jPathListSize = 0;
int ret;
+ jsize i;
+ jobject tmpStat;
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
@@ -3000,8 +3041,6 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS f
return NULL;
}
- jobject jFS = (jobject)fs;
-
//Create an object of org.apache.hadoop.fs.Path
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
@@ -3037,8 +3076,6 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS f
}
//Save path information in pathList
- jsize i;
- jobject tmpStat;
for (i=0; i < jPathListSize; ++i) {
tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
if (!tmpStat) {
@@ -3073,7 +3110,7 @@ done:
-hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
+hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char *path)
{
// JAVA EQUIVALENT:
// File f(path);
@@ -3082,6 +3119,11 @@ hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs,
// fs.getLength(f)
// f.getPath()
+ jobject jFS = (jobject)fs;
+ jobject jPath;
+ jthrowable jthr;
+ hdfsFileInfo *fileInfo;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -3089,17 +3131,13 @@ hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs,
return NULL;
}
- jobject jFS = (jobject)fs;
-
//Create an object of org.apache.hadoop.fs.Path
- jobject jPath;
- jthrowable jthr = constructNewObjectOfPath(env, path, &jPath);
+ jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetPathInfo(%s): constructNewObjectOfPath", path);
return NULL;
}
- hdfsFileInfo *fileInfo;
jthr = getFileInfo(env, jFS, jPath, &fileInfo);
destroyLocalReference(env, jPath);
if (jthr) {
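
[Editor's sketch] The hdfs.c hunks above all apply one mechanical pattern: every local declaration is hoisted above the first statement of its function, so the file compiles as C89, which the Visual Studio C compiler requires. A minimal sketch of the pattern, using hypothetical names (demo_before/demo_after are illustrative, not from the patch):

    /* Declaration after a statement: legal C99, rejected by MSVC in C mode. */
    static int demo_before(int fd)
    {
        if (fd < 0) {
            return -1;
        }
        int rc = fd * 2;   /* declaration follows a statement */
        return rc;
    }

    /* Same function with the declaration hoisted: valid C89. */
    static int demo_after(int fd)
    {
        int rc;

        if (fd < 0) {
            return -1;
        }
        rc = fd * 2;
        return rc;
    }
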
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c Fri Aug 8 16:26:45 2014
@@ -19,20 +19,18 @@
#include "config.h"
#include "exception.h"
#include "jni_helper.h"
+#include "platform.h"
+#include "common/htable.h"
+#include "os/mutexes.h"
+#include "os/thread_local_storage.h"
#include <stdio.h>
#include <string.h>
-static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
-static volatile int hashTableInited = 0;
-
-#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
-#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
-
+static struct htable *gClassRefHTable = NULL;
/** The Native return types that methods could return */
-#define VOID 'V'
+#define JVOID 'V'
#define JOBJECT 'L'
#define JARRAYOBJECT '['
#define JBOOLEAN 'Z'
@@ -51,40 +49,10 @@ static volatile int hashTableInited = 0;
*/
#define MAX_HASH_TABLE_ELEM 4096
-/** Key that allows us to retrieve thread-local storage */
-static pthread_key_t gTlsKey;
-
-/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
-static int gTlsKeyInitialized = 0;
-
-/** Pthreads thread-local storage for each library thread. */
-struct hdfsTls {
- JNIEnv *env;
-};
-
/**
- * The function that is called whenever a thread with libhdfs thread local data
- * is destroyed.
- *
- * @param v The thread-local data
+ * Length of buffer for retrieving created JVMs. (We only ever create one.)
*/
-static void hdfsThreadDestructor(void *v)
-{
- struct hdfsTls *tls = v;
- JavaVM *vm;
- JNIEnv *env = tls->env;
- jint ret;
-
- ret = (*env)->GetJavaVM(env, &vm);
- if (ret) {
- fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with "
- "error %d\n", ret);
- (*env)->ExceptionDescribe(env);
- } else {
- (*vm)->DetachCurrentThread(vm);
- }
- free(tls);
-}
+#define VM_BUF_LENGTH 1
void destroyLocalReference(JNIEnv *env, jobject jObject)
{
@@ -138,67 +106,6 @@ jthrowable newCStr(JNIEnv *env, jstring
return NULL;
}
-static int hashTableInit(void)
-{
- if (!hashTableInited) {
- LOCK_HASH_TABLE();
- if (!hashTableInited) {
- if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
- fprintf(stderr, "error creating hashtable, <%d>: %s\n",
- errno, strerror(errno));
- UNLOCK_HASH_TABLE();
- return 0;
- }
- hashTableInited = 1;
- }
- UNLOCK_HASH_TABLE();
- }
- return 1;
-}
-
-
-static int insertEntryIntoTable(const char *key, void *data)
-{
- ENTRY e, *ep;
- if (key == NULL || data == NULL) {
- return 0;
- }
- if (! hashTableInit()) {
- return -1;
- }
- e.data = data;
- e.key = (char*)key;
- LOCK_HASH_TABLE();
- ep = hsearch(e, ENTER);
- UNLOCK_HASH_TABLE();
- if (ep == NULL) {
- fprintf(stderr, "warn adding key (%s) to hash table, <%d>: %s\n",
- key, errno, strerror(errno));
- }
- return 0;
-}
-
-
-
-static void* searchEntryFromTable(const char *key)
-{
- ENTRY e,*ep;
- if (key == NULL) {
- return NULL;
- }
- hashTableInit();
- e.key = (char*)key;
- LOCK_HASH_TABLE();
- ep = hsearch(e, FIND);
- UNLOCK_HASH_TABLE();
- if (ep != NULL) {
- return ep->data;
- }
- return NULL;
-}
-
-
-
jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
jobject instObj, const char *className,
const char *methName, const char *methSignature, ...)
@@ -235,7 +142,7 @@ jthrowable invokeMethod(JNIEnv *env, jva
}
retval->l = jobj;
}
- else if (returnType == VOID) {
+ else if (returnType == JVOID) {
if (methType == STATIC) {
(*env)->CallStaticVoidMethodV(env, cls, mid, args);
}
@@ -325,11 +232,11 @@ jthrowable methodIdFromClass(const char
{
jclass cls;
jthrowable jthr;
+ jmethodID mid = 0;
jthr = globalClassReference(className, env, &cls);
if (jthr)
return jthr;
- jmethodID mid = 0;
jthr = validateMethodType(env, methType);
if (jthr)
return jthr;
@@ -350,25 +257,50 @@ jthrowable methodIdFromClass(const char
jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
{
- jclass clsLocalRef;
- jclass cls = searchEntryFromTable(className);
- if (cls) {
- *out = cls;
- return NULL;
+ jthrowable jthr = NULL;
+ jclass local_clazz = NULL;
+ jclass clazz = NULL;
+ int ret;
+
+ mutexLock(&hdfsHashMutex);
+ if (!gClassRefHTable) {
+ gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
+ ht_compare_string);
+ if (!gClassRefHTable) {
+ jthr = newRuntimeError(env, "htable_alloc failed\n");
+ goto done;
+ }
}
- clsLocalRef = (*env)->FindClass(env,className);
- if (clsLocalRef == NULL) {
- return getPendingExceptionAndClear(env);
+ clazz = htable_get(gClassRefHTable, className);
+ if (clazz) {
+ *out = clazz;
+ goto done;
}
- cls = (*env)->NewGlobalRef(env, clsLocalRef);
- if (cls == NULL) {
- (*env)->DeleteLocalRef(env, clsLocalRef);
- return getPendingExceptionAndClear(env);
+ local_clazz = (*env)->FindClass(env,className);
+ if (!local_clazz) {
+ jthr = getPendingExceptionAndClear(env);
+ goto done;
}
- (*env)->DeleteLocalRef(env, clsLocalRef);
- insertEntryIntoTable(className, cls);
- *out = cls;
- return NULL;
+ clazz = (*env)->NewGlobalRef(env, local_clazz);
+ if (!clazz) {
+ jthr = getPendingExceptionAndClear(env);
+ goto done;
+ }
+ ret = htable_put(gClassRefHTable, (void*)className, clazz);
+ if (ret) {
+ jthr = newRuntimeError(env, "htable_put failed with error "
+ "code %d\n", ret);
+ goto done;
+ }
+ *out = clazz;
+ jthr = NULL;
+done:
+ mutexUnlock(&hdfsHashMutex);
+ (*env)->DeleteLocalRef(env, local_clazz);
+ if (jthr && clazz) {
+ (*env)->DeleteGlobalRef(env, clazz);
+ }
+ return jthr;
}
jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
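
[Editor's sketch] The globalClassReference() rewrite above is the first caller of the new common/htable API. A minimal usage sketch, assuming only the calls visible in that hunk (htable_alloc, htable_get, htable_put, ht_hash_string, ht_compare_string); anything else about the API, such as a destructor, is not shown in this diff and is left out here:

    #include <stdio.h>
    #include "common/htable.h"   /* header added by this patch */

    static void htable_demo(void)
    {
        struct htable *table;
        static char value[] = "a global class reference would go here";
        int ret;

        /* Capacity, hash, and comparator as in globalClassReference(). */
        table = htable_alloc(16, ht_hash_string, ht_compare_string);
        if (!table) {
            fprintf(stderr, "htable_alloc failed\n");
            return;
        }
        ret = htable_put(table, (void*)"org/apache/hadoop/fs/Path", value);
        if (ret) {
            fprintf(stderr, "htable_put failed with error code %d\n", ret);
            return;
        }
        printf("lookup: %s\n",
            htable_get(table, "org/apache/hadoop/fs/Path") ? "hit" : "miss");
    }
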
@@ -436,14 +368,24 @@ done:
*/
static JNIEnv* getGlobalJNIEnv(void)
{
- const jsize vmBufLength = 1;
- JavaVM* vmBuf[vmBufLength];
+ JavaVM* vmBuf[VM_BUF_LENGTH];
JNIEnv *env;
jint rv = 0;
jint noVMs = 0;
jthrowable jthr;
+ char *hadoopClassPath;
+ const char *hadoopClassPathVMArg = "-Djava.class.path=";
+ size_t optHadoopClassPathLen;
+ char *optHadoopClassPath;
+ int noArgs = 1;
+ char *hadoopJvmArgs;
+ char jvmArgDelims[] = " ";
+ char *str, *token, *savePtr;
+ JavaVMInitArgs vm_args;
+ JavaVM *vm;
+ JavaVMOption *options;
- rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
+ rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), VM_BUF_LENGTH, &noVMs);
if (rv != 0) {
fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
return NULL;
@@ -451,23 +393,19 @@ static JNIEnv* getGlobalJNIEnv(void)
if (noVMs == 0) {
//Get the environment variables for initializing the JVM
- char *hadoopClassPath = getenv("CLASSPATH");
+ hadoopClassPath = getenv("CLASSPATH");
if (hadoopClassPath == NULL) {
fprintf(stderr, "Environment variable CLASSPATH not set!\n");
return NULL;
}
- char *hadoopClassPathVMArg = "-Djava.class.path=";
- size_t optHadoopClassPathLen = strlen(hadoopClassPath) +
+ optHadoopClassPathLen = strlen(hadoopClassPath) +
strlen(hadoopClassPathVMArg) + 1;
- char *optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
+ optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
snprintf(optHadoopClassPath, optHadoopClassPathLen,
"%s%s", hadoopClassPathVMArg, hadoopClassPath);
// Determine the # of LIBHDFS_OPTS args
- int noArgs = 1;
- char *hadoopJvmArgs = getenv("LIBHDFS_OPTS");
- char jvmArgDelims[] = " ";
- char *str, *token, *savePtr;
+ hadoopJvmArgs = getenv("LIBHDFS_OPTS");
if (hadoopJvmArgs != NULL) {
hadoopJvmArgs = strdup(hadoopJvmArgs);
for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
@@ -480,7 +418,12 @@ static JNIEnv* getGlobalJNIEnv(void)
}
// Now that we know the # of args, populate the options array
- JavaVMOption options[noArgs];
+ options = calloc(noArgs, sizeof(JavaVMOption));
+ if (!options) {
+ fputs("Call to calloc failed\n", stderr);
+ free(optHadoopClassPath);
+ return NULL;
+ }
options[0].optionString = optHadoopClassPath;
hadoopJvmArgs = getenv("LIBHDFS_OPTS");
if (hadoopJvmArgs != NULL) {
@@ -495,8 +438,6 @@ static JNIEnv* getGlobalJNIEnv(void)
}
//Create the VM
- JavaVMInitArgs vm_args;
- JavaVM *vm;
vm_args.version = JNI_VERSION_1_2;
vm_args.options = options;
vm_args.nOptions = noArgs;
@@ -508,6 +449,7 @@ static JNIEnv* getGlobalJNIEnv(void)
free(hadoopJvmArgs);
}
free(optHadoopClassPath);
+ free(options);
if (rv != 0) {
fprintf(stderr, "Call to JNI_CreateJavaVM failed "
@@ -523,7 +465,7 @@ static JNIEnv* getGlobalJNIEnv(void)
}
else {
//Attach this thread to the VM
- JavaVM* vm = vmBuf[0];
+ vm = vmBuf[0];
rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
if (rv != 0) {
fprintf(stderr, "Call to AttachCurrentThread "
@@ -557,54 +499,27 @@ static JNIEnv* getGlobalJNIEnv(void)
JNIEnv* getJNIEnv(void)
{
JNIEnv *env;
- struct hdfsTls *tls;
- int ret;
-
-#ifdef HAVE_BETTER_TLS
- static __thread struct hdfsTls *quickTls = NULL;
- if (quickTls)
- return quickTls->env;
-#endif
- pthread_mutex_lock(&jvmMutex);
- if (!gTlsKeyInitialized) {
- ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
- if (ret) {
- pthread_mutex_unlock(&jvmMutex);
- fprintf(stderr, "getJNIEnv: pthread_key_create failed with "
- "error %d\n", ret);
- return NULL;
- }
- gTlsKeyInitialized = 1;
- }
- tls = pthread_getspecific(gTlsKey);
- if (tls) {
- pthread_mutex_unlock(&jvmMutex);
- return tls->env;
+ THREAD_LOCAL_STORAGE_GET_QUICK();
+ mutexLock(&jvmMutex);
+ if (threadLocalStorageGet(&env)) {
+ mutexUnlock(&jvmMutex);
+ return NULL;
+ }
+ if (env) {
+ mutexUnlock(&jvmMutex);
+ return env;
}
env = getGlobalJNIEnv();
- pthread_mutex_unlock(&jvmMutex);
+ mutexUnlock(&jvmMutex);
if (!env) {
- fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
- return NULL;
+ fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
+ return NULL;
}
- tls = calloc(1, sizeof(struct hdfsTls));
- if (!tls) {
- fprintf(stderr, "getJNIEnv: OOM allocating %zd bytes\n",
- sizeof(struct hdfsTls));
- return NULL;
- }
- tls->env = env;
- ret = pthread_setspecific(gTlsKey, tls);
- if (ret) {
- fprintf(stderr, "getJNIEnv: pthread_setspecific failed with "
- "error code %d\n", ret);
- hdfsThreadDestructor(tls);
- return NULL;
+ if (threadLocalStorageSet(env)) {
+ return NULL;
}
-#ifdef HAVE_BETTER_TLS
- quickTls = tls;
-#endif
+ THREAD_LOCAL_STORAGE_SET_QUICK(env);
return env;
}
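
[Editor's sketch] The jni_helper.c hunks above route all locking through mutexLock()/mutexUnlock() from os/mutexes.h, with POSIX and Windows implementations supplied per platform. A sketch of what the POSIX side plausibly looks like, reconstructed from the pthread code removed above; the committed os/posix/mutexes.c may differ in detail:

    #include <pthread.h>
    #include <stdio.h>

    /* "mutex" is assumed to be typedef'd to pthread_mutex_t in
     * os/posix/platform.h; that typedef is not shown in this diff. */
    typedef pthread_mutex_t mutex;

    mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
    mutex jvmMutex = PTHREAD_MUTEX_INITIALIZER;

    int mutexLock(mutex *m)
    {
        int ret = pthread_mutex_lock(m);
        if (ret) {
            fprintf(stderr, "mutexLock: pthread_mutex_lock failed with error %d\n",
                    ret);
        }
        return ret;
    }

    int mutexUnlock(mutex *m)
    {
        int ret = pthread_mutex_unlock(m);
        if (ret) {
            fprintf(stderr, "mutexUnlock: pthread_mutex_unlock failed with error %d\n",
                    ret);
        }
        return ret;
    }
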