Posted to hdfs-commits@hadoop.apache.org by cn...@apache.org on 2014/08/08 18:26:46 UTC
svn commit: r1616814 [2/2] - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/
src/main/native/fuse-dfs/ src/main/native/libhdfs/
src/main/native/libhdfs/common/ src/main/native/libhdfs/os/
src/main/native/libhdfs/os/posix/ src/main/nati...
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h Fri Aug 8 16:26:45 2014
@@ -24,8 +24,6 @@
#include <stdlib.h>
#include <stdarg.h>
-#include <search.h>
-#include <pthread.h>
#include <errno.h>
#define PATH_SEPARATOR ':'
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c Fri Aug 8 16:26:45 2014
@@ -21,6 +21,7 @@
#include "hdfs_test.h"
#include "jni_helper.h"
#include "native_mini_dfs.h"
+#include "platform.h"
#include <errno.h>
#include <jni.h>
@@ -347,10 +348,11 @@ error_dlr_nn:
int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
struct hdfsBuilder *bld)
{
- int port, ret;
+ int ret;
+ tPort port;
 hdfsBuilderSetNameNode(bld, "localhost");
- port = nmdGetNameNodePort(cl);
- if (port < 0) {
- fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port);
- return EIO;
- }
+ ret = nmdGetNameNodePort(cl);
+ if (ret < 0) {
+ fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -ret);
+ return EIO;
+ }
+ port = (tPort)ret;
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h Fri Aug 8 16:26:45 2014
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_MUTEXES_H
+#define LIBHDFS_MUTEXES_H
+
+/*
+ * Defines abstraction over platform-specific mutexes. libhdfs has no formal
+ * initialization function that users would call from a single-threaded context
+ * to initialize the library. This creates a challenge for bootstrapping the
+ * mutexes. To address this, all required mutexes are pre-defined here with
+ * external storage. Platform-specific implementations must guarantee that the
+ * mutexes are initialized via static initialization.
+ */
+
+#include "platform.h"
+
+/** Mutex protecting the class reference hash table. */
+extern mutex hdfsHashMutex;
+
+/** Mutex protecting singleton JVM instance. */
+extern mutex jvmMutex;
+
+/**
+ * Locks a mutex.
+ *
+ * @param m mutex
+ * @return 0 if successful, non-zero otherwise
+ */
+int mutexLock(mutex *m);
+
+/**
+ * Unlocks a mutex.
+ *
+ * @param m mutex
+ * @return 0 if successful, non-zero otherwise
+ */
+int mutexUnlock(mutex *m);
+
+#endif
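
For context, a minimal sketch of how a libhdfs call site is expected to use this abstraction (hypothetical function, not part of this commit), following the 0-on-success convention declared above:

    #include "os/mutexes.h"

    /* Hypothetical call site: serialize access to the singleton JVM. */
    static int withJvmLocked(void)
    {
      int ret = mutexLock(&jvmMutex);
      if (ret) {
        return ret; /* mutexLock has already printed the error */
      }
      /* ... touch state guarded by jvmMutex ... */
      return mutexUnlock(&jvmMutex);
    }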
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c Fri Aug 8 16:26:45 2014
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/mutexes.h"
+
+#include <pthread.h>
+#include <stdio.h>
+
+mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
+mutex jvmMutex = PTHREAD_MUTEX_INITIALIZER;
+
+int mutexLock(mutex *m) {
+ int ret = pthread_mutex_lock(m);
+ if (ret) {
+ fprintf(stderr, "mutexLock: pthread_mutex_lock failed with error %d\n",
+ ret);
+ }
+ return ret;
+}
+
+int mutexUnlock(mutex *m) {
+ int ret = pthread_mutex_unlock(m);
+ if (ret) {
+ fprintf(stderr, "mutexUnlock: pthread_mutex_unlock failed with error %d\n",
+ ret);
+ }
+ return ret;
+}
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h Fri Aug 8 16:26:45 2014
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_PLATFORM_H
+#define LIBHDFS_PLATFORM_H
+
+#include <pthread.h>
+
+/* Use gcc type-checked format arguments. */
+#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs) \
+ __attribute__((format(printf, formatArg, varArgs)))
+
+/*
+ * Mutex and thread data types defined by pthreads.
+ */
+typedef pthread_mutex_t mutex;
+typedef pthread_t threadId;
+
+#endif
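
As a brief illustration (hypothetical declaration, not in this commit), wrapping a prototype with this macro lets gcc validate format strings at compile time; the Windows platform.h later in this commit stubs the macro out, so the same declaration compiles there too:

    #include "platform.h"

    /* gcc checks callers' arguments against fmt: the format string is
     * parameter 1 and the variadic arguments start at parameter 2. */
    void logError(const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(1, 2);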
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c Fri Aug 8 16:26:45 2014
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread.h"
+
+#include <pthread.h>
+#include <stdio.h>
+
+/**
+ * Defines a helper function that adapts the function pointer provided by the
+ * caller to the type required by pthread_create.
+ *
+ * @param toRun thread to run
+ * @return void* result of running thread (always NULL)
+ */
+static void* runThread(void *toRun) {
+ const thread *t = toRun;
+ t->start(t->arg);
+ return NULL;
+}
+
+int threadCreate(thread *t) {
+ int ret;
+ ret = pthread_create(&t->id, NULL, runThread, t);
+ if (ret) {
+ fprintf(stderr, "threadCreate: pthread_create failed with error %d\n", ret);
+ }
+ return ret;
+}
+
+int threadJoin(const thread *t) {
+ int ret = pthread_join(t->id, NULL);
+ if (ret) {
+ fprintf(stderr, "threadJoin: pthread_join failed with error %d\n", ret);
+ }
+ return ret;
+}
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c Fri Aug 8 16:26:45 2014
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread_local_storage.h"
+
+#include <jni.h>
+#include <pthread.h>
+#include <stdio.h>
+
+/** Key that allows us to retrieve thread-local storage */
+static pthread_key_t gTlsKey;
+
+/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
+static int gTlsKeyInitialized = 0;
+
+/**
+ * The function that is called whenever a thread with libhdfs thread local data
+ * is destroyed.
+ *
+ * @param v The thread-local data
+ */
+static void hdfsThreadDestructor(void *v)
+{
+ JavaVM *vm;
+ JNIEnv *env = v;
+ jint ret;
+
+ ret = (*env)->GetJavaVM(env, &vm);
+ if (ret) {
+ fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
+ ret);
+ (*env)->ExceptionDescribe(env);
+ } else {
+ (*vm)->DetachCurrentThread(vm);
+ }
+}
+
+int threadLocalStorageGet(JNIEnv **env)
+{
+ int ret = 0;
+ if (!gTlsKeyInitialized) {
+ ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
+ if (ret) {
+ fprintf(stderr,
+ "threadLocalStorageGet: pthread_key_create failed with error %d\n",
+ ret);
+ return ret;
+ }
+ gTlsKeyInitialized = 1;
+ }
+ *env = pthread_getspecific(gTlsKey);
+ return ret;
+}
+
+int threadLocalStorageSet(JNIEnv *env)
+{
+ int ret = pthread_setspecific(gTlsKey, env);
+ if (ret) {
+ fprintf(stderr,
+ "threadLocalStorageSet: pthread_setspecific failed with error %d\n",
+ ret);
+ hdfsThreadDestructor(env);
+ }
+ return ret;
+}
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h Fri Aug 8 16:26:45 2014
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_THREAD_H
+#define LIBHDFS_THREAD_H
+
+/*
+ * Defines abstraction over platform-specific threads.
+ */
+
+#include "platform.h"
+
+/** Pointer to function to run in thread. */
+typedef void (*threadProcedure)(void *);
+
+/** Structure containing a thread's ID, starting address and argument. */
+typedef struct {
+ threadId id;
+ threadProcedure start;
+ void *arg;
+} thread;
+
+/**
+ * Creates and immediately starts a new thread.
+ *
+ * @param t thread to create
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadCreate(thread *t);
+
+/**
+ * Joins to the given thread, blocking if necessary.
+ *
+ * @param t thread to join
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadJoin(const thread *t);
+
+#endif
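
A short usage sketch (hypothetical worker, not part of the commit). The thread struct must stay alive until the join completes, because the platform-specific runThread adapter dereferences it on the new thread:

    #include "os/thread.h"
    #include <stdio.h>

    static void worker(void *arg)
    {
      fprintf(stderr, "worker received: %s\n", (const char *)arg);
    }

    static int runOneThread(void)
    {
      thread t;
      int ret;
      t.start = worker;        /* procedure to run on the new thread */
      t.arg = (void *)"hello"; /* argument handed to worker */
      ret = threadCreate(&t);  /* creates and immediately starts the thread */
      if (ret) {
        return ret;
      }
      return threadJoin(&t);   /* block until worker returns */
    }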
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h Fri Aug 8 16:26:45 2014
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_THREAD_LOCAL_STORAGE_H
+#define LIBHDFS_THREAD_LOCAL_STORAGE_H
+
+/*
+ * Defines abstraction over platform-specific thread-local storage. libhdfs
+ * currently only needs thread-local storage for a single piece of data: the
+ * thread's JNIEnv. For simplicity, this interface is defined in terms of
+ * JNIEnv, not general-purpose thread-local storage of any arbitrary data.
+ */
+
+#include <jni.h>
+
+/*
+ * Most operating systems support the more efficient __thread construct, which
+ * is initialized by the linker. The following macros use this technique on the
+ * operating systems that support it.
+ */
+#ifdef HAVE_BETTER_TLS
+ #define THREAD_LOCAL_STORAGE_GET_QUICK() \
+ static __thread JNIEnv *quickTlsEnv = NULL; \
+ { \
+ if (quickTlsEnv) { \
+ return quickTlsEnv; \
+ } \
+ }
+
+ #define THREAD_LOCAL_STORAGE_SET_QUICK(env) \
+ { \
+ quickTlsEnv = (env); \
+ }
+#else
+ #define THREAD_LOCAL_STORAGE_GET_QUICK()
+ #define THREAD_LOCAL_STORAGE_SET_QUICK(env)
+#endif
+
+/**
+ * Gets the JNIEnv in thread-local storage for the current thread. If the call
+ * succeeds, and there is a JNIEnv associated with this thread, then returns 0
+ * and populates *env. If the call succeeds, but there is no JNIEnv associated
+ * with this thread, then returns 0 and sets *env to NULL. If the call fails,
+ * then returns non-zero. Only one thread at a time may execute this function.
+ * The caller is responsible for enforcing mutual exclusion.
+ *
+ * @param env JNIEnv out parameter
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadLocalStorageGet(JNIEnv **env);
+
+/**
+ * Sets the JNIEnv in thread-local storage for the current thread.
+ *
+ * @param env JNIEnv to set
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadLocalStorageSet(JNIEnv *env);
+
+#endif
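
For context, a hedged sketch of the intended call pattern (modeled on what a getJNIEnv-style caller in jni_helper.c would do; the jvmMutex discipline follows the mutual-exclusion requirement documented above, and the JVM attach step is elided):

    #include "os/mutexes.h"
    #include "os/thread_local_storage.h"

    JNIEnv *getEnvSketch(void)
    {
      JNIEnv *env = NULL;
      THREAD_LOCAL_STORAGE_GET_QUICK(); /* lock-free fast path, if __thread exists */
      if (mutexLock(&jvmMutex)) {
        return NULL;
      }
      if (threadLocalStorageGet(&env)) {
        mutexUnlock(&jvmMutex);
        return NULL;
      }
      if (!env) {
        /* ... attach the current thread to the JVM here, filling in env ... */
        if (threadLocalStorageSet(env)) {
          mutexUnlock(&jvmMutex);
          return NULL;
        }
        THREAD_LOCAL_STORAGE_SET_QUICK(env);
      }
      mutexUnlock(&jvmMutex);
      return env;
    }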
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h Fri Aug 8 16:26:45 2014
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_INTTYPES_H
+#define LIBHDFS_INTTYPES_H
+
+/* On Windows, inttypes.h does not exist, so manually define what we need. */
+
+#define PRId64 "I64d"
+#define PRIu64 "I64u"
+typedef unsigned __int64 uint64_t;
+
+#endif
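
A small illustration (hypothetical) of what the shim buys: code printing 64-bit values through the standard macros compiles unchanged on Windows, where the MSVC format length modifier is I64 rather than ll:

    #include "inttypes.h" /* resolves to this shim on Windows builds */
    #include <stdio.h>

    static void printFileSize(uint64_t size)
    {
      /* PRIu64 expands to "I64u" here, so the format becomes "size=%I64u\n". */
      printf("size=%" PRIu64 "\n", size);
    }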
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c Fri Aug 8 16:26:45 2014
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/mutexes.h"
+
+#include <windows.h>
+
+mutex hdfsHashMutex;
+mutex jvmMutex;
+
+/**
+ * Unfortunately, there is no simple static initializer for a critical section.
+ * Instead, the API requires calling InitializeCriticalSection. Since libhdfs
+ * lacks an explicit initialization function, there is no obvious existing place
+ * for the InitializeCriticalSection calls. To work around this, we define an
+ * initialization function and instruct the linker to set a pointer to that
+ * function as a user-defined global initializer. See discussion of CRT
+ * Initialization:
+ * http://msdn.microsoft.com/en-us/library/bb918180.aspx
+ */
+static void __cdecl initializeMutexes(void) {
+ InitializeCriticalSection(&hdfsHashMutex);
+ InitializeCriticalSection(&jvmMutex);
+}
+#pragma section(".CRT$XCU", read)
+__declspec(allocate(".CRT$XCU"))
+const void (__cdecl *pInitialize)(void) = initializeMutexes;
+
+int mutexLock(mutex *m) {
+ EnterCriticalSection(m);
+ return 0;
+}
+
+int mutexUnlock(mutex *m) {
+ LeaveCriticalSection(m);
+ return 0;
+}
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h Fri Aug 8 16:26:45 2014
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_PLATFORM_H
+#define LIBHDFS_PLATFORM_H
+
+#include <stdio.h>
+#include <windows.h>
+#include <winsock.h>
+
+/*
+ * O_ACCMODE defined to match Linux definition.
+ */
+#ifndef O_ACCMODE
+#define O_ACCMODE 0x0003
+#endif
+
+/*
+ * Windows has a different name for its maximum path length constant.
+ */
+#ifndef PATH_MAX
+#define PATH_MAX MAX_PATH
+#endif
+
+/*
+ * Windows does not define EDQUOT and ESTALE in errno.h. The closest equivalents
+ * are these constants from winsock.h.
+ */
+#ifndef EDQUOT
+#define EDQUOT WSAEDQUOT
+#endif
+
+#ifndef ESTALE
+#define ESTALE WSAESTALE
+#endif
+
+/*
+ * gcc-style type-checked format arguments are not supported on Windows, so just
+ * stub this macro.
+ */
+#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs)
+
+/*
+ * Define macros for various string formatting functions not defined on Windows.
+ * Where possible, we reroute to one of the secure CRT variants. On Windows,
+ * the preprocessor does support variadic macros, even though they weren't
+ * defined until C99.
+ */
+#define snprintf(str, size, format, ...) \
+ _snprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
+#define strncpy(dest, src, n) \
+ strncpy_s((dest), (n), (src), _TRUNCATE)
+#define strtok_r(str, delim, saveptr) \
+ strtok_s((str), (delim), (saveptr))
+#define vsnprintf(str, size, format, ...) \
+ vsnprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
+
+/*
+ * Mutex data type defined as Windows CRITICAL_SECTION. A critical section (not
+ * Windows mutex) is used, because libhdfs only needs synchronization of multiple
+ * threads within a single process, not synchronization across process
+ * boundaries.
+ */
+typedef CRITICAL_SECTION mutex;
+
+/*
+ * Thread data type defined as HANDLE to a Windows thread.
+ */
+typedef HANDLE threadId;
+
+#endif
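
For illustration (hypothetical helper, not from the commit), a typical formatting call in libhdfs now reroutes to the secure CRT on Windows, with _TRUNCATE giving snprintf-like truncation semantics:

    #include "platform.h"
    #include <stdio.h>

    static void formatPort(char *buf, size_t len)
    {
      /* On Windows this expands to
       * _snprintf_s(buf, len, _TRUNCATE, "port=%d", 8020). */
      snprintf(buf, len, "port=%d", 8020);
    }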
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c Fri Aug 8 16:26:45 2014
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread.h"
+
+#include <stdio.h>
+#include <windows.h>
+
+/**
+ * Defines a helper function that adapts the function pointer provided by the
+ * caller to the type required by CreateThread.
+ *
+ * @param toRun thread to run
+ * @return DWORD result of running thread (always 0)
+ */
+static DWORD WINAPI runThread(LPVOID toRun) {
+ const thread *t = toRun;
+ t->start(t->arg);
+ return 0;
+}
+
+int threadCreate(thread *t) {
+ DWORD ret = 0;
+ HANDLE h;
+ h = CreateThread(NULL, 0, runThread, t, 0, NULL);
+ if (h) {
+ t->id = h;
+ } else {
+ ret = GetLastError();
+ fprintf(stderr, "threadCreate: CreateThread failed with error %d\n", ret);
+ }
+ return ret;
+}
+
+int threadJoin(const thread *t) {
+ DWORD ret = WaitForSingleObject(t->id, INFINITE);
+ switch (ret) {
+ case WAIT_OBJECT_0:
+ break;
+ case WAIT_FAILED:
+ ret = GetLastError();
+ fprintf(stderr, "threadJoin: WaitForSingleObject failed with error %d\n",
+ ret);
+ break;
+ default:
+ fprintf(stderr, "threadJoin: WaitForSingleObject unexpected error %d\n",
+ ret);
+ break;
+ }
+ return ret;
+}
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c Fri Aug 8 16:26:45 2014
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread_local_storage.h"
+
+#include <jni.h>
+#include <stdio.h>
+#include <windows.h>
+
+/** Key that allows us to retrieve thread-local storage */
+static DWORD gTlsIndex = TLS_OUT_OF_INDEXES;
+
+/**
+ * If the current thread has a JNIEnv in thread-local storage, then detaches the
+ * current thread from the JVM.
+ */
+static void detachCurrentThreadFromJvm()
+{
+ JNIEnv *env = NULL;
+ JavaVM *vm;
+ jint ret;
+ if (threadLocalStorageGet(&env) || !env) {
+ return;
+ }
+ ret = (*env)->GetJavaVM(env, &vm);
+ if (ret) {
+ fprintf(stderr,
+ "detachCurrentThreadFromJvm: GetJavaVM failed with error %d\n",
+ ret);
+ (*env)->ExceptionDescribe(env);
+ } else {
+ (*vm)->DetachCurrentThread(vm);
+ }
+}
+
+/**
+ * Unlike pthreads, the Windows API does not seem to provide a convenient way to
+ * hook a callback onto thread shutdown. However, the Windows portable
+ * executable format does define a concept of thread-local storage callbacks.
+ * Here, we define a function and instruct the linker to set a pointer to that
+ * function in the segment for thread-local storage callbacks. See page 85 of
+ * Microsoft Portable Executable and Common Object File Format Specification:
+ * http://msdn.microsoft.com/en-us/gg463119.aspx
+ * This technique only works for implicit linking (OS loads DLL on demand), not
+ * for explicit linking (user code calls LoadLibrary directly). This effectively
+ * means that we have a known limitation: libhdfs may not work correctly if a
+ * Windows application attempts to use it via explicit linking.
+ *
+ * @param h module handle
+ * @param reason the reason for calling the callback
+ * @param pv reserved, unused
+ */
+static void NTAPI tlsCallback(PVOID h, DWORD reason, PVOID pv)
+{
+ DWORD tlsIndex;
+ switch (reason) {
+ case DLL_THREAD_DETACH:
+ detachCurrentThreadFromJvm();
+ break;
+ case DLL_PROCESS_DETACH:
+ detachCurrentThreadFromJvm();
+ tlsIndex = gTlsIndex;
+ gTlsIndex = TLS_OUT_OF_INDEXES;
+ if (!TlsFree(tlsIndex)) {
+ fprintf(stderr, "tlsCallback: TlsFree failed with error %d\n",
+ GetLastError());
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * A variable named _tls_used contains the TLS directory, which contains a list
+ * of pointers to callback functions. Normally, the linker won't retain this
+ * variable unless the executable has implicit thread-local variables, defined
+ * using the __declspec(thread) extended storage-class modifier. libhdfs
+ * doesn't use __declspec(thread), and we have no guarantee that the executable
+ * linked to libhdfs will use __declspec(thread). By forcing the linker to
+ * reference _tls_used, we guarantee that the binary retains the TLS directory.
+ * See Microsoft Visual Studio 10.0/VC/crt/src/tlssup.c .
+ */
+#pragma comment(linker, "/INCLUDE:_tls_used")
+
+/*
+ * We must retain a pointer to the callback function. Force the linker to keep
+ * this symbol, even though it appears that nothing in our source code uses it.
+ */
+#pragma comment(linker, "/INCLUDE:pTlsCallback")
+
+/*
+ * Define constant pointer to our callback, and tell the linker to pin it into
+ * the TLS directory so that it receives thread callbacks. Use external linkage
+ * to protect against the linker discarding the seemingly unused symbol.
+ */
+#pragma const_seg(".CRT$XLB")
+extern const PIMAGE_TLS_CALLBACK pTlsCallback;
+const PIMAGE_TLS_CALLBACK pTlsCallback = tlsCallback;
+#pragma const_seg()
+
+int threadLocalStorageGet(JNIEnv **env)
+{
+ LPVOID tls;
+ DWORD ret;
+ if (TLS_OUT_OF_INDEXES == gTlsIndex) {
+ gTlsIndex = TlsAlloc();
+ if (TLS_OUT_OF_INDEXES == gTlsIndex) {
+ fprintf(stderr,
+ "threadLocalStorageGet: TlsAlloc failed with error %d\n",
+ TLS_OUT_OF_INDEXES);
+ return TLS_OUT_OF_INDEXES;
+ }
+ }
+ tls = TlsGetValue(gTlsIndex);
+ if (tls) {
+ *env = tls;
+ return 0;
+ } else {
+ ret = GetLastError();
+ if (ERROR_SUCCESS == ret) {
+ /* Thread-local storage contains NULL, because we haven't set it yet. */
+ *env = NULL;
+ return 0;
+ } else {
+ /*
+ * The API call failed. According to documentation, TlsGetValue cannot
+ * fail as long as the index is a valid index from a successful TlsAlloc
+ * call. This error handling is purely defensive.
+ */
+ fprintf(stderr,
+ "threadLocalStorageGet: TlsGetValue failed with error %d\n", ret);
+ return ret;
+ }
+ }
+}
+
+int threadLocalStorageSet(JNIEnv *env)
+{
+ DWORD ret = 0;
+ if (!TlsSetValue(gTlsIndex, (LPVOID)env)) {
+ ret = GetLastError();
+ fprintf(stderr,
+ "threadLocalStorageSet: TlsSetValue failed with error %d\n",
+ ret);
+ detachCurrentThreadFromJvm();
+ }
+ return ret;
+}
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h?rev=1616814&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h Fri Aug 8 16:26:45 2014
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_UNISTD_H
+#define LIBHDFS_UNISTD_H
+
+/* On Windows, unistd.h does not exist, so manually define what we need. */
+
+#include <process.h> /* Declares getpid(). */
+#include <windows.h>
+
+/* Re-route sleep to Sleep, converting units from seconds to milliseconds. */
+#define sleep(seconds) Sleep((seconds) * 1000)
+#endif
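
A trivial illustration (hypothetical wrapper): the sleep(2) call in test_libhdfs_ops.c below now compiles on Windows, expanding to Sleep(2 * 1000) milliseconds:

    #include "unistd.h" /* resolves to this shim on Windows builds */

    static void pauseTwoSeconds(void)
    {
      sleep(2); /* expands to Sleep(2 * 1000), i.e. 2000 ms */
    }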
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c Fri Aug 8 16:26:45 2014
@@ -18,6 +18,7 @@
#include "hdfs.h"
#include "hdfs_test.h"
+#include "platform.h"
#include <inttypes.h>
#include <jni.h>
@@ -28,12 +29,13 @@
#include <unistd.h>
void permission_disp(short permissions, char *rtr) {
- rtr[9] = '\0';
int i;
+ short permissionsId;
+ char* perm;
+ rtr[9] = '\0';
for(i=2;i>=0;i--)
{
- short permissionsId = permissions >> (i * 3) & (short)7;
- char* perm;
+ permissionsId = permissions >> (i * 3) & (short)7;
switch(permissionsId) {
case 7:
perm = "rwx"; break;
@@ -60,35 +62,56 @@ void permission_disp(short permissions,
}
int main(int argc, char **argv) {
- char buffer[32];
- tSize num_written_bytes;
+ const char *writePath = "/tmp/testfile.txt";
+ const char *fileContents = "Hello, World!";
+ const char *readPath = "/tmp/testfile.txt";
+ const char *srcPath = "/tmp/testfile.txt";
+ const char *dstPath = "/tmp/testfile2.txt";
+ const char *slashTmp = "/tmp";
+ const char *newDirectory = "/tmp/newdir";
+ const char *newOwner = "root";
+ const char *tuser = "nobody";
+ const char *appendPath = "/tmp/appends";
+ const char *userPath = "/tmp/usertestfile.txt";
+
+ char buffer[32], buffer2[256], rdbuffer[32];
+ tSize num_written_bytes, num_read_bytes;
+ hdfsFS fs, lfs;
+ hdfsFile writeFile, readFile, localFile, appendFile, userFile;
+ tOffset currentPos, seekPos;
+ int exists, totalResult, result, numEntries, i, j;
+ const char *resp;
+ hdfsFileInfo *fileInfo, *fileList, *finfo;
+ char *buffer3;
+ char permissions[10];
+ char ***hosts;
+ short newPerm = 0666;
+ tTime newMtime, newAtime;
- hdfsFS fs = hdfsConnectNewInstance("default", 0);
+ fs = hdfsConnectNewInstance("default", 0);
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
- hdfsFS lfs = hdfsConnectNewInstance(NULL, 0);
+ lfs = hdfsConnectNewInstance(NULL, 0);
if(!lfs) {
fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
exit(-1);
}
- const char* writePath = "/tmp/testfile.txt";
- const char* fileContents = "Hello, World!";
-
{
//Write tests
- hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+ writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
exit(-1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
num_written_bytes =
- hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents)+1);
+ hdfsWrite(fs, writeFile, (void*)fileContents,
+ (tSize)(strlen(fileContents)+1));
if (num_written_bytes != strlen(fileContents) + 1) {
fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
(int)(strlen(fileContents) + 1), (int)num_written_bytes);
@@ -96,7 +119,7 @@ int main(int argc, char **argv) {
}
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
- tOffset currentPos = -1;
+ currentPos = -1;
if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
fprintf(stderr,
"Failed to get current file position correctly! Got %ld!\n",
@@ -123,15 +146,14 @@ int main(int argc, char **argv) {
{
//Read tests
- const char* readPath = "/tmp/testfile.txt";
- int exists = hdfsExists(fs, readPath);
+ exists = hdfsExists(fs, readPath);
if (exists) {
fprintf(stderr, "Failed to validate existence of %s\n", readPath);
exit(-1);
}
- hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+ readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for reading!\n", readPath);
exit(-1);
@@ -146,13 +168,13 @@ int main(int argc, char **argv) {
fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
- tOffset seekPos = 1;
+ seekPos = 1;
if(hdfsSeek(fs, readFile, seekPos)) {
fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
exit(-1);
}
- tOffset currentPos = -1;
+ currentPos = -1;
if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
fprintf(stderr,
"Failed to get current file position correctly! Got %ld!\n",
@@ -175,7 +197,7 @@ int main(int argc, char **argv) {
exit(-1);
}
memset(buffer, 0, sizeof(buffer));
- tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
+ num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
sizeof(buffer));
if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
@@ -208,14 +230,14 @@ int main(int argc, char **argv) {
hdfsCloseFile(fs, readFile);
// Test correct behaviour for unsupported filesystems
- hdfsFile localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+ localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!localFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
exit(-1);
}
num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
- strlen(fileContents) + 1);
+ (tSize)(strlen(fileContents) + 1));
hdfsCloseFile(lfs, localFile);
localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
@@ -229,50 +251,43 @@ int main(int argc, char **argv) {
hdfsCloseFile(lfs, localFile);
}
- int totalResult = 0;
- int result = 0;
+ totalResult = 0;
+ result = 0;
{
//Generic file-system operations
- const char* srcPath = "/tmp/testfile.txt";
- const char* dstPath = "/tmp/testfile2.txt";
-
- fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- const char* slashTmp = "/tmp";
- const char* newDirectory = "/tmp/newdir";
- fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- char buffer[256];
- const char *resp;
- fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+ fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
totalResult += (resp ? 0 : 1);
- fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+ fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
totalResult += (resp ? 0 : 1);
fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
- hdfsFileInfo *fileInfo = NULL;
+ fileInfo = NULL;
if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
fprintf(stderr, "Name: %s, ", fileInfo->mName);
@@ -283,7 +298,6 @@ int main(int argc, char **argv) {
fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod));
fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
- char permissions[10];
permission_disp(fileInfo->mPermissions, permissions);
fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
hdfsFreeFileInfo(fileInfo, 1);
@@ -292,10 +306,8 @@ int main(int argc, char **argv) {
fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
}
- hdfsFileInfo *fileList = 0;
- int numEntries = 0;
+ fileList = 0;
if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
- int i = 0;
for(i=0; i < numEntries; ++i) {
fprintf(stderr, "Name: %s, ", fileList[i].mName);
fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
@@ -305,7 +317,6 @@ int main(int argc, char **argv) {
fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
- char permissions[10];
permission_disp(fileList[i].mPermissions, permissions);
fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
}
@@ -319,12 +330,12 @@ int main(int argc, char **argv) {
}
}
- char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
+ hosts = hdfsGetHosts(fs, srcPath, 0, 1);
if(hosts) {
fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
- int i=0;
+ i=0;
while(hosts[i]) {
- int j = 0;
+ j = 0;
while(hosts[i][j]) {
fprintf(stderr,
"\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
@@ -337,131 +348,129 @@ int main(int argc, char **argv) {
fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
}
- char *newOwner = "root";
// setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
- short newPerm = 0666;
// chown write
- fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// chmod write
- fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
sleep(2);
- tTime newMtime = time(NULL);
- tTime newAtime = time(NULL);
+ newMtime = time(NULL);
+ newAtime = time(NULL);
// utime write
- fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// chown/chmod/utime read
- hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+ finfo = hdfsGetPathInfo(fs, writePath);
- fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// will later use /tmp/ as a different user so enable it
- fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr,"newMTime=%ld\n",newMtime);
fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
- fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// No easy way to turn on access times from hdfs_test right now
- // fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) ? "Failed!" : "Success!"));
+ // fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
// totalResult += result;
hdfsFreeFileInfo(finfo, 1);
// Clean up
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
+ fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
totalResult += (result ? 0 : 1);
}
{
// TEST APPENDS
- const char *writePath = "/tmp/appends";
// CREATE
- hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
- if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+ appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
+ if(!appendFile) {
+ fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
exit(-1);
}
- fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+ fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
- char* buffer = "Hello,";
- tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
+ buffer3 = "Hello,";
+ num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+ (tSize)strlen(buffer3));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
+ if (hdfsFlush(fs, appendFile)) {
+ fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
exit(-1);
}
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
+ fprintf(stderr, "Flushed %s successfully!\n", appendPath);
- hdfsCloseFile(fs, writeFile);
+ hdfsCloseFile(fs, appendFile);
// RE-OPEN
- writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
- if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+ appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
+ if(!appendFile) {
+ fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
exit(-1);
}
- fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+ fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
- buffer = " World";
- num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
+ buffer3 = " World";
+ num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+ (tSize)(strlen(buffer3) + 1));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
+ if (hdfsFlush(fs, appendFile)) {
+ fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
exit(-1);
}
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
+ fprintf(stderr, "Flushed %s successfully!\n", appendPath);
- hdfsCloseFile(fs, writeFile);
+ hdfsCloseFile(fs, appendFile);
// CHECK size
- hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
- fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
+ finfo = hdfsGetPathInfo(fs, appendPath);
+ fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
totalResult += (result ? 0 : 1);
// READ and check data
- hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
+ readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
- fprintf(stderr, "Failed to open %s for reading!\n", writePath);
+ fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
exit(-1);
}
- char rdbuffer[32];
- tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+ num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
fprintf(stderr, "Read following %d bytes:\n%s\n",
num_read_bytes, rdbuffer);
- fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
+ fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
hdfsCloseFile(fs, readFile);
@@ -478,36 +487,33 @@ int main(int argc, char **argv) {
- // the actual fs user capabilities. Thus just create a file and read
- // the owner is correct.
+ // the actual fs user capabilities. Thus just create a file and verify
+ // that the owner is correct.
- const char *tuser = "nobody";
- const char* writePath = "/tmp/usertestfile.txt";
-
fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
exit(-1);
}
- hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
- if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+ userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
+ if(!userFile) {
+ fprintf(stderr, "Failed to open %s for writing!\n", userPath);
exit(-1);
}
- fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+ fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
- char* buffer = "Hello, World!";
- tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+ num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
+ (tSize)(strlen(fileContents)+1));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
+ if (hdfsFlush(fs, userFile)) {
+ fprintf(stderr, "Failed to 'flush' %s\n", userPath);
exit(-1);
}
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
+ fprintf(stderr, "Flushed %s successfully!\n", userPath);
- hdfsCloseFile(fs, writeFile);
+ hdfsCloseFile(fs, userFile);
- hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
- fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
+ finfo = hdfsGetPathInfo(fs, userPath);
+ fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
totalResult += result;
}
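
The hunk above exercises per-user connections. A hedged sketch of the same check, reusing the test's values ("nobody", /tmp/usertestfile.txt) but omitting the surrounding main(); checkFileOwner is a hypothetical wrapper:

#include "hdfs.h"
#include <string.h>

static int checkFileOwner(void)
{
    const char *tuser = "nobody";
    const char *userPath = "/tmp/usertestfile.txt";
    hdfsFS fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
    hdfsFile f;
    hdfsFileInfo *finfo;
    int failed;

    if (!fs) {
        return 1;
    }
    f = hdfsOpenFile(fs, userPath, O_WRONLY | O_CREAT, 0, 0, 0);
    if (!f) {
        return 1;
    }
    hdfsCloseFile(fs, f);

    /* The NameNode records the connecting user as the file owner. */
    finfo = hdfsGetPathInfo(fs, userPath);
    failed = !finfo || strcmp(finfo->mOwner, tuser) != 0;
    if (finfo) {
        hdfsFreeFileInfo(finfo, 1);
    }
    hdfsDisconnect(fs);
    return failed;
}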
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c Fri Aug 8 16:26:45 2014
@@ -22,35 +22,38 @@
#include <stdlib.h>
int main(int argc, char **argv) {
+ hdfsFS fs;
+ const char *rfile = argv[1];
+ tSize bufferSize = strtoul(argv[3], NULL, 10);
+ hdfsFile readFile;
+ char* buffer;
+ tSize curSize;
if (argc != 4) {
fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
exit(-1);
}
- hdfsFS fs = hdfsConnect("default", 0);
+ fs = hdfsConnect("default", 0);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
-
- const char* rfile = argv[1];
- tSize bufferSize = strtoul(argv[3], NULL, 10);
-
- hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+
+ readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for writing!\n", rfile);
exit(-2);
}
// data to be written to the file
- char* buffer = malloc(sizeof(char) * bufferSize);
+ buffer = malloc(sizeof(char) * bufferSize);
if(buffer == NULL) {
return -2;
}
// read from the file
- tSize curSize = bufferSize;
+ curSize = bufferSize;
for (; curSize == bufferSize;) {
curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
}
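
Most of the churn in this file is declaration hoisting: locals move to the top of the block so the file compiles as C89, which (unlike C99) forbids declarations after statements, a constraint of Visual Studio's C compiler that this Windows port has to satisfy. A minimal sketch of the resulting style, assuming hdfs.h, stdio.h, and stdlib.h are included (drainFile is a hypothetical name):

static void drainFile(hdfsFS fs, const char *path, tSize bufferSize)
{
    /* C89: every declaration before the first statement. */
    hdfsFile readFile;
    char *buffer;
    tSize curSize;

    readFile = hdfsOpenFile(fs, path, O_RDONLY, bufferSize, 0, 0);
    if (!readFile) {
        return;
    }
    buffer = malloc(sizeof(char) * bufferSize);
    if (buffer != NULL) {
        /* Keep reading while full buffers come back. */
        for (curSize = bufferSize; curSize == bufferSize;) {
            curSize = hdfsRead(fs, readFile, (void *)buffer, curSize);
        }
        free(buffer);
    }
    hdfsCloseFile(fs, readFile);
}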
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c Fri Aug 8 16:26:45 2014
@@ -21,23 +21,31 @@
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/types.h>
int main(int argc, char **argv) {
+ hdfsFS fs;
+ const char *writeFileName = argv[1];
+ off_t fileTotalSize = strtoul(argv[2], NULL, 10);
+ long long tmpBufferSize = strtoul(argv[3], NULL, 10);
+ tSize bufferSize;
+ hdfsFile writeFile;
+ char* buffer;
+ int i;
+ off_t nrRemaining;
+ tSize curSize;
+ tSize written;
if (argc != 4) {
fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
exit(-1);
}
- hdfsFS fs = hdfsConnect("default", 0);
+ fs = hdfsConnect("default", 0);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
-
- const char* writeFileName = argv[1];
- off_t fileTotalSize = strtoul(argv[2], NULL, 10);
- long long tmpBufferSize = strtoul(argv[3], NULL, 10);
// sanity check
if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
@@ -51,30 +59,27 @@ int main(int argc, char **argv) {
exit(-3);
}
- tSize bufferSize = tmpBufferSize;
+ bufferSize = (tSize)tmpBufferSize;
- hdfsFile writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
+ writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
if (!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
exit(-2);
}
// data to be written to the file
- char* buffer = malloc(sizeof(char) * bufferSize);
+ buffer = malloc(sizeof(char) * bufferSize);
if(buffer == NULL) {
fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
return -2;
}
- int i = 0;
for (i=0; i < bufferSize; ++i) {
buffer[i] = 'a' + (i%26);
}
// write to the file
- off_t nrRemaining;
for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
- tSize curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
- tSize written;
+ curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
exit(-3);
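
The same hoisting applies here, plus explicit casts where long long and off_t values feed the 32-bit tSize. A hedged sketch of the kind of checked narrowing the sanity checks rely on; parseBufferSize is a hypothetical helper, not part of the test:

#include "hdfs.h"
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int parseBufferSize(const char *arg, tSize *out)
{
    unsigned long tmp;

    errno = 0;
    tmp = strtoul(arg, NULL, 10);
    if (errno == ERANGE || tmp == 0 || tmp > (unsigned long)INT_MAX) {
        fprintf(stderr, "invalid buffer size: %s\n", arg);
        return -1;
    }
    *out = (tSize)tmp;  /* now provably fits in the 32-bit signed tSize */
    return 0;
}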
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c Fri Aug 8 16:26:45 2014
@@ -19,12 +19,12 @@
#include "expect.h"
#include "hdfs.h"
#include "native_mini_dfs.h"
+#include "platform.h"
#include <errno.h>
#include <inttypes.h>
-#include <semaphore.h>
-#include <pthread.h>
#include <unistd.h>
+#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -53,7 +53,7 @@ static uint8_t *getZeroCopyBlockData(int
exit(1);
}
for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) {
- buf[i] = blockIdx + (i % 17);
+ buf[i] = (uint8_t)(blockIdx + (i % 17));
}
return buf;
}
@@ -69,18 +69,6 @@ static int getZeroCopyBlockLen(int block
}
}
-static void printBuf(const uint8_t *buf, size_t len) __attribute__((unused));
-
-static void printBuf(const uint8_t *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; i++) {
- fprintf(stderr, "%02x", buf[i]);
- }
- fprintf(stderr, "\n");
-}
-
static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
{
hdfsFile file = NULL;
@@ -127,8 +115,9 @@ static int doTestZeroCopyReads(hdfsFS fs
EXPECT_NONNULL(block);
EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
hadoopRzBufferFree(file, buffer);
- EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
- hdfsTell(fs, file));
+ EXPECT_INT64_EQ(
+ (int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
+ hdfsTell(fs, file));
EXPECT_ZERO(expectFileStats(file,
TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
@@ -165,7 +154,7 @@ static int doTestZeroCopyReads(hdfsFS fs
free(block);
block = getZeroCopyBlockData(2);
EXPECT_NONNULL(block);
- EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer) +
+ EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
(TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
hadoopRzBufferFree(file, buffer);
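
The added (uint8_t*) cast is needed because hadoopRzBufferGet returns const void*, and pointer arithmetic on void* is a GNU extension that standard C, and therefore MSVC, rejects. A one-function sketch (bufferAt is a hypothetical helper):

#include <stddef.h>
#include <stdint.h>

/* Byte-offset into an opaque buffer without void* arithmetic: cast to
 * a concrete byte pointer first, as the hunk above does. */
static const uint8_t *bufferAt(const void *base, size_t offset)
{
    return (const uint8_t *)base + offset;
}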
@@ -219,8 +208,10 @@ int main(void)
{
int port;
struct NativeMiniDfsConf conf = {
- .doFormat = 1,
- .configureShortCircuit = 1,
+ 1, /* doFormat */
+ 0, /* webhdfsEnabled */
+ 0, /* namenodeHttpPort */
+ 1, /* configureShortCircuit */
};
char testFileName[TEST_FILE_NAME_LENGTH];
hdfsFS fs;
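
The struct initialization change recurs across the tests: C99 designated initializers are not accepted by MSVC's C compiler, so the conf structs switch to C89 positional form. A sketch of the trade-off, with the field order taken from the comments in the hunk above:

/* Previously, only the nonzero fields needed naming:
 *
 *     struct NativeMiniDfsConf conf = { .doFormat = 1,
 *                                       .configureShortCircuit = 1 };
 *
 * Positionally, every field up to the last nonzero one must appear,
 * in declaration order; the name comments guard against reordering. */
struct NativeMiniDfsConf conf = {
    1, /* doFormat */
    0, /* webhdfsEnabled */
    0, /* namenodeHttpPort */
    1, /* configureShortCircuit */
};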
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c Fri Aug 8 16:26:45 2014
@@ -19,11 +19,11 @@
#include "expect.h"
#include "hdfs.h"
#include "native_mini_dfs.h"
+#include "os/thread.h"
#include <errno.h>
#include <inttypes.h>
-#include <semaphore.h>
-#include <pthread.h>
+#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -35,8 +35,6 @@
#define TLH_DEFAULT_BLOCK_SIZE 134217728
-static sem_t tlhSem;
-
static struct NativeMiniDfsCluster* tlhCluster;
struct tlhThreadInfo {
@@ -44,18 +42,19 @@ struct tlhThreadInfo {
int threadIdx;
/** 0 = thread was successful; error code otherwise */
int success;
- /** pthread identifier */
- pthread_t thread;
+ /** thread identifier */
+ thread theThread;
};
static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
const char *username)
{
- int ret, port;
+ int ret;
+ tPort port;
hdfsFS hdfs;
struct hdfsBuilder *bld;
- port = nmdGetNameNodePort(cl);
+ port = (tPort)nmdGetNameNodePort(cl);
if (port < 0) {
fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
"returned error %d\n", port);
@@ -164,7 +163,7 @@ static int doTestHdfsOperations(struct t
EXPECT_NONNULL(file);
/* TODO: implement writeFully and use it here */
- expected = strlen(paths->prefix);
+ expected = (int)strlen(paths->prefix);
ret = hdfsWrite(fs, file, paths->prefix, expected);
if (ret < 0) {
ret = errno;
@@ -186,9 +185,9 @@ static int doTestHdfsOperations(struct t
EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
errno = 0;
- EXPECT_ZERO(readStats->totalBytesRead);
- EXPECT_ZERO(readStats->totalLocalBytesRead);
- EXPECT_ZERO(readStats->totalShortCircuitBytesRead);
+ EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
+ EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
+ EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
hdfsFileFreeReadStatistics(readStats);
/* TODO: implement readFully and use it here */
ret = hdfsRead(fs, file, tmp, sizeof(tmp));
@@ -204,7 +203,7 @@ static int doTestHdfsOperations(struct t
}
EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
errno = 0;
- EXPECT_INT_EQ(expected, readStats->totalBytesRead);
+ EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
hdfsFileFreeReadStatistics(readStats);
EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
EXPECT_ZERO(hdfsCloseFile(fs, file));
@@ -262,12 +261,11 @@ static int testHdfsOperationsImpl(struct
return 0;
}
-static void *testHdfsOperations(void *v)
+static void testHdfsOperations(void *v)
{
struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
int ret = testHdfsOperationsImpl(ti);
ti->success = ret;
- return NULL;
}
static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
@@ -304,7 +302,7 @@ int main(void)
const char *tlhNumThreadsStr;
struct tlhThreadInfo ti[TLH_MAX_THREADS];
struct NativeMiniDfsConf conf = {
- .doFormat = 1,
+ 1, /* doFormat */
};
tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
@@ -323,21 +321,20 @@ int main(void)
ti[i].threadIdx = i;
}
- EXPECT_ZERO(sem_init(&tlhSem, 0, tlhNumThreads));
tlhCluster = nmdCreate(&conf);
EXPECT_NONNULL(tlhCluster);
EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
for (i = 0; i < tlhNumThreads; i++) {
- EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
- testHdfsOperations, &ti[i]));
+ ti[i].theThread.start = testHdfsOperations;
+ ti[i].theThread.arg = &ti[i];
+ EXPECT_ZERO(threadCreate(&ti[i].theThread));
}
for (i = 0; i < tlhNumThreads; i++) {
- EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
+ EXPECT_ZERO(threadJoin(&ti[i].theThread));
}
EXPECT_ZERO(nmdShutdown(tlhCluster));
nmdFree(tlhCluster);
- EXPECT_ZERO(sem_destroy(&tlhSem));
return checkFailures(ti, tlhNumThreads);
}
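
This file shows the new os/thread.h abstraction end to end: a thread struct carries the start routine and its argument, and threadCreate/threadJoin replace pthread_create/pthread_join (the unused tlhSem semaphore goes away with the pthread dependency). A usage sketch inferred from the diff; worker and runOne are hypothetical, and note that the start routine returns void rather than pthreads' void*:

#include "os/thread.h"
#include <stdio.h>

static void worker(void *arg)
{
    int *idx = arg;
    fprintf(stderr, "thread %d running\n", *idx);
}

static int runOne(void)
{
    thread t;
    int idx = 0;

    t.start = worker;       /* void (*)(void *), per the diff above */
    t.arg = &idx;
    if (threadCreate(&t)) { /* nonzero return indicates failure */
        return 1;
    }
    return threadJoin(&t);  /* 0 on success, matching EXPECT_ZERO */
}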
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c?rev=1616814&r1=1616813&r2=1616814&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c Fri Aug 8 16:26:45 2014
@@ -22,7 +22,7 @@
#include <errno.h>
static struct NativeMiniDfsConf conf = {
- .doFormat = 1,
+ 1, /* doFormat */
};
/**