Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2016/01/07 23:41:53 UTC

[36/50] [abbrv] hadoop git commit: HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.

HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c213ee08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c213ee08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c213ee08

Branch: refs/heads/HDFS-1312
Commit: c213ee085971483d737a2d4652adfda0f767eea0
Parents: b6c9d3f
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Jan 6 16:12:24 2016 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Jan 6 16:16:31 2016 -0800

----------------------------------------------------------------------
 .../hadoop-hdfs-native-client/pom.xml           |    5 +-
 .../src/CMakeLists.txt                          |    5 -
 .../src/contrib/libwebhdfs/CMakeLists.txt       |   88 -
 .../libwebhdfs/resources/FindJansson.cmake      |   43 -
 .../contrib/libwebhdfs/src/hdfs_http_client.c   |  490 ------
 .../contrib/libwebhdfs/src/hdfs_http_client.h   |  294 ----
 .../contrib/libwebhdfs/src/hdfs_http_query.c    |  402 -----
 .../contrib/libwebhdfs/src/hdfs_http_query.h    |  240 ---
 .../contrib/libwebhdfs/src/hdfs_json_parser.c   |  654 --------
 .../contrib/libwebhdfs/src/hdfs_json_parser.h   |  178 --
 .../src/contrib/libwebhdfs/src/hdfs_web.c       | 1538 ------------------
 .../libwebhdfs/src/test_libwebhdfs_ops.c        |  552 -------
 .../libwebhdfs/src/test_libwebhdfs_read.c       |   78 -
 .../libwebhdfs/src/test_libwebhdfs_threaded.c   |  247 ---
 .../libwebhdfs/src/test_libwebhdfs_write.c      |  111 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |    2 +
 16 files changed, 4 insertions(+), 4923 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 9fa5fbf..9f2c77d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -32,7 +32,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <properties>
     <require.fuse>false</require.fuse>
-    <require.libwebhdfs>false</require.libwebhdfs>
   </properties>
 
   <dependencies>
@@ -140,7 +139,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                     <mkdir dir="${project.build.directory}/native"/>
                     <exec executable="cmake" dir="${project.build.directory}/native"
                           failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G '${generator}'"/>
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_FUSE=${require.fuse} -G '${generator}'"/>
                     </exec>
                     <exec executable="msbuild" dir="${project.build.directory}/native"
                           failonerror="true">
@@ -199,7 +198,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <target>
                     <mkdir dir="${project.build.directory}"/>
                     <exec executable="cmake" dir="${project.build.directory}" failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse}"/>
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_FUSE=${require.fuse}"/>
                     </exec>
                     <exec executable="make" dir="${project.build.directory}" failonerror="true">
                     </exec>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index 0a6f383..d7bfb76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -91,11 +91,6 @@ endfunction()
 add_subdirectory(main/native/libhdfs)
 add_subdirectory(main/native/libhdfs-tests)
 
-
-if(REQUIRE_LIBWEBHDFS)
-    add_subdirectory(contrib/libwebhdfs)
-endif()
-
 # Find Linux FUSE
 if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
     find_package(PkgConfig REQUIRED)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
deleted file mode 100644
index cc2b42d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-find_package(CURL REQUIRED)
-
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
-    "${CMAKE_SOURCE_DIR}/contrib/libwebhdfs/resources/")
-
-find_package(Jansson REQUIRED)
-include_directories(
-    ${JNI_INCLUDE_DIRS}
-    ${CMAKE_BINARY_DIR}
-    ${CMAKE_SOURCE_DIR}/main/native
-    ${CMAKE_SOURCE_DIR}/main/native/libhdfs
-    ${CMAKE_SOURCE_DIR}/main/native/libhdfs/include
-    ${OS_DIR}
-    ${JANSSON_INCLUDE_DIR}
-)
-
-add_definitions(-DLIBHDFS_DLL_EXPORT)
-
-hadoop_add_dual_library(webhdfs
-    src/hdfs_web.c
-    src/hdfs_http_client.c
-    src/hdfs_http_query.c
-    src/hdfs_json_parser.c
-    ../../main/native/libhdfs/exception.c
-    ../../main/native/libhdfs/jni_helper.c
-    ../../main/native/libhdfs/common/htable.c
-    ${OS_DIR}/mutexes.c
-    ${OS_DIR}/thread_local_storage.c
-)
-hadoop_target_link_dual_libraries(webhdfs
-    ${JAVA_JVM_LIBRARY}
-    ${CURL_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
-)
-hadoop_dual_output_directory(webhdfs target)
-set(LIBWEBHDFS_VERSION "0.0.0")
-set_target_properties(webhdfs PROPERTIES
-    SOVERSION ${LIBWEBHDFS_VERSION})
-
-add_executable(test_libwebhdfs_ops
-    src/test_libwebhdfs_ops.c
-)
-target_link_libraries(test_libwebhdfs_ops
-    webhdfs
-    native_mini_dfs
-)
-
-add_executable(test_libwebhdfs_read
-    src/test_libwebhdfs_read.c
-)
-target_link_libraries(test_libwebhdfs_read
-    webhdfs
-)
-
-add_executable(test_libwebhdfs_write
-    src/test_libwebhdfs_write.c
-)
-target_link_libraries(test_libwebhdfs_write
-    webhdfs
-)
-
-add_executable(test_libwebhdfs_threaded
-    src/test_libwebhdfs_threaded.c
-)
-target_link_libraries(test_libwebhdfs_threaded
-    webhdfs
-    native_mini_dfs
-    pthread
-)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/resources/FindJansson.cmake
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/resources/FindJansson.cmake b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/resources/FindJansson.cmake
deleted file mode 100644
index b8c67ea..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/resources/FindJansson.cmake
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-# - Try to find Jansson
-# Once done this will define
-#  JANSSON_FOUND - System has Jansson
-#  JANSSON_INCLUDE_DIRS - The Jansson include directories
-#  JANSSON_LIBRARIES - The libraries needed to use Jansson
-#  JANSSON_DEFINITIONS - Compiler switches required for using Jansson
-
-find_path(JANSSON_INCLUDE_DIR jansson.h
-          /usr/include
-          /usr/include/jansson
-          /usr/local/include )
-
-find_library(JANSSON_LIBRARY NAMES jansson
-             PATHS /usr/lib /usr/local/lib )
-
-set(JANSSON_LIBRARIES ${JANSSON_LIBRARY} )
-set(JANSSON_INCLUDE_DIRS ${JANSSON_INCLUDE_DIR} )
-
-include(FindPackageHandleStandardArgs)
-# handle the QUIETLY and REQUIRED arguments and set JANSSON_FOUND to TRUE
-# if all listed variables are TRUE
-find_package_handle_standard_args(Jansson  DEFAULT_MSG
-                                  JANSSON_LIBRARY JANSSON_INCLUDE_DIR)
-
-mark_as_advanced(JANSSON_INCLUDE_DIR JANSSON_LIBRARY )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.c
deleted file mode 100644
index dc5ca41..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.c
+++ /dev/null
@@ -1,490 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdlib.h>
-#include <string.h>
-#include <curl/curl.h>
-
-#include "hdfs_http_client.h"
-#include "exception.h"
-
-static pthread_mutex_t curlInitMutex = PTHREAD_MUTEX_INITIALIZER;
-static volatile int curlGlobalInited = 0;
-
-const char *hdfs_strerror(int errnoval)
-{
-#if defined(__sun)
-// MT-Safe under Solaris which doesn't support sys_errlist/sys_nerr
-  return strerror(errnoval);
-#else
-  if ((errnoval < 0) || (errnoval >= sys_nerr)) {
-    return "unknown error.";
-  }
-  return sys_errlist[errnoval];
-#endif
-}
-
-int initResponseBuffer(struct ResponseBuffer **buffer)
-{
-    struct ResponseBuffer *info = NULL;
-    int ret = 0;
-    info = calloc(1, sizeof(struct ResponseBuffer));
-    if (!info) {
-        ret = ENOMEM;
-    }
-    *buffer = info;
-    return ret;
-}
-
-void freeResponseBuffer(struct ResponseBuffer *buffer)
-{
-    if (buffer) {
-        if (buffer->content) {
-            free(buffer->content);
-        }
-        free(buffer);
-        buffer = NULL;
-    }
-}
-
-void freeResponse(struct Response *resp)
-{
-    if (resp) {
-        freeResponseBuffer(resp->body);
-        freeResponseBuffer(resp->header);
-        free(resp);
-        resp = NULL;
-    }
-}
-
-/** 
- * Callback used by libcurl for allocating local buffer and 
- * reading data to local buffer
- */
-static size_t writefunc(void *ptr, size_t size,
-                        size_t nmemb, struct ResponseBuffer *rbuffer)
-{
-    void *temp = NULL;
-    if (size * nmemb < 1) {
-        return 0;
-    }
-    if (!rbuffer) {
-        fprintf(stderr,
-                "ERROR: ResponseBuffer is NULL for the callback writefunc.\n");
-        return 0;
-    }
-    
-    if (rbuffer->remaining < size * nmemb) {
-        temp = realloc(rbuffer->content, rbuffer->offset + size * nmemb + 1);
-        if (temp == NULL) {
-            fprintf(stderr, "ERROR: fail to realloc in callback writefunc.\n");
-            return 0;
-        }
-        rbuffer->content = temp;
-        rbuffer->remaining = size * nmemb;
-    }
-    memcpy(rbuffer->content + rbuffer->offset, ptr, size * nmemb);
-    rbuffer->offset += size * nmemb;
-    (rbuffer->content)[rbuffer->offset] = '\0';
-    rbuffer->remaining -= size * nmemb;
-    return size * nmemb;
-}
-
-/**
- * Callback used by libcurl for reading data into buffer provided by user,
- * thus no need to reallocate buffer.
- */
-static size_t writeFuncWithUserBuffer(void *ptr, size_t size,
-                                   size_t nmemb, struct ResponseBuffer *rbuffer)
-{
-    size_t toCopy = 0;
-    if (size * nmemb < 1) {
-        return 0;
-    }
-    if (!rbuffer || !rbuffer->content) {
-        fprintf(stderr,
-                "ERROR: buffer to read is NULL for the "
-                "callback writeFuncWithUserBuffer.\n");
-        return 0;
-    }
-    
-    toCopy = rbuffer->remaining < (size * nmemb) ?
-                            rbuffer->remaining : (size * nmemb);
-    memcpy(rbuffer->content + rbuffer->offset, ptr, toCopy);
-    rbuffer->offset += toCopy;
-    rbuffer->remaining -= toCopy;
-    return toCopy;
-}
-
-/**
- * Callback used by libcurl for writing data to remote peer
- */
-static size_t readfunc(void *ptr, size_t size, size_t nmemb, void *stream)
-{
-    struct webhdfsBuffer *wbuffer = NULL;
-    if (size * nmemb < 1) {
-        return 0;
-    }
-    
-    wbuffer = stream;
-    pthread_mutex_lock(&wbuffer->writeMutex);
-    while (wbuffer->remaining == 0) {
-        /*
-         * The current remainning bytes to write is 0,
-         * check closeFlag to see whether need to finish the transfer.
-         * if yes, return 0; else, wait
-         */
-        if (wbuffer->closeFlag) { // We can close the transfer now
-            //For debug
-            fprintf(stderr, "CloseFlag is set, ready to close the transfer\n");
-            pthread_mutex_unlock(&wbuffer->writeMutex);
-            return 0;
-        } else {
-            // remaining == 0 but closeFlag is not set
-            // indicates that user's buffer has been transferred
-            pthread_cond_signal(&wbuffer->transfer_finish);
-            pthread_cond_wait(&wbuffer->newwrite_or_close,
-                                    &wbuffer->writeMutex);
-        }
-    }
-    
-    if (wbuffer->remaining > 0 && !wbuffer->closeFlag) {
-        size_t copySize = wbuffer->remaining < size * nmemb ?
-                                wbuffer->remaining : size * nmemb;
-        memcpy(ptr, wbuffer->wbuffer + wbuffer->offset, copySize);
-        wbuffer->offset += copySize;
-        wbuffer->remaining -= copySize;
-        pthread_mutex_unlock(&wbuffer->writeMutex);
-        return copySize;
-    } else {
-        fprintf(stderr, "ERROR: webhdfsBuffer's remaining is %ld, "
-                "it should be a positive value!\n", wbuffer->remaining);
-        pthread_mutex_unlock(&wbuffer->writeMutex);
-        return 0;
-    }
-}
-
-/**
- * Initialize the global libcurl environment
- */
-static void initCurlGlobal()
-{
-    if (!curlGlobalInited) {
-        pthread_mutex_lock(&curlInitMutex);
-        if (!curlGlobalInited) {
-            curl_global_init(CURL_GLOBAL_ALL);
-            curlGlobalInited = 1;
-        }
-        pthread_mutex_unlock(&curlInitMutex);
-    }
-}
-
-/**
- * Launch simple commands (commands without file I/O) and return response
- *
- * @param url       Target URL
- * @param method    HTTP method (GET/PUT/POST)
- * @param followloc Whether or not need to set CURLOPT_FOLLOWLOCATION
- * @param response  Response from remote service
- * @return 0 for success and non-zero value to indicate error
- */
-static int launchCmd(const char *url, enum HttpHeader method,
-                     enum Redirect followloc, struct Response **response)
-{
-    CURL *curl = NULL;
-    CURLcode curlCode;
-    int ret = 0;
-    struct Response *resp = NULL;
-    
-    resp = calloc(1, sizeof(struct Response));
-    if (!resp) {
-        return ENOMEM;
-    }
-    ret = initResponseBuffer(&(resp->body));
-    if (ret) {
-        goto done;
-    }
-    ret = initResponseBuffer(&(resp->header));
-    if (ret) {
-        goto done;
-    }
-    initCurlGlobal();
-    curl = curl_easy_init();
-    if (!curl) {
-        ret = ENOMEM;       // curl_easy_init does not return error code,
-                            // and most of its errors are caused by malloc()
-        fprintf(stderr, "ERROR in curl_easy_init.\n");
-        goto done;
-    }
-    /* Set callback function for reading data from remote service */
-    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
-    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
-    curl_easy_setopt(curl, CURLOPT_URL, url);
-    switch(method) {
-        case GET:
-            break;
-        case PUT:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT");
-            break;
-        case POST:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST");
-            break;
-        case DELETE:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "DELETE");
-            break;
-        default:
-            ret = EINVAL;
-            fprintf(stderr, "ERROR: Invalid HTTP method\n");
-            goto done;
-    }
-    if (followloc == YES) {
-        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
-    }
-    /* Now run the curl handler */
-    curlCode = curl_easy_perform(curl);
-    if (curlCode != CURLE_OK) {
-        ret = EIO;
-        fprintf(stderr, "ERROR: preform the URL %s failed, <%d>: %s\n",
-                url, curlCode, curl_easy_strerror(curlCode));
-    }
-done:
-    if (curl != NULL) {
-        curl_easy_cleanup(curl);
-    }
-    if (ret) {
-        free(resp);
-        resp = NULL;
-    }
-    *response = resp;
-    return ret;
-}
-
-/**
- * Launch the read request. The request is sent to the NameNode and then 
- * redirected to corresponding DataNode
- *
- * @param url   The URL for the read request
- * @param resp  The response containing the buffer provided by user
- * @return 0 for success and non-zero value to indicate error
- */
-static int launchReadInternal(const char *url, struct Response* resp)
-{
-    CURL *curl;
-    CURLcode curlCode;
-    int ret = 0;
-    
-    if (!resp || !resp->body || !resp->body->content) {
-        fprintf(stderr,
-                "ERROR: invalid user-provided buffer!\n");
-        return EINVAL;
-    }
-    
-    initCurlGlobal();
-    /* get a curl handle */
-    curl = curl_easy_init();
-    if (!curl) {
-        fprintf(stderr, "ERROR in curl_easy_init.\n");
-        return ENOMEM;
-    }
-    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeFuncWithUserBuffer);
-    curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
-    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
-    curl_easy_setopt(curl, CURLOPT_URL, url);
-    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
-    
-    curlCode = curl_easy_perform(curl);
-    if (curlCode != CURLE_OK && curlCode != CURLE_PARTIAL_FILE) {
-        ret = EIO;
-        fprintf(stderr, "ERROR: preform the URL %s failed, <%d>: %s\n",
-                url, curlCode, curl_easy_strerror(curlCode));
-    }
-    
-    curl_easy_cleanup(curl);
-    return ret;
-}
-
-/**
- * The function does the write operation by connecting to a DataNode. 
- * The function keeps the connection with the DataNode until 
- * the closeFlag is set. Whenever the current data has been sent out, 
- * the function blocks waiting for further input from user or close.
- *
- * @param url           URL of the remote DataNode
- * @param method        PUT for create and POST for append
- * @param uploadBuffer  Buffer storing user's data to write
- * @param response      Response from remote service
- * @return 0 for success and non-zero value to indicate error
- */
-static int launchWrite(const char *url, enum HttpHeader method,
-                       struct webhdfsBuffer *uploadBuffer,
-                       struct Response **response)
-{
-    CURLcode curlCode;
-    struct Response* resp = NULL;
-    struct curl_slist *chunk = NULL;
-    CURL *curl = NULL;
-    int ret = 0;
-    
-    if (!uploadBuffer) {
-        fprintf(stderr, "ERROR: upload buffer is NULL!\n");
-        return EINVAL;
-    }
-    
-    initCurlGlobal();
-    resp = calloc(1, sizeof(struct Response));
-    if (!resp) {
-        return ENOMEM;
-    }
-    ret = initResponseBuffer(&(resp->body));
-    if (ret) {
-        goto done;
-    }
-    ret = initResponseBuffer(&(resp->header));
-    if (ret) {
-        goto done;
-    }
-    
-    // Connect to the datanode in order to create the lease in the namenode
-    curl = curl_easy_init();
-    if (!curl) {
-        fprintf(stderr, "ERROR: failed to initialize the curl handle.\n");
-        return ENOMEM;
-    }
-    curl_easy_setopt(curl, CURLOPT_URL, url);
-    
-    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
-    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
-    curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
-    curl_easy_setopt(curl, CURLOPT_READFUNCTION, readfunc);
-    curl_easy_setopt(curl, CURLOPT_READDATA, uploadBuffer);
-    curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
-    
-    chunk = curl_slist_append(chunk, "Transfer-Encoding: chunked");
-    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
-    chunk = curl_slist_append(chunk, "Expect:");
-    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
-    
-    switch(method) {
-        case PUT:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT");
-            break;
-        case POST:
-            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST");
-            break;
-        default:
-            ret = EINVAL;
-            fprintf(stderr, "ERROR: Invalid HTTP method\n");
-            goto done;
-    }
-    curlCode = curl_easy_perform(curl);
-    if (curlCode != CURLE_OK) {
-        ret = EIO;
-        fprintf(stderr, "ERROR: preform the URL %s failed, <%d>: %s\n",
-                url, curlCode, curl_easy_strerror(curlCode));
-    }
-    
-done:
-    if (chunk != NULL) {
-        curl_slist_free_all(chunk);
-    }
-    if (curl != NULL) {
-        curl_easy_cleanup(curl);
-    }
-    if (ret) {
-        free(resp);
-        resp = NULL;
-    }
-    *response = resp;
-    return ret;
-}
-
-int launchMKDIR(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchRENAME(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchGFS(const char *url, struct Response **resp)
-{
-    return launchCmd(url, GET, NO, resp);
-}
-
-int launchLS(const char *url, struct Response **resp)
-{
-    return launchCmd(url, GET, NO, resp);
-}
-
-int launchCHMOD(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchCHOWN(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchDELETE(const char *url, struct Response **resp)
-{
-    return launchCmd(url, DELETE, NO, resp);
-}
-
-int launchOPEN(const char *url, struct Response* resp)
-{
-    return launchReadInternal(url, resp);
-}
-
-int launchUTIMES(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchNnWRITE(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}
-
-int launchNnAPPEND(const char *url, struct Response **resp)
-{
-    return launchCmd(url, POST, NO, resp);
-}
-
-int launchDnWRITE(const char *url, struct webhdfsBuffer *buffer,
-                               struct Response **resp)
-{
-    return launchWrite(url, PUT, buffer, resp);
-}
-
-int launchDnAPPEND(const char *url, struct webhdfsBuffer *buffer,
-                                struct Response **resp)
-{
-    return launchWrite(url, POST, buffer, resp);
-}
-
-int launchSETREPLICATION(const char *url, struct Response **resp)
-{
-    return launchCmd(url, PUT, NO, resp);
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.h
deleted file mode 100644
index ab85464..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.h
+++ /dev/null
@@ -1,294 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-#ifndef _HDFS_HTTP_CLIENT_H_
-#define _HDFS_HTTP_CLIENT_H_
-
-#include "hdfs/hdfs.h" /* for tSize */
-
-#include <pthread.h> /* for pthread_t */
-#include <unistd.h> /* for size_t */
-
-/** enum indicating the type of hdfs stream */
-enum hdfsStreamType
-{
-    UNINITIALIZED = 0,
-    INPUT = 1,
-    OUTPUT = 2,
-};
-
-/**
- * webhdfsBuffer - used for hold the data for read/write from/to http connection
- */
-struct webhdfsBuffer {
-    const char *wbuffer;  /* The user's buffer for uploading */
-    size_t remaining;     /* Length of content */
-    size_t offset;        /* offset for reading */
-    /* Check whether the hdfsOpenFile has been called before */
-    int openFlag;
-    /* Whether to close the http connection for writing */
-    int closeFlag;
-    /* Synchronization between the curl and hdfsWrite threads */
-    pthread_mutex_t writeMutex;
-    /* 
-     * Transferring thread waits for this condition
-     * when there is no more content for transferring in the buffer
-     */
-    pthread_cond_t newwrite_or_close;
-    /* Condition used to indicate finishing transferring (one buffer) */
-    pthread_cond_t transfer_finish;
-};
-
-/** File handle for webhdfs */
-struct webhdfsFileHandle {
-    char *absPath;        /* Absolute path of file */
-    int bufferSize;       /* Size of buffer */
-    short replication;    /* Number of replication */
-    tSize blockSize;      /* Block size */
-    char *datanode;       /* URL of the DataNode */
-    /* webhdfsBuffer handle used to store the upload data */
-    struct webhdfsBuffer *uploadBuffer;
-    /* The thread used for data transferring */
-    pthread_t connThread;
-};
-
-/** Type of http header */
-enum HttpHeader {
-    GET,
-    PUT,
-    POST,
-    DELETE
-};
-
-/** Whether to redirect */
-enum Redirect {
-    YES,
-    NO
-};
-
-/** Buffer used for holding response */
-struct ResponseBuffer {
-    char *content;
-    size_t remaining;
-    size_t offset;
-};
-
-/**
- * The response got through webhdfs
- */
-struct Response {
-    struct ResponseBuffer *body;
-    struct ResponseBuffer *header;
-};
-
-/**
- * Create and initialize a ResponseBuffer
- *
- * @param buffer Pointer pointing to new created ResponseBuffer handle
- * @return 0 for success, non-zero value to indicate error
- */
-int initResponseBuffer(struct ResponseBuffer **buffer) __attribute__ ((warn_unused_result));
-
-/**
- * Free the given ResponseBuffer
- *
- * @param buffer The ResponseBuffer to free
- */
-void freeResponseBuffer(struct ResponseBuffer *buffer);
-
-/**
- * Free the given Response
- *
- * @param resp The Response to free
- */
-void freeResponse(struct Response *resp);
-
-/**
- * Send the MKDIR request to NameNode using the given URL. 
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for MKDIR operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchMKDIR(const char *url,
-                struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the RENAME request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for RENAME operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchRENAME(const char *url,
-                 struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the CHMOD request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for CHMOD operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchCHMOD(const char *url,
-                struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the GetFileStatus request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for GetFileStatus operation
- * @param response Response handle to store response returned from the NameNode,
- *                 containing either file status or exception information
- * @return 0 for success, non-zero value to indicate error
- */
-int launchGFS(const char *url,
-              struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the LS (LISTSTATUS) request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for LISTSTATUS operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchLS(const char *url,
-             struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the DELETE request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for DELETE operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchDELETE(const char *url,
-                 struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the CHOWN request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for CHOWN operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchCHOWN(const char *url,
-                struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the OPEN request to NameNode using the given URL, 
- * asking for reading a file (within a range). 
- * The NameNode first redirects the request to the datanode
- * that holds the corresponding first block of the file (within a range),
- * and the datanode returns the content of the file through the HTTP connection.
- *
- * @param url The URL for OPEN operation
- * @param resp The response holding user's buffer. 
-               The file content will be written into the buffer.
- * @return 0 for success, non-zero value to indicate error
- */
-int launchOPEN(const char *url,
-               struct Response* resp) __attribute__ ((warn_unused_result));
-
-/**
- * Send the SETTIMES request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for SETTIMES operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchUTIMES(const char *url,
-                 struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the WRITE/CREATE request to NameNode using the given URL.
- * The NameNode will choose the writing target datanodes 
- * and return the first datanode in the pipeline as response
- *
- * @param url The URL for WRITE/CREATE operation connecting to NameNode
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchNnWRITE(const char *url,
-                  struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the WRITE request along with to-write content to 
- * the corresponding DataNode using the given URL. 
- * The DataNode will write the data and return the response.
- *
- * @param url The URL for WRITE operation connecting to DataNode
- * @param buffer The webhdfsBuffer containing data to be written to hdfs
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchDnWRITE(const char *url, struct webhdfsBuffer *buffer,
-                  struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the WRITE (APPEND) request to NameNode using the given URL. 
- * The NameNode determines the DataNode for appending and 
- * sends its URL back as response.
- *
- * @param url The URL for APPEND operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchNnAPPEND(const char *url, struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the SETREPLICATION request to NameNode using the given URL.
- * The NameNode will execute the operation and return the result as response.
- *
- * @param url The URL for SETREPLICATION operation
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchSETREPLICATION(const char *url,
-                         struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Send the APPEND request along with the content to DataNode.
- * The DataNode will do the appending and return the result as response.
- *
- * @param url The URL for APPEND operation connecting to DataNode
- * @param buffer The webhdfsBuffer containing data to be appended
- * @param response Response handle to store response returned from the NameNode
- * @return 0 for success, non-zero value to indicate error
- */
-int launchDnAPPEND(const char *url, struct webhdfsBuffer *buffer,
-                   struct Response **response) __attribute__ ((warn_unused_result));
-
-/**
- * Thread-safe strerror alternative.
- *
- * @param errnoval  The error code value
- * @return          The error message string mapped to the given error code
- */
-const char *hdfs_strerror(int errnoval);
-
-#endif //_HDFS_HTTP_CLIENT_H_

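For context, the helpers declared in the removed header above were meant to be used in pairs: a createUrlFor* call from hdfs_http_query.h assembles the WebHDFS query URL, and the matching launch* call from hdfs_http_client.h performs the HTTP request and hands back a Response. A minimal usage sketch (not part of this patch; the host name, port, path, and user name are placeholder values):

    #include <stdlib.h>
    #include "hdfs_http_query.h"
    #include "hdfs_http_client.h"

    /* Sketch only: build the MKDIR query URL, launch the request against the
     * NameNode, then release the response buffers and the URL string. */
    static int mkdirSketch(void)
    {
        char *url = NULL;
        struct Response *resp = NULL;
        int ret = createUrlForMKDIR("namenode.example.com", 50070,
                                    "/tmp/demo", "hdfs", &url);
        if (ret) {
            return ret;                 /* could not assemble the query URL */
        }
        ret = launchMKDIR(url, &resp);  /* issues the PUT ...?op=MKDIRS request */
        freeResponse(resp);             /* frees body and header buffers */
        free(url);
        return ret;
    }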
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_query.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_query.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_query.c
deleted file mode 100644
index b082c08..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_query.c
+++ /dev/null
@@ -1,402 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "hdfs_http_query.h"
-#include <math.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-#include <errno.h>
-
-#define PERM_STR_LEN 4  // "644" + one byte for NUL
-#define SHORT_STR_LEN 6 // 65535 + NUL
-#define LONG_STR_LEN 21 // 2^64-1 = 18446744073709551615 + NUL
-
-/**
- * Create query based on NameNode hostname,
- * NameNode port, path, operation and other parameters
- *
- * @param host          NameNode hostName
- * @param nnPort        Port of NameNode
- * @param path          Absolute path for the corresponding file
- * @param op            Operations
- * @param paraNum       Number of remaining parameters
- * @param paraNames     Names of remaining parameters
- * @param paraValues    Values of remaining parameters
- * @param url           Holding the created URL
- * @return 0 on success and non-zero value to indicate error
- */
-static int createQueryURL(const char *host, unsigned int nnPort,
-                          const char *path, const char *op, int paraNum,
-                          const char **paraNames, const char **paraValues,
-                          char **queryUrl)
-{
-    size_t length = 0;
-    int i = 0, offset = 0, ret = 0;
-    char *url = NULL;
-    const char *protocol = "http://";
-    const char *prefix = "/webhdfs/v1";
-    
-    if (!paraNames || !paraValues) {
-        return EINVAL;
-    }
-    length = strlen(protocol) + strlen(host) + strlen(":") +
-                SHORT_STR_LEN + strlen(prefix) + strlen(path) +
-                strlen ("?op=") + strlen(op);
-    for (i = 0; i < paraNum; i++) {
-        if (paraNames[i] && paraValues[i]) {
-            length += 2 + strlen(paraNames[i]) + strlen(paraValues[i]);
-        }
-    }
-    url = malloc(length);   // The '\0' has already been included
-                            // when using SHORT_STR_LEN
-    if (!url) {
-        return ENOMEM;
-    }
-    
-    offset = snprintf(url, length, "%s%s:%d%s%s?op=%s",
-                      protocol, host, nnPort, prefix, path, op);
-    if (offset >= length || offset < 0) {
-        ret = EIO;
-        goto done;
-    }
-    for (i = 0; i < paraNum; i++) {
-        if (!paraNames[i] || !paraValues[i] || paraNames[i][0] == '\0' ||
-            paraValues[i][0] == '\0') {
-            continue;
-        }
-        offset += snprintf(url + offset, length - offset,
-                           "&%s=%s", paraNames[i], paraValues[i]);
-        if (offset >= length || offset < 0) {
-            ret = EIO;
-            goto done;
-        }
-    }
-done:
-    if (ret) {
-        free(url);
-        return ret;
-    }
-    *queryUrl = url;
-    return 0;
-}
-
-int createUrlForMKDIR(const char *host, int nnPort,
-                      const char *path, const char *user, char **url)
-{
-    const char *userPara = "user.name";
-    return createQueryURL(host, nnPort, path, "MKDIRS", 1,
-                          &userPara, &user, url);
-}
-
-int createUrlForGetFileStatus(const char *host, int nnPort, const char *path,
-                              const char *user, char **url)
-{
-    const char *userPara = "user.name";
-    return createQueryURL(host, nnPort, path, "GETFILESTATUS", 1,
-                          &userPara, &user, url);
-}
-
-int createUrlForLS(const char *host, int nnPort, const char *path,
-                   const char *user, char **url)
-{
-    const char *userPara = "user.name";
-    return createQueryURL(host, nnPort, path, "LISTSTATUS",
-                          1, &userPara, &user, url);
-}
-
-int createUrlForNnAPPEND(const char *host, int nnPort, const char *path,
-                         const char *user, char **url)
-{
-    const char *userPara = "user.name";
-    return createQueryURL(host, nnPort, path, "APPEND",
-                          1, &userPara, &user, url);
-}
-
-int createUrlForMKDIRwithMode(const char *host, int nnPort, const char *path,
-                              int mode, const char *user, char **url)
-{
-    int strlength;
-    char permission[PERM_STR_LEN];
-    const char *paraNames[2], *paraValues[2];
-    
-    paraNames[0] = "permission";
-    paraNames[1] = "user.name";
-    memset(permission, 0, PERM_STR_LEN);
-    strlength = snprintf(permission, PERM_STR_LEN, "%o", mode);
-    if (strlength < 0 || strlength >= PERM_STR_LEN) {
-        return EIO;
-    }
-    paraValues[0] = permission;
-    paraValues[1] = user;
-    
-    return createQueryURL(host, nnPort, path, "MKDIRS", 2,
-                          paraNames, paraValues, url);
-}
-
-int createUrlForRENAME(const char *host, int nnPort, const char *srcpath,
-                         const char *destpath, const char *user, char **url)
-{
-    const char *paraNames[2], *paraValues[2];
-    paraNames[0] = "destination";
-    paraNames[1] = "user.name";
-    paraValues[0] = destpath;
-    paraValues[1] = user;
-    
-    return createQueryURL(host, nnPort, srcpath,
-                          "RENAME", 2, paraNames, paraValues, url);
-}
-
-int createUrlForCHMOD(const char *host, int nnPort, const char *path,
-                      int mode, const char *user, char **url)
-{
-    int strlength;
-    char permission[PERM_STR_LEN];
-    const char *paraNames[2], *paraValues[2];
-    
-    paraNames[0] = "permission";
-    paraNames[1] = "user.name";
-    memset(permission, 0, PERM_STR_LEN);
-    strlength = snprintf(permission, PERM_STR_LEN, "%o", mode);
-    if (strlength < 0 || strlength >= PERM_STR_LEN) {
-        return EIO;
-    }
-    paraValues[0] = permission;
-    paraValues[1] = user;
-    
-    return createQueryURL(host, nnPort, path, "SETPERMISSION",
-                          2, paraNames, paraValues, url);
-}
-
-int createUrlForDELETE(const char *host, int nnPort, const char *path,
-                       int recursive, const char *user, char **url)
-{
-    const char *paraNames[2], *paraValues[2];
-    paraNames[0] = "recursive";
-    paraNames[1] = "user.name";
-    if (recursive) {
-        paraValues[0] = "true";
-    } else {
-        paraValues[0] = "false";
-    }
-    paraValues[1] = user;
-    
-    return createQueryURL(host, nnPort, path, "DELETE",
-                          2, paraNames, paraValues, url);
-}
-
-int createUrlForCHOWN(const char *host, int nnPort, const char *path,
-                      const char *owner, const char *group,
-                      const char *user, char **url)
-{
-    const char *paraNames[3], *paraValues[3];
-    paraNames[0] = "owner";
-    paraNames[1] = "group";
-    paraNames[2] = "user.name";
-    paraValues[0] = owner;
-    paraValues[1] = group;
-    paraValues[2] = user;
-    
-    return createQueryURL(host, nnPort, path, "SETOWNER",
-                          3, paraNames, paraValues, url);
-}
-
-int createUrlForOPEN(const char *host, int nnPort, const char *path,
-                     const char *user, size_t offset, size_t length, char **url)
-{
-    int strlength;
-    char offsetStr[LONG_STR_LEN], lengthStr[LONG_STR_LEN];
-    const char *paraNames[3], *paraValues[3];
-    
-    paraNames[0] = "offset";
-    paraNames[1] = "length";
-    paraNames[2] = "user.name";
-    memset(offsetStr, 0, LONG_STR_LEN);
-    memset(lengthStr, 0, LONG_STR_LEN);
-    strlength = snprintf(offsetStr, LONG_STR_LEN, "%lu", offset);
-    if (strlength < 0 || strlength >= LONG_STR_LEN) {
-        return EIO;
-    }
-    strlength = snprintf(lengthStr, LONG_STR_LEN, "%lu", length);
-    if (strlength < 0 || strlength >= LONG_STR_LEN) {
-        return EIO;
-    }
-    paraValues[0] = offsetStr;
-    paraValues[1] = lengthStr;
-    paraValues[2] = user;
-    
-    return createQueryURL(host, nnPort, path, "OPEN",
-                          3, paraNames, paraValues, url);
-}
-
-int createUrlForUTIMES(const char *host, int nnPort, const char *path,
-                       long unsigned mTime, long unsigned aTime,
-                       const char *user, char **url)
-{
-    int strlength;
-    char modTime[LONG_STR_LEN], acsTime[LONG_STR_LEN];
-    const char *paraNames[3], *paraValues[3];
-    
-    memset(modTime, 0, LONG_STR_LEN);
-    memset(acsTime, 0, LONG_STR_LEN);
-    strlength = snprintf(modTime, LONG_STR_LEN, "%lu", mTime);
-    if (strlength < 0 || strlength >= LONG_STR_LEN) {
-        return EIO;
-    }
-    strlength = snprintf(acsTime, LONG_STR_LEN, "%lu", aTime);
-    if (strlength < 0 || strlength >= LONG_STR_LEN) {
-        return EIO;
-    }
-    paraNames[0] = "modificationtime";
-    paraNames[1] = "accesstime";
-    paraNames[2] = "user.name";
-    paraValues[0] = modTime;
-    paraValues[1] = acsTime;
-    paraValues[2] = user;
-    
-    return createQueryURL(host, nnPort, path, "SETTIMES",
-                          3, paraNames, paraValues, url);
-}
-
-int createUrlForNnWRITE(const char *host, int nnPort,
-                        const char *path, const char *user,
-                        int16_t replication, size_t blockSize, char **url)
-{
-    int strlength;
-    char repStr[SHORT_STR_LEN], blockSizeStr[LONG_STR_LEN];
-    const char *paraNames[4], *paraValues[4];
-    
-    memset(repStr, 0, SHORT_STR_LEN);
-    memset(blockSizeStr, 0, LONG_STR_LEN);
-    if (replication > 0) {
-        strlength = snprintf(repStr, SHORT_STR_LEN, "%u", replication);
-        if (strlength < 0 || strlength >= SHORT_STR_LEN) {
-            return EIO;
-        }
-    }
-    if (blockSize > 0) {
-        strlength = snprintf(blockSizeStr, LONG_STR_LEN, "%lu", blockSize);
-        if (strlength < 0 || strlength >= LONG_STR_LEN) {
-            return EIO;
-        }
-    }
-    paraNames[0] = "overwrite";
-    paraNames[1] = "replication";
-    paraNames[2] = "blocksize";
-    paraNames[3] = "user.name";
-    paraValues[0] = "true";
-    paraValues[1] = repStr;
-    paraValues[2] = blockSizeStr;
-    paraValues[3] = user;
-    
-    return createQueryURL(host, nnPort, path, "CREATE",
-                          4, paraNames, paraValues, url);
-}
-
-int createUrlForSETREPLICATION(const char *host, int nnPort,
-                               const char *path, int16_t replication,
-                               const char *user, char **url)
-{
-    char repStr[SHORT_STR_LEN];
-    const char *paraNames[2], *paraValues[2];
-    int strlength;
-
-    memset(repStr, 0, SHORT_STR_LEN);
-    if (replication > 0) {
-        strlength = snprintf(repStr, SHORT_STR_LEN, "%u", replication);
-        if (strlength < 0 || strlength >= SHORT_STR_LEN) {
-            return EIO;
-        }
-    }
-    paraNames[0] = "replication";
-    paraNames[1] = "user.name";
-    paraValues[0] = repStr;
-    paraValues[1] = user;
-    
-    return createQueryURL(host, nnPort, path, "SETREPLICATION",
-                          2, paraNames, paraValues, url);
-}
-
-int createUrlForGetBlockLocations(const char *host, int nnPort,
-                                  const char *path, size_t offset,
-                                  size_t length, const char *user, char **url)
-{
-    char offsetStr[LONG_STR_LEN], lengthStr[LONG_STR_LEN];
-    const char *paraNames[3], *paraValues[3];
-    int strlength;
-    
-    memset(offsetStr, 0, LONG_STR_LEN);
-    memset(lengthStr, 0, LONG_STR_LEN);
-    if (offset > 0) {
-        strlength = snprintf(offsetStr, LONG_STR_LEN, "%lu", offset);
-        if (strlength < 0 || strlength >= LONG_STR_LEN) {
-            return EIO;
-        }
-    }
-    if (length > 0) {
-        strlength = snprintf(lengthStr, LONG_STR_LEN, "%lu", length);
-        if (strlength < 0 || strlength >= LONG_STR_LEN) {
-            return EIO;
-        }
-    }
-    paraNames[0] = "offset";
-    paraNames[1] = "length";
-    paraNames[2] = "user.name";
-    paraValues[0] = offsetStr;
-    paraValues[1] = lengthStr;
-    paraValues[2] = user;
-    
-    return createQueryURL(host, nnPort, path, "GET_BLOCK_LOCATIONS",
-                          3, paraNames, paraValues, url);
-}
-
-int createUrlForReadFromDatanode(const char *dnHost, int dnPort,
-                                 const char *path, size_t offset,
-                                 size_t length, const char *user,
-                                 const char *namenodeRpcAddr, char **url)
-{
-    char offsetStr[LONG_STR_LEN], lengthStr[LONG_STR_LEN];
-    const char *paraNames[4], *paraValues[4];
-    int strlength;
-    
-    memset(offsetStr, 0, LONG_STR_LEN);
-    memset(lengthStr, 0, LONG_STR_LEN);
-    if (offset > 0) {
-        strlength = snprintf(offsetStr, LONG_STR_LEN, "%lu", offset);
-        if (strlength < 0 || strlength >= LONG_STR_LEN) {
-            return EIO;
-        }
-    }
-    if (length > 0) {
-        strlength = snprintf(lengthStr, LONG_STR_LEN, "%lu", length);
-        if (strlength < 0 || strlength >= LONG_STR_LEN) {
-            return EIO;
-        }
-    }
-    
-    paraNames[0] = "offset";
-    paraNames[1] = "length";
-    paraNames[2] = "user.name";
-    paraNames[3] = "namenoderpcaddress";
-    paraValues[0] = offsetStr;
-    paraValues[1] = lengthStr;
-    paraValues[2] = user;
-    paraValues[3] = namenodeRpcAddr;
-    
-    return createQueryURL(dnHost, dnPort, path, "OPEN",
-                          4, paraNames, paraValues, url);
-}
\ No newline at end of file

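For context, createQueryURL above assembles ordinary WebHDFS REST URLs. For example, createUrlForMKDIRwithMode with mode 0755 and user name "hdfs" produces a URL of the form

    http://<namenode-host>:<nn-port>/webhdfs/v1/<path>?op=MKDIRS&permission=755&user.name=hdfs

which launchMKDIR then sends as an HTTP PUT (the host, port, and path here are placeholders).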
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_query.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_query.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_query.h
deleted file mode 100644
index 432797b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_query.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef _HDFS_HTTP_QUERY_H_
-#define _HDFS_HTTP_QUERY_H_
-
-#include <unistd.h> /* for size_t */
-#include <inttypes.h> /* for int16_t */
-
-/**
- * Create the URL for a MKDIR request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the dir to create
- * @param user User name
- * @param url Holding the generated URL for MKDIR request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForMKDIR(const char *host, int nnPort,
-                      const char *path, const char *user,
-                      char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a MKDIR (with mode) request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the dir to create
- * @param mode Mode of MKDIR
- * @param user User name
- * @param url Holding the generated URL for MKDIR request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForMKDIRwithMode(const char *host, int nnPort, const char *path,
-                              int mode, const char *user,
-                              char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a RENAME request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param srcpath Source path
- * @param dstpath Destination path
- * @param user User name
- * @param url Holding the generated URL for RENAME request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForRENAME(const char *host, int nnPort, const char *srcpath,
-                       const char *dstpath, const char *user,
-                       char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a CHMOD request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Target path
- * @param mode New mode for the file
- * @param user User name
- * @param url Holding the generated URL for CHMOD request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForCHMOD(const char *host, int nnPort, const char *path,
-                      int mode, const char *user,
-                      char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a GETFILESTATUS request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the target file
- * @param user User name
- * @param url Holding the generated URL for GETFILESTATUS request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForGetFileStatus(const char *host, int nnPort,
-                              const char *path, const char *user,
-                              char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a LISTSTATUS request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the directory for listing
- * @param user User name
- * @param url Holding the generated URL for LISTSTATUS request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForLS(const char *host, int nnPort,
-                   const char *path, const char *user,
-                   char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a DELETE request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the file to be deleted
- * @param recursive Whether to delete recursively
- * @param user User name
- * @param url Holding the generated URL for DELETE request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForDELETE(const char *host, int nnPort, const char *path,
-                       int recursive, const char *user,
-                       char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a CHOWN request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the target
- * @param owner New owner
- * @param group New group
- * @param user User name
- * @param url Holding the generated URL for CHOWN request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForCHOWN(const char *host, int nnPort, const char *path,
-                      const char *owner, const char *group, const char *user,
-                      char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for an OPEN/READ request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the file to read
- * @param user User name
- * @param offset Offset for reading (the start position for this read)
- * @param length Length of the file to read
- * @param url Holding the generated URL for OPEN/READ request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForOPEN(const char *host, int nnPort, const char *path,
-                     const char *user, size_t offset, size_t length,
-                     char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a UTIMES (update time) request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the file for updating time
- * @param mTime Modified time to set
- * @param aTime Access time to set
- * @param user User name
- * @param url Holding the generated URL for UTIMES request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForUTIMES(const char *host, int nnPort, const char *path,
-                       long unsigned mTime, long unsigned aTime,
-                       const char *user,
-                       char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a WRITE/CREATE request (sent to NameNode)
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the file to create
- * @param user User name
- * @param replication Replication factor of the file
- * @param blockSize Size of the block for the file
- * @param url Holding the generated URL for WRITE request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForNnWRITE(const char *host, int nnPort, const char *path,
-                        const char *user, int16_t replication, size_t blockSize,
-                        char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for an APPEND request (sent to NameNode)
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the file for appending
- * @param user User name
- * @param url Holding the generated URL for APPEND request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForNnAPPEND(const char *host, int nnPort,
-                         const char *path, const char *user,
-                         char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a SETREPLICATION request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the target file
- * @param replication New replication number
- * @param user User name
- * @param url Holding the generated URL for SETREPLICATION request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForSETREPLICATION(const char *host, int nnPort, const char *path,
-                               int16_t replication, const char *user,
-                               char **url) __attribute__ ((warn_unused_result));
-
-/**
- * Create the URL for a GET_BLOCK_LOCATIONS request
- *
- * @param host The hostname of the NameNode
- * @param nnPort Port of the NameNode
- * @param path Path of the target file
- * @param offset The offset in the file
- * @param length Length of the file content
- * @param user User name
- * @param url Holding the generated URL for GET_BLOCK_LOCATIONS request
- * @return 0 on success and non-zero value on errors
- */
-int createUrlForGetBlockLocations(const char *host, int nnPort,
-                            const char *path, size_t offset,
-                            size_t length, const char *user,
-                            char **url) __attribute__ ((warn_unused_result));
-
-
-#endif  //_HDFS_HTTP_QUERY_H_
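
For reference, the URL builders declared above produced standard WebHDFS REST
URLs of the form http://<host>:<port>/webhdfs/v1/<path>?op=<OP>&user.name=<user>.
The following is a minimal, self-contained sketch of the equivalent MKDIRS
construction; the host, port, path, and user values are placeholders, and the
HTTP PUT that libwebhdfs then issued against the URL (typically via libcurl)
is omitted.

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: build the kind of MKDIRS URL that createUrlForMKDIR()
 * returned through its char **url out-parameter. */
static char *buildMkdirUrl(const char *host, int nnPort,
                           const char *path, const char *user)
{
    const char *fmt = "http://%s:%d/webhdfs/v1%s?op=MKDIRS&user.name=%s";
    int len = snprintf(NULL, 0, fmt, host, nnPort, path, user);
    char *url = (len < 0) ? NULL : malloc(len + 1);
    if (!url) {
        return NULL;
    }
    snprintf(url, len + 1, fmt, host, nnPort, path, user);
    return url;
}

int main(void)
{
    /* Placeholder NameNode address and user name. */
    char *url = buildMkdirUrl("nn.example.com", 50070, "/tmp/demo", "hdfs");
    if (!url) {
        return 1;
    }
    printf("PUT %s\n", url);  /* the real client handed this URL to its HTTP layer */
    free(url);
    return 0;
}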

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.c
deleted file mode 100644
index f0973a6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.c
+++ /dev/null
@@ -1,654 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exception.h"
-#include "hdfs/hdfs.h" /* for hdfsFileInfo */
-#include "hdfs_json_parser.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <ctype.h>
-#include <jansson.h>
-
-static const char * const temporaryRedirectCode = "307 TEMPORARY_REDIRECT";
-static const char * const twoHundredOKCode = "200 OK";
-static const char * const twoHundredOneCreatedCode = "201 Created";
-static const char * const httpHeaderString = "HTTP/1.1";
-
-/**
- * Exception information after calling JSON operations
- */
-struct jsonException {
-  const char *exception;
-  const char *javaClassName;
-  const char *message;
-};
-
-/** Print out the JSON exception information */
-static int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
-                               const char *fmt, va_list ap)
-{
-    char *javaClassName = NULL;
-    int excErrno = EINTERNAL, shouldPrint = 0;
-    if (!exc) {
-        fprintf(stderr, "printJsonExceptionV: the jsonException is NULL\n");
-        return EINTERNAL;
-    }
-    javaClassName = strdup(exc->javaClassName);
-    if (!javaClassName) {
-        fprintf(stderr, "printJsonExceptionV: internal out of memory error\n");
-        return EINTERNAL;
-    }
-    getExceptionInfo(javaClassName, noPrintFlags, &excErrno, &shouldPrint);
-    free(javaClassName);
-    
-    if (shouldPrint) {
-        vfprintf(stderr, fmt, ap);
-        fprintf(stderr, " error:\n");
-        fprintf(stderr, "Exception: %s\nJavaClassName: %s\nMessage: %s\n",
-                exc->exception, exc->javaClassName, exc->message);
-    }
-    
-    free(exc);
-    return excErrno;
-}
-
-/**
- * Print out JSON exception information.
- *
- * @param exc             The exception information to print and free
- * @param noPrintFlags    Flags which determine which exceptions we should NOT
- *                        print.
- * @param fmt             Printf-style format list
- * @param ...             Printf-style varargs
- *
- * @return                The POSIX error number associated with the exception
- *                        object.
- */
-static int printJsonException(struct jsonException *exc, int noPrintFlags,
-                              const char *fmt, ...)
-{
-    va_list ap;
-    int ret = 0;
-    
-    va_start(ap, fmt);
-    ret = printJsonExceptionV(exc, noPrintFlags, fmt, ap);
-    va_end(ap);
-    return ret;
-}
-
-/** Parse the exception information from JSON */
-static struct jsonException *parseJsonException(json_t *jobj)
-{
-    const char *key = NULL;
-    json_t *value = NULL;
-    struct jsonException *exception = NULL;
-    void *iter = NULL;
-    
-    exception = calloc(1, sizeof(*exception));
-    if (!exception) {
-        return NULL;
-    }
-    
-    iter = json_object_iter(jobj);
-    while (iter) {
-        key = json_object_iter_key(iter);
-        value = json_object_iter_value(iter);
-        
-        if (!strcmp(key, "exception")) {
-            exception->exception = json_string_value(value);
-        } else if (!strcmp(key, "javaClassName")) {
-            exception->javaClassName = json_string_value(value);
-        } else if (!strcmp(key, "message")) {
-            exception->message = json_string_value(value);
-        }
-        
-        iter = json_object_iter_next(jobj, iter);
-    }
-    return exception;
-}
-
-/** 
- * Parse the exception information which is presented in JSON
- * 
- * @param content   Exception information in JSON
- * @return          jsonException for printing out
- */
-static struct jsonException *parseException(const char *content)
-{
-    json_error_t error;
-    size_t flags = 0;
-    const char *key = NULL;
-    json_t *value;
-    json_t *jobj;
-    struct jsonException *exception = NULL;
-    
-    if (!content) {
-        return NULL;
-    }
-    jobj = json_loads(content, flags, &error);
-    if (!jobj) {
-        fprintf(stderr, "JSon parsing error: on line %d: %s\n",
-                error.line, error.text);
-        return NULL;
-    }
-    void *iter = json_object_iter(jobj);
-    while(iter)  {
-        key = json_object_iter_key(iter);
-        value = json_object_iter_value(iter);
-        
-        if (!strcmp(key, "RemoteException") &&
-                    json_typeof(value) == JSON_OBJECT) {
-            exception = parseJsonException(value);
-            break;
-        }
-        iter = json_object_iter_next(jobj, iter);
-    }
-    
-    json_decref(jobj);
-    return exception;
-}
-
-/**
- * Parse the response information which uses TRUE/FALSE 
- * to indicate whether the operation succeeded
- *
- * @param response  Response information
- * @return          0 to indicate success
- */
-static int parseBoolean(const char *response)
-{
-    json_t *root, *value;
-    json_error_t error;
-    size_t flags = 0;
-    int result = 0;
-    
-    root = json_loads(response, flags, &error);
-    if (!root) {
-        fprintf(stderr, "JSon parsing error: on line %d: %s\n",
-                error.line, error.text);
-        return EIO;
-    }
-    void *iter = json_object_iter(root);
-    value = json_object_iter_value(iter);
-    if (json_typeof(value) == JSON_TRUE)  {
-        result = 0;
-    } else {
-        result = EIO;  // FALSE means error in remote NN/DN
-    }
-    json_decref(root);
-    return result;
-}
-
-int parseMKDIR(const char *response)
-{
-    return parseBoolean(response);
-}
-
-int parseRENAME(const char *response)
-{
-    return parseBoolean(response);
-}
-
-int parseDELETE(const char *response)
-{
-    return parseBoolean(response);
-}
-
-int parseSETREPLICATION(const char *response)
-{
-    return parseBoolean(response);
-}
-
-/**
- * Check the header of response to see if it's 200 OK
- * 
- * @param header    Header information for checking
- * @param content   Stores exception information if there are errors
- * @param operation Indicate the operation for exception printing
- * @return 0 for success
- */
-static int checkHeader(const char *header, const char *content,
-                       const char *operation)
-{
-    char *result = NULL;
-    const char delims[] = ":";
-    char *savepter;
-    int ret = 0;
-    
-    if (!header || strncmp(header, "HTTP/", strlen("HTTP/"))) {
-        return EINVAL;
-    }
-    if (!(strstr(header, twoHundredOKCode)) ||
-       !(result = strstr(header, "Content-Length"))) {
-        struct jsonException *exc = parseException(content);
-        if (exc) {
-            ret = printJsonException(exc, PRINT_EXC_ALL,
-                                       "Calling WEBHDFS (%s)", operation);
-        } else {
-            ret = EIO;
-        }
-        return ret;
-    }
-    result = strtok_r(result, delims, &savepter);
-    result = strtok_r(NULL, delims, &savepter);
-    while (isspace(*result)) {
-        result++;
-    }
-    // Content-Length should be equal to 0,
-    // and the string should be "0\r\nServer"
-    if (strncmp(result, "0\r\n", 3)) {
-        ret = EIO;
-    }
-    return ret;
-}
-
-int parseCHMOD(const char *header, const char *content)
-{
-    return checkHeader(header, content, "CHMOD");
-}
-
-int parseCHOWN(const char *header, const char *content)
-{
-    return checkHeader(header, content, "CHOWN");
-}
-
-int parseUTIMES(const char *header, const char *content)
-{
-    return checkHeader(header, content, "SETTIMES");
-}
-
-/**
- * Check if the header contains correct information
- * ("307 TEMPORARY_REDIRECT" and "Location")
- * 
- * @param header    Header for parsing
- * @param content   Contains exception information 
- *                  if the remote operation failed
- * @param operation Specify the remote operation when printing out exception
- * @return 0 for success
- */
-static int checkRedirect(const char *header,
-                         const char *content, const char *operation)
-{
-    const char *locTag = "Location";
-    int ret = 0, offset = 0;
-    
-    // The header must start with "HTTP/1.1"
-    if (!header || strncmp(header, httpHeaderString,
-                           strlen(httpHeaderString))) {
-        return EINVAL;
-    }
-    
-    offset += strlen(httpHeaderString);
-    while (isspace(header[offset])) {
-        offset++;
-    }
-    // Looking for "307 TEMPORARY_REDIRECT" in header
-    if (strncmp(header + offset, temporaryRedirectCode,
-                strlen(temporaryRedirectCode))) {
-        // Process possible exception information
-        struct jsonException *exc = parseException(content);
-        if (exc) {
-            ret = printJsonException(exc, PRINT_EXC_ALL,
-                                     "Calling WEBHDFS (%s)", operation);
-        } else {
-            ret = EIO;
-        }
-        return ret;
-    }
-    // Here we simply check whether the header contains the "Location" tag;
-    // detailed processing is done in parseDnLoc
-    if (!(strstr(header, locTag))) {
-        ret = EIO;
-    }
-    return ret;
-}
-
-int parseNnWRITE(const char *header, const char *content)
-{
-    return checkRedirect(header, content, "Write(NameNode)");
-}
-
-int parseNnAPPEND(const char *header, const char *content)
-{
-    return checkRedirect(header, content, "Append(NameNode)");
-}
-
-/** 0 for success, -1 for out of range, other values for error */
-int parseOPEN(const char *header, const char *content)
-{
-    int ret = 0, offset = 0;
-    
-    if (!header || strncmp(header, httpHeaderString,
-                           strlen(httpHeaderString))) {
-        return EINVAL;
-    }
-    
-    offset += strlen(httpHeaderString);
-    while (isspace(header[offset])) {
-        offset++;
-    }
-    if (strncmp(header + offset, temporaryRedirectCode,
-                strlen(temporaryRedirectCode)) ||
-        !strstr(header, twoHundredOKCode)) {
-        struct jsonException *exc = parseException(content);
-        if (exc) {
-            // If the exception is an IOException and it is because
-            // the offset is out of the range, do not print out the exception
-            if (!strcasecmp(exc->exception, "IOException") &&
-                    strstr(exc->message, "out of the range")) {
-                ret = -1;
-            } else {
-                ret = printJsonException(exc, PRINT_EXC_ALL,
-                                       "Calling WEBHDFS (OPEN)");
-            }
-        } else {
-            ret = EIO;
-        }
-    }
-    return ret;
-}
-
-int parseDnLoc(char *content, char **dn)
-{
-    char *url = NULL, *dnLocation = NULL, *savepter, *tempContent;
-    const char *prefix = "Location: http://";
-    const char *prefixToRemove = "Location: ";
-    const char *delims = "\r\n";
-    
-    tempContent = strdup(content);
-    if (!tempContent) {
-        return ENOMEM;
-    }
-    
-    dnLocation = strtok_r(tempContent, delims, &savepter);
-    while (dnLocation && strncmp(dnLocation, "Location:",
-                                 strlen("Location:"))) {
-        dnLocation = strtok_r(NULL, delims, &savepter);
-    }
-    if (!dnLocation) {
-        return EIO;
-    }
-    
-    while (isspace(*dnLocation)) {
-        dnLocation++;
-    }
-    if (strncmp(dnLocation, prefix, strlen(prefix))) {
-        return EIO;
-    }
-    url = strdup(dnLocation + strlen(prefixToRemove));
-    if (!url) {
-        return ENOMEM;
-    }
-    *dn = url;
-    return 0;
-}
-
-int parseDnWRITE(const char *header, const char *content)
-{
-    int ret = 0;
-    if (header == NULL || header[0] == '\0' ||
-                         strncmp(header, "HTTP/", strlen("HTTP/"))) {
-        return EINVAL;
-    }
-    if (!(strstr(header, twoHundredOneCreatedCode))) {
-        struct jsonException *exc = parseException(content);
-        if (exc) {
-            ret = printJsonException(exc, PRINT_EXC_ALL,
-                                     "Calling WEBHDFS (WRITE(DataNode))");
-        } else {
-            ret = EIO;
-        }
-    }
-    return ret;
-}
-
-int parseDnAPPEND(const char *header, const char *content)
-{
-    int ret = 0;
-    
-    if (header == NULL || header[0] == '\0' ||
-                         strncmp(header, "HTTP/", strlen("HTTP/"))) {
-        return EINVAL;
-    }
-    if (!(strstr(header, twoHundredOKCode))) {
-        struct jsonException *exc = parseException(content);
-        if (exc) {
-            ret = printJsonException(exc, PRINT_EXC_ALL,
-                                     "Calling WEBHDFS (APPEND(DataNode))");
-        } else {
-            ret = EIO;
-        }
-    }
-    return ret;
-}
-
-/**
- * Retrieve file status from the JSON object 
- *
- * @param jobj          JSON object for parsing, which contains 
- *                      file status information
- * @param fileStat      hdfsFileInfo handle to hold file status information
- * @return 0 on success
- */
-static int parseJsonForFileStatus(json_t *jobj, hdfsFileInfo *fileStat)
-{
-    const char *key, *tempstr;
-    json_t *value;
-    void *iter = NULL;
-    
-    iter = json_object_iter(jobj);
-    while (iter) {
-        key = json_object_iter_key(iter);
-        value = json_object_iter_value(iter);
-        
-        if (!strcmp(key, "accessTime")) {
-            // json field contains time in milliseconds,
-            // hdfsFileInfo is counted in seconds
-            fileStat->mLastAccess = json_integer_value(value) / 1000;
-        } else if (!strcmp(key, "blockSize")) {
-            fileStat->mBlockSize = json_integer_value(value);
-        } else if (!strcmp(key, "length")) {
-            fileStat->mSize = json_integer_value(value);
-        } else if (!strcmp(key, "modificationTime")) {
-            fileStat->mLastMod = json_integer_value(value) / 1000;
-        } else if (!strcmp(key, "replication")) {
-            fileStat->mReplication = json_integer_value(value);
-        } else if (!strcmp(key, "group")) {
-            fileStat->mGroup = strdup(json_string_value(value));
-            if (!fileStat->mGroup) {
-                return ENOMEM;
-            }
-        } else if (!strcmp(key, "owner")) {
-            fileStat->mOwner = strdup(json_string_value(value));
-            if (!fileStat->mOwner) {
-                return ENOMEM;
-            }
-        } else if (!strcmp(key, "pathSuffix")) {
-            fileStat->mName = strdup(json_string_value(value));
-            if (!fileStat->mName) {
-                return ENOMEM;
-            }
-        } else if (!strcmp(key, "permission")) {
-            tempstr = json_string_value(value);
-            fileStat->mPermissions = (short) strtol(tempstr, NULL, 8);
-        } else if (!strcmp(key, "type")) {
-            tempstr = json_string_value(value);
-            if (!strcmp(tempstr, "DIRECTORY")) {
-                fileStat->mKind = kObjectKindDirectory;
-            } else {
-                fileStat->mKind = kObjectKindFile;
-            }
-        }
-        // Go to the next key-value pair in the json object
-        iter = json_object_iter_next(jobj, iter);
-    }
-    return 0;
-}
-
-int parseGFS(const char *response, hdfsFileInfo *fileStat, int printError)
-{
-    int ret = 0, printFlag;
-    json_error_t error;
-    size_t flags = 0;
-    json_t *jobj, *value;
-    const char *key;
-    void *iter = NULL;
-    
-    if (!response || !fileStat) {
-        return EIO;
-    }
-    jobj = json_loads(response, flags, &error);
-    if (!jobj) {
-        fprintf(stderr, "error while parsing json: on line %d: %s\n",
-                error.line, error.text);
-        return EIO;
-    }
-    iter = json_object_iter(jobj);
-    key = json_object_iter_key(iter);
-    value = json_object_iter_value(iter);
-    if (json_typeof(value) == JSON_OBJECT) {
-        if (!strcmp(key, "RemoteException")) {
-            struct jsonException *exception = parseJsonException(value);
-            if (exception) {
-                if (printError) {
-                    printFlag = PRINT_EXC_ALL;
-                } else {
-                    printFlag = NOPRINT_EXC_FILE_NOT_FOUND |
-                                NOPRINT_EXC_ACCESS_CONTROL |
-                                NOPRINT_EXC_PARENT_NOT_DIRECTORY;
-                }
-                ret = printJsonException(exception, printFlag,
-                                         "Calling WEBHDFS GETFILESTATUS");
-            } else {
-                ret = EIO;
-            }
-        } else if (!strcmp(key, "FileStatus")) {
-            ret = parseJsonForFileStatus(value, fileStat);
-        } else {
-            ret = EIO;
-        }
-        
-    } else {
-        ret = EIO;
-    }
-    
-    json_decref(jobj);
-    return ret;
-}
-
-/**
- * Parse the JSON array returned by the LISTSTATUS operation. Each element
- * of the array is a JSON object holding the information of one file entry
- * in the listed directory.
- *
- * @param jobj          The JSON array to be parsed
- * @param fileStat      The hdfsFileInfo handle used to 
- *                      store a group of file information
- * @param numEntries    Capture the number of files in the folder
- * @return              0 for success
- */
-static int parseJsonArrayForFileStatuses(json_t *jobj, hdfsFileInfo **fileStat,
-                                         int *numEntries)
-{
-    json_t *jvalue = NULL;
-    int i = 0, ret = 0, arraylen = 0;
-    hdfsFileInfo *fileInfo = NULL;
-    
-    arraylen = (int) json_array_size(jobj);
-    if (arraylen > 0) {
-        fileInfo = calloc(arraylen, sizeof(hdfsFileInfo));
-        if (!fileInfo) {
-            return ENOMEM;
-        }
-    }
-    for (i = 0; i < arraylen; i++) {
-        //Getting the array element at position i
-        jvalue = json_array_get(jobj, i);
-        if (json_is_object(jvalue)) {
-            ret = parseJsonForFileStatus(jvalue, &fileInfo[i]);
-            if (ret) {
-                goto done;
-            }
-        } else {
-            ret = EIO;
-            goto done;
-        }
-    }
-done:
-    if (ret) {
-        free(fileInfo);
-    } else {
-        *numEntries = arraylen;
-        *fileStat = fileInfo;
-    }
-    return ret;
-}
-
-int parseLS(const char *response, hdfsFileInfo **fileStats, int *numOfEntries)
-{
-    int ret = 0;
-    json_error_t error;
-    size_t flags = 0;
-    json_t *jobj, *value;
-    const char *key;
-    void *iter = NULL;
-    
-    if (!response || response[0] == '\0' || !fileStats) {
-        return EIO;
-    }
-    jobj = json_loads(response, flags, &error);
-    if (!jobj) {
-        fprintf(stderr, "error while parsing json: on line %d: %s\n",
-                error.line, error.text);
-        return EIO;
-    }
-    
-    iter = json_object_iter(jobj);
-    key = json_object_iter_key(iter);
-    value = json_object_iter_value(iter);
-    if (json_typeof(value) == JSON_OBJECT) {
-        if (!strcmp(key, "RemoteException")) {
-            struct jsonException *exception = parseJsonException(value);
-            if (exception) {
-                ret = printJsonException(exception, PRINT_EXC_ALL,
-                                         "Calling WEBHDFS LISTSTATUS");
-            } else {
-                ret = EIO;
-            }
-        } else if (!strcmp(key, "FileStatuses")) {
-            iter = json_object_iter(value);
-            value = json_object_iter_value(iter);
-            if (json_is_array(value)) {
-                ret = parseJsonArrayForFileStatuses(value, fileStats,
-                                                    numOfEntries);
-            } else {
-                ret = EIO;
-            }
-        } else {
-            ret = EIO;
-        }
-    } else {
-        ret = EIO;
-    }
-    
-    json_decref(jobj);
-    return ret;
-}
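
The boolean-style replies handled above (MKDIRS, RENAME, DELETE, SETREPLICATION)
reduce a WebHDFS response such as {"boolean":true} to 0 on success and EIO
otherwise. Below is a standalone Jansson sketch of the same check, using a
hard-coded placeholder reply rather than a live NameNode response.

#include <errno.h>
#include <stdio.h>
#include <jansson.h>

/* Return 0 if the reply contains "boolean": true, EIO otherwise,
 * mirroring the parseBoolean() helper removed above. */
static int checkBooleanReply(const char *response)
{
    json_error_t error;
    json_t *root = json_loads(response, 0, &error);
    if (!root) {
        fprintf(stderr, "JSON parsing error on line %d: %s\n",
                error.line, error.text);
        return EIO;
    }
    json_t *value = json_object_get(root, "boolean");
    int result = (value && json_is_true(value)) ? 0 : EIO;
    json_decref(root);
    return result;
}

int main(void)
{
    /* Placeholder reply; a real one would come from e.g. op=MKDIRS. */
    printf("%d\n", checkBooleanReply("{\"boolean\": true}"));
    return 0;
}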