Posted to common-commits@hadoop.apache.org by we...@apache.org on 2019/02/20 19:38:43 UTC

[hadoop] branch trunk updated: HDFS-14267. Add test_libhdfs_ops to libhdfs tests, mark libhdfs_read/write.c as examples. Contributed by Sahil Takiar.

This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new a30059b  HDFS-14267. Add test_libhdfs_ops to libhdfs tests, mark libhdfs_read/write.c as examples. Contributed by Sahil Takiar.
a30059b is described below

commit a30059bb61ba6f94b0a237c9e1ce1b3f871f7e6f
Author: Sahil Takiar <st...@apache.org>
AuthorDate: Wed Feb 20 11:36:37 2019 -0800

    HDFS-14267. Add test_libhdfs_ops to libhdfs tests, mark libhdfs_read/write.c as examples. Contributed by Sahil Takiar.
    
    Signed-off-by: Wei-Chiu Chuang <we...@apache.org>
---
 .../hadoop-hdfs-native-client/src/CMakeLists.txt   |   1 +
 .../main/native/libhdfs-examples/CMakeLists.txt    |  34 ++++++
 .../src/main/native/libhdfs-examples/README.md     |  24 +++++
 .../libhdfs_read.c}                                |  15 ++-
 .../libhdfs_write.c}                               |  13 ++-
 .../main/native/libhdfs-examples}/test-libhdfs.sh  |   6 +-
 .../main/native/libhdfs-tests/test_libhdfs_ops.c   | 119 ++++++++++++++-------
 .../src/main/native/libhdfs/CMakeLists.txt         |   8 +-
 8 files changed, 167 insertions(+), 53 deletions(-)
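
A note for readers skimming the patch: the heart of the change in test_libhdfs_ops.c is that the test no longer connects to whatever "default" resolves to via hdfsConnectNewInstance(); it now starts a native mini DFS cluster and connects to it through the hdfsBuilder API, and every failure path funnels through a new shutdown_and_exit() helper so the cluster is torn down even when an assertion fails. Below is a condensed sketch of the connection pattern, assembled from the hunks that follow (error handling trimmed; this is a sketch, not code lifted verbatim from the patch):

    #include "hdfs/hdfs.h"
    #include "native_mini_dfs.h"

    int main(void) {
        // Start a single-node mini DFS cluster, formatting its storage first.
        struct NativeMiniDfsConf conf = {
            1, /* doFormat */
        };
        struct NativeMiniDfsCluster *cl = nmdCreate(&conf);
        if (!cl || nmdWaitClusterUp(cl)) return 1;
        tPort port = (tPort) nmdGetNameNodePort(cl);

        // Connect to the mini cluster instead of the "default" filesystem.
        struct hdfsBuilder *bld = hdfsNewBuilder();
        hdfsBuilderSetForceNewInstance(bld);
        hdfsBuilderSetNameNode(bld, "localhost");
        hdfsBuilderSetNameNodePort(bld, port);
        // Needed so the append tests pass against a mini cluster; see the
        // comment in the patch below.
        hdfsBuilderConfSetStr(bld,
            "dfs.client.block.write.replace-datanode-on-failure.enable", "false");
        hdfsFS fs = hdfsBuilderConnect(bld);
        if (!fs) { nmdShutdown(cl); nmdFree(cl); return 1; }

        /* ... exercise the libhdfs API against fs ... */

        hdfsDisconnect(fs);
        nmdShutdown(cl);
        nmdFree(cl);
        return 0;
    }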

diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index a962f94..626c49b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -146,6 +146,7 @@ endif()
 
 add_subdirectory(main/native/libhdfs)
 add_subdirectory(main/native/libhdfs-tests)
+add_subdirectory(main/native/libhdfs-examples)
 
 # Temporary fix to disable Libhdfs++ build on older systems that do not support thread_local
 include(CheckCXXSourceCompiles)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/CMakeLists.txt
new file mode 100644
index 0000000..1d33639
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/CMakeLists.txt
@@ -0,0 +1,34 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+
+include_directories(
+    ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs/include
+    ${GENERATED_JAVAH}
+    ${CMAKE_BINARY_DIR}
+    ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs
+    ${JNI_INCLUDE_DIRS}
+    ${OS_DIR}
+)
+
+add_executable(hdfs_read libhdfs_read.c)
+target_link_libraries(hdfs_read hdfs)
+
+add_executable(hdfs_write libhdfs_write.c)
+target_link_libraries(hdfs_write hdfs)
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/README.md b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/README.md
new file mode 100644
index 0000000..c962feb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/README.md
@@ -0,0 +1,24 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+The files in this directory are meant purely to provide additional examples of how to use libhdfs. They are compiled as
+part of the build and are thus guaranteed to compile against the associated version of libhdfs. However, no tests exist
+for these examples, so their functionality is not guaranteed.
+
+The examples are written to run against a mini-dfs cluster. The script `test-libhdfs.sh` can set up a mini DFS cluster
+that the examples can run against. Again, none of this is tested, so it is not guaranteed to work.
\ No newline at end of file
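
For reference, both example programs take the same three positional arguments, as their usage messages below show (the invocation here is illustrative; the path and sizes are arbitrary):

    hdfs_write /tmp/example.txt 1048576 65536
    hdfs_read  /tmp/example.txt 1048576 65536

Both call hdfsConnect("default", 0), so they talk to whichever filesystem "default" resolves to in the client configuration, which is why they are meant to be run against the mini DFS cluster that test-libhdfs.sh stands up.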
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_read.c
similarity index 91%
rename from hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c
rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_read.c
index 4b90f2a..419be12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_read.c
@@ -16,11 +16,16 @@
  * limitations under the License.
  */
 
-#include "hdfs/hdfs.h" 
+#include "hdfs/hdfs.h"
 
 #include <stdio.h>
 #include <stdlib.h>
 
+/**
+ * An example of using libhdfs to read files. The usage of this program is as follows:
+ *
+ *   Usage: hdfs_read <filename> <filesize> <buffersize>
+ */
 int main(int argc, char **argv) {
     hdfsFS fs;
     const char *rfile = argv[1];
@@ -33,12 +38,12 @@ int main(int argc, char **argv) {
         fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
-    
+
     fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
-    } 
+    }
 
     readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
     if (!readFile) {
@@ -51,13 +56,13 @@ int main(int argc, char **argv) {
     if(buffer == NULL) {
         return -2;
     }
-    
+
     // read from the file
     curSize = bufferSize;
     for (; curSize == bufferSize;) {
         curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
     }
-    
+
 
     free(buffer);
     hdfsCloseFile(fs, readFile);
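
One caveat about the example's read loop: it stops at the first short read, and hdfsRead may legitimately return fewer bytes than requested before the file is exhausted. A slightly more defensive drain loop might look like the following sketch (not part of the patch; drain is an invented helper name, and the file and buffer are assumed to have been opened and allocated as in the example):

    #include "hdfs/hdfs.h"

    /* Read until EOF (hdfsRead returns 0) or error (hdfsRead returns -1),
     * tolerating short reads along the way. */
    static int drain(hdfsFS fs, hdfsFile file, char *buffer, tSize bufferSize) {
        tSize n;
        do {
            n = hdfsRead(fs, file, (void*)buffer, bufferSize);
        } while (n > 0);
        return (n < 0) ? -1 : 0;
    }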
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_write.c
similarity index 93%
rename from hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c
rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_write.c
index c55c8e3..8fbf87e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_write.c
@@ -16,13 +16,18 @@
  * limitations under the License.
  */
 
-#include "hdfs/hdfs.h" 
+#include "hdfs/hdfs.h"
 
 #include <limits.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/types.h>
 
+/**
+ * An example of using libhdfs to write files. The usage of this program is as follows:
+ *
+ *   Usage: hdfs_write <filename> <filesize> <buffersize>
+ */
 int main(int argc, char **argv) {
     hdfsFS fs;
     const char *writeFileName = argv[1];
@@ -40,12 +45,12 @@ int main(int argc, char **argv) {
         fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
-    
+
     fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
-    } 
+    }
 
     // sanity check
     if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
@@ -79,7 +84,7 @@ int main(int argc, char **argv) {
 
     // write to the file
     for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
-      curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining; 
+      curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
       if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
         fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
         exit(-3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/test-libhdfs.sh
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/test-libhdfs.sh
index 3407e9c..e43b0a5 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/test-libhdfs.sh
@@ -70,7 +70,7 @@ $HADOOP_HOME/share/hadoop/common/
 $HADOOP_HOME/share/hadoop/hdfs
 $HADOOP_HOME/share/hadoop/hdfs/lib/"
 
-for d in $JAR_DIRS; do 
+for d in $JAR_DIRS; do
   for j in $d/*.jar; do
     CLASSPATH=${CLASSPATH}:$j
   done;
@@ -114,14 +114,14 @@ LIB_JVM_DIR=`findlibjvm`
 echo  "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
 echo  LIB_JVM_DIR = $LIB_JVM_DIR
 echo  "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
-# Put delays to ensure hdfs is up and running and also shuts down 
+# Put delays to ensure hdfs is up and running and also shuts down
 # after the tests are complete
 rm $HDFS_TEST_CONF_DIR/core-site.xml
 
 $HADOOP_HOME/bin/hadoop jar $HDFS_TEST_JAR \
     org.apache.hadoop.test.MiniDFSClusterManager \
     -format -nnport 20300 -writeConfig $HDFS_TEST_CONF_DIR/core-site.xml \
-    > /tmp/libhdfs-test-cluster.out 2>&1 & 
+    > /tmp/libhdfs-test-cluster.out 2>&1 &
 
 MINI_CLUSTER_PID=$!
 for i in {1..15}; do
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
index d69aa37..1cd497b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
@@ -16,8 +16,10 @@
  * limitations under the License.
  */
 
-#include "hdfs/hdfs.h" 
-#include "hdfs_test.h" 
+#include "expect.h"
+#include "hdfs/hdfs.h"
+#include "hdfs_test.h"
+#include "native_mini_dfs.h"
 #include "platform.h"
 
 #include <inttypes.h>
@@ -59,7 +61,18 @@ void permission_disp(short permissions, char *rtr) {
       strncpy(rtr, perm, 3);
       rtr+=3;
     }
-} 
+}
+
+/**
+ * Shut down and free the given mini cluster, and then exit with the provided exit_code. This function is meant to be
+ * called with a non-zero exit code, which is why it ignores the return status of nmdShutdown, since the process is
+ * going to fail anyway.
+ */
+void shutdown_and_exit(struct NativeMiniDfsCluster* cl, int exit_code) {
+    nmdShutdown(cl);
+    nmdFree(cl);
+    exit(exit_code);
+}
 
 int main(int argc, char **argv) {
     const char *writePath = "/tmp/testfile.txt";
@@ -88,16 +101,47 @@ int main(int argc, char **argv) {
     short newPerm = 0666;
     tTime newMtime, newAtime;
 
-    fs = hdfsConnectNewInstance("default", 0);
+    // Create and start the mini cluster
+    struct NativeMiniDfsCluster* cl;
+    struct NativeMiniDfsConf conf = {
+        1, /* doFormat */
+    };
+
+    cl = nmdCreate(&conf);
+    EXPECT_NONNULL(cl);
+    EXPECT_ZERO(nmdWaitClusterUp(cl));
+    tPort port;
+    port = (tPort) nmdGetNameNodePort(cl);
+
+    // Create a hdfs connection to the mini cluster
+    struct hdfsBuilder *bld;
+    bld = hdfsNewBuilder();
+    EXPECT_NONNULL(bld);
+
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderSetNameNode(bld, "localhost");
+    hdfsBuilderSetNameNodePort(bld, port);
+    // The HDFS append tests require setting this property; otherwise the tests fail with:
+    //
+    //     IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being
+    //     available to try. The current failed datanode replacement policy is DEFAULT, and a client may configure this
+    //     via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
+    //
+    // It seems that some HDFS append tests require this property only when operating against a mini DFS cluster
+    // (for example, see TestFileAppend#testMultipleAppends).
+    hdfsBuilderConfSetStr(bld, "dfs.client.block.write.replace-datanode-on-failure.enable", "false");
+
+    fs = hdfsBuilderConnect(bld);
+
     if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
-        exit(-1);
+        shutdown_and_exit(cl, -1);
     } 
  
     lfs = hdfsConnectNewInstance(NULL, 0);
     if(!lfs) {
         fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
-        exit(-1);
+        shutdown_and_exit(cl, -1);
     } 
 
     {
@@ -106,7 +150,7 @@ int main(int argc, char **argv) {
         writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!writeFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
         num_written_bytes =
@@ -115,7 +159,7 @@ int main(int argc, char **argv) {
         if (num_written_bytes != strlen(fileContents) + 1) {
           fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
                   (int)(strlen(fileContents) + 1), (int)num_written_bytes);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
@@ -124,19 +168,19 @@ int main(int argc, char **argv) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (hdfsFlush(fs, writeFile)) {
             fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Flushed %s successfully!\n", writePath); 
 
         if (hdfsHFlush(fs, writeFile)) {
             fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "HFlushed %s successfully!\n", writePath);
 
@@ -150,20 +194,20 @@ int main(int argc, char **argv) {
 
         if (exists) {
           fprintf(stderr, "Failed to validate existence of %s\n", readPath);
-          exit(-1);
+          shutdown_and_exit(cl, -1);
         }
 
         readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
         if (!readFile) {
             fprintf(stderr, "Failed to open %s for reading!\n", readPath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         if (!hdfsFileIsOpenForRead(readFile)) {
             fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
                     "with O_RDONLY, and it did not show up as 'open for "
                     "read'\n");
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
@@ -171,7 +215,7 @@ int main(int argc, char **argv) {
         seekPos = 1;
         if(hdfsSeek(fs, readFile, seekPos)) {
             fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         currentPos = -1;
@@ -179,14 +223,14 @@ int main(int argc, char **argv) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (!hdfsFileUsesDirectRead(readFile)) {
           fprintf(stderr, "Direct read support incorrectly not detected "
                   "for HDFS filesystem\n");
-          exit(-1);
+          shutdown_and_exit(cl, -1);
         }
 
         fprintf(stderr, "Direct read support detected for HDFS\n");
@@ -194,7 +238,7 @@ int main(int argc, char **argv) {
         // Test the direct read path
         if(hdfsSeek(fs, readFile, 0)) {
             fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         memset(buffer, 0, sizeof(buffer));
         num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
@@ -202,13 +246,13 @@ int main(int argc, char **argv) {
         if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
             fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
                     fileContents, buffer, num_read_bytes);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
                 num_read_bytes, buffer);
         if (hdfsSeek(fs, readFile, 0L)) {
             fprintf(stderr, "Failed to seek to file start!\n");
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         // Disable the direct read path so that we really go through the slow
@@ -233,7 +277,7 @@ int main(int argc, char **argv) {
         localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!localFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
@@ -245,7 +289,7 @@ int main(int argc, char **argv) {
         if (hdfsFileUsesDirectRead(localFile)) {
           fprintf(stderr, "Direct read support incorrectly detected for local "
                   "filesystem\n");
-          exit(-1);
+          shutdown_and_exit(cl, -1);
         }
 
         hdfsCloseFile(lfs, localFile);
@@ -425,7 +469,7 @@ int main(int argc, char **argv) {
       appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
       if(!appendFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
-        exit(-1);
+        shutdown_and_exit(cl, -1);
       }
       fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
@@ -435,10 +479,10 @@ int main(int argc, char **argv) {
       fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
       if (hdfsFlush(fs, appendFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
-        exit(-1);
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
+        shutdown_and_exit(cl, -1);
         }
-      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath);
 
       hdfsCloseFile(fs, appendFile);
 
@@ -446,7 +490,7 @@ int main(int argc, char **argv) {
       appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
       if(!appendFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
-        exit(-1);
+        shutdown_and_exit(cl, -1);
       }
       fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
@@ -456,10 +500,10 @@ int main(int argc, char **argv) {
       fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
       if (hdfsFlush(fs, appendFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
-        exit(-1);
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
+        shutdown_and_exit(cl, -1);
       }
-      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath);
 
       hdfsCloseFile(fs, appendFile);
 
@@ -472,11 +516,11 @@ int main(int argc, char **argv) {
       readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
       if (!readFile) {
         fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
-        exit(-1);
+        shutdown_and_exit(cl, -1);
       }
 
       num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
-      fprintf(stderr, "Read following %d bytes:\n%s\n", 
+      fprintf(stderr, "Read following %d bytes:\n%s\n",
               num_read_bytes, rdbuffer);
 
       fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
@@ -496,16 +540,16 @@ int main(int argc, char **argv) {
       // the actual fs user capabilities. Thus just create a file and read
       // the owner is correct.
 
-      fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
+      fs = hdfsConnectAsUserNewInstance("localhost", port, tuser);
       if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
-        exit(-1);
+        shutdown_and_exit(cl, -1);
       } 
 
         userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!userFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", userPath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
 
@@ -515,7 +559,7 @@ int main(int argc, char **argv) {
 
         if (hdfsFlush(fs, userFile)) {
             fprintf(stderr, "Failed to 'flush' %s\n", userPath); 
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Flushed %s successfully!\n", userPath); 
 
@@ -528,6 +572,9 @@ int main(int argc, char **argv) {
     
     totalResult += (hdfsDisconnect(fs) != 0);
 
+    EXPECT_ZERO(nmdShutdown(cl));
+    nmdFree(cl);
+
     if (totalResult != 0) {
         return -1;
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
index cac1335..0d8f929 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
@@ -55,11 +55,9 @@ set_target_properties(hdfs PROPERTIES
     SOVERSION ${LIBHDFS_VERSION})
 
 build_libhdfs_test(test_libhdfs_ops hdfs_static test_libhdfs_ops.c)
-link_libhdfs_test(test_libhdfs_ops hdfs_static ${JAVA_JVM_LIBRARY})
-build_libhdfs_test(test_libhdfs_reads hdfs_static test_libhdfs_read.c)
-link_libhdfs_test(test_libhdfs_reads hdfs_static ${JAVA_JVM_LIBRARY})
-build_libhdfs_test(test_libhdfs_write hdfs_static test_libhdfs_write.c)
-link_libhdfs_test(test_libhdfs_write hdfs_static ${JAVA_JVM_LIBRARY})
+link_libhdfs_test(test_libhdfs_ops hdfs_static native_mini_dfs ${JAVA_JVM_LIBRARY})
+add_libhdfs_test(test_libhdfs_ops hdfs_static)
+
 build_libhdfs_test(test_libhdfs_threaded hdfs_static expect.c test_libhdfs_threaded.c ${OS_DIR}/thread.c)
 link_libhdfs_test(test_libhdfs_threaded hdfs_static native_mini_dfs)
 add_libhdfs_test(test_libhdfs_threaded hdfs_static)
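
A closing note on the last hunk: libhdfs tests are wired up through three project-local CMake helpers. build_libhdfs_test() compiles the binary, link_libhdfs_test() links its dependencies, and add_libhdfs_test() registers the binary with the test runner so it actually executes (this is what "Add test_libhdfs_ops to libhdfs tests" in the JIRA title refers to). Before this patch, test_libhdfs_ops had only the first two steps, so it was built but never run; the patch adds the third step and links native_mini_dfs, which the test now needs. A hypothetical new mini-cluster-backed test would follow the same shape (test_libhdfs_foo and its source file are invented names for illustration):

    build_libhdfs_test(test_libhdfs_foo hdfs_static test_libhdfs_foo.c)
    link_libhdfs_test(test_libhdfs_foo hdfs_static native_mini_dfs ${JAVA_JVM_LIBRARY})
    add_libhdfs_test(test_libhdfs_foo hdfs_static)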

