Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2009/02/11 11:59:57 UTC

svn commit: r743298 - in /hadoop/core/branches/branch-0.20: CHANGES.txt src/c++/libhdfs/hdfs.c src/c++/libhdfs/hdfs.h src/c++/libhdfs/hdfs_test.c

Author: dhruba
Date: Wed Feb 11 10:59:57 2009
New Revision: 743298

URL: http://svn.apache.org/viewvc?rev=743298&view=rev
Log:
HADOOP-4494. Allow libhdfs to append to files.
(Pete Wyckoff via dhruba)
svn merge -c 743296 from trunk.

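For context, the change lets libhdfs clients open an existing file with
O_WRONLY|O_APPEND so that writes go through FileSystem#append rather than
create. A minimal caller-side sketch (not part of this commit; the path and
the "default" connection are illustrative, the file must already exist, and
the cluster may need append support enabled, e.g. dfs.support.append):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include "hdfs.h"

    int main(void) {
        hdfsFS fs = hdfsConnect("default", 0);  /* use the configured fs.default.name */
        if (!fs) return 1;

        /* open an existing file in append mode -- the new capability */
        hdfsFile f = hdfsOpenFile(fs, "/tmp/log.txt", O_WRONLY | O_APPEND, 0, 0, 0);
        if (!f) { hdfsDisconnect(fs); return 1; }

        const char* msg = "appended line\n";
        hdfsWrite(fs, f, (void*)msg, strlen(msg));  /* returns bytes written */
        hdfsFlush(fs, f);                           /* push buffered data to the cluster */

        hdfsCloseFile(fs, f);
        hdfsDisconnect(fs);
        return 0;
    }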

Modified:
    hadoop/core/branches/branch-0.20/CHANGES.txt   (contents, props changed)
    hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.c
    hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.h
    hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs_test.c

Modified: hadoop/core/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/CHANGES.txt?rev=743298&r1=743297&r2=743298&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.20/CHANGES.txt Wed Feb 11 10:59:57 2009
@@ -678,6 +678,9 @@
 
     HADOOP-5193. Correct calculation of edits modification time. (shv)
 
+    HADOOP-4494. Allow libhdfs to append to files.
+    (Pete Wyckoff via dhruba)
+
 Release 0.19.0 - 2008-11-18
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/core/branches/branch-0.20/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Feb 11 10:59:57 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.18/CHANGES.txt:727226
 /hadoop/core/branches/branch-0.19/CHANGES.txt:713112
-/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762
+/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296

Modified: hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/c%2B%2B/libhdfs/hdfs.c?rev=743298&r1=743297&r2=743298&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.c (original)
+++ hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.c Wed Feb 11 10:59:57 2009
@@ -410,12 +410,17 @@
       return NULL;
     }
 
+    if ((flags & O_CREAT) && (flags & O_EXCL)) {
+      fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
+    }
 
     /* The hadoop java api/signature */
-    const char* method = ((flags & O_WRONLY) == 0) ? "open" : "create";
+    const char* method = ((flags & O_WRONLY) == 0) ? "open" : (flags & O_APPEND) ? "append" : "create";
     const char* signature = ((flags & O_WRONLY) == 0) ?
         JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) :
-        JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
+      (flags & O_APPEND) ?
+      JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM)) :
+      JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
 
     /* Return value */
     hdfsFile file = NULL;
@@ -459,7 +464,7 @@
         jBufferSize = jVal.i;
     }
 
-    if (flags & O_WRONLY) {
+    if ((flags & O_WRONLY) && (flags & O_APPEND) == 0) {
         //replication
 
         if (!replication) {
@@ -490,15 +495,28 @@
     /* Create and return either the FSDataInputStream or
        FSDataOutputStream references jobject jStream */
 
+    // READ?
     if ((flags & O_WRONLY) == 0) {
+      if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
+                       method, signature, jPath, jBufferSize)) {
+        errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+                                   "FileSystem::%s(%s)", method,
+                                   signature);
+        goto done;
+      }
+    }
+    // WRITE/APPEND?
+    else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
         if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
-                         method, signature, jPath, jBufferSize)) {
-            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
-                                       "FileSystem::%s(%s)", method,
-                                       signature);
-            goto done;
+                         method, signature, jPath)) {
+          errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
+                                     "FileSystem::%s(%s)", method,
+                                     signature);
+          goto done;
         }
+
     }
+    // WRITE/CREATE
     else {
         jboolean jOverWrite = 1;
         if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,

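The dispatch above is compact; condensed into a standalone sketch (a
hypothetical helper, not code from the patch), the flag-to-method mapping
the hunk implements is:

    #include <fcntl.h>

    /* Map hdfsOpenFile flags to the FileSystem method libhdfs will invoke. */
    static const char* choose_fs_method(int flags)
    {
        if ((flags & O_WRONLY) == 0)
            return "open";    /* read: FileSystem#open -> FSDataInputStream */
        if (flags & O_APPEND)
            return "append";  /* append: FileSystem#append -> FSDataOutputStream */
        return "create";      /* write: FileSystem#create -> FSDataOutputStream */
    }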
Modified: hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.h
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/c%2B%2B/libhdfs/hdfs.h?rev=743298&r1=743297&r2=743298&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.h (original)
+++ hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs.h Wed Feb 11 10:59:57 2009
@@ -134,7 +134,8 @@
      * hdfsOpenFile - Open a hdfs file in given mode.
      * @param fs The configured filesystem handle.
      * @param path The full path to the file.
-     * @param flags Either O_RDONLY or O_WRONLY, for read-only or write-only.
+     * @param flags - an | of bits/fcntl.h file flags - supported flags are O_RDONLY, O_WRONLY (meaning create or overwrite, i.e. implies O_TRUNC),
+     * and O_WRONLY|O_APPEND. Other flags are generally ignored, except O_RDWR and O_CREAT|O_EXCL, which cause hdfsOpenFile to return NULL and set errno to ENOTSUP.
      * @param bufferSize Size of buffer for read/write - pass 0 if you want
      * to use the default configured values.
      * @param replication Block replication - pass 0 if you want to use

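Taken together, the documented combinations behave as below (a sketch, not
part of the patch; fs is assumed to be a connected handle, the paths are
illustrative, and error handling is omitted):

    #include <fcntl.h>
    #include "hdfs.h"

    void flag_examples(hdfsFS fs)
    {
        /* read-only */
        hdfsFile ro = hdfsOpenFile(fs, "/tmp/a", O_RDONLY, 0, 0, 0);
        /* write-only: creates or overwrites, i.e. implies O_TRUNC */
        hdfsFile wr = hdfsOpenFile(fs, "/tmp/b", O_WRONLY, 0, 0, 0);
        /* append to an existing file */
        hdfsFile ap = hdfsOpenFile(fs, "/tmp/c", O_WRONLY | O_APPEND, 0, 0, 0);
        /* unsupported: returns NULL with errno set to ENOTSUP */
        hdfsFile rw = hdfsOpenFile(fs, "/tmp/a", O_RDWR, 0, 0, 0);

        if (ro) hdfsCloseFile(fs, ro);
        if (wr) hdfsCloseFile(fs, wr);
        if (ap) hdfsCloseFile(fs, ap);
        if (rw) hdfsCloseFile(fs, rw);
    }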
Modified: hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs_test.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/c%2B%2B/libhdfs/hdfs_test.c?rev=743298&r1=743297&r2=743298&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs_test.c (original)
+++ hadoop/core/branches/branch-0.20/src/c++/libhdfs/hdfs_test.c Wed Feb 11 10:59:57 2009
@@ -63,7 +63,7 @@
         fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
         exit(-1);
     } 
- 
+
         const char* writePath = "/tmp/testfile.txt";
     {
         //Write tests
@@ -317,7 +317,75 @@
         totalResult += (result ? 0 : 1);
     }
 
+    {
+      // TEST APPENDS
+      const char *writePath = "/tmp/appends";
+
+      // CREATE
+      hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
+      if(!writeFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+        exit(-1);
+      }
+      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+
+      char* buffer = "Hello,";
+      tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
+      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+      if (hdfsFlush(fs, writeFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+        exit(-1);
+      }
+      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+
+      hdfsCloseFile(fs, writeFile);
+
+      // RE-OPEN
+      writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
+      if(!writeFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+        exit(-1);
+      }
+      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
 
+      buffer = " World";
+      num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
+      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+      if (hdfsFlush(fs, writeFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+        exit(-1);
+      }
+      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+
+      hdfsCloseFile(fs, writeFile);
+
+      // CHECK size
+      hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
+      totalResult += (result ? 0 : 1);
+
+      // READ and check data
+      hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
+      if (!readFile) {
+        fprintf(stderr, "Failed to open %s for reading!\n", writePath);
+        exit(-1);
+      }
+
+      char rdbuffer[32];
+      tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+      fprintf(stderr, "Read following %d bytes:\n%s\n", 
+              num_read_bytes, rdbuffer);
+
+      fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
+
+      hdfsCloseFile(fs, readFile);
+
+      // DONE test appends
+    }
+
+
     totalResult += (hdfsDisconnect(fs) != 0);
 
     {