Posted to hdfs-commits@hadoop.apache.org by cu...@apache.org on 2014/08/20 03:34:47 UTC

svn commit: r1619019 [9/11] - in /hadoop/common/branches/YARN-1051/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/main/ja...

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c Wed Aug 20 01:34:29 2014
@@ -21,6 +21,7 @@
 #include "hdfs_test.h"
 #include "jni_helper.h"
 #include "native_mini_dfs.h"
+#include "platform.h"
 
 #include <errno.h>
 #include <jni.h>
@@ -347,10 +348,11 @@ error_dlr_nn:
 int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
                             struct hdfsBuilder *bld)
 {
-    int port, ret;
+    int ret;
+    tPort port;
 
     hdfsBuilderSetNameNode(bld, "localhost");
-    port = nmdGetNameNodePort(cl);
+    port = (tPort)nmdGetNameNodePort(cl);
     if (port < 0) {
       fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port);
       return EIO;

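For context, the builder setup this hunk touches follows a fetch-port / check-error / narrow-to-tPort pattern. Below is a minimal sketch of that pattern (not the committed function); it assumes the usual hdfs.h builder calls — hdfsNewBuilder, hdfsBuilderSetNameNode, hdfsBuilderSetNameNodePort, hdfsBuilderConnect — and that tPort is an unsigned 16-bit type.

    #include "hdfs.h"
    #include "native_mini_dfs.h"

    #include <stdio.h>

    /* Sketch: connect a libhdfs client to a running NativeMiniDfsCluster. */
    static hdfsFS connectToMiniCluster(struct NativeMiniDfsCluster *cl)
    {
        int rawPort;
        tPort port;
        struct hdfsBuilder *bld;

        rawPort = nmdGetNameNodePort(cl);      /* negative return means error */
        if (rawPort < 0) {
            fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -rawPort);
            return NULL;
        }
        port = (tPort)rawPort;                 /* valid TCP ports fit in 16 bits */

        bld = hdfsNewBuilder();
        if (!bld) {
            return NULL;
        }
        hdfsBuilderSetNameNode(bld, "localhost");
        hdfsBuilderSetNameNodePort(bld, port);
        return hdfsBuilderConnect(bld);        /* consumes the builder */
    }
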
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c Wed Aug 20 01:34:29 2014
@@ -18,6 +18,7 @@
 
 #include "hdfs.h" 
 #include "hdfs_test.h" 
+#include "platform.h"
 
 #include <inttypes.h>
 #include <jni.h>
@@ -28,12 +29,13 @@
 #include <unistd.h>
 
 void permission_disp(short permissions, char *rtr) {
-  rtr[9] = '\0';
   int i;
+  short permissionsId;
+  char* perm;
+  rtr[9] = '\0';
   for(i=2;i>=0;i--)
     {
-      short permissionsId = permissions >> (i * 3) & (short)7;
-      char* perm;
+      permissionsId = permissions >> (i * 3) & (short)7;
       switch(permissionsId) {
       case 7:
         perm = "rwx"; break;
@@ -60,35 +62,56 @@ void permission_disp(short permissions, 
 } 
 
 int main(int argc, char **argv) {
-    char buffer[32];
-    tSize num_written_bytes;
+    const char *writePath = "/tmp/testfile.txt";
+    const char *fileContents = "Hello, World!";
+    const char *readPath = "/tmp/testfile.txt";
+    const char *srcPath = "/tmp/testfile.txt";
+    const char *dstPath = "/tmp/testfile2.txt";
+    const char *slashTmp = "/tmp";
+    const char *newDirectory = "/tmp/newdir";
+    const char *newOwner = "root";
+    const char *tuser = "nobody";
+    const char *appendPath = "/tmp/appends";
+    const char *userPath = "/tmp/usertestfile.txt";
+
+    char buffer[32], buffer2[256], rdbuffer[32];
+    tSize num_written_bytes, num_read_bytes;
+    hdfsFS fs, lfs;
+    hdfsFile writeFile, readFile, localFile, appendFile, userFile;
+    tOffset currentPos, seekPos;
+    int exists, totalResult, result, numEntries, i, j;
+    const char *resp;
+    hdfsFileInfo *fileInfo, *fileList, *finfo;
+    char *buffer3;
+    char permissions[10];
+    char ***hosts;
+    short newPerm = 0666;
+    tTime newMtime, newAtime;
 
-    hdfsFS fs = hdfsConnectNewInstance("default", 0);
+    fs = hdfsConnectNewInstance("default", 0);
     if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     } 
  
-    hdfsFS lfs = hdfsConnectNewInstance(NULL, 0);
+    lfs = hdfsConnectNewInstance(NULL, 0);
     if(!lfs) {
         fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
         exit(-1);
     } 
 
-    const char* writePath = "/tmp/testfile.txt";
-    const char* fileContents = "Hello, World!";
-
     {
         //Write tests
         
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!writeFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
             exit(-1);
         }
         fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
         num_written_bytes =
-          hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents)+1);
+          hdfsWrite(fs, writeFile, (void*)fileContents,
+            (tSize)(strlen(fileContents)+1));
         if (num_written_bytes != strlen(fileContents) + 1) {
           fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
                   (int)(strlen(fileContents) + 1), (int)num_written_bytes);
@@ -96,7 +119,7 @@ int main(int argc, char **argv) {
         }
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-        tOffset currentPos = -1;
+        currentPos = -1;
         if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %ld!\n",
@@ -123,15 +146,14 @@ int main(int argc, char **argv) {
     {
         //Read tests
         
-        const char* readPath = "/tmp/testfile.txt";
-        int exists = hdfsExists(fs, readPath);
+        exists = hdfsExists(fs, readPath);
 
         if (exists) {
           fprintf(stderr, "Failed to validate existence of %s\n", readPath);
           exit(-1);
         }
 
-        hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+        readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
         if (!readFile) {
             fprintf(stderr, "Failed to open %s for reading!\n", readPath);
             exit(-1);
@@ -146,13 +168,13 @@ int main(int argc, char **argv) {
 
         fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
 
-        tOffset seekPos = 1;
+        seekPos = 1;
         if(hdfsSeek(fs, readFile, seekPos)) {
             fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
             exit(-1);
         }
 
-        tOffset currentPos = -1;
+        currentPos = -1;
         if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %ld!\n", 
@@ -175,7 +197,7 @@ int main(int argc, char **argv) {
             exit(-1);
         }
         memset(buffer, 0, sizeof(buffer));
-        tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
+        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
                 sizeof(buffer));
         if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
             fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
@@ -208,14 +230,14 @@ int main(int argc, char **argv) {
         hdfsCloseFile(fs, readFile);
 
         // Test correct behaviour for unsupported filesystems
-        hdfsFile localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!localFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
             exit(-1);
         }
 
         num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
-                                      strlen(fileContents) + 1);
+                                      (tSize)(strlen(fileContents) + 1));
 
         hdfsCloseFile(lfs, localFile);
         localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
@@ -229,50 +251,43 @@ int main(int argc, char **argv) {
         hdfsCloseFile(lfs, localFile);
     }
 
-    int totalResult = 0;
-    int result = 0;
+    totalResult = 0;
+    result = 0;
     {
         //Generic file-system operations
 
-        const char* srcPath = "/tmp/testfile.txt";
-        const char* dstPath = "/tmp/testfile2.txt";
-
-        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        const char* slashTmp = "/tmp";
-        const char* newDirectory = "/tmp/newdir";
-        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        char buffer[256];
-        const char *resp;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
         totalResult += (resp ? 0 : 1);
-        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
         totalResult += (resp ? 0 : 1);
 
         fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
         fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
         fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
 
-        hdfsFileInfo *fileInfo = NULL;
+        fileInfo = NULL;
         if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
             fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
             fprintf(stderr, "Name: %s, ", fileInfo->mName);
@@ -283,7 +298,6 @@ int main(int argc, char **argv) {
             fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
             fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
             fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
-            char permissions[10];
             permission_disp(fileInfo->mPermissions, permissions);
             fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
             hdfsFreeFileInfo(fileInfo, 1);
@@ -292,10 +306,8 @@ int main(int argc, char **argv) {
             fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
         }
 
-        hdfsFileInfo *fileList = 0;
-        int numEntries = 0;
+        fileList = 0;
         if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
-            int i = 0;
             for(i=0; i < numEntries; ++i) {
                 fprintf(stderr, "Name: %s, ", fileList[i].mName);
                 fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
@@ -305,7 +317,6 @@ int main(int argc, char **argv) {
                 fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                 fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                 fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
-                char permissions[10];
                 permission_disp(fileList[i].mPermissions, permissions);
                 fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
             }
@@ -319,12 +330,12 @@ int main(int argc, char **argv) {
             }
         }
 
-        char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
+        hosts = hdfsGetHosts(fs, srcPath, 0, 1);
         if(hosts) {
             fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
-            int i=0; 
+            i=0; 
             while(hosts[i]) {
-                int j = 0;
+                j = 0;
                 while(hosts[i][j]) {
                     fprintf(stderr, 
                             "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
@@ -337,131 +348,129 @@ int main(int argc, char **argv) {
             fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
         }
        
-        char *newOwner = "root";
         // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
-        short newPerm = 0666;
 
         // chown write
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
         // chmod write
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
 
 
         sleep(2);
-        tTime newMtime = time(NULL);
-        tTime newAtime = time(NULL);
+        newMtime = time(NULL);
+        newAtime = time(NULL);
 
         // utime write
-        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
 
         totalResult += result;
 
         // chown/chmod/utime read
-        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+        finfo = hdfsGetPathInfo(fs, writePath);
 
-        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         // will later use /tmp/ as a different user so enable it
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         fprintf(stderr,"newMTime=%ld\n",newMtime);
         fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
 
 
-        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         // No easy way to turn on access times from hdfs_test right now
-        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) ? "Failed!" : "Success!"));
+        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
         //        totalResult += result;
 
         hdfsFreeFileInfo(finfo, 1);
 
         // Clean up
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
+        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
         totalResult += (result ? 0 : 1);
     }
 
     {
       // TEST APPENDS
-      const char *writePath = "/tmp/appends";
 
       // CREATE
-      hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
-      if(!writeFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
         exit(-1);
       }
-      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
-      char* buffer = "Hello,";
-      tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
+      buffer3 = "Hello,";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)strlen(buffer3));
       fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-      if (hdfsFlush(fs, writeFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
         exit(-1);
         }
-      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
 
-      hdfsCloseFile(fs, writeFile);
+      hdfsCloseFile(fs, appendFile);
 
       // RE-OPEN
-      writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
-      if(!writeFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
         exit(-1);
       }
-      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
-      buffer = " World";
-      num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
+      buffer3 = " World";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)(strlen(buffer3) + 1));
       fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-      if (hdfsFlush(fs, writeFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
         exit(-1);
       }
-      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
 
-      hdfsCloseFile(fs, writeFile);
+      hdfsCloseFile(fs, appendFile);
 
       // CHECK size
-      hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
-      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
+      finfo = hdfsGetPathInfo(fs, appendPath);
+      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
       totalResult += (result ? 0 : 1);
 
       // READ and check data
-      hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
+      readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
       if (!readFile) {
-        fprintf(stderr, "Failed to open %s for reading!\n", writePath);
+        fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
         exit(-1);
       }
 
-      char rdbuffer[32];
-      tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+      num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
       fprintf(stderr, "Read following %d bytes:\n%s\n", 
               num_read_bytes, rdbuffer);
 
-      fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
+      fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
 
       hdfsCloseFile(fs, readFile);
 
@@ -478,36 +487,33 @@ int main(int argc, char **argv) {
       // the actual fs user capabilities. Thus just create a file and read
       // the owner is correct.
 
-      const char *tuser = "nobody";
-      const char* writePath = "/tmp/usertestfile.txt";
-
       fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
       if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
         exit(-1);
       } 
 
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
-        if(!writeFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+        userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!userFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", userPath);
             exit(-1);
         }
-        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+        fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
 
-        char* buffer = "Hello, World!";
-        tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+        num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
+          (tSize)(strlen(fileContents)+1));
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-        if (hdfsFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+        if (hdfsFlush(fs, userFile)) {
+            fprintf(stderr, "Failed to 'flush' %s\n", userPath); 
             exit(-1);
         }
-        fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+        fprintf(stderr, "Flushed %s successfully!\n", userPath); 
 
-        hdfsCloseFile(fs, writeFile);
+        hdfsCloseFile(fs, userFile);
 
-        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
-        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
+        finfo = hdfsGetPathInfo(fs, userPath);
+        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
     }
     

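Most of the hunk above hoists the locals of main() into one declaration block, so the test also builds with pre-C99 compilers such as MSVC, and adds explicit (tSize) casts on the strlen()-derived lengths. Stripped of the test scaffolding, the write/flush/reopen/read round trip it exercises looks roughly like the following sketch, which assumes a reachable "default" filesystem:

    #include "hdfs.h"

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *path = "/tmp/roundtrip.txt";
        const char *msg = "Hello, World!";
        char buf[32];
        hdfsFS fs;
        hdfsFile f;
        tSize n;

        fs = hdfsConnectNewInstance("default", 0);
        if (!fs) {
            fprintf(stderr, "Failed to connect to hdfs!\n");
            return 1;
        }

        /* Write the message (including the trailing NUL) and flush it. */
        f = hdfsOpenFile(fs, path, O_WRONLY | O_CREAT, 0, 0, 0);
        if (!f) {
            return 1;
        }
        n = hdfsWrite(fs, f, (void *)msg, (tSize)(strlen(msg) + 1));
        if (n != (tSize)(strlen(msg) + 1) || hdfsFlush(fs, f)) {
            return 1;
        }
        hdfsCloseFile(fs, f);

        /* Reopen the file and read the contents back. */
        f = hdfsOpenFile(fs, path, O_RDONLY, 0, 0, 0);
        if (!f) {
            return 1;
        }
        memset(buf, 0, sizeof(buf));
        n = hdfsRead(fs, f, (void *)buf, (tSize)sizeof(buf));
        fprintf(stderr, "Read %d bytes: %s\n", (int)n, buf);
        hdfsCloseFile(fs, f);

        hdfsDisconnect(fs);
        return 0;
    }
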
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c Wed Aug 20 01:34:29 2014
@@ -22,35 +22,38 @@
 #include <stdlib.h>
 
 int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *rfile = argv[1];
+    tSize bufferSize = strtoul(argv[3], NULL, 10);
+    hdfsFile readFile;
+    char* buffer;
+    tSize curSize;
 
     if (argc != 4) {
         fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
     
-    hdfsFS fs = hdfsConnect("default", 0);
+    fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     } 
- 
-    const char* rfile = argv[1];
-    tSize bufferSize = strtoul(argv[3], NULL, 10);
-   
-    hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+
+    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
     if (!readFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", rfile);
         exit(-2);
     }
 
     // data to be written to the file
-    char* buffer = malloc(sizeof(char) * bufferSize);
+    buffer = malloc(sizeof(char) * bufferSize);
     if(buffer == NULL) {
         return -2;
     }
     
     // read from the file
-    tSize curSize = bufferSize;
+    curSize = bufferSize;
     for (; curSize == bufferSize;) {
         curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
     }

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c Wed Aug 20 01:34:29 2014
@@ -21,23 +21,31 @@
 #include <limits.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/types.h>
 
 int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *writeFileName = argv[1];
+    off_t fileTotalSize = strtoul(argv[2], NULL, 10);
+    long long tmpBufferSize = strtoul(argv[3], NULL, 10);
+    tSize bufferSize;
+    hdfsFile writeFile;
+    char* buffer;
+    int i;
+    off_t nrRemaining;
+    tSize curSize;
+    tSize written;
 
     if (argc != 4) {
         fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
     
-    hdfsFS fs = hdfsConnect("default", 0);
+    fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     } 
- 
-    const char* writeFileName = argv[1];
-    off_t fileTotalSize = strtoul(argv[2], NULL, 10);
-    long long tmpBufferSize = strtoul(argv[3], NULL, 10);
 
     // sanity check
     if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
@@ -51,30 +59,27 @@ int main(int argc, char **argv) {
       exit(-3);
     }
 
-    tSize bufferSize = tmpBufferSize;
+    bufferSize = (tSize)tmpBufferSize;
 
-    hdfsFile writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
+    writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
     if (!writeFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
         exit(-2);
     }
 
     // data to be written to the file
-    char* buffer = malloc(sizeof(char) * bufferSize);
+    buffer = malloc(sizeof(char) * bufferSize);
     if(buffer == NULL) {
         fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
         return -2;
     }
-    int i = 0;
     for (i=0; i < bufferSize; ++i) {
         buffer[i] = 'a' + (i%26);
     }
 
     // write to the file
-    off_t nrRemaining;
     for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
-      tSize curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining; 
-      tSize written;
+      curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining; 
       if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
         fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
         exit(-3);

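As in the other tests, the size arguments are now parsed into wider types (off_t, long long) first and only narrowed to tSize once the range checks pass. A minimal sketch of that parse-then-narrow pattern, assuming tSize is a 32-bit signed count as declared in hdfs.h:

    #include "hdfs.h"

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a buffer-size argument; returns 0 on success. */
    static int parseBufferSize(const char *arg, tSize *out)
    {
        long long tmp;

        errno = 0;
        tmp = strtoll(arg, NULL, 10);
        if (errno == ERANGE || tmp <= 0 || tmp > INT_MAX) {
            fprintf(stderr, "Invalid buffer size: %s\n", arg);
            return 1;
        }
        *out = (tSize)tmp;   /* safe: already checked against INT_MAX */
        return 0;
    }
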
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c Wed Aug 20 01:34:29 2014
@@ -19,12 +19,12 @@
 #include "expect.h"
 #include "hdfs.h"
 #include "native_mini_dfs.h"
+#include "platform.h"
 
 #include <errno.h>
 #include <inttypes.h>
-#include <semaphore.h>
-#include <pthread.h>
 #include <unistd.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -53,7 +53,7 @@ static uint8_t *getZeroCopyBlockData(int
         exit(1);
     }
     for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) {
-      buf[i] = blockIdx + (i % 17);
+      buf[i] = (uint8_t)(blockIdx + (i % 17));
     }
     return buf;
 }
@@ -69,18 +69,6 @@ static int getZeroCopyBlockLen(int block
     }
 }
 
-static void printBuf(const uint8_t *buf, size_t len) __attribute__((unused));
-
-static void printBuf(const uint8_t *buf, size_t len)
-{
-  size_t i;
-
-  for (i = 0; i < len; i++) {
-    fprintf(stderr, "%02x", buf[i]);
-  }
-  fprintf(stderr, "\n");
-}
-
 static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
 {
     hdfsFile file = NULL;
@@ -127,8 +115,9 @@ static int doTestZeroCopyReads(hdfsFS fs
     EXPECT_NONNULL(block);
     EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
     hadoopRzBufferFree(file, buffer);
-    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
-                  hdfsTell(fs, file));
+    EXPECT_INT64_EQ(
+          (int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
+          hdfsTell(fs, file));
     EXPECT_ZERO(expectFileStats(file,
           TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
           TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
@@ -165,7 +154,7 @@ static int doTestZeroCopyReads(hdfsFS fs
     free(block);
     block = getZeroCopyBlockData(2);
     EXPECT_NONNULL(block);
-    EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer) +
+    EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
         (TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
     hadoopRzBufferFree(file, buffer);
 
@@ -219,8 +208,10 @@ int main(void)
 {
     int port;
     struct NativeMiniDfsConf conf = {
-        .doFormat = 1,
-        .configureShortCircuit = 1,
+        1, /* doFormat */
+        0, /* webhdfsEnabled */
+        0, /* namenodeHttpPort */
+        1, /* configureShortCircuit */
     };
     char testFileName[TEST_FILE_NAME_LENGTH];
     hdfsFS fs;

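The struct setup above replaces C99 designated initializers with positional initializers (keeping the field names as comments) so the file also compiles with toolchains that lack designated-initializer support, such as the Visual Studio C compiler. Assuming the fields of struct NativeMiniDfsConf are declared in the order the comments show, the two forms are equivalent:

    #include "native_mini_dfs.h"

    /* C99 designated initializers: order-independent, unnamed fields are zeroed. */
    static struct NativeMiniDfsConf confDesignated = {
        .doFormat = 1,
        .configureShortCircuit = 1,
    };

    /* Positional initializers: must follow declaration order, but compile as C89. */
    static struct NativeMiniDfsConf confPositional = {
        1, /* doFormat */
        0, /* webhdfsEnabled */
        0, /* namenodeHttpPort */
        1, /* configureShortCircuit */
    };
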
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c Wed Aug 20 01:34:29 2014
@@ -19,11 +19,11 @@
 #include "expect.h"
 #include "hdfs.h"
 #include "native_mini_dfs.h"
+#include "os/thread.h"
 
 #include <errno.h>
 #include <inttypes.h>
-#include <semaphore.h>
-#include <pthread.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -35,8 +35,6 @@
 
 #define TLH_DEFAULT_BLOCK_SIZE 134217728
 
-static sem_t tlhSem;
-
 static struct NativeMiniDfsCluster* tlhCluster;
 
 struct tlhThreadInfo {
@@ -44,18 +42,19 @@ struct tlhThreadInfo {
     int threadIdx;
     /** 0 = thread was successful; error code otherwise */
     int success;
-    /** pthread identifier */
-    pthread_t thread;
+    /** thread identifier */
+    thread theThread;
 };
 
 static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
                                      const char *username)
 {
-    int ret, port;
+    int ret;
+    tPort port;
     hdfsFS hdfs;
     struct hdfsBuilder *bld;
     
-    port = nmdGetNameNodePort(cl);
+    port = (tPort)nmdGetNameNodePort(cl);
     if (port < 0) {
         fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
                 "returned error %d\n", port);
@@ -164,7 +163,7 @@ static int doTestHdfsOperations(struct t
     EXPECT_NONNULL(file);
 
     /* TODO: implement writeFully and use it here */
-    expected = strlen(paths->prefix);
+    expected = (int)strlen(paths->prefix);
     ret = hdfsWrite(fs, file, paths->prefix, expected);
     if (ret < 0) {
         ret = errno;
@@ -186,9 +185,9 @@ static int doTestHdfsOperations(struct t
 
     EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
     errno = 0;
-    EXPECT_ZERO(readStats->totalBytesRead);
-    EXPECT_ZERO(readStats->totalLocalBytesRead);
-    EXPECT_ZERO(readStats->totalShortCircuitBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
     hdfsFileFreeReadStatistics(readStats);
     /* TODO: implement readFully and use it here */
     ret = hdfsRead(fs, file, tmp, sizeof(tmp));
@@ -204,7 +203,7 @@ static int doTestHdfsOperations(struct t
     }
     EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
     errno = 0;
-    EXPECT_INT_EQ(expected, readStats->totalBytesRead);
+    EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
     hdfsFileFreeReadStatistics(readStats);
     EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
     EXPECT_ZERO(hdfsCloseFile(fs, file));
@@ -262,12 +261,11 @@ static int testHdfsOperationsImpl(struct
     return 0;
 }
 
-static void *testHdfsOperations(void *v)
+static void testHdfsOperations(void *v)
 {
     struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
     int ret = testHdfsOperationsImpl(ti);
     ti->success = ret;
-    return NULL;
 }
 
 static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
@@ -304,7 +302,7 @@ int main(void)
     const char *tlhNumThreadsStr;
     struct tlhThreadInfo ti[TLH_MAX_THREADS];
     struct NativeMiniDfsConf conf = {
-        .doFormat = 1,
+        1, /* doFormat */
     };
 
     tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
@@ -323,21 +321,20 @@ int main(void)
         ti[i].threadIdx = i;
     }
 
-    EXPECT_ZERO(sem_init(&tlhSem, 0, tlhNumThreads));
     tlhCluster = nmdCreate(&conf);
     EXPECT_NONNULL(tlhCluster);
     EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
 
     for (i = 0; i < tlhNumThreads; i++) {
-        EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
-            testHdfsOperations, &ti[i]));
+        ti[i].theThread.start = testHdfsOperations;
+        ti[i].theThread.arg = &ti[i];
+        EXPECT_ZERO(threadCreate(&ti[i].theThread));
     }
     for (i = 0; i < tlhNumThreads; i++) {
-        EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
+        EXPECT_ZERO(threadJoin(&ti[i].theThread));
     }
 
     EXPECT_ZERO(nmdShutdown(tlhCluster));
     nmdFree(tlhCluster);
-    EXPECT_ZERO(sem_destroy(&tlhSem));
     return checkFailures(ti, tlhNumThreads);
 }

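The hunk above swaps direct pthread use for a small portability wrapper from os/thread.h, whose header is not part of this diff. Inferred purely from the call sites above, its interface plausibly looks something like the sketch below (a hypothetical reconstruction for illustration, not the actual header):

    /* Hypothetical shape of the os/thread.h wrapper, inferred from usage:
     * a thread record carries a start routine and an opaque argument, and
     * threadCreate()/threadJoin() return 0 on success, an error code otherwise. */
    typedef void (*threadProcedure)(void *arg);

    typedef struct {
        threadProcedure start;   /* set by the caller before threadCreate() */
        void *arg;               /* passed to start() on the new thread */
        /* an opaque platform handle (pthread_t or Windows HANDLE) would live here */
    } thread;

    int threadCreate(thread *t);       /* runs t->start(t->arg) on a new thread */
    int threadJoin(const thread *t);   /* blocks until the thread finishes */
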
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c Wed Aug 20 01:34:29 2014
@@ -22,7 +22,7 @@
 #include <errno.h>
 
 static struct NativeMiniDfsConf conf = {
-    .doFormat = 1,
+    1, /* doFormat */
 };
 
 /**

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Wed Aug 20 01:34:29 2014
@@ -281,6 +281,19 @@ message GetDatanodeReportResponseProto {
   repeated DatanodeInfoProto di = 1;
 }
 
+message GetDatanodeStorageReportRequestProto {
+  required DatanodeReportTypeProto type = 1;
+}
+
+message DatanodeStorageReportProto {
+  required DatanodeInfoProto datanodeInfo = 1;
+  repeated StorageReportProto storageReports = 2;
+}
+
+message GetDatanodeStorageReportResponseProto {
+  repeated DatanodeStorageReportProto datanodeStorageReports = 1;
+}
+
 message GetPreferredBlockSizeRequestProto {
   required string filename = 1;
 }
@@ -641,6 +654,14 @@ message DeleteSnapshotRequestProto {
 message DeleteSnapshotResponseProto { // void response
 }
 
+message CheckAccessRequestProto {
+  required string path = 1;
+  required AclEntryProto.FsActionProto mode = 2;
+}
+
+message CheckAccessResponseProto { // void response
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -672,6 +693,8 @@ service ClientNamenodeProtocol {
   rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
   rpc getDatanodeReport(GetDatanodeReportRequestProto)
       returns(GetDatanodeReportResponseProto);
+  rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)
+      returns(GetDatanodeStorageReportResponseProto);
   rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto)
       returns(GetPreferredBlockSizeResponseProto);
   rpc setSafeMode(SetSafeModeRequestProto)
@@ -768,4 +791,6 @@ service ClientNamenodeProtocol {
       returns(ListXAttrsResponseProto);
   rpc removeXAttr(RemoveXAttrRequestProto)
       returns(RemoveXAttrResponseProto);
+  rpc checkAccess(CheckAccessRequestProto)
+      returns(CheckAccessResponseProto);
 }

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Wed Aug 20 01:34:29 2014
@@ -45,20 +45,6 @@ message DatanodeRegistrationProto {
 }
 
 /**
- * Represents a storage available on the datanode
- */
-message DatanodeStorageProto {
-  enum StorageState {
-    NORMAL = 0;
-    READ_ONLY_SHARED = 1;
-  }
-
-  required string storageUuid = 1;
-  optional StorageState state = 2 [default = NORMAL];
-  optional StorageTypeProto storageType = 3 [default = DISK];
-}
-
-/**
  * Commands sent from namenode to the datanodes
  */
 message DatanodeCommandProto {
@@ -196,16 +182,6 @@ message HeartbeatRequestProto {
   optional uint64 cacheUsed = 7 [default = 0 ];
 }
 
-message StorageReportProto {
-  required string storageUuid = 1 [ deprecated = true ];
-  optional bool failed = 2 [ default = false ];
-  optional uint64 capacity = 3 [ default = 0 ];
-  optional uint64 dfsUsed = 4 [ default = 0 ];
-  optional uint64 remaining = 5 [ default = 0 ];
-  optional uint64 blockPoolUsed = 6 [ default = 0 ];
-  optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
-}
-
 /**
  * state - State the NN is in when returning response to the DN
  * txid - Highest transaction ID this NN has seen

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Wed Aug 20 01:34:29 2014
@@ -207,6 +207,7 @@ enum Status {
   OOB_RESERVED1 = 9;          // Reserved
   OOB_RESERVED2 = 10;         // Reserved
   OOB_RESERVED3 = 11;         // Reserved
+  IN_PROGRESS = 12;
 }
 
 message PipelineAckProto {

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Wed Aug 20 01:34:29 2014
@@ -100,6 +100,30 @@ message DatanodeInfoProto {
 }
 
 /**
+ * Represents a storage available on the datanode
+ */
+message DatanodeStorageProto {
+  enum StorageState {
+    NORMAL = 0;
+    READ_ONLY_SHARED = 1;
+  }
+
+  required string storageUuid = 1;
+  optional StorageState state = 2 [default = NORMAL];
+  optional StorageTypeProto storageType = 3 [default = DISK];
+}
+
+message StorageReportProto {
+  required string storageUuid = 1 [ deprecated = true ];
+  optional bool failed = 2 [ default = false ];
+  optional uint64 capacity = 3 [ default = 0 ];
+  optional uint64 dfsUsed = 4 [ default = 0 ];
+  optional uint64 remaining = 5 [ default = 0 ];
+  optional uint64 blockPoolUsed = 6 [ default = 0 ];
+  optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
+}
+
+/**
  * Summary of a file or directory
  */
 message ContentSummaryProto {
@@ -380,6 +404,7 @@ message BlockWithLocationsProto {
   required BlockProto block = 1;   // Block
   repeated string datanodeUuids = 2; // Datanodes with replicas of the block
   repeated string storageUuids = 3;  // Storages with replicas of the block
+  repeated StorageTypeProto storageTypes = 4;
 }
 
 /**

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Wed Aug 20 01:34:29 2014
@@ -2052,4 +2052,12 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.datanode.block.id.layout.upgrade.threads</name>
+  <value>12</value>
+  <description>The number of threads to use when creating hard links from
+    current to previous blocks during upgrade of a DataNode to block ID-based
+    block layout (see HDFS-6482 for details on the layout).</description>
+</property>
+
 </configuration>

Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1603348-1619017

Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1613508-1619017

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html Wed Aug 20 01:34:29 2014
@@ -281,7 +281,7 @@
   {#DeadNodes}
   <tr class="danger">
     <td>{name} ({xferaddr})</td>
-    <td>{lastContact}</td>
+    <td>{#helper_lastcontact_tostring value="{lastContact}"/}</td>
     <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
     <td>-</td>
     <td>-</td>

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js Wed Aug 20 01:34:29 2014
@@ -139,6 +139,14 @@
   }
 
   function load_datanode_info() {
+
+    var HELPERS = {
+      'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
+        var value = dust.helpers.tap(params.value, chunk, ctx);
+        return chunk.write('' + new Date(Date.now()-1000*Number(value)));
+      }
+    };
+
     function workaround(r) {
       function node_map_to_array(nodes) {
         var res = [];
@@ -160,7 +168,8 @@
       '/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
       guard_with_startup_progress(function (resp) {
         var data = workaround(resp.beans[0]);
-        dust.render('datanode-info', data, function(err, out) {
+        var base = dust.makeBase(HELPERS);
+        dust.render('datanode-info', base.push(data), function(err, out) {
           $('#tab-datanode').html(out);
           $('#ui-tabs a[href="#tab-datanode"]').tab('show');
         });

Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1613508-1619017

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Wed Aug 20 01:34:29 2014
@@ -44,18 +44,24 @@ HDFS NFS Gateway
 
 * {Configuration}
 
-   The user running the NFS-gateway must be able to proxy all the users using the NFS mounts. 
-   For instance, if user 'nfsserver' is running the gateway, and users belonging to the groups 'nfs-users1'
-   and 'nfs-users2' use the NFS mounts, then in core-site.xml of the namenode, the following must be set
-   (NOTE: replace 'nfsserver' with the user name starting the gateway in your cluster):
+   The NFS gateway uses a proxy user to proxy all users accessing the NFS mounts.
+   In non-secure mode, the user running the gateway is the proxy user, while in secure mode the
+   user in the Kerberos keytab is the proxy user. Suppose the proxy user is 'nfsserver'
+   and users belonging to the groups 'users-group1'
+   and 'users-group2' use the NFS mounts; then, in core-site.xml of the NameNode, the following
+   two properties must be set, and only the NameNode needs a restart after the configuration change
+   (NOTE: replace the string 'nfsserver' with the proxy user name in your cluster):
 
 ----
 <property>
   <name>hadoop.proxyuser.nfsserver.groups</name>
-  <value>nfs-users1,nfs-users2</value>
+  <value>root,users-group1,users-group2</value>
   <description>
-         The 'nfsserver' user is allowed to proxy all members of the 'nfs-users1' and 
-         'nfs-users2' groups. Set this to '*' to allow nfsserver user to proxy any group.
+         The 'nfsserver' user is allowed to proxy all members of the 'users-group1' and 
+         'users-group2' groups. Note that in most cases you will need to include the
+         group "root" because the user "root" (which usually belongs to the "root" group) will
+         generally be the user that initially executes the mount on the NFS client system. 
+         Set this to '*' to allow nfsserver user to proxy any group.
   </description>
 </property>
 ----
@@ -72,7 +78,9 @@ HDFS NFS Gateway
 ----
 
    The above are the only required configuration for the NFS gateway in non-secure mode. For Kerberized
-   hadoop clusters, the following configurations need to be added to hdfs-site.xml:
+   hadoop clusters, the following configurations need to be added to hdfs-site.xml for the gateway (NOTE: replace 
+   the string "nfsserver" with the proxy user name and ensure the user contained in the keytab is
+   also the same proxy user):
 
 ----
   <property>
@@ -87,6 +95,8 @@ HDFS NFS Gateway
     <value>nfsserver/_HOST@YOUR-REALM.COM</value>
   </property>
 ----
+  
+   The rest of the NFS gateway configurations are optional for both secure and non-secure mode.
 
    The AIX NFS client has a {{{https://issues.apache.org/jira/browse/HDFS-6549}few known issues}}
    that prevent it from working correctly by default with the HDFS NFS
@@ -108,7 +118,7 @@ HDFS NFS Gateway
    have been committed.
 
    It's strongly recommended for the users to update a few configuration properties based on their use
-   cases. All the related configuration properties can be added or updated in hdfs-site.xml.
+   cases. All the following configuration properties can be added or updated in hdfs-site.xml.
   
    * If the client mounts the export with access time update allowed, make sure the following 
     property is not disabled in the configuration file. Only NameNode needs to restart after 
@@ -145,36 +155,6 @@ HDFS NFS Gateway
   </property>
 ---- 
 
-   * For optimal performance, it is recommended that rtmax be updated to
-     1MB. However, note that this 1MB is a per client allocation, and not
-     from a shared memory pool, and therefore a larger value may adversely 
-     affect small reads, consuming a lot of memory. The maximum value of 
-     this property is 1MB.
-
-----
-<property>
-  <name>nfs.rtmax</name>
-  <value>1048576</value>
-  <description>This is the maximum size in bytes of a READ request
-    supported by the NFS gateway. If you change this, make sure you
-    also update the nfs mount's rsize(add rsize= # of bytes to the 
-    mount directive).
-  </description>
-</property>
-----
-
-----
-<property>
-  <name>nfs.wtmax</name>
-  <value>65536</value>
-  <description>This is the maximum size in bytes of a WRITE request
-    supported by the NFS gateway. If you change this, make sure you
-    also update the nfs mount's wsize(add wsize= # of bytes to the 
-    mount directive).
-  </description>
-</property>
-----
-
   * By default, the export can be mounted by any client. To better control the access,
     users can update the following property. The value string contains machine name and
     access privilege, separated by whitespace
@@ -229,7 +209,7 @@ HDFS NFS Gateway
    [[2]] Start package included portmap (needs root privileges):
 
 -------------------------
-     hadoop portmap
+     hdfs portmap
   
      OR
 
@@ -238,11 +218,13 @@ HDFS NFS Gateway
 
    [[3]] Start mountd and nfsd.
    
-     No root privileges are required for this command. However, ensure that the user starting
-     the Hadoop cluster and the user starting the NFS gateway are same.
+     No root privileges are required for this command. In non-secure mode, the NFS gateway
+     should be started by the proxy user mentioned at the beginning of this user guide. 
+     In secure mode, any user can start the NFS gateway 
+     as long as the user has read access to the Kerberos keytab defined in "nfs.keytab.file".
 
 -------------------------
-     hadoop nfs3
+     hdfs nfs3
 
      OR
 
@@ -339,7 +321,10 @@ HDFS NFS Gateway
 -------------------------------------------------------------------
 
   Then the users can access HDFS as part of the local file system except that, 
-  hard link and random write are not supported yet.
+  hard link and random write are not supported yet. To optimize the performance
+  of large file I/O, one can increase the NFS transfer size (rsize and wsize) during mount.
+  By default, the NFS gateway supports 1MB as the maximum transfer size. For a larger
+  transfer size, one needs to update "nfs.rtmax" and "nfs.wtmax" in hdfs-site.xml.
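+
+  As an illustration (the server name, mount point, and base mount options below
+  are placeholders that should be adapted to the local setup), a client can
+  request the 1MB maximum transfer size at mount time:
+
+-------------------------
+     mount -t nfs -o vers=3,proto=tcp,nolock,rsize=1048576,wsize=1048576 <nfs_server>:/ <mount_point>
+-------------------------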
 
 * {Allow mounts from unprivileged clients}
 

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm Wed Aug 20 01:34:29 2014
@@ -143,8 +143,8 @@ HDFS Users Guide
 
 **  DFSAdmin Command
 
-   The <<<bin/hadoop dfsadmin>>> command supports a few HDFS administration
-   related operations. The <<<bin/hadoop dfsadmin -help>>> command lists all the
+   The <<<bin/hdfs dfsadmin>>> command supports a few HDFS administration
+   related operations. The <<<bin/hdfs dfsadmin -help>>> command lists all the
    commands currently supported. For e.g.:
 
      * <<<-report>>>: reports basic statistics of HDFS. Some of this
@@ -172,7 +172,7 @@ HDFS Users Guide
       of racks and datanodes attached to the racks as viewed by the
        NameNode.
 
-   For command usage, see {{{../hadoop-common/CommandsManual.html#dfsadmin}dfsadmin}}.
+   For command usage, see {{{./HDFSCommands.html#dfsadmin}dfsadmin}}.
 
 * Secondary NameNode
 
@@ -207,7 +207,7 @@ HDFS Users Guide
    primary NameNode if necessary.
 
    For command usage,
-   see {{{../hadoop-common/CommandsManual.html#secondarynamenode}secondarynamenode}}.
+   see {{{./HDFSCommands.html#secondarynamenode}secondarynamenode}}.
 
 * Checkpoint Node
 
@@ -249,7 +249,7 @@ HDFS Users Guide
    Multiple checkpoint nodes may be specified in the cluster configuration
    file.
 
-   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
+   For command usage, see {{{./HDFSCommands.html#namenode}namenode}}.
 
 * Backup Node
 
@@ -291,7 +291,7 @@ HDFS Users Guide
 
    For a complete discussion of the motivation behind the creation of the
    Backup node and Checkpoint node, see {{{https://issues.apache.org/jira/browse/HADOOP-4539}HADOOP-4539}}.
-   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
+   For command usage, see {{{./HDFSCommands.html#namenode}namenode}}.
 
 * Import Checkpoint
 
@@ -314,7 +314,7 @@ HDFS Users Guide
    verifies that the image in <<<dfs.namenode.checkpoint.dir>>> is consistent,
    but does not modify it in any way.
 
-   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
+   For command usage, see {{{./HDFSCommands.html#namenode}namenode}}.
 
 * Balancer
 
@@ -341,7 +341,7 @@ HDFS Users Guide
    A brief administrator's guide for balancer is available at 
    {{{https://issues.apache.org/jira/browse/HADOOP-1652}HADOOP-1652}}.
 
-   For command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
+   For command usage, see {{{./HDFSCommands.html#balancer}balancer}}.
 
 * Rack Awareness
 
@@ -368,7 +368,7 @@ HDFS Users Guide
    allow any modifications to file system or blocks. Normally the NameNode
    leaves Safemode automatically after the DataNodes have reported that
    most file system blocks are available. If required, HDFS could be
-   placed in Safemode explicitly using <<<bin/hadoop dfsadmin -safemode>>>
+   placed in Safemode explicitly using <<<bin/hdfs dfsadmin -safemode>>>
    command. NameNode front page shows whether Safemode is on or off. A
    more detailed description and configuration is maintained as JavaDoc
    for <<<setSafeMode()>>>.
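+
+   For instance (an illustrative session using the standard safemode
+   subcommands), an administrator can query and toggle Safemode with:
+
+----
+   bin/hdfs dfsadmin -safemode get
+   bin/hdfs dfsadmin -safemode enter
+   bin/hdfs dfsadmin -safemode leave
+----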
@@ -383,8 +383,8 @@ HDFS Users Guide
    most of the recoverable failures. By default fsck ignores open files
    but provides an option to select all files during reporting. The HDFS
    fsck command is not a Hadoop shell command. It can be run as
-   <<<bin/hadoop fsck>>>. For command usage, see 
-   {{{../hadoop-common/CommandsManual.html#fsck}fsck}}. fsck can be run on
+   <<<bin/hdfs fsck>>>. For command usage, see
+   {{{./HDFSCommands.html#fsck}fsck}}. fsck can be run on
    the whole file system or on a subset of files.
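+
+   For instance (an illustrative invocation; the path and reporting options may
+   be adjusted as needed), the following checks the entire namespace and lists
+   the files and blocks it visits:
+
+----
+   bin/hdfs fsck / -files -blocks
+----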
 
 * fetchdt
@@ -395,11 +395,11 @@ HDFS Users Guide
    Utility uses either RPC or HTTPS (over Kerberos) to get the token, and
    thus requires kerberos tickets to be present before the run (run kinit
    to get the tickets). The HDFS fetchdt command is not a Hadoop shell
-   command. It can be run as <<<bin/hadoop fetchdt DTfile>>>. After you got
+   command. It can be run as <<<bin/hdfs fetchdt DTfile>>>. Once you have
    the token you can run an HDFS command without having Kerberos tickets,
    by pointing <<<HADOOP_TOKEN_FILE_LOCATION>>> environmental variable to the
    delegation token file. For command usage, see
-   {{{../hadoop-common/CommandsManual.html#fetchdt}fetchdt}} command.
+   {{{./HDFSCommands.html#fetchdt}fetchdt}} command.
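+
+   For instance (an illustrative session; the token file location is a
+   placeholder), a delegation token can be fetched and then used for a
+   subsequent command without Kerberos tickets:
+
+----
+   kinit
+   bin/hdfs fetchdt /tmp/my.delegation.token
+   HADOOP_TOKEN_FILE_LOCATION=/tmp/my.delegation.token bin/hdfs dfs -ls /
+----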
 
 * Recovery Mode
 
@@ -533,5 +533,4 @@ HDFS Users Guide
      * Explore {{{./hdfs-default.xml}hdfs-default.xml}}. It includes
        brief description of most of the configuration variables available.
 
-     * {{{../hadoop-common/CommandsManual.html}Hadoop Commands Guide}}:
-       Hadoop commands usage.
+     * {{{./HDFSCommands.html}HDFS Commands Guide}}: HDFS commands usage.

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Wed Aug 20 01:34:29 2014
@@ -82,6 +82,9 @@ WebHDFS REST API
     * {{{List all XAttrs}<<<LISTXATTRS>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)
 
+    * {{{Check access}<<<CHECKACCESS>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access)
+
   * HTTP PUT
 
     * {{{Create and Write to a File}<<<CREATE>>>}}
@@ -927,6 +930,28 @@ Transfer-Encoding: chunked
   {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
 
 
+** {Check access}
+
+  * Submit an HTTP GET request.
+  
++---------------------------------
+curl -i -X GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS
+                              &fsaction=<FSACTION>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access
+    
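+
+  For example (an illustrative request; the host, port, and path are
+  placeholders), the call below asks whether the caller has read and execute
+  access:
+
++---------------------------------
+curl -i -X GET "http://<HOST>:<PORT>/webhdfs/v1/tmp/testdir?op=CHECKACCESS&fsaction=r-x"
++---------------------------------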
+
 * {Extended Attributes(XAttrs) Operations}
 
 ** {Set XAttr}
@@ -2166,6 +2191,25 @@ var tokenProperties =
   {{Proxy Users}}
 
 
+** {Fs Action}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<fsaction>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | File system operation: read/write/execute |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | null (an invalid value) |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | Strings matching regex pattern \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+
+  See also:
+  {{{Check access}<<<CHECKACCESS>>>}}
+
 ** {Group}
 
 *----------------+-------------------------------------------------------------------+

Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1603348-1619017

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java Wed Aug 20 01:34:29 2014
@@ -47,7 +47,6 @@ import org.mockito.Mockito;
 public class TestGenericRefresh {
   private static MiniDFSCluster cluster;
   private static Configuration config;
-  private static final int NNPort = 54222;
 
   private static RefreshHandler firstHandler;
   private static RefreshHandler secondHandler;
@@ -57,8 +56,8 @@ public class TestGenericRefresh {
     config = new Configuration();
     config.set("hadoop.security.authorization", "true");
 
-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
+    FileSystem.setDefaultUri(config, "hdfs://localhost:0");
+    cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
   }
 
@@ -103,7 +102,8 @@ public class TestGenericRefresh {
   @Test
   public void testInvalidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"};
+    String [] args = new String[]{"-refresh", "localhost:" + 
+        cluster.getNameNodePort(), "unregisteredIdentity"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
   }
@@ -111,7 +111,8 @@ public class TestGenericRefresh {
   @Test
   public void testValidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh",
+        "localhost:" + cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should succeed", 0, exitCode);
 
@@ -124,11 +125,13 @@ public class TestGenericRefresh {
   @Test
   public void testVariableArgs() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return 2", 2, exitCode);
 
-    exitCode = admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"});
+    exitCode = admin.run(new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one", "two"});
     assertEquals("DFSAdmin should now return 3", 3, exitCode);
 
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
@@ -141,7 +144,8 @@ public class TestGenericRefresh {
 
     // And now this should fail
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return -1", -1, exitCode);
   }
@@ -161,7 +165,8 @@ public class TestGenericRefresh {
 
     // this should trigger both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "sharedId", "one"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // -1 because one of the responses is unregistered
 
@@ -189,7 +194,8 @@ public class TestGenericRefresh {
 
     // We refresh both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "shared"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes
 
@@ -215,7 +221,8 @@ public class TestGenericRefresh {
     RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
 
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "exceptional"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // Exceptions result in a -1
 

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java Wed Aug 20 01:34:29 2014
@@ -24,6 +24,8 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.net.BindException;
+import java.util.Random;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
@@ -42,24 +44,42 @@ public class TestRefreshCallQueue {
   private FileSystem fs;
   static int mockQueueConstructions;
   static int mockQueuePuts;
-  private static final int NNPort = 54222;
-  private static String CALLQUEUE_CONFIG_KEY = "ipc." + NNPort + ".callqueue.impl";
+  private String callQueueConfigKey = "";
+  private final Random rand = new Random();
 
   @Before
   public void setUp() throws Exception {
     // We want to count additional events, so we reset here
     mockQueueConstructions = 0;
     mockQueuePuts = 0;
+    int portRetries = 5;
+    int nnPort;
 
-    config = new Configuration();
-    config.setClass(CALLQUEUE_CONFIG_KEY,
-        MockCallQueue.class, BlockingQueue.class);
-    config.set("hadoop.security.authorization", "true");
-
-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    fs = FileSystem.get(config);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
-    cluster.waitActive();
+    for (; portRetries > 0; --portRetries) {
+      // Pick a random port in the range [30000,60000).
+      nnPort = 30000 + rand.nextInt(30000);  
+      config = new Configuration();
+      callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
+      config.setClass(callQueueConfigKey,
+          MockCallQueue.class, BlockingQueue.class);
+      config.set("hadoop.security.authorization", "true");
+
+      FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
+      fs = FileSystem.get(config);
+      
+      try {
+        cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
+        cluster.waitActive();
+        break;
+      } catch (BindException be) {
+        // Retry with a different port number.
+      }
+    }
+    
+    if (portRetries == 0) {
+      // Bail if we get very unlucky with our choice of ports.
+      fail("Failed to pick an ephemeral port for the NameNode RPC server.");
+    }
   }
 
   @After

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Aug 20 01:34:29 2014
@@ -44,6 +44,9 @@ import org.apache.hadoop.hdfs.protocol.d
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -1300,4 +1303,33 @@ public class DFSTestUtil {
       sockDir.close();
     }
   }
+
+  /**
+   * @return the node which is expected to run the recovery of the
+   * given block, which is known to be under construction inside the
+   * given NameNode.
+   */
+  public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
+      ExtendedBlock blk) {
+    BlockManager bm0 = nn.getNamesystem().getBlockManager();
+    BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
+    assertTrue("Block " + blk + " should be under construction, " +
+        "got: " + storedBlock,
+        storedBlock instanceof BlockInfoUnderConstruction);
+    BlockInfoUnderConstruction ucBlock =
+      (BlockInfoUnderConstruction)storedBlock;
+    // We expect that the replica with the most recent heart beat will be
+    // the one to be in charge of the synchronization / recovery protocol.
+    final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
+    DatanodeStorageInfo expectedPrimary = storages[0];
+    long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
+    for (int i = 1; i < storages.length; i++) {
+      final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
+      if (lastUpdate > mostRecentLastUpdate) {
+        expectedPrimary = storages[i];
+        mostRecentLastUpdate = lastUpdate;
+      }
+    }
+    return expectedPrimary.getDatanodeDescriptor();
+  }
 }

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Aug 20 01:34:29 2014
@@ -2353,8 +2353,8 @@ public class MiniDFSCluster {
    * @return data file corresponding to the block
    */
   public static File getBlockFile(File storageDir, ExtendedBlock blk) {
-    return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), 
-        blk.getBlockName());
+    return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
+        blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName());
   }
 
   /**
@@ -2364,10 +2364,32 @@ public class MiniDFSCluster {
    * @return metadata file corresponding to the block
    */
   public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
-    return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), 
-        blk.getBlockName() + "_" + blk.getGenerationStamp() +
-        Block.METADATA_EXTENSION);
-    
+    return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
+        blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName() + "_" +
+        blk.getGenerationStamp() + Block.METADATA_EXTENSION);
+  }
+
+  /**
+   * Return all block metadata files in given directory (recursive search)
+   */
+  public static List<File> getAllBlockMetadataFiles(File storageDir) {
+    List<File> results = new ArrayList<File>();
+    File[] files = storageDir.listFiles();
+    if (files == null) {
+      return null;
+    }
+    for (File f : files) {
+      if (f.getName().startsWith("blk_") && f.getName().endsWith(
+          Block.METADATA_EXTENSION)) {
+        results.add(f);
+      } else if (f.isDirectory()) {
+        List<File> subdirResults = getAllBlockMetadataFiles(f);
+        if (subdirResults != null) {
+          results.addAll(subdirResults);
+        }
+      }
+    }
+    return results;
   }
 
   /**

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java Wed Aug 20 01:34:29 2014
@@ -52,6 +52,7 @@ import org.apache.hadoop.io.retry.Defaul
 import org.apache.hadoop.io.retry.FailoverProxyProvider;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.StandardSocketFactory;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -89,6 +90,11 @@ public class TestDFSClientFailover {
     cluster.shutdown();
   }
 
+  @After
+  public void clearConfig() {
+    SecurityUtil.setTokenServiceUseIp(true);
+  }
+
   /**
    * Make sure that client failover works when an active NN dies and the standby
    * takes over.
@@ -323,6 +329,7 @@ public class TestDFSClientFailover {
   /**
    * Test to verify legacy proxy providers are correctly wrapped.
    */
+  @Test
   public void testWrappedFailoverProxyProvider() throws Exception {
     // setup the config with the dummy provider class
     Configuration config = new HdfsConfiguration(conf);
@@ -332,6 +339,9 @@ public class TestDFSClientFailover {
         DummyLegacyFailoverProxyProvider.class.getName());
     Path p = new Path("hdfs://" + logicalName + "/");
 
+    // not to use IP address for token service
+    SecurityUtil.setTokenServiceUseIp(false);
+
     // Logical URI should be used.
     assertTrue("Legacy proxy providers should use logical URI.",
         HAUtil.useLogicalUri(config, p.toUri()));
@@ -340,6 +350,7 @@ public class TestDFSClientFailover {
   /**
    * Test to verify IPFailoverProxyProvider is not requiring logical URI.
    */
+  @Test
   public void testIPFailoverProxyProviderLogicalUri() throws Exception {
     // setup the config with the IP failover proxy provider class
     Configuration config = new HdfsConfiguration(conf);

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java Wed Aug 20 01:34:29 2014
@@ -79,8 +79,8 @@ public class TestDFSFinalize {
     File dnCurDirs[] = new File[dataNodeDirs.length];
     for (int i = 0; i < dataNodeDirs.length; i++) {
       dnCurDirs[i] = new File(dataNodeDirs[i],"current");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i]),
-                   UpgradeUtilities.checksumMasterDataNodeContents());
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
+              false), UpgradeUtilities.checksumMasterDataNodeContents());
     }
     for (int i = 0; i < nameNodeDirs.length; i++) {
       assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
@@ -96,8 +96,9 @@ public class TestDFSFinalize {
         assertFalse(new File(bpRoot,"previous").isDirectory());
         
         File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED);
-        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurFinalizeDir),
-                     UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
+        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+                bpCurFinalizeDir, true),
+                UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
       }
     }
   }

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java Wed Aug 20 01:34:29 2014
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
@@ -36,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
@@ -421,6 +425,79 @@ public class TestDFSPermission {
     }
   }
 
+  @Test
+  public void testAccessOwner() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p1 = new Path("/p1");
+    rootFs.mkdirs(p1);
+    rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME);
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.setPermission(p1, new FsPermission((short) 0444));
+    fs.access(p1, FsAction.READ);
+    try {
+      fs.access(p1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
+    Path badPath = new Path("/bad/bad");
+    try {
+      fs.access(badPath, FsAction.READ);
+      fail("The access call should have failed");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessGroupMember() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p2 = new Path("/p2");
+    rootFs.mkdirs(p2);
+    rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
+    rootFs.setPermission(p2, new FsPermission((short) 0740));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p2, FsAction.READ);
+    try {
+      fs.access(p2, FsAction.EXECUTE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessOthers() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p3 = new Path("/p3");
+    rootFs.mkdirs(p3);
+    rootFs.setPermission(p3, new FsPermission((short) 0774));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p3, FsAction.READ);
+    try {
+      fs.access(p3, FsAction.READ_WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
   /* Check if namenode performs permission checking correctly 
    * for the given user for operations mkdir, open, setReplication, 
    * getFileInfo, isDirectory, exists, getContentLength, list, rename,

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Wed Aug 20 01:34:29 2014
@@ -81,7 +81,7 @@ public class TestDFSRollback {
         break;
       case DATA_NODE:
         assertEquals(
-            UpgradeUtilities.checksumContents(nodeType, curDir),
+            UpgradeUtilities.checksumContents(nodeType, curDir, false),
             UpgradeUtilities.checksumMasterDataNodeContents());
         break;
       }