Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2008/08/01 09:33:54 UTC

svn commit: r681617 - in /hadoop/core/trunk: CHANGES.txt src/contrib/fuse-dfs/README src/contrib/fuse-dfs/build.xml src/contrib/fuse-dfs/src/fuse_dfs.c src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh src/contrib/fuse-dfs/src/test/TestFuseDFS.java

Author: dhruba
Date: Fri Aug  1 00:33:53 2008
New Revision: 681617

URL: http://svn.apache.org/viewvc?rev=681617&view=rev
Log:
HADOOP-3796. fuse-dfs configuration is implemented as file system
mount options. (Pete Wyckoff via dhruba)


Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/contrib/fuse-dfs/README
    hadoop/core/trunk/src/contrib/fuse-dfs/build.xml
    hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs.c
    hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh
    hadoop/core/trunk/src/contrib/fuse-dfs/src/test/TestFuseDFS.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=681617&r1=681616&r2=681617&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Fri Aug  1 00:33:53 2008
@@ -205,6 +205,9 @@
     HADOOP-3131. Fix reduce progress reporting for compressed intermediate
     data. (Matei Zaharia via acmurthy) 
 
+    HADOOP-3796. fuse-dfs configuration is implemented as file system
+    mount options. (Pete Wyckoff via dhruba)
+
 Release 0.18.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/trunk/src/contrib/fuse-dfs/README
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/fuse-dfs/README?rev=681617&r1=681616&r2=681617&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/fuse-dfs/README (original)
+++ hadoop/core/trunk/src/contrib/fuse-dfs/README Fri Aug  1 00:33:53 2008
@@ -13,14 +13,10 @@
 It allows one to mount HDFS as a Unix filesystem and optionally export
 that mount point to other machines.
 
-For now, writes are disabled as this requires Hadoop-1700 - file
-appends which I guess won't be ready till 0.18 ish ??.
-
-rmdir, mv, mkdir, rm are all supported. just not cp, touch, ...
+cp, write, rmdir, mv, mkdir, and rm are all supported, but permissions are not.
 
 BUILDING:
 
-
 Requirements:
 
    1. a Linux kernel > 2.6.9 or a kernel module from FUSE - i.e., you
@@ -72,12 +68,37 @@
 
 1. add the following to /etc/fstab -
   fuse_dfs#dfs://hadoop_server.foo.com:9000 /mnt/dfs fuse
-  allow_other,rw 0 0
+  -oallow_other,rw,-ousetrash 0 0
 
 2. mount /mnt/dfs Expect problems with not finding fuse_dfs. You will
    need to probably add this to /sbin and then problems finding the
    above 3 libraries. Add these using ldconfig.
 
+
+Fuse DFS takes the following mount options (i.e., on the command line or in the comma-separated list of options in /etc/fstab):
+
+-oserver=%s (optional way to specify the server, but in fstab use the format above)
+-oport=%d (optional port; see the comment on the server option)
+-oentry_timeout=%d (how long directory entries are cached by fuse, in seconds - see the fuse docs)
+-oattribute_timeout=%d (how long attributes are cached by fuse, in seconds - see the fuse docs)
+-oprotected=%s (a colon-separated list of directories that fuse-dfs should not allow to be deleted or moved - e.g., /user:/tmp)
+-oprivate (not often used, but means only the person who does the mount can use the filesystem - aka ! allow_other in fuse speak)
+-ordbuffer=%d (how large a buffer, in KBs, fuse-dfs should use when doing hdfs reads)
+ro (mount read-only)
+rw (mount read-write)
+-ousetrash (should fuse-dfs move deleted things to /Trash rather than removing them)
+-onotrash (the opposite of usetrash)
+-odebug (do not daemonize - aka -d in fuse speak)
+
+The defaults are:
+
+entry_timeout, attribute_timeout = 60 seconds
+rdbuffer = 10 MB
+protected = null
+debug = 0
+notrash
+private = 0
+
 --------------------------------------------------------------------------------
 
 
@@ -96,3 +117,22 @@
 you may want to ensure certain directories cannot be deleted from the
 shell until the FS has permissions. You can set this in the build.xml
 file in src/contrib/fuse-dfs/build.xml
+
+--------------------------------------------------------------------------------
+
+RECOMMENDATIONS:
+
+1. From /bin, ln -s $HADOOP_HOME/contrib/fuse-dfs/fuse_dfs* .
+2. Always start with debug on so you can see if you are missing a classpath or something like that.
+
+
+--------------------------------------------------------------------------------
+
+PERFORMANCE:
+
+1. If you alias ls to ls --color=auto and list a directory with thousands of files, expect it to be slow; at tens of thousands of files, expect it to be very slow. This is because --color=auto causes ls to stat every file in the directory, and fuse-dfs does not cache attribute entries when doing a readdir. See https://issues.apache.org/jira/browse/HADOOP-3797
+
+2. Writes are approximately 33% slower than the DFSClient. How to optimize this is TBD. See https://issues.apache.org/jira/browse/HADOOP-3805
+
+3. Reads are ~20-30% slower even with the read buffering. 
+
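[For reference, a complete command-line mount using the options documented above might look like the following sketch. The hostname and mount point are the README's own examples, the timeouts shown are the defaults, and the binary is assumed to be run via fuse_dfs_wrapper.sh or with the required libraries already on LD_LIBRARY_PATH:

  fuse_dfs dfs://hadoop_server.foo.com:9000 /mnt/dfs -oentry_timeout=60 \
      -oattribute_timeout=60 -ousetrash -oprotected=/user:/tmp rw
]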

Modified: hadoop/core/trunk/src/contrib/fuse-dfs/build.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/fuse-dfs/build.xml?rev=681617&r1=681616&r2=681617&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/fuse-dfs/build.xml (original)
+++ hadoop/core/trunk/src/contrib/fuse-dfs/build.xml Fri Aug  1 00:33:53 2008
@@ -41,7 +41,6 @@
       <env key="OS_NAME" value="${os.name}"/>
       <env key="OS_ARCH" value="${os.arch}"/>
       <env key="HADOOP_HOME" value="${hadoop.root}"/>
-      <env key="PROTECTED_PATHS" value="/,/Trash,/user"/>
       <env key="PACKAGE_VERSION" value="0.1.0"/>
     </exec>
     <mkdir dir="${build.dir}"/>
@@ -73,10 +72,11 @@
     </exec>
   </target>
 
- <target name="test-unix">
-   <antcall target="hadoopbuildcontrib.test">
+  <target name="test" if="libhdfs-fuse">
+    <echo message="testing FuseDFS ..."/>
+   <antcall target="hadoopbuildcontrib.test"> 
    </antcall>
- </target>
+  </target>  
 
   <!-- override clean target !-->
   <target name="clean" depends="check-libhdfs-fuse" if="libhdfs-fuse">

Modified: hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs.c?rev=681617&r1=681616&r2=681617&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs.c (original)
+++ hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs.c Fri Aug  1 00:33:53 2008
@@ -46,26 +46,46 @@
 #include <strings.h>
 
 #include <hdfs.h>
+#include <stddef.h>
 
 // Constants
 //
 static const int default_id       = 99; // nobody  - not configurable since soon uids in dfs, yeah!
-static const size_t rd_buf_size   = 128 * 1024;
 static const int blksize = 512;
-static const size_t rd_cache_buf_size = 10*1024*1024;//how much of reads to buffer here
+static const char *const TrashPrefixDir = "/Trash";
+static const char *const TrashDir = "/Trash/Current";
+#define OPTIMIZED_READS 1
+static const char *program;
+
 
 /** options for fuse_opt.h */
 struct options {
+  char* protected;
   char* server;
   int port;
   int debug;
-  int nowrites;
-  int no_trash;
-}options;
-
+  int read_only;
+  int usetrash;
+  int entry_timeout;
+  int attribute_timeout;
+  int private;
+  size_t rdbuffer_size;
+} options;
+
+void print_options() {
+  fprintf(stderr,"options:\n");
+  fprintf(stderr, "\tprotected=%s\n",options.protected);
+  fprintf(stderr, "\tserver=%s\n",options.server);
+  fprintf(stderr, "\tport=%d\n",options.port);
+  fprintf(stderr, "\tdebug=%d\n",options.debug);
+  fprintf(stderr, "\tread_only=%d\n",options.read_only);
+  fprintf(stderr, "\tusetrash=%d\n",options.usetrash);
+  fprintf(stderr, "\tentry_timeout=%d\n",options.entry_timeout);
+  fprintf(stderr, "\tattribute_timeout=%d\n",options.attribute_timeout);
+  fprintf(stderr, "\tprivate=%d\n",options.private);
+  fprintf(stderr, "\trdbuffer_size=%d (KBs)\n",(int)options.rdbuffer_size/1024);
+}
 
-static const char *const TrashPrefixDir = "/Trash";
-static const char *const TrashDir = "/Trash/Current";
 
 typedef struct dfs_fh_struct {
   hdfsFile hdfsFH;
@@ -74,7 +94,6 @@
   off_t startOffset; //where the buffer starts in the file
 } dfs_fh;
 
-#include <stddef.h>
 
 /** macro to define options */
 #define DFSFS_OPT_KEY(t, p, v) { t, offsetof(struct options, p), v }
@@ -82,30 +101,38 @@
 /** keys for FUSE_OPT_ options */
 static void print_usage(const char *pname)
 {
-  fprintf(stdout,"USAGE: %s [--debug] [--help] [--version] [--nowrites] [--notrash] --server=<hadoop_servername> --port=<hadoop_port> <mntpoint> [fuse options]\n",pname);
-  fprintf(stdout,"NOTE: a useful fuse option is -o allow_others and -o default_permissions\n");
-  fprintf(stdout,"NOTE: optimizations include -o entry_timeout=500 -o attr_timeout=500\n");
+  fprintf(stdout,"USAGE: %s [debug] [--help] [--version] [protected=<colon_seped_list_of_paths] [rw] [notrash] [usetrash] [private (single user)] [ro] server=<hadoop_servername> port=<hadoop_port> [entry_timeout=<secs>] [attribute_timeout=<secs>] <mntpoint> [fuse options]\n",pname);
   fprintf(stdout,"NOTE: debugging option for fuse is -debug\n");
 }
 
-static char **protectedpaths;
-
-#define OPTIMIZED_READS 1
 
 enum
   {
     KEY_VERSION,
     KEY_HELP,
+    KEY_USETRASH,
+    KEY_NOTRASH,
+    KEY_RO,
+    KEY_RW,
+    KEY_PRIVATE,
+    KEY_DEBUG,
   };
 
 static struct fuse_opt dfs_opts[] =
   {
-    DFSFS_OPT_KEY("--server=%s", server, 0),
-    DFSFS_OPT_KEY("--port=%d", port, 0),
-    DFSFS_OPT_KEY("--debug", debug, 1),
-    DFSFS_OPT_KEY("--nowrites", nowrites, 1),
-    DFSFS_OPT_KEY("--notrash", no_trash, 1),
-
+    DFSFS_OPT_KEY("server=%s", server, 0),
+    DFSFS_OPT_KEY("entry_timeout=%d", entry_timeout, 0),
+    DFSFS_OPT_KEY("attribute_timeout=%d", attribute_timeout, 0),
+    DFSFS_OPT_KEY("protected=%s", protected, 0),
+    DFSFS_OPT_KEY("port=%d", port, 0),
+    DFSFS_OPT_KEY("rdbuffer=%d", rdbuffer_size,0),
+
+    FUSE_OPT_KEY("private", KEY_PRIVATE),
+    FUSE_OPT_KEY("ro", KEY_RO),
+    FUSE_OPT_KEY("debug", KEY_DEBUG),
+    FUSE_OPT_KEY("rw", KEY_RW),
+    FUSE_OPT_KEY("usetrash", KEY_USETRASH),
+    FUSE_OPT_KEY("notrash", KEY_NOTRASH),
     FUSE_OPT_KEY("-v",             KEY_VERSION),
     FUSE_OPT_KEY("--version",      KEY_VERSION),
     FUSE_OPT_KEY("-h",             KEY_HELP),
@@ -113,30 +140,63 @@
     FUSE_OPT_END
   };
 
-static const char *program;
+
 
 int dfs_options(void *data, const char *arg, int key,  struct fuse_args *outargs)
 {
+  (void) data;
 
-  if (key == KEY_VERSION) {
+  switch (key) {
+  case FUSE_OPT_KEY_OPT:
+    fprintf(stderr,"fuse-dfs ignoring option %s\n",arg);
+    return 1;
+  case  KEY_VERSION:
     fprintf(stdout,"%s %s\n",program,_FUSE_DFS_VERSION);
     exit(0);
-  } else if (key == KEY_HELP) {
+  case KEY_HELP:
     print_usage(program);
     exit(0);
-  } else {
+  case KEY_USETRASH:
+    options.usetrash = 1;
+    break;
+  case KEY_NOTRASH:
+    options.usetrash = 0;
+    break;
+  case KEY_RO:
+    options.read_only = 1;
+    break;
+  case KEY_RW:
+    options.read_only = 0;
+    break;
+  case KEY_PRIVATE:
+    options.private = 1;
+    break;
+  case KEY_DEBUG:
+    fuse_opt_add_arg(outargs, "-d");
+    options.debug = 1;
+    break;
+  default: {
     // try and see if the arg is a URI for DFS
     int tmp_port;
     char tmp_server[1024];
 
     if (!sscanf(arg,"dfs://%1024[a-zA-Z0-9_.-]:%d",tmp_server,&tmp_port)) {
-      printf("didn't recognize %s\n",arg);
-      fuse_opt_add_arg(outargs,arg);
+      if(strcmp(arg,"ro") == 0) {
+	options.read_only = 1;
+      } else if(strcmp(arg,"rw") == 0) {
+	options.read_only = 0;
+      } else {
+	fprintf(stderr,"fuse-dfs didn't recognize %s,%d\n",arg,key);
+	//      fuse_opt_add_arg(outargs,arg);
+	return 1;
+      }
     } else {
       options.port = tmp_port;
       options.server = strdup(tmp_server);
+      fprintf(stderr, "port=%d,server=%s\n", options.port, options.server);
     }
   }
+  }
   return 0;
 }
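
[For readers unfamiliar with the fuse_opt machinery that dfs_opts and dfs_options plug into, a minimal, self-contained sketch of the same template mechanism follows; the struct, macro, and option names here are illustrative, not part of this patch:

  /* demo.c - sketch of fuse_opt template parsing (hypothetical names).
     Build with e.g.: gcc demo.c $(pkg-config --cflags --libs fuse)   */
  #include <fuse/fuse_opt.h>
  #include <stddef.h>
  #include <stdio.h>
  #include <string.h>

  struct demo_opts { char *server; int port; int usetrash; };

  /* same idiom as DFSFS_OPT_KEY above */
  #define DEMO_OPT(t, p, v) { t, offsetof(struct demo_opts, p), v }

  static struct fuse_opt demo_spec[] = {
    DEMO_OPT("server=%s", server,   0), /* %s: value is strdup'ed into the field */
    DEMO_OPT("port=%d",   port,     0), /* %d: value is parsed as an int         */
    DEMO_OPT("usetrash",  usetrash, 1), /* bare option: field is set to 1        */
    FUSE_OPT_END
  };

  int main(int argc, char *argv[]) {
    struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
    struct demo_opts o;
    memset(&o, 0, sizeof(o));

    /* NULL callback: options matching a template fill the struct; everything
       else is retained in args (normally handed on to fuse_main).           */
    if (fuse_opt_parse(&args, &o, demo_spec, NULL) == -1)
      return 1;

    printf("server=%s port=%d usetrash=%d\n",
           o.server ? o.server : "(none)", o.port, o.usetrash);
    fuse_opt_free_args(&args);
    return 0;
  }

Invoked as, say, ./demo -o server=namenode,port=9000,usetrash /mnt/dfs, it would print server=namenode port=9000 usetrash=1, with /mnt/dfs left in args. dfs_options above supplies a real callback on top of this so that bare arguments such as dfs://host:port and ro/rw are handled as well.]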
 
@@ -152,9 +212,10 @@
   char *nn_hostname;
   int nn_port;
   hdfsFS fs;
-  int nowrites;
-  int no_trash;
-
+  int read_only;
+  int usetrash;
+  char **protectedpaths;
+  size_t rdbuffer_size;
   // todo:
   // total hack city - use this to strip off the dfs url from the filenames
   // that the dfs API is now providing in 0.14.5
@@ -451,20 +512,20 @@
   dfs_fh *fh = (dfs_fh*)fi->fh;
   //fprintf(stderr, "Cache bounds for %s: %llu -> %llu (%d bytes). Check for offset %llu\n", path, fh->startOffset, fh->startOffset + fh->sizeBuffer, fh->sizeBuffer, offset);
   if (fh->sizeBuffer == 0  || offset < fh->startOffset || offset > (fh->startOffset + fh->sizeBuffer)  )
-  {
-    // do the actual read
-    //fprintf (stderr,"Reading %s from HDFS, offset %llu, amount %d\n", path, offset, rd_cache_buf_size);
-    const tSize num_read = hdfsPread(dfs->fs, fh->hdfsFH, offset, fh->buf, rd_cache_buf_size);
-    if (num_read < 0) {
-      syslog(LOG_ERR, "Read error - pread failed for %s with return code %d %s:%d", path, num_read, __FILE__, __LINE__);
-      hdfsDisconnect(dfs->fs);
-      dfs->fs = NULL;
-      return -EIO;
+    {
+      // do the actual read
+      //fprintf (stderr,"Reading %s from HDFS, offset %llu, amount %d\n", path, offset, dfs->rdbuffer_size);
+      const tSize num_read = hdfsPread(dfs->fs, fh->hdfsFH, offset, fh->buf, dfs->rdbuffer_size);
+      if (num_read < 0) {
+	syslog(LOG_ERR, "Read error - pread failed for %s with return code %d %s:%d", path, num_read, __FILE__, __LINE__);
+	hdfsDisconnect(dfs->fs);
+	dfs->fs = NULL;
+	return -EIO;
+      }
+      fh->sizeBuffer = num_read;
+      fh->startOffset = offset;
+      //fprintf (stderr,"Read %d bytes of %s from HDFS\n", num_read, path);
     }
-    fh->sizeBuffer = num_read;
-    fh->startOffset = offset;
-    //fprintf (stderr,"Read %d bytes of %s from HDFS\n", num_read, path);
-  }
 
   char* local_buf = fh->buf;
   const tSize cacheLookupOffset = offset - fh->startOffset;
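
[The hunk above is the core of the OPTIMIZED_READS path: each open file carries one rdbuffer_size window, which hdfsPread refills whenever a read falls outside it. A stripped-down sketch of that cache check, with hypothetical names but the same logic as dfs_read:

  #include <string.h>
  #include <sys/types.h>

  typedef struct {
    char  *buf;         /* rdbuffer_size bytes, malloc'ed at open time */
    size_t sizeBuffer;  /* how much of buf currently holds file data   */
    off_t  startOffset; /* file offset corresponding to buf[0]         */
  } read_cache;

  /* Copy from the cache, or return -1 when the caller must refill it
     with a single hdfsPread(fs, fh, offset, buf, rdbuffer_size).     */
  ssize_t cache_read(read_cache *c, char *dst, size_t size, off_t offset) {
    if (c->sizeBuffer == 0 || offset < c->startOffset ||
        offset > c->startOffset + (off_t)c->sizeBuffer)
      return -1;                       /* miss */
    size_t pos   = offset - c->startOffset;
    size_t avail = c->sizeBuffer - pos;
    size_t n     = size < avail ? size : avail;
    memcpy(dst, c->buf + pos, n);      /* hit: serve from the buffer */
    return (ssize_t)n;
  }

One consequence visible later in the diff: because the window is per file handle, rdbuffer_size bytes are malloc'ed on every dfs_open.]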
@@ -590,15 +651,15 @@
   assert('/' == *path);
 
   int i ;
-  for (i = 0; protectedpaths[i]; i++) {
-    if (strcmp(path, protectedpaths[i]) == 0) {
+  for (i = 0; dfs->protectedpaths[i]; i++) {
+    if (strcmp(path, dfs->protectedpaths[i]) == 0) {
       syslog(LOG_ERR,"ERROR: hdfs trying to create the directory: %s", path);
       return -EACCES;
     }
   }
 
 
-  if (dfs->nowrites) {
+  if (dfs->read_only) {
     syslog(LOG_ERR,"ERROR: hdfs is configured as read-only, cannot create the directory %s\n",path);
     return -EACCES;
   }
@@ -632,18 +693,18 @@
   assert('/' == *to);
 
   int i ;
-  for (i = 0; protectedpaths[i] != NULL; i++) {
-    if (strcmp(from, protectedpaths[i]) == 0) {
+  for (i = 0; dfs->protectedpaths[i] != NULL; i++) {
+    if (strcmp(from, dfs->protectedpaths[i]) == 0) {
       syslog(LOG_ERR,"ERROR: hdfs trying to rename directories %s to %s",from,to);
       return -EACCES;
     }
-    if (strcmp(to, protectedpaths[i]) == 0) {
+    if (strcmp(to,dfs->protectedpaths[i]) == 0) {
       syslog(LOG_ERR,"ERROR: hdfs trying to rename directories %s to %s",from,to);
       return -EACCES;
     }
   }
 
-  if (dfs->nowrites) {
+  if (dfs->read_only) {
     syslog(LOG_ERR,"ERROR: hdfs is configured as read-only, cannot rename the directory %s\n",from);
     return -EACCES;
   }
@@ -657,9 +718,12 @@
 }
 
 static int is_protected(const char *path) {
+  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  assert(dfs != NULL);
+
   int i ;
-  for (i = 0; protectedpaths[i]; i++) {
-    if (strcmp(path, protectedpaths[i]) == 0) {
+  for (i = 0; dfs->protectedpaths[i]; i++) {
+    if (strcmp(path, dfs->protectedpaths[i]) == 0) {
       return 1;
     }
   }
@@ -698,12 +762,12 @@
     return -ENOTEMPTY;
   }
 
-  if (!dfs->no_trash && strncmp(path, TrashPrefixDir, strlen(TrashPrefixDir)) != 0) {
+  if (dfs->usetrash && strncmp(path, TrashPrefixDir, strlen(TrashPrefixDir)) != 0) {
     return move_to_trash(path);
   }
 
 
-  if (dfs->nowrites) {
+  if (dfs->read_only) {
     syslog(LOG_ERR,"ERROR: hdfs is configured as read-only, cannot delete the directory %s\n",path);
     return -EACCES;
   }
@@ -740,11 +804,11 @@
   }
 
   // move the file to the trash if this is enabled and its not actually in the trash.
-  if (!dfs->no_trash && strncmp(path, TrashPrefixDir, strlen(TrashPrefixDir)) != 0) {
+  if (dfs->usetrash && strncmp(path, TrashPrefixDir, strlen(TrashPrefixDir)) != 0) {
     return move_to_trash(path);
   }
 
-  if (dfs->nowrites) {
+  if (dfs->read_only) {
     syslog(LOG_ERR,"ERROR: hdfs is configured as read-only, cannot create the directory %s\n",path);
     return -EACCES;
   }
@@ -809,7 +873,8 @@
   dfs_fh *fh = (dfs_fh*)malloc(sizeof (dfs_fh));
   fi->fh = (uint64_t)fh;
   fh->hdfsFH = (hdfsFile)hdfsOpenFile(dfs->fs, path, flags,  0, 3, 0);
-  fh->buf = (char*)malloc(rd_cache_buf_size*sizeof (char));
+  assert(dfs->rdbuffer_size > 0);
+  fh->buf = (char*)malloc(dfs->rdbuffer_size*sizeof (char));
 
   fh->startOffset = 0;
   fh->sizeBuffer = 0;
@@ -936,7 +1001,7 @@
 
   if (NULL == file_handle) {
     return 0;
- }
+  }
 
   if (hdfsCloseFile(dfs->fs, file_handle) != 0) {
     syslog(LOG_ERR, "ERROR: dfs problem - could not close file_handle(%ld) for %s %s:%d\n",(long)file_handle,path, __FILE__, __LINE__);
@@ -991,7 +1056,7 @@
 #endif
 
     if (hdfsFlush(dfs->fs, file_handle) != 0) {
-      syslog(LOG_ERR, "ERROR: dfs problem - could not flush file_handle(%x) for %s %s:%d\n",(long)file_handle,path, __FILE__, __LINE__);
+      syslog(LOG_ERR, "ERROR: dfs problem - could not flush file_handle(%lx) for %s %s:%d\n",(long)file_handle,path, __FILE__, __LINE__);
       return -EIO;
     }
   }
@@ -1014,45 +1079,47 @@
 
 
 // Hacked up function to basically do:
-//  protectedpaths = split(PROTECTED_PATHS,',');
+//  protectedpaths = split(options.protected,':');
 
-static void init_protectedpaths() {
-  // PROTECTED_PATHS should be a #defined value from autoconf
-  // set it with configure --with-protectedpaths=/,/user,/user/foo
-  // note , seped with no other spaces and no quotes around it
-  char *tmp = PROTECTED_PATHS;
+static void init_protectedpaths(dfs_context *dfs) {
+
+  char *tmp = options.protected;
 
-  assert(tmp);
 
   // handle degenerate case up front.
-  if (0 == *tmp) {
-    protectedpaths = (char**)malloc(sizeof(char*));
-    protectedpaths[0] = NULL;
+  if (tmp == NULL || 0 == *tmp) {
+    dfs->protectedpaths = (char**)malloc(sizeof(char*));
+    dfs->protectedpaths[0] = NULL;
     return;
   }
+  assert(tmp);
+
+  if(options.debug) {
+    print_options();
+  }
+
 
   int i = 0;
-  while (tmp && (NULL != (tmp = index(tmp,',')))) {
+  while (tmp && (NULL != (tmp = index(tmp,':')))) {
     tmp++; // pass the ,
     i++;
   }
   i++; // for the last entry
   i++; // for the final NULL
-  protectedpaths = (char**)malloc(sizeof(char*)*i);
-  printf("i=%d\n",i);
-  tmp = PROTECTED_PATHS;
+  dfs->protectedpaths = (char**)malloc(sizeof(char*)*i);
+  tmp = options.protected;
   int j  = 0;
   while (NULL != tmp && j < i) {
     int length;
-    char *eos = index(tmp,',');
+    char *eos = index(tmp,':');
     if (NULL != eos) {
       length = eos - tmp; // length of this value
     } else {
       length = strlen(tmp);
     }
-    protectedpaths[j] = (char*)malloc(sizeof(char)*length+1);
-    strncpy(protectedpaths[j], tmp, length);
-    protectedpaths[j][length] = '\0';
+    dfs->protectedpaths[j] = (char*)malloc(sizeof(char)*length+1);
+    strncpy(dfs->protectedpaths[j], tmp, length);
+    dfs->protectedpaths[j][length] = '\0';
     if (eos) {
       tmp = eos + 1;
     } else {
@@ -1060,15 +1127,16 @@
     }
     j++;
   }
-  protectedpaths[j] = NULL;
+  dfs->protectedpaths[j] = NULL;
+
   /*
-  j  = 0;
-  while (protectedpaths[j]) {
-    printf("protectedpaths[%d]=%s\n",j,protectedpaths[j]);
+    j  = 0;
+    while (dfs->protectedpaths[j]) {
+    printf("dfs->protectedpaths[%d]=%s\n",j,dfs->protectedpaths[j]);
     fflush(stdout);
     j++;
-  }
-  exit(1);
+    }
+    exit(1);
   */
 }
 
@@ -1093,9 +1161,10 @@
   dfs->nn_hostname           = options.server;
   dfs->nn_port               = options.port;
   dfs->fs                    = NULL;
-  dfs->nowrites              = options.nowrites;
-  dfs->no_trash              = options.no_trash;
-
+  dfs->read_only             = options.read_only;
+  dfs->usetrash              = options.usetrash;
+  dfs->protectedpaths        = NULL;
+  dfs->rdbuffer_size         = options.rdbuffer_size;
   bzero(dfs->dfs_uri,0);
   sprintf(dfs->dfs_uri,"dfs://%s:%d/",dfs->nn_hostname,dfs->nn_port);
   dfs->dfs_uri_len = strlen(dfs->dfs_uri);
@@ -1103,7 +1172,9 @@
   // use ERR level to ensure it makes it into the log.
   syslog(LOG_ERR, "mounting %s", dfs->dfs_uri);
 
-  init_protectedpaths();
+  init_protectedpaths(dfs);
+  assert(dfs->protectedpaths != NULL);
+
 
   return (void*)dfs;
 }
@@ -1127,7 +1198,7 @@
   .write	= dfs_write,
   .flush        = dfs_flush,
   //.xsetattr      = dfs_setattr,
-    .mknod        = dfs_mknod,
+  .mknod        = dfs_mknod,
   .chmod	= dfs_chmod,
   .chown	= dfs_chown,
   //  .truncate	= dfs_truncate,
@@ -1145,11 +1216,31 @@
   /* clear structure that holds our options */
   memset(&options, 0, sizeof(struct options));
 
+  // some defaults
+  options.rdbuffer_size = 10*1024*1024; 
+  options.attribute_timeout = 60; 
+  options.entry_timeout = 60;
+
   if (fuse_opt_parse(&args, &options, dfs_opts, dfs_options) == -1)
     /** error parsing options */
     return -1;
 
 
+  // Some fuse options we set
+  if(! options.private) {
+    fuse_opt_add_arg(&args, "-oallow_other");
+  }
+  {
+    char buf[1024];
+
+    snprintf(buf, sizeof buf, "-oattr_timeout=%d",options.attribute_timeout);
+    fuse_opt_add_arg(&args, buf);
+
+    snprintf(buf, sizeof buf, "-oentry_timeout=%d",options.entry_timeout);
+    fuse_opt_add_arg(&args, buf);
+
+  }
+
   if (options.server == NULL || options.port == 0) {
     print_usage(argv[0]);
     exit(0);

Modified: hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh?rev=681617&r1=681616&r2=681617&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh (original)
+++ hadoop/core/trunk/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh Fri Aug  1 00:33:53 2008
@@ -35,6 +35,5 @@
 if [ "$LD_LIBRARY_PATH" = "" ]; then
 export LD_LIBRARY_PATH=$JAVA_HOME/jre/lib/$OS_ARCH/server:/usr/local/share/hdfs/libhdfs/:/usr/local/lib
 fi
-echo $LD_LIBRARY_PATH
 
-./fuse_dfs $@  -o-o allow_other
+./fuse_dfs $@

Modified: hadoop/core/trunk/src/contrib/fuse-dfs/src/test/TestFuseDFS.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/fuse-dfs/src/test/TestFuseDFS.java?rev=681617&r1=681616&r2=681617&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/fuse-dfs/src/test/TestFuseDFS.java (original)
+++ hadoop/core/trunk/src/contrib/fuse-dfs/src/test/TestFuseDFS.java Fri Aug  1 00:33:53 2008
@@ -56,13 +56,8 @@
     String jvm = System.getProperty("java.home") + "/lib/" + arch + "/server";
     String lp = System.getProperty("LD_LIBRARY_PATH") + ":" + "/usr/local/lib:" + libhdfs + ":" + jvm;
     System.err.println("LD_LIBRARY_PATH=" + lp);
-    String cmd[] = new String[4];
-    int index = 0;
-
-    cmd[index++] = fuse_cmd;
-    cmd[index++] = "dfs://" + dfs.getHost() + ":" + String.valueOf(dfs.getPort());
-    cmd[index++] = mountpoint;
-    cmd[index++] = "-d";
+    String cmd[] =  {  fuse_cmd, "dfs://" + dfs.getHost() + ":" + String.valueOf(dfs.getPort()), 
+		       mountpoint, "-odebug", "-oentry_timeout=1",  "-oattribute_timeout=1", "-ousetrash", "rw" };
     final String [] envp = {
       "CLASSPATH="+  cp,
       "LD_LIBRARY_PATH=" + lp,
@@ -151,7 +146,6 @@
       p = r.exec(lsCmd);
       assertTrue(p.waitFor() == 0);
     } catch(Exception e) {
-      System.err.println("e=" + e);
       e.printStackTrace();
       throw e;
     }
@@ -226,6 +220,8 @@
 
       Runtime r = Runtime.getRuntime();
       Process p = r.exec("mkdir -p " + mpoint + "/test/mkdirs");
+      assertTrue(p.waitFor() == 0);
+
       Path myPath = new Path("/test/mkdirs");
       assertTrue(fileSys.exists(myPath));
 
@@ -275,25 +271,19 @@
       Path myPath = new Path("/test/hello");
       FSDataOutputStream s = fileSys.create(myPath);
       String hello = "hello world!";
-      s.write(hello.getBytes());
+      s.writeUTF(hello);
+      s.writeInt(1033);
       s.close();
 
       // check it exists
       assertTrue(fileSys.exists(myPath));
 
       // cat the file
-      p = r.exec("cat " + mpoint + "/test/hello");
-      assertTrue(p != null);
-      assertTrue(p.waitFor() == 0);
-
-      // check the data is the same
-      {
-        InputStream i = p.getInputStream();
-        byte b[] = new byte[1024];
-        int length = i.read(b);
-        String s2 = new String(b,0,length);
-        assertTrue(s2.equals(hello));
-      }
+      DataInputStream is = new DataInputStream(new FileInputStream(mpoint + "/test/hello"));
+      String s2 = DataInputStream.readUTF(is);
+      int s3 = is.readInt();
+      assertTrue(s2.equals(hello));
+      assertTrue(s3 == 1033);
 
     } catch(Exception e) {
       e.printStackTrace();