Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2007/01/10 22:19:53 UTC

svn commit: r494989 - in /lucene/hadoop/trunk: CHANGES.txt src/java/org/apache/hadoop/fs/s3/S3FileSystem.java src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java

Author: cutting
Date: Wed Jan 10 13:19:52 2007
New Revision: 494989

URL: http://svn.apache.org/viewvc?view=rev&rev=494989
Log:
HADOOP-880.  Fix S3 FileSystem to remove directories.  Contributed by Tom White.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/s3/S3FileSystem.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=494989&r1=494988&r2=494989
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Jan 10 13:19:52 2007
@@ -36,6 +36,9 @@
     than the default, zlib-based compression, but it is only available
     when the native library is built.  (Arun C Murthy via cutting)
 
+12. HADOOP-880.  Fix S3 FileSystem to remove directories.
+    (Tom White via cutting)
+
 
 Release 0.10.0 - 2007-01-05
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/s3/S3FileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/s3/S3FileSystem.java?view=diff&rev=494989&r1=494988&r2=494989
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/s3/S3FileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/s3/S3FileSystem.java Wed Jan 10 13:19:52 2007
@@ -188,17 +188,27 @@
 
   @Override
   public boolean deleteRaw(Path path) throws IOException {
-    // TODO: Check if path is directory with children
     Path absolutePath = makeAbsolute(path);
     INode inode = store.getINode(absolutePath);
     if (inode == null) {
       return false;
     }
-    store.deleteINode(absolutePath);
     if (inode.isFile()) {
+      store.deleteINode(absolutePath);
       for (Block block : inode.getBlocks()) {
         store.deleteBlock(block);
       }
+    } else {
+      Path[] contents = listPathsRaw(absolutePath);
+      if (contents == null) {
+        return false;
+      }
+      for (Path p : contents) {
+        if (! deleteRaw(p)) {
+          return false;
+        }
+      }
+      store.deleteINode(absolutePath);
     }
     return true;
   }
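
For readers skimming the hunk above, the patched deleteRaw boils down to: delete a file's inode and its blocks, or recursively delete a directory's children before removing the directory's own inode. The standalone Java sketch below replays that algorithm against a toy in-memory store; the store map, the listChildren helper, and the class name are illustrative stand-ins, not the real INode/FileSystemStore types from S3FileSystem.java.

    import java.io.IOException;
    import java.util.*;

    // Minimal sketch of the recursive-delete algorithm this patch introduces,
    // over a toy in-memory "store". The real code also deletes each block of
    // a file; here a file is just a map entry.
    public class RecursiveDeleteSketch {
      // path -> true if directory, false if file (hypothetical flat store)
      private final Map<String, Boolean> store = new TreeMap<>();

      boolean deleteRaw(String path) throws IOException {
        Boolean isDir = store.get(path);
        if (isDir == null) {
          return false;                // nothing at this path
        }
        if (!isDir) {
          store.remove(path);          // file: remove its entry (and, in the
                                       // real code, each of its blocks)
        } else {
          // directory: delete the children first, then the directory itself
          for (String child : listChildren(path)) {
            if (!deleteRaw(child)) {
              return false;
            }
          }
          store.remove(path);
        }
        return true;
      }

      // direct children of dir: keys one path component below it
      private List<String> listChildren(String dir) {
        List<String> children = new ArrayList<>();
        for (String p : store.keySet()) {
          if (p.startsWith(dir + "/") && p.indexOf('/', dir.length() + 1) < 0) {
            children.add(p);
          }
        }
        return children;
      }

      public static void main(String[] args) throws IOException {
        RecursiveDeleteSketch fs = new RecursiveDeleteSketch();
        fs.store.put("/test", true);
        fs.store.put("/test/hadoop", true);
        fs.store.put("/test/file1", false);
        fs.store.put("/test/hadoop/file2", false);
        System.out.println(fs.deleteRaw("/test"));   // true
        System.out.println(fs.store.isEmpty());      // true: whole tree gone
      }
    }

Note that, like the patch, the sketch removes a directory's own inode only after every child delete succeeds, so a failed child delete leaves the tree reachable for a retry.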

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java?view=diff&rev=494989&r1=494988&r2=494989
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java Wed Jan 10 13:19:52 2007
@@ -166,9 +166,7 @@
     
     s3FileSystem.mkdirs(path.getParent());
 
-    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
-    out.write(data, 0, BLOCK_SIZE);
-    out.close();
+    createEmptyFile(path);
     
     assertTrue("Exists", s3FileSystem.exists(path));
     assertEquals("Length", BLOCK_SIZE, s3FileSystem.getLength(path));
@@ -180,7 +178,7 @@
       // Expected
     }
     
-    out = s3FileSystem.createRaw(path, true, (short) 1, BLOCK_SIZE);
+    FSOutputStream out = s3FileSystem.createRaw(path, true, (short) 1, BLOCK_SIZE);
     out.write(data, 0, BLOCK_SIZE / 2);
     out.close();
     
@@ -191,21 +189,46 @@
 
   public void testWriteInNonExistentDirectory() throws IOException {
     Path path = new Path("/test/hadoop/file");    
-    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
-    out.write(data, 0, BLOCK_SIZE);
-    out.close();
+    createEmptyFile(path);
     
     assertTrue("Exists", s3FileSystem.exists(path));
     assertEquals("Length", BLOCK_SIZE, s3FileSystem.getLength(path));
     assertTrue("Parent exists", s3FileSystem.exists(path.getParent()));
   }
-  
+
   public void testDeleteNonExistentFile() throws IOException {
     Path path = new Path("/test/hadoop/file");    
     assertFalse("Doesn't exist", s3FileSystem.exists(path));
     assertFalse("No deletion", s3FileSystem.delete(path));
   }
 
+  public void testDeleteDirectory() throws IOException {
+    Path subdir = new Path("/test/hadoop");
+    Path dir = subdir.getParent();
+    Path root = dir.getParent();
+    s3FileSystem.mkdirs(subdir);
+    Path file1 = new Path(dir, "file1");
+    Path file2 = new Path(subdir, "file2");
+    
+    createEmptyFile(file1);
+    createEmptyFile(file2);
+    
+    assertTrue("root exists", s3FileSystem.exists(root));
+    assertTrue("dir exists", s3FileSystem.exists(dir));
+    assertTrue("file1 exists", s3FileSystem.exists(file1));
+    assertTrue("subdir exists", s3FileSystem.exists(subdir));
+    assertTrue("file2 exists", s3FileSystem.exists(file2));
+    
+    assertTrue("Delete", s3FileSystem.delete(dir));
+
+    assertTrue("root exists", s3FileSystem.exists(root));
+    assertFalse("dir exists", s3FileSystem.exists(dir));
+    assertFalse("file1 exists", s3FileSystem.exists(file1));
+    assertFalse("subdir exists", s3FileSystem.exists(subdir));
+    assertFalse("file2 exists", s3FileSystem.exists(file2));
+    
+  }
+
   public void testRename() throws Exception {
     int len = BLOCK_SIZE;
     
@@ -213,9 +236,7 @@
     
     s3FileSystem.mkdirs(path.getParent());
 
-    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
-    out.write(data, 0, len);
-    out.close();
+    createEmptyFile(path);
 
     assertTrue("Exists", s3FileSystem.exists(path));
 
@@ -235,5 +256,10 @@
     }
   }
 
+  private void createEmptyFile(Path path) throws IOException {
+    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
+    out.write(data, 0, BLOCK_SIZE);
+    out.close();
+  }
 
 }
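
Seen from the public API, the change means a single delete() call now removes a whole subtree, which is what the new testDeleteDirectory asserts. A hypothetical driver would look roughly like the sketch below; it assumes a Configuration whose default filesystem already points at an S3 bucket (the configuration keys for that are not part of this commit).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical driver showing the behavior this commit enables: deleting
    // a non-empty directory through the public FileSystem API. Assumes the
    // default filesystem is an S3 bucket (e.g. fs.default.name=s3://bucket),
    // which is configuration outside the scope of this commit.
    public class DeleteDirExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/test/hadoop");
        fs.mkdirs(dir);
        // Before HADOOP-880, delete() removed only the directory's own inode
        // and left its contents orphaned; after this patch the whole subtree
        // is removed.
        System.out.println(fs.delete(new Path("/test"))); // true
        System.out.println(fs.exists(dir));               // false
      }
    }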