Posted to mapreduce-commits@hadoop.apache.org by dh...@apache.org on 2010/03/05 09:53:41 UTC

svn commit: r919335 - in /hadoop/mapreduce/trunk: CHANGES.txt src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java

Author: dhruba
Date: Fri Mar  5 08:53:41 2010
New Revision: 919335

URL: http://svn.apache.org/viewvc?rev=919335&view=rev
Log:
MAPREDUCE-1518. RaidNode does not run the deletion check on the
directory that stores the parity files.  (Rodrigo Schmidt via dhruba)
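
The fix, shown in the RaidNode.java diff below, makes recursePurge treat
directories explicitly: it recurses into a directory's children first,
re-lists the directory, and deletes it (non-recursively) once it is empty,
so parity directories no longer outlive the source trees they mirror. A
minimal, self-contained sketch of that bottom-up pattern, using the same
FileSystem calls the patch uses; the class and method names here are
illustrative and not part of RaidNode:

  // Illustrative sketch only (not RaidNode itself): bottom-up purge of
  // parity directories that end up empty after their children are removed.
  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class PurgeEmptyDirsSketch {

    // Depth-first: purge the children, then delete the directory if empty.
    static void purgeEmptyDirs(FileSystem fs, Path root) throws IOException {
      FileStatus status = fs.getFileStatus(root);
      if (!status.isDir()) {
        return;                      // files are handled by a separate check
      }
      FileStatus[] children = fs.listStatus(root);
      if (children != null) {
        for (FileStatus child : children) {
          purgeEmptyDirs(fs, child.getPath());
        }
      }
      // Re-list after the recursion; the directory may now be empty.
      children = fs.listStatus(root);
      if (children == null || children.length == 0) {
        fs.delete(root, false);      // non-recursive delete of an empty dir
      }
    }

    public static void main(String[] args) throws IOException {
      FileSystem fs = FileSystem.get(new Configuration());
      purgeEmptyDirs(fs, new Path(args[0]));
    }
  }

The non-recursive delete mirrors the patch: only directories that are
already empty get removed; anything still holding parity data is left alone.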


Modified:
    hadoop/mapreduce/trunk/CHANGES.txt
    hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
    hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java

Modified: hadoop/mapreduce/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/CHANGES.txt?rev=919335&r1=919334&r2=919335&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/CHANGES.txt (original)
+++ hadoop/mapreduce/trunk/CHANGES.txt Fri Mar  5 08:53:41 2010
@@ -422,6 +422,9 @@
     cleaning up, essentially to avoid following links.
     (Ravi Gummadi via yhemanth)
 
+    MAPREDUCE-1518. RaidNode does not run the deletion check on the
+    directory that stores the parity files.  (Rodrigo Schmidt via dhruba)
+
 Release 0.21.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java?rev=919335&r1=919334&r2=919335&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java Fri Mar  5 08:53:41 2010
@@ -1226,6 +1226,28 @@
                   destPrefix + " as its prefix.");
         return;
       }
+      
+      if (dest.isDir()) {
+        FileStatus[] files = null;
+        files = destFs.listStatus(destPath);
+        if (files != null) {
+          for (FileStatus one:files) {
+            recursePurge(srcFs, destFs, destPrefix, one);
+          }
+        }
+        files = destFs.listStatus(destPath);
+        if (files == null || files.length == 0){
+          boolean done = destFs.delete(destPath,false);
+          if (done) {
+            LOG.info("Purged directory " + destPath );
+          }
+          else {
+            LOG.info("Unable to purge directory " + destPath);
+          }
+        }
+        return; // the code below does the file checking
+      }
+      
       String src = destStr.replaceFirst(destPrefix, "");
       
       // if the source path does not exist or the parity file has been HARed, 
@@ -1236,20 +1258,9 @@
           !destPath.equals(getParityFile(dstPath,srcPath).getPath())) {
         boolean done = destFs.delete(destPath, false);
         if (done) {
-          LOG.info("Purged path " + destPath );
+          LOG.info("Purged file " + destPath );
         } else {
-          LOG.info("Unable to purge path " + destPath );
-        }
-        return;
-      }
-      if (!dest.isDir()) {
-        return;
-      }
-      FileStatus[] files = null;
-      files = destFs.listStatus(destPath);
-      if (files != null) {
-        for (FileStatus one:files) {
-          recursePurge(srcFs, destFs, destPrefix, one);
+          LOG.info("Unable to purge file " + destPath );
         }
       }
     } 
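
On the test side (diffed next), the hand-rolled loops that counted leftover
entries are replaced by a single wait for the whole parity destination path
to disappear, which now covers both the parity files and their enclosing
directories. A hypothetical helper sketching that wait pattern, with a
timeout added here (not in the test) so a regression would fail the test
instead of hanging it:

  // Hypothetical helper, not part of TestRaidPurge: poll until a path is
  // purged, bounded by a timeout so a regression fails instead of hanging.
  import java.io.IOException;

  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class WaitForPurgeSketch {
    static boolean waitForDeletion(FileSystem fs, Path path, long timeoutMs)
        throws IOException, InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (fs.exists(path)) {
        if (System.currentTimeMillis() > deadline) {
          return false;              // still present; caller can fail the test
        }
        Thread.sleep(1000);          // same 1-second cadence as the test
      }
      return true;
    }
  }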

Modified: hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java?rev=919335&r1=919334&r2=919335&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java Fri Mar  5 08:53:41 2010
@@ -171,8 +171,8 @@
     try {
       for (long blockSize : blockSizes) {
         for (long stripeLength : stripeLengths) {
-             doTestPurge(iter, targetReplication, metaReplication,
-                         stripeLength, blockSize, numBlock);
+          doTestPurge(iter, targetReplication, metaReplication,
+              stripeLength, blockSize, numBlock);
            iter++;
         }
       }
@@ -197,12 +197,10 @@
     Path file1 = new Path(dir + "/file" + iter);
     RaidNode cnode = null;
     try {
-      Path userDir = new Path("/destraid/user/dhruba");
-      Path recover1 = new Path("/destraid/" + file1 + ".recovered");
       Path destPath = new Path("/destraid/user/dhruba/raidtest");
       fileSys.delete(dir, true);
       fileSys.delete(destPath, true);
-      long crc1 = TestRaidNode.createOldFile(fileSys, file1, 1, numBlock, blockSize);
+      TestRaidNode.createOldFile(fileSys, file1, 1, numBlock, blockSize);
       LOG.info("doTestPurge created test files for iteration " + iter);
 
       // create an instance of the RaidNode
@@ -254,53 +252,12 @@
                  fileSys.delete(file1, true));
       LOG.info("deleted file " + file1);
 
-      // wait till parity file is automatically deleted
-      while (true) {
-        listPaths = fileSys.listStatus(destPath);
-        int count = 0;
-        if (listPaths != null && listPaths.length == 1) {
-          for (FileStatus s : listPaths) {
-            LOG.info("doTestPurge found path " + s.getPath());
-            if (!s.getPath().toString().endsWith(".tmp")) {
-              count++;
-            }
-          }
-        }
-	if (count == 0) {
-          break;
-        }
-        LOG.info("doTestPurge waiting for parity files to be removed. Found " + 
-                 (listPaths == null ? "none" : listPaths.length));
+      // wait till parity file and directory are automatically deleted
+      while (fileSys.exists(destPath)) {
+        LOG.info("doTestPurge waiting for parity files to be removed.");
         Thread.sleep(1000);                  // keep waiting
       }
 
-      // verify that if we delete the directory itself, then the correspoding
-      // directory in the parity space is deleted too.
-      assertTrue("The directory " + userDir + " should have one entry", 
-                 fileSys.listStatus(userDir).length == 1);
-      assertTrue("Unable to delete original directory " + dir,
-                 fileSys.delete(dir, true));
-      LOG.info("deleted dir " + dir);
-
-      // wait till parity directory is automatically deleted
-      while (true) {
-        listPaths = fileSys.listStatus(userDir);
-        int count = 0;
-        if (listPaths != null) {
-          for (FileStatus s : listPaths) {
-            LOG.info("doTestPurge found path " + s.getPath());
-            count++;
-          }
-        }
-	if (count == 0) {
-          break;
-        }
-        LOG.info("doTestPurge waiting for parity dir to be removed. Found " + 
-                 (listPaths == null ? "none" : listPaths.length));
-        Thread.sleep(1000);                  // keep waiting
-      }
-     
-      
     } catch (Exception e) {
       LOG.info("doTestPurge Exception " + e +
                                           StringUtils.stringifyException(e));