Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2008/06/11 07:01:18 UTC

svn commit: r666524 - in /hadoop/core/trunk: ./ src/hdfs/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/

Author: dhruba
Date: Tue Jun 10 22:01:18 2008
New Revision: 666524

URL: http://svn.apache.org/viewvc?rev=666524&view=rev
Log:
HADOOP-3418. When a directory is deleted, any leases that point to files
in the subdirectory are removed. (Tsz Wo (Nicholas), SZE via dhruba)

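For context, the fix works by matching lease paths against the deleted
directory as a path prefix: a lease path is released only if it equals the
prefix or continues with a path separator, so deleting /foo releases the
lease on /foo/file1 but leaves /foobar alone. Below is a minimal,
hypothetical sketch of that matching rule in Java (a standalone
approximation of the new LeaseManager.findLeaseWithPrefixPath in the patch
below, not the committed code itself):

    // Returns true if 'path' is 'prefix' itself or lies strictly under it.
    // Mirrors the check "p.length() == srclen ||
    // p.charAt(srclen) == Path.SEPARATOR_CHAR" from the patch.
    static boolean coveredByDeletedDir(String prefix, String path) {
      if (!path.startsWith(prefix)) {
        return false;                                // e.g. "/bar" vs "/foo"
      }
      return path.length() == prefix.length()        // "/foo" itself
          || path.charAt(prefix.length()) == '/';    // "/foo/file1"
    }

    // coveredByDeletedDir("/foo", "/foo/file1") -> true
    // coveredByDeletedDir("/foo", "/foobar")    -> false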

Added:
    hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreationDelete.java
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDirectory.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseManager.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNode.java
    hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=666524&r1=666523&r2=666524&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Tue Jun 10 22:01:18 2008
@@ -573,6 +573,9 @@
     StackOverflowErrors. To avoid O(n*n) cases, when partitioning depth exceeds
     a multiple of log(n), change to HeapSort. (cdouglas)
 
+    HADOOP-3418. When a directory is deleted, any leases that point to files
+    in the subdirectory are removed. (Tsz Wo (Nicholas), SZE via dhruba)
+
 Release 0.17.0 - 2008-05-18
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDirectory.java?rev=666524&r1=666523&r2=666524&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDirectory.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSDirectory.java Tue Jun 10 22:01:18 2008
@@ -40,7 +40,7 @@
  *************************************************/
 class FSDirectory implements FSConstants, Closeable {
 
-  FSNamesystem namesystem = null;
+  final FSNamesystem namesystem;
   final INodeDirectoryWithQuota rootDir;
   FSImage fsImage;  
   boolean ready = false;
@@ -535,12 +535,13 @@
   /**
    * Remove the file from management, return blocks
    */
-  public INode delete(String src, Collection<Block> deletedBlocks) {
-    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "
-                                  +src);
+  public INode delete(String src) {
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "+src);
+    }
     waitForReady();
     long now = FSNamesystem.now();
-    INode deletedNode = unprotectedDelete(src, now, deletedBlocks);
+    INode deletedNode = unprotectedDelete(src, now);
     if (deletedNode != null) {
       fsImage.getEditLog().logDelete(src, now);
     }
@@ -571,8 +572,7 @@
    * @param deletedBlocks the place holder for the blocks to be removed
    * @return if the deletion succeeds
    */ 
-  INode unprotectedDelete(String src, long modificationTime, 
-                          Collection<Block> deletedBlocks) {
+  INode unprotectedDelete(String src, long modificationTime) {
     src = normalizePath(src);
     String[] names = INode.getPathNames(src);
     byte[][] components = INode.getPathComponents(names);
@@ -601,16 +601,11 @@
           ArrayList<Block> v = new ArrayList<Block>();
           int filesRemoved = targetNode.collectSubtreeBlocksAndClear(v);
           incrDeletedFileCount(filesRemoved);
-          for (Block b : v) {
-            namesystem.blocksMap.removeINode(b);
-            // remove the block from corruptReplicasMap
-            namesystem.corruptReplicas.removeFromCorruptReplicasMap(b);
-            if (deletedBlocks != null) {
-              deletedBlocks.add(b);
-            }
-          }
-          NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+          namesystem.removePathAndBlocks(src, v);
+          if (NameNode.stateChangeLog.isDebugEnabled()) {
+            NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
               +src+" is removed");
+          }
           return targetNode;
         } catch (IOException e) {
           NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java?rev=666524&r1=666523&r2=666524&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java Tue Jun 10 22:01:18 2008
@@ -503,7 +503,7 @@
                                      " clientMachine " + clientMachine);
             }
 
-            old = fsDir.unprotectedDelete(path, mtime, null);
+            old = fsDir.unprotectedDelete(path, mtime);
 
             // add to the file tree
             INodeFile node = (INodeFile)fsDir.unprotectedAddFile(
@@ -570,7 +570,7 @@
             }
             path = FSImage.readString(in);
             timestamp = readLong(in);
-            old = fsDir.unprotectedDelete(path, timestamp, null);
+            old = fsDir.unprotectedDelete(path, timestamp);
             if (old != null && old.isUnderConstruction()) {
               INodeFileUnderConstruction cons = (INodeFileUnderConstruction)old;
               fsNamesys.leaseManager.removeLease(cons.clientName, path);

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java?rev=666524&r1=666523&r2=666524&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/FSNamesystem.java Tue Jun 10 22:01:18 2008
@@ -970,7 +970,7 @@
       }
       if (!dir.isValidToCreate(src)) {
         if (overwrite) {
-          delete(src);
+          delete(src, true);
         } else {
           throw new IOException("failed to create file " + src 
                                 +" on client " + clientMachine
@@ -1425,15 +1425,6 @@
   }
 
   /**
-   * Remove the indicated filename from the namespace.  This may
-   * invalidate some blocks that make up the file.
-   */
-  @Deprecated
-  public boolean delete(String src) throws IOException {
-    return delete(src, true);
-  }
-
-  /**
    * Remove the indicated filename from namespace. If the filename 
    * is a directory (non empty) and recursive is set to false then throw exception.
    */
@@ -1466,26 +1457,25 @@
    */
   synchronized boolean deleteInternal(String src, 
       boolean enforceSafeMode, boolean enforcePermission) throws IOException {
-    NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
+    }
     if (enforceSafeMode && isInSafeMode())
       throw new SafeModeException("Cannot delete " + src, safeMode);
     if (enforcePermission && isPermissionEnabled) {
       checkPermission(src, false, null, FsAction.WRITE, null, FsAction.ALL);
     }
 
-    ArrayList<Block> deletedBlocks = new ArrayList<Block>();
-    INode old = dir.delete(src, deletedBlocks);
-    if (old == null) {
-      return false;
-    }
-    for (Block b : deletedBlocks) {
+    return dir.delete(src) != null;
+  }
+
+  void removePathAndBlocks(String src, List<Block> blocks) throws IOException {
+    leaseManager.removeLeaseWithPrefixPath(src);
+    for(Block b : blocks) {
+      blocksMap.removeINode(b);
+      corruptReplicas.removeFromCorruptReplicasMap(b);
       addToInvalidates(b);
     }
-    if (old.isUnderConstruction()) {
-      INodeFileUnderConstruction cons = (INodeFileUnderConstruction) old;
-      leaseManager.removeLease(cons.clientName, src);
-    }
-    return true;
   }
 
   /** Get the file info for a specific file.

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseManager.java?rev=666524&r1=666523&r2=666524&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseManager.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/LeaseManager.java Tue Jun 10 22:01:18 2008
@@ -18,7 +18,6 @@
 package org.apache.hadoop.dfs;
 
 import java.io.IOException;
-import java.io.FileNotFoundException;
 import java.util.*;
 
 import org.apache.commons.logging.Log;
@@ -120,7 +119,7 @@
    */
   synchronized void removeLease(Lease lease, String src) throws IOException {
     sortedLeasesByPath.remove(src);
-    if (!lease.paths.remove(new StringBytesWritable(src))) {
+    if (!lease.removePath(src)) {
       LOG.error(src + " not found in lease.paths (=" + lease.paths + ")");
     }
 
@@ -137,7 +136,10 @@
    */
   synchronized void removeLease(StringBytesWritable holder, String src
       ) throws IOException {
-    removeLease(getLease(holder), src);
+    Lease lease = getLease(holder);
+    if (lease != null) {
+      removeLease(lease, src);
+    }
   }
 
   /**
@@ -218,6 +220,10 @@
     /** Does this lease contain any path? */
     boolean hasPath() {return !paths.isEmpty();}
 
+    boolean removePath(String src) throws IOException {
+      return paths.remove(new StringBytesWritable(src));
+    }
+
     /** {@inheritDoc} */
     public String toString() {
       return "[Lease.  Holder: " + holder
@@ -260,79 +266,64 @@
     Collection<StringBytesWritable> getPaths() {
       return paths;
     }
-  
-    // If a file with the specified prefix exists, then replace 
-    // it with the new prefix.
-    //
-    void replacePrefix(String src, String overwrite, 
-                       String replaceBy) throws IOException {
-      List<StringBytesWritable> toAdd = new ArrayList<StringBytesWritable>();
-      for (Iterator<StringBytesWritable> f = paths.iterator(); 
-           f.hasNext();){
-        String path = f.next().getString();
-        if (!path.startsWith(src)) {
-          continue;
-        }
-        // remove this filename from this lease.
-        f.remove();
-  
-        // remember new filename
-        String newPath = path.replaceFirst(overwrite, replaceBy);
-        toAdd.add(new StringBytesWritable(newPath));
-        LOG.debug("Modified Lease for file " + path +
-                 " to new path " + newPath);
-      }
-      // add modified filenames back into lease.
-      for (Iterator<StringBytesWritable> f = toAdd.iterator(); 
-           f.hasNext();) {
-        paths.add(f.next());
-      }
+    
+    void replacePath(String oldpath, String newpath) throws IOException {
+      paths.remove(new StringBytesWritable(oldpath));
+      paths.add(new StringBytesWritable(newpath));
     }
   }
 
   synchronized void changeLease(String src, String dst,
       String overwrite, String replaceBy) throws IOException {
     if (LOG.isDebugEnabled()) {
-      LOG.debug(getClass().getName() + ".changelease: " +
+      LOG.debug(getClass().getSimpleName() + ".changelease: " +
                " src=" + src + ", dest=" + dst + 
                ", overwrite=" + overwrite +
                ", replaceBy=" + replaceBy);
     }
 
-    Map<String, Lease> addTo = new TreeMap<String, Lease>();
-    SortedMap<String, Lease> myset = sortedLeasesByPath.tailMap(src);
-    int srclen = src.length();
-    for (Iterator<Map.Entry<String, Lease>> iter = myset.entrySet().iterator(); 
-         iter.hasNext();) {
-      Map.Entry<String, Lease> entry = iter.next();
-      String path = entry.getKey();
-      if (!path.startsWith(src)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("changelease comparing " + path +
-                   " with " + src + " and terminating.");
-        }
-        break;
-      }
-      if (path.length() > srclen && path.charAt(srclen) != Path.SEPARATOR_CHAR){
-        continue;
+    for(Map.Entry<String, Lease> entry : findLeaseWithPrefixPath(src, sortedLeasesByPath)) {
+      final String oldpath = entry.getKey();
+      final Lease lease = entry.getValue();
+      final String newpath = oldpath.replaceFirst(overwrite, replaceBy);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("changeLease: replacing " + oldpath + " with " + newpath);
       }
+      lease.replacePath(oldpath, newpath);
+      sortedLeasesByPath.remove(oldpath);
+      sortedLeasesByPath.put(newpath, lease);
+    }
+  }
 
-      Lease lease = entry.getValue();
-      // Fix up all the pathnames in this lease.
+  synchronized void removeLeaseWithPrefixPath(String prefix) throws IOException {
+    for(Map.Entry<String, Lease> entry : findLeaseWithPrefixPath(prefix, sortedLeasesByPath)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("changelease comparing " + path +
-                  " with " + src + " and replacing ");
+        LOG.debug(LeaseManager.class.getSimpleName()
+            + ".removeLeaseWithPrefixPath: entry=" + entry);
       }
-      lease.replacePrefix(src, overwrite, replaceBy);
+      removeLease(entry.getValue(), entry.getKey());    
+    }
+  }
+
+  static private List<Map.Entry<String, Lease>> findLeaseWithPrefixPath(
+      String prefix, SortedMap<String, Lease> path2lease) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(LeaseManager.class.getSimpleName() + ".findLease: prefix=" + prefix);
+    }
+
+    List<Map.Entry<String, Lease>> entries = new ArrayList<Map.Entry<String, Lease>>();
+    final int srclen = prefix.length();
 
-      // Remove this lease from sortedLeasesByPath because the 
-      // pathname has changed.
-      String newPath = path.replaceFirst(overwrite, replaceBy);
-      addTo.put(newPath, lease);
-      iter.remove();
+    for(Map.Entry<String, Lease> entry : path2lease.tailMap(prefix).entrySet()) {
+      final String p = entry.getKey();
+      if (!p.startsWith(prefix)) {
+        return entries;
+      }
+      if (p.length() == srclen || p.charAt(srclen) == Path.SEPARATOR_CHAR) {
+        entries.add(entry);
+      }
     }
-    // re-add entries back in sortedLeasesByPath
-    sortedLeasesByPath.putAll(addTo);
+    return entries;
   }
 
   void setLeasePeriod(long softLimit, long hardLimit) {
@@ -381,6 +372,15 @@
     }
   }
 
+  /** {@inheritDoc} */
+  public String toString() {
+    return getClass().getSimpleName() + "= {"
+        + "\n leases=" + leases
+        + "\n sortedLeases=" + sortedLeases
+        + "\n sortedLeasesByPath=" + sortedLeasesByPath
+        + "\n}";
+  }
+
   /*
    * The following codes provides useful static methods for lease recovery.
    */

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNode.java?rev=666524&r1=666523&r2=666524&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/dfs/NameNode.java Tue Jun 10 22:01:18 2008
@@ -409,12 +409,15 @@
    */
   @Deprecated
   public boolean delete(String src) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.delete: " + src);
-    return namesystem.delete(src);
+    return delete(src, true);
   }
 
+  /** {@inheritDoc} */
   public boolean delete(String src, boolean recursive) throws IOException {
-    stateChangeLog.debug("*DIR* Namenode.delete:  " + src);
+    if (stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*DIR* Namenode.delete: src=" + src
+          + ", recursive=" + recursive);
+    }
     return namesystem.delete(src, recursive);
   }
 

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java?rev=666524&r1=666523&r2=666524&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java Tue Jun 10 22:01:18 2008
@@ -75,7 +75,7 @@
   //
   // writes specified bytes to file.
   //
-  private void writeFile(FSDataOutputStream stm, int size) throws IOException {
+  static void writeFile(FSDataOutputStream stm, int size) throws IOException {
     byte[] buffer = new byte[fileSize];
     Random rand = new Random(seed);
     rand.nextBytes(buffer);

Added: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreationDelete.java?rev=666524&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreationDelete.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreationDelete.java Tue Jun 10 22:01:18 2008
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.log4j.Level;
+
+public class TestFileCreationDelete extends junit.framework.TestCase {
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  public void testFileCreationDeleteParent() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = null;
+    try {
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      final int nnport = cluster.getNameNodePort();
+
+      // create file1.
+      Path dir = new Path("/foo");
+      Path file1 = new Path(dir, "file1");
+      FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
+      System.out.println("testFileCreationDeleteParent: "
+          + "Created file " + file1);
+      TestFileCreation.writeFile(stm1, 1000);
+      stm1.sync();
+
+      // create file2.
+      Path file2 = new Path("/file2");
+      FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
+      System.out.println("testFileCreationDeleteParent: "
+          + "Created file " + file2);
+      TestFileCreation.writeFile(stm2, 1000);
+      stm2.sync();
+
+      // rm dir
+      fs.delete(dir, true);
+
+      // restart cluster with the same namenode port as before.
+      // This ensures that leases are persisted in fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+
+      // restart cluster yet again. This triggers the code to read in
+      // persistent leases from fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(5000);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+
+      assertTrue(!fs.exists(file1));
+      assertTrue(fs.exists(file2));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
\ No newline at end of file