Posted to hdfs-commits@hadoop.apache.org by sh...@apache.org on 2009/07/29 23:04:44 UTC

svn commit: r799079 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/

Author: shv
Date: Wed Jul 29 21:04:44 2009
New Revision: 799079

URL: http://svn.apache.org/viewvc?rev=799079&view=rev
Log:
HDFS-510. Rename DatanodeBlockInfo to be ReplicaInfo. Contributed by Hairong Kuang.
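
The renamed class is the datanode's per-replica record: it remembers which
FSVolume holds a block and which file on that volume stores its bytes. A
self-contained sketch of that idea (every type below is a simplified
stand-in, not the real HDFS class):

    import java.io.File;
    import java.util.HashMap;
    import java.util.Map;

    // Illustrative stand-ins only; the real Block, FSVolume and
    // ReplicaInfo live under org.apache.hadoop.hdfs.
    public class VolumeMapSketch {
      static class Block {
        final long id;
        Block(long id) { this.id = id; }
        @Override public boolean equals(Object o) {
          return o instanceof Block && ((Block) o).id == id;
        }
        @Override public int hashCode() { return (int) (id ^ (id >>> 32)); }
      }

      static class FSVolume { }          // placeholder for a storage volume

      static class ReplicaInfo {         // formerly DatanodeBlockInfo
        private final FSVolume volume;   // volume where the replica lives
        private File file;               // block file (null while being written)
        ReplicaInfo(FSVolume v, File f) { volume = v; file = f; }
        FSVolume getVolume() { return volume; }
        File getFile() { return file; }
      }

      // The lookup idiom this patch renames throughout FSDataset:
      static File getFile(Map<Block, ReplicaInfo> volumeMap, Block b) {
        ReplicaInfo info = volumeMap.get(b);
        return info != null ? info.getFile() : null;
      }

      public static void main(String[] args) {
        Map<Block, ReplicaInfo> volumeMap = new HashMap<Block, ReplicaInfo>();
        Block b = new Block(42L);
        volumeMap.put(b, new ReplicaInfo(new FSVolume(), new File("blk_42")));
        System.out.println(getFile(volumeMap, b));   // prints blk_42
      }
    }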

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (with props)
Removed:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=799079&r1=799078&r2=799079&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Jul 29 21:04:44 2009
@@ -54,6 +54,9 @@
 
     HDFS-508. Factor out BlockInfo from BlocksMap. (shv)
 
+    HDFS-510. Rename DatanodeBlockInfo to be ReplicaInfo.
+    (Hairong Kuang via shv)
+
   BUG FIXES
     HDFS-76. Better error message to users when commands fail because of 
     lack of quota. Allow quota to be set even if the limit is lower than

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=799079&r1=799078&r2=799079&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Wed Jul 29 21:04:44 2009
@@ -171,7 +171,7 @@
       return Block.GRANDFATHER_GENERATION_STAMP;
     }
 
-    void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap, FSVolume volume) {
+    void getVolumeMap(HashMap<Block, ReplicaInfo> volumeMap, FSVolume volume) {
       if (children != null) {
         for (int i = 0; i < children.length; i++) {
           children[i].getVolumeMap(volumeMap, volume);
@@ -183,7 +183,7 @@
         if (Block.isBlockFilename(blockFiles[i])) {
           long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]);
           volumeMap.put(new Block(blockFiles[i], blockFiles[i].length(), genStamp), 
-                        new DatanodeBlockInfo(volume, blockFiles[i]));
+                        new ReplicaInfo(volume, blockFiles[i]));
         }
       }
     }
@@ -403,7 +403,7 @@
       DiskChecker.checkDir(tmpDir);
     }
       
-    void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap) {
+    void getVolumeMap(HashMap<Block, ReplicaInfo> volumeMap) {
       dataDir.getVolumeMap(volumeMap, this);
     }
       
@@ -496,7 +496,7 @@
       return remaining;
     }
       
-    synchronized void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap) {
+    synchronized void getVolumeMap(HashMap<Block, ReplicaInfo> volumeMap) {
       for (int idx = 0; idx < volumes.length; idx++) {
         volumes[idx].getVolumeMap(volumeMap);
       }
@@ -653,7 +653,7 @@
   FSVolumeSet volumes;
   private HashMap<Block,ActiveFile> ongoingCreates = new HashMap<Block,ActiveFile>();
   private int maxBlocksPerDir = 0;
-  HashMap<Block,DatanodeBlockInfo> volumeMap = null;
+  HashMap<Block,ReplicaInfo> volumeMap = null;
   static  Random random = new Random();
 
   // Used for synchronizing access to usage stats
@@ -669,7 +669,7 @@
       volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf);
     }
     volumes = new FSVolumeSet(volArray);
-    volumeMap = new HashMap<Block, DatanodeBlockInfo>();
+    volumeMap = new HashMap<Block, ReplicaInfo>();
     volumes.getVolumeMap(volumeMap);
     registerMBean(storage.getStorageID());
   }
@@ -742,7 +742,7 @@
   public synchronized BlockInputStreams getTmpInputStreams(Block b, 
                           long blkOffset, long ckoff) throws IOException {
 
-    DatanodeBlockInfo info = volumeMap.get(b);
+    ReplicaInfo info = volumeMap.get(b);
     if (info == null) {
       throw new IOException("Block " + b + " does not exist in volumeMap.");
     }
@@ -777,7 +777,7 @@
    * @return - true if the specified block was detached
    */
   public boolean detachBlock(Block block, int numLinks) throws IOException {
-    DatanodeBlockInfo info = null;
+    ReplicaInfo info = null;
 
     synchronized (this) {
       info = volumeMap.get(block);
@@ -1006,12 +1006,12 @@
         v = volumes.getNextVolume(blockSize);
         // create temporary file to hold block in the designated volume
         f = createTmpFile(v, b);
-        volumeMap.put(b, new DatanodeBlockInfo(v));
+        volumeMap.put(b, new ReplicaInfo(v));
       } else if (f != null) {
         DataNode.LOG.info("Reopen already-open Block for append " + b);
         // create or reuse temporary file to hold block in the designated volume
         v = volumeMap.get(b).getVolume();
-        volumeMap.put(b, new DatanodeBlockInfo(v));
+        volumeMap.put(b, new ReplicaInfo(v));
       } else {
         // reopening block for appending to it.
         DataNode.LOG.info("Reopen Block for append " + b);
@@ -1042,7 +1042,7 @@
                                   " to tmp dir " + f);
           }
         }
-        volumeMap.put(b, new DatanodeBlockInfo(v));
+        volumeMap.put(b, new ReplicaInfo(v));
       }
       if (f == null) {
         DataNode.LOG.warn("Block " + b + " reopen failed " +
@@ -1147,7 +1147,7 @@
         
     File dest = null;
     dest = v.addBlock(b, f);
-    volumeMap.put(b, new DatanodeBlockInfo(v, dest));
+    volumeMap.put(b, new ReplicaInfo(v, dest));
     ongoingCreates.remove(b);
   }
 
@@ -1248,7 +1248,7 @@
 
   /** {@inheritDoc} */
   public void validateBlockMetadata(Block b) throws IOException {
-    DatanodeBlockInfo info = volumeMap.get(b);
+    ReplicaInfo info = volumeMap.get(b);
     if (info == null) {
       throw new IOException("Block " + b + " does not exist in volumeMap.");
     }
@@ -1306,7 +1306,7 @@
       FSVolume v;
       synchronized (this) {
         f = getFile(invalidBlks[i]);
-        DatanodeBlockInfo dinfo = volumeMap.get(invalidBlks[i]);
+        ReplicaInfo dinfo = volumeMap.get(invalidBlks[i]);
         if (dinfo == null) {
           DataNode.LOG.warn("Unexpected error trying to delete block "
                            + invalidBlks[i] + 
@@ -1369,7 +1369,7 @@
    * Turn the block identifier into a filename.
    */
   public synchronized File getFile(Block b) {
-    DatanodeBlockInfo info = volumeMap.get(b);
+    ReplicaInfo info = volumeMap.get(b);
     if (info != null) {
       return info.getFile();
     }
@@ -1448,8 +1448,8 @@
    * generation stamp</li>
    * <li>If the block length in memory does not match the actual block file length
    * then mark the block as corrupt and update the block length in memory</li>
-   * <li>If the file in {@link DatanodeBlockInfo} does not match the file on
-   * the disk, update {@link DatanodeBlockInfo} with the correct file</li>
+   * <li>If the file in {@link ReplicaInfo} does not match the file on
+   * the disk, update {@link ReplicaInfo} with the correct file</li>
    * </ul>
    *
    * @param blockId Block that differs
@@ -1472,7 +1472,7 @@
           Block.getGenerationStamp(diskMetaFile.getName()) :
             Block.GRANDFATHER_GENERATION_STAMP;
 
-      DatanodeBlockInfo memBlockInfo = volumeMap.get(block);
+      ReplicaInfo memBlockInfo = volumeMap.get(block);
       if (diskFile == null || !diskFile.exists()) {
         if (memBlockInfo == null) {
           // Block file does not exist and block does not exist in memory
@@ -1507,7 +1507,7 @@
        */
       if (memBlockInfo == null) {
         // Block is missing in memory - add the block to volumeMap
-        DatanodeBlockInfo diskBlockInfo = new DatanodeBlockInfo(vol, diskFile);
+        ReplicaInfo diskBlockInfo = new ReplicaInfo(vol, diskFile);
         Block diskBlock = new Block(diskFile, diskFile.length(), diskGS);
         volumeMap.put(diskBlock, diskBlockInfo);
         if (datanode.blockScanner != null) {
@@ -1540,7 +1540,7 @@
             + memFile.getAbsolutePath()
             + " does not exist. Updating it to the file found during scan "
             + diskFile.getAbsolutePath());
-        DatanodeBlockInfo info = volumeMap.remove(memBlock);
+        ReplicaInfo info = volumeMap.remove(memBlock);
         info.setFile(diskFile);
         memFile = diskFile;
 
@@ -1571,7 +1571,7 @@
           DataNode.LOG.warn("Updating generation stamp for block " + blockId
               + " from " + memBlock.getGenerationStamp() + " to " + gs);
 
-          DatanodeBlockInfo info = volumeMap.remove(memBlock);
+          ReplicaInfo info = volumeMap.remove(memBlock);
           memBlock.setGenerationStamp(gs);
           volumeMap.put(memBlock, info);
         }
@@ -1583,7 +1583,7 @@
         corruptBlock = new Block(memBlock);
         DataNode.LOG.warn("Updating size of block " + blockId + " from "
             + memBlock.getNumBytes() + " to " + memFile.length());
-        DatanodeBlockInfo info = volumeMap.remove(memBlock);
+        ReplicaInfo info = volumeMap.remove(memBlock);
         memBlock.setNumBytes(memFile.length());
         volumeMap.put(memBlock, info);
       }
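
The final hunks above belong to the directory scanner's reconciliation
path: when a scan finds that what is on disk disagrees with what is in
memory, the generation stamp, block length, or recorded file is updated.
A compact sketch of those three rules (MemBlock and ReplicaStub are
illustrative stand-ins, not the HDFS types):

    import java.io.File;

    class ReconcileSketch {
      static class MemBlock { long genStamp; long numBytes; }
      static class ReplicaStub { File file; }

      /** Returns true if on-disk state forced an in-memory update. */
      static boolean reconcile(MemBlock mem, ReplicaStub replica,
                               File diskFile, long diskGenStamp) {
        boolean updated = false;
        // Rule 1: trust a newer generation stamp found on disk.
        if (diskGenStamp > mem.genStamp) {
          mem.genStamp = diskGenStamp;
          updated = true;
        }
        // Rule 2: if lengths disagree, record the actual file length
        // (the real code additionally marks the block corrupt).
        if (mem.numBytes != diskFile.length()) {
          mem.numBytes = diskFile.length();
          updated = true;
        }
        // Rule 3: if the remembered file is not the file found on disk,
        // point the replica at the file the scan actually located.
        if (!diskFile.equals(replica.file)) {
          replica.file = diskFile;
          updated = true;
        }
        return updated;
      }
    }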

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java?rev=799079&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java Wed Jul 29 21:04:44 2009
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FileUtil.HardLink;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * This class is used by the datanode to maintain the map from a block 
+ * to its metadata.
+ */
+class ReplicaInfo {
+
+  private FSVolume volume;       // volume where the block belongs
+  private File     file;         // block file
+  private boolean detached;      // copy-on-write done for block
+
+  ReplicaInfo(FSVolume vol, File file) {
+    this.volume = vol;
+    this.file = file;
+    detached = false;
+  }
+
+  ReplicaInfo(FSVolume vol) {
+    this.volume = vol;
+    this.file = null;
+    detached = false;
+  }
+
+  FSVolume getVolume() {
+    return volume;
+  }
+
+  File getFile() {
+    return file;
+  }
+
+  void setFile(File f) {
+    file = f;
+  }
+
+  /**
+   * Is this block already detached?
+   */
+  boolean isDetached() {
+    return detached;
+  }
+
+  /**
+   *  Block has been successfully detached
+   */
+  void setDetached() {
+    detached = true;
+  }
+
+  /**
+   * Copy specified file into a temporary file. Then rename the
+   * temporary file to the original name. This will cause any
+   * hardlinks to the original file to be removed. The temporary
+   * files are created in the detachDir. The temporary files will
+   * be recovered (especially on Windows) on datanode restart.
+   */
+  private void detachFile(File file, Block b) throws IOException {
+    File tmpFile = volume.createDetachFile(b, file.getName());
+    try {
+      FileInputStream in = new FileInputStream(file);
+      try {
+        FileOutputStream out = new FileOutputStream(tmpFile);
+        try {
+          IOUtils.copyBytes(in, out, 16*1024);
+        } finally {
+          out.close();
+        }
+      } finally {
+        in.close();
+      }
+      if (file.length() != tmpFile.length()) {
+        throw new IOException("Copy of file " + file + " size " + file.length()+
+                              " into file " + tmpFile +
+                              " resulted in a size of " + tmpFile.length());
+      }
+      FileUtil.replaceFile(tmpFile, file);
+    } catch (IOException e) {
+      boolean done = tmpFile.delete();
+      if (!done) {
+        DataNode.LOG.info("detachFile failed to delete temporary file " +
+                          tmpFile);
+      }
+      throw e;
+    }
+  }
+
+  /**
+   * Returns true if this block was copied, otherwise returns false.
+   */
+  boolean detachBlock(Block block, int numLinks) throws IOException {
+    if (isDetached()) {
+      return false;
+    }
+    if (file == null || volume == null) {
+      throw new IOException("detachBlock:Block not found. " + block);
+    }
+    File meta = FSDataset.getMetaFile(file, block);
+    if (meta == null) {
+      throw new IOException("Meta file not found for block " + block);
+    }
+
+    if (HardLink.getLinkCount(file) > numLinks) {
+      DataNode.LOG.info("CopyOnWrite for block " + block);
+      detachFile(file, block);
+    }
+    if (HardLink.getLinkCount(meta) > numLinks) {
+      detachFile(meta, block);
+    }
+    setDetached();
+    return true;
+  }
+  
+  public String toString() {
+    return getClass().getSimpleName() + "(volume=" + volume
+        + ", file=" + file + ", detached=" + detached + ")";
+  }
+}

Propchange: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
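
ReplicaInfo.detachFile above implements copy-on-write by copying the block
file into a temporary file and renaming the copy over the original, which
leaves any hard links still pointing at the old inode. A sketch of the same
technique in java.nio.file form (illustrative only; the committed code
predates NIO.2 and uses IOUtils.copyBytes plus FileUtil.replaceFile):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.StandardCopyOption;

    class DetachSketch {
      // Replace 'file' with a fresh copy so writes through this path no
      // longer reach the inode that other hard links still reference.
      static void detach(File file, File tmpDir) throws IOException {
        File tmp = new File(tmpDir, file.getName() + ".tmp");
        Files.copy(file.toPath(), tmp.toPath(),
                   StandardCopyOption.REPLACE_EXISTING);
        if (tmp.length() != file.length()) {
          throw new IOException("Copy of " + file + " was truncated at "
                                + tmp.length() + " bytes");
        }
        // Rename the copy over the original directory entry; the old
        // inode (and every other hard link to it) keeps the old content.
        Files.move(tmp.toPath(), file.toPath(),
                   StandardCopyOption.REPLACE_EXISTING);
      }
    }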

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=799079&r1=799078&r2=799079&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Wed Jul 29 21:04:44 2009
@@ -67,7 +67,7 @@
   /** Truncate a block file */
   private long truncateBlockFile() throws IOException {
     synchronized (fds) {
-      for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
+      for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
         Block b = entry.getKey();
         File f = entry.getValue().getFile();
         File mf = FSDataset.getMetaFile(f, b);
@@ -87,7 +87,7 @@
   /** Delete a block file */
   private long deleteBlockFile() {
     synchronized(fds) {
-      for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
+      for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
         Block b = entry.getKey();
         File f = entry.getValue().getFile();
         File mf = FSDataset.getMetaFile(f, b);
@@ -104,7 +104,7 @@
   /** Delete block meta file */
   private long deleteMetaFile() {
     synchronized(fds) {
-      for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
+      for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
         Block b = entry.getKey();
         String blkfile = entry.getValue().getFile().getAbsolutePath();
         long genStamp = b.getGenerationStamp();
@@ -126,7 +126,7 @@
     while (true) {
       id = rand.nextLong();
       Block b = new Block(id);
-      DatanodeBlockInfo info = null;
+      ReplicaInfo info = null;
       synchronized(fds) {
         info = fds.volumeMap.get(b);
       }
@@ -326,7 +326,7 @@
   private void verifyAddition(long blockId, long genStamp, long size) {
     Block memBlock = fds.getBlockKey(blockId);
     assertNotNull(memBlock);
-    DatanodeBlockInfo blockInfo;
+    ReplicaInfo blockInfo;
     synchronized(fds) {
       blockInfo = fds.volumeMap.get(memBlock);
     }
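
All of the helpers touched above share one idiom: take the dataset lock
before walking volumeMap, so the scan cannot race a concurrent writer. A
minimal sketch of that guarded iteration (fds here is just a lock object
and the map is simplified; neither is the real FSDataset type):

    import java.io.File;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Map.Entry;

    class ScannerIdiomSketch {
      private final Object fds = new Object();   // stand-in for FSDataset
      private final Map<Long, File> volumeMap = new HashMap<Long, File>();

      File findBlockFile(long blockId) {
        synchronized (fds) {                     // same lock writers take
          for (Entry<Long, File> e : volumeMap.entrySet()) {
            if (e.getKey().longValue() == blockId) {
              return e.getValue();
            }
          }
        }
        return null;
      }
    }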