Posted to hdfs-commits@hadoop.apache.org by ha...@apache.org on 2009/09/17 07:52:42 UTC

svn commit: r816063 - in /hadoop/hdfs/branches/HDFS-265: ./ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/

Author: hairong
Date: Thu Sep 17 05:52:41 2009
New Revision: 816063

URL: http://svn.apache.org/viewvc?rev=816063&view=rev
Log:
HDFS-592. Allow clients to fetch a new generation stamp from NameNode for pipeline recovery. Contributed by Hairong Kuang.

Added:
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
Modified:
    hadoop/hdfs/branches/HDFS-265/CHANGES.txt
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java

Modified: hadoop/hdfs/branches/HDFS-265/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/CHANGES.txt?rev=816063&r1=816062&r2=816063&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-265/CHANGES.txt Thu Sep 17 05:52:41 2009
@@ -30,6 +30,9 @@
     HDFS-619. Support replica recovery initialization in datanode for the new
     append design.  (szetszwo)
 
+    HDFS-592. Allow clients to fetch a new generation stamp from NameNode for
+    pipeline recovery. (hairong)
+
   IMPROVEMENTS
 
     HDFS-509. Redesign DataNode volumeMap to include all types of Replicas.

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=816063&r1=816062&r2=816063&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Thu Sep 17 05:52:41 2009
@@ -44,9 +44,10 @@
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 48: modified mkdirs() to take an additional boolean parameter
+   * 49: added a new method getNewStampForPipeline(Block, String) 
+   * to support pipeline recovery
    */
-  public static final long versionID = 48L;
+  public static final long versionID = 49L;
   
   ///////////////////////////////////////
   // File contents
@@ -510,4 +511,19 @@
    *              by this call.
    */
   public void setTimes(String src, long mtime, long atime) throws IOException;
+  
+  /**
+   * Get a new generation stamp together with an access token for 
+   * a block under construction.
+   * 
+   * This method is called only when a client needs to recover a failed
+   * pipeline or set up a pipeline for appending to a block.
+   * 
+   * @param block a block
+   * @param clientName the name of the client
+   * @return a located block with a new generation stamp and an access token
+   * @throws IOException if any error occurs
+   */
+  public LocatedBlock getNewStampForPipeline(Block block, String clientName) 
+  throws IOException;
 }
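
For context, a recovering client would drive this new RPC roughly as
follows. This is a minimal sketch, not code from this commit: "namenode"
is assumed to be an already-obtained ClientProtocol proxy (built against
versionID 49, since Hadoop RPC rejects mismatched protocol versions at
connect time), and "block"/"clientName" come from the failed pipeline's
state.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    class PipelineRecoverySketch {
      // Fetch a new generation stamp for a block whose pipeline failed
      // and adopt it locally before the pipeline is rebuilt.
      static void adoptNewStamp(ClientProtocol namenode, Block block,
          String clientName) throws IOException {
        // The NameNode validates safe mode, block state, and the lease
        // before bumping the stamp (see FSNamesystem below).
        LocatedBlock lb = namenode.getNewStampForPipeline(block, clientName);
        // Datanodes still holding the old stamp are fenced off from the
        // recovered block once writing resumes under the new one.
        block.setGenerationStamp(lb.getBlock().getGenerationStamp());
      }
    }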

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=816063&r1=816062&r2=816063&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Sep 17 05:52:41 2009
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
@@ -3726,6 +3727,57 @@
     return nextGenerationStamp();
   }
 
+  /**
+   * Get a new generation stamp together with an access token for 
+   * a block under construction.
+   * 
+   * This method is called for recovering a failed pipeline or setting up
+   * a pipeline to append to a block.
+   * 
+   * @param block a block
+   * @param clientName the name of a client
+   * @return a located block with a new generation stamp and an access token
+   * @throws IOException if any error occurs
+   */
+  synchronized LocatedBlock getNewStampForPipeline(Block block, String clientName) 
+  throws IOException {
+    // check safe mode
+    if (isInSafeMode())
+      throw new SafeModeException("Cannot get a new generation stamp and an " +
+          "access token for block " + block, safeMode);
+    
+    // check stored block state
+    BlockInfo storedBlock = blockManager.getStoredBlock(block);
+    if (storedBlock == null || 
+        storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
+      throw new IOException(block +
+          " does not exist or is not under construction");
+    }
+    
+    // check file inode
+    INodeFile file = storedBlock.getINode();
+    if (file==null || !file.isUnderConstruction()) {
+      throw new IOException("The file " + storedBlock + 
+          " is belonged to does not exist or it is not under construction.");
+    }
+    
+    // check lease
+    INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)file;
+    if (clientName == null || !clientName.equals(pendingFile.getClientName())) {
+      throw new LeaseExpiredException("Lease mismatch: " + block +
+          " is accessed by non-lease holder " + clientName);
+    }
+
+    // get a new generation stamp and an access token
+    block.setGenerationStamp(nextGenerationStamp());
+    LocatedBlock locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
+    if (isAccessTokenEnabled) {
+      locatedBlock.setAccessToken(accessTokenHandler.generateToken(
+          block.getBlockId(), EnumSet.of(AccessTokenHandler.AccessMode.WRITE)));
+    }
+    return locatedBlock;
+  }
+  
   // rename was successful. If any part of the renamed subtree had
   // files that were being written to, update with new filename.
   //
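
A caller has to be prepared for each rejection path above. A minimal
client-side sketch under the same assumptions as the earlier snippet
(note that over Hadoop RPC these exceptions actually surface wrapped in
RemoteException; unwrapping is elided here for brevity):

    try {
      LocatedBlock lb = namenode.getNewStampForPipeline(block, clientName);
      // proceed to rebuild the pipeline with lb's stamp and access token
    } catch (SafeModeException e) {
      // NameNode is in safe mode: back off and retry later
    } catch (LeaseExpiredException e) {
      // clientName does not hold the lease on the block's file
    } catch (IOException e) {
      // block unknown, finalized, or its file not under construction
    }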

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=816063&r1=816062&r2=816063&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Sep 17 05:52:41 2009
@@ -671,6 +671,12 @@
     return namesystem.nextGenerationStampForBlock(block);
   }
 
+  @Override
+  public LocatedBlock getNewStampForPipeline(Block block, String clientName)
+  throws IOException {
+    return namesystem.getNewStampForPipeline(block, clientName);
+  }
+
   /** {@inheritDoc} */
   public void commitBlockSynchronization(Block block,
       long newgenerationstamp, long newlength,

Added: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java?rev=816063&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java (added)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java Thu Sep 17 05:52:41 2009
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.io.IOUtils;
+
+import org.junit.Test;
+import org.junit.Assert;
+
+/**
+ * Tests that the client protocol methods used for pipeline recovery work
+ * correctly.
+ */
+public class TestClientProtocolForPipelineRecovery {
+  
+  @Test public void testGetNewStamp() throws IOException {
+    int numDataNodes = 1;
+    Configuration conf = new Configuration();
+    conf.setBoolean("dfs.support.append", true);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+    try {
+      cluster.waitActive();
+      FileSystem fileSys = cluster.getFileSystem();
+      NameNode namenode = cluster.getNameNode();
+
+      /* Test writing to finalized replicas */
+      Path file = new Path("dataprotocol.dat");    
+      DFSTestUtil.createFile(fileSys, file, 1L, (short)numDataNodes, 0L);
+      // get the first block of the file
+      Block firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
+
+      // test getNewStampForPipeline on a finalized block
+      try {
+        namenode.getNewStampForPipeline(firstBlock, "");
+        Assert.fail("Can not get a new GS from a finalized block");
+      } catch (IOException e) {
+        Assert.assertTrue(e.getMessage().contains("is not under construction"));
+      }
+      
+      // test getNewStampForPipeline on a non-existent block
+      try {
+        long newBlockId = firstBlock.getBlockId() + 1;
+        Block newBlock = new Block(newBlockId, 0, 
+            firstBlock.getGenerationStamp());
+        namenode.getNewStampForPipeline(newBlock, "");
+        Assert.fail("Cannot get a new GS from a non-existent block");
+      } catch (IOException e) {
+        Assert.assertTrue(e.getMessage().contains("does not exist"));
+      }
+
+      /* Test RBW replicas */
+      // change the first block to an RBW (replica being written)
+      DFSOutputStream out = null;
+      try {
+        out = (DFSOutputStream) fileSys.append(file).getWrappedStream();
+        out.write(1);
+        out.hflush();
+        FSDataInputStream in = null;
+        try {
+          in = fileSys.open(file);
+          firstBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
+        } finally {
+          IOUtils.closeStream(in);
+        }
+
+        // test non-lease holder
+        DFSClient dfs = ((DistributedFileSystem)fileSys).dfs;
+        try {
+          namenode.getNewStampForPipeline(firstBlock, "test" + dfs.clientName);
+          Assert.fail("Cannot get a new GS for a non lease holder");
+        } catch (LeaseExpiredException e) {
+          Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
+        }
+
+        // test null lease holder
+        try {
+          namenode.getNewStampForPipeline(firstBlock, null);
+          Assert.fail("Cannot get a new GS for a null lease holder");
+        } catch (LeaseExpiredException e) {
+          Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
+        }
+
+        // test getNewStampForPipeline on an RBW block
+        namenode.getNewStampForPipeline(firstBlock, dfs.clientName);
+      } finally {
+        IOUtils.closeStream(out);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=816063&r1=816062&r2=816063&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu Sep 17 05:52:41 2009
@@ -215,6 +215,9 @@
 
     public void setTimes(String src, long mtime, long atime) throws IOException {}
 
+    public LocatedBlock getNewStampForPipeline(Block block, String clientName)
+    throws IOException { return null; }
+
   }
   
   public void testNotYetReplicatedErrors() throws IOException