Posted to common-commits@hadoop.apache.org by su...@apache.org on 2011/09/09 02:24:45 UTC

svn commit: r1166946 - in /hadoop/common/branches/branch-0.20-security: ./ src/hdfs/org/apache/hadoop/hdfs/ src/hdfs/org/apache/hadoop/hdfs/protocol/ src/hdfs/org/apache/hadoop/hdfs/server/protocol/

Author: suresh
Date: Fri Sep  9 00:24:45 2011
New Revision: 1166946

URL: http://svn.apache.org/viewvc?rev=1166946&view=rev
Log:
HDFS-2320. Make 0.20-append protocol changes compatible with 0.20-security. Contributed by Suresh Srinivas.


Modified:
    hadoop/common/branches/branch-0.20-security/CHANGES.txt
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java

Modified: hadoop/common/branches/branch-0.20-security/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/CHANGES.txt?rev=1166946&r1=1166945&r2=1166946&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20-security/CHANGES.txt Fri Sep  9 00:24:45 2011
@@ -161,6 +161,9 @@ Release 0.20.205.0 - unreleased
     HDFS-1242. Add test for appendFile() race solved in HDFS-142.
     (Todd Lipcon via jitendra)
 
+    HDFS-2320. Make 0.20-append protocol changes compatible with
+    0.20-security. (suresh)
+
 Release 0.20.204.0 - 2011-8-25
 
   NEW FEATURES

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1166946&r1=1166945&r2=1166946&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Fri Sep  9 00:24:45 2011
@@ -96,6 +96,7 @@ public class DFSClient implements FSCons
    * it doesn't, we'll set this false and stop trying.
    */
   private volatile boolean serverSupportsHdfs630 = true;
+  private volatile boolean serverSupportsHdfs200 = true;
  
   public static ClientProtocol createNamenode(Configuration conf) throws IOException {
     return createNamenode(NameNode.getAddress(conf), conf);
@@ -1641,41 +1642,56 @@ public class DFSClient implements FSCons
           }
         }
       }
+      updateBlockInfo(newInfo);
+      this.locatedBlocks = newInfo;
+      this.currentNode = null;
+    }
+    
+    /** 
+     * For files under construction, update the last block size based
+     * on the length of the block from the datanode.
+     */
+    private void updateBlockInfo(LocatedBlocks newInfo) {
+      if (!serverSupportsHdfs200 || !newInfo.isUnderConstruction()
+          || !(newInfo.locatedBlockCount() > 0)) {
+        return;
+      }
 
-      // if the file is under construction, then fetch size of last block
-      // from datanode.
-      if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) {
-        LocatedBlock last = newInfo.get(newInfo.locatedBlockCount()-1);
-        boolean lastBlockInFile = (last.getStartOffset() + 
-                                   last.getBlockSize() == 
-                                   newInfo.getFileLength()); 
-        if (lastBlockInFile && last.getLocations().length > 0) {
-          ClientDatanodeProtocol primary =  null;
-          DatanodeInfo primaryNode = last.getLocations()[0];
-          try {
-            primary = createClientDatanodeProtocolProxy(primaryNode, conf,
-                last.getBlock(), last.getBlockToken(), socketTimeout);
-            Block newBlock = primary.getBlockInfo(last.getBlock());
-            long newBlockSize = newBlock.getNumBytes();
-            long delta = newBlockSize - last.getBlockSize();
-            // if the size of the block on the datanode is different
-            // from what the NN knows about, the datanode wins!
-            last.getBlock().setNumBytes(newBlockSize);
-            long newlength = newInfo.getFileLength() + delta;
-            newInfo.setFileLength(newlength);
-            LOG.debug("DFSClient setting last block " + last + 
-                      " to length " + newBlockSize +
-                      " filesize is now " + newInfo.getFileLength());
-          } catch (IOException e) {
-            LOG.debug("DFSClient file " + src + 
-                      " is being concurrently append to" +
-                      " but datanode " + primaryNode.getHostName() +
-                      " probably does not have block " + last.getBlock());
-          }
+      LocatedBlock last = newInfo.get(newInfo.locatedBlockCount() - 1);
+      boolean lastBlockInFile = (last.getStartOffset() + last.getBlockSize() == newInfo
+          .getFileLength());
+      if (!lastBlockInFile || last.getLocations().length <= 0) {
+        return;
+      }
+      ClientDatanodeProtocol primary = null;
+      DatanodeInfo primaryNode = last.getLocations()[0];
+      try {
+        primary = createClientDatanodeProtocolProxy(primaryNode, conf,
+            last.getBlock(), last.getBlockToken(), socketTimeout);
+        Block newBlock = primary.getBlockInfo(last.getBlock());
+        long newBlockSize = newBlock.getNumBytes();
+        long delta = newBlockSize - last.getBlockSize();
+        // if the size of the block on the datanode is different
+        // from what the NN knows about, the datanode wins!
+        last.getBlock().setNumBytes(newBlockSize);
+        long newlength = newInfo.getFileLength() + delta;
+        newInfo.setFileLength(newlength);
+        LOG.debug("DFSClient setting last block " + last + " to length "
+            + newBlockSize + " filesize is now " + newInfo.getFileLength());
+      } catch (IOException e) {
+        if (e.getMessage().startsWith(
+            "java.io.IOException: java.lang.NoSuchMethodException: "
+                + "org.apache.hadoop.hdfs.protocol"
+                + ".ClientDatanodeProtocol.getBlockInfo")) {
+          // We're talking to a server that doesn't implement HDFS-200.
+          serverSupportsHdfs200 = false;
+        } else {
+          LOG.debug("DFSClient file " + src
+              + " is being concurrently appended to" + " but datanode "
+              + primaryNode.getHostName() + " probably does not have block "
+              + last.getBlock());
         }
       }
-      this.locatedBlocks = newInfo;
-      this.currentNode = null;
     }
     
     public synchronized long getFileLength() {
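
The updateBlockInfo() refactoring above reuses the capability-detection pattern DFSClient already applies for HDFS-630: optimistically call the newer getBlockInfo RPC and, if the remote side reports NoSuchMethodException, record in a volatile flag that the server predates HDFS-200 and stop probing. A minimal self-contained sketch of that pattern (RemoteBlockService and its method are hypothetical stand-ins, not the actual DFSClient types):

    import java.io.IOException;

    /** Sketch of probe-once capability detection; not the real DFSClient. */
    public class CapabilityProbe {
      /** Hypothetical stand-in for the remote protocol being probed. */
      interface RemoteBlockService {
        long getBlockInfo(long blockId) throws IOException;
      }

      // Assume support until a probe proves otherwise; volatile so one
      // failed probe is visible to every thread immediately.
      private volatile boolean serverSupportsGetBlockInfo = true;

      long blockLength(RemoteBlockService server, long blockId, long nnLength) {
        if (!serverSupportsGetBlockInfo) {
          return nnLength;                     // old server: trust the NameNode
        }
        try {
          return server.getBlockInfo(blockId); // newer, more accurate answer
        } catch (IOException e) {
          // A call to a method the server lacks surfaces as a remote
          // NoSuchMethodException wrapped in an IOException.
          String msg = e.getMessage();
          if (msg != null && msg.contains("NoSuchMethodException")) {
            serverSupportsGetBlockInfo = false; // never probe again
          }
          return nnLength;                     // fall back either way
        }
      }
    }

One failed probe is enough: every later read skips straight to the NameNode-reported length, so a mixed-version cluster pays the exception cost at most once per client.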

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java?rev=1166946&r1=1166945&r2=1166946&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java Fri Sep  9 00:24:45 2011
@@ -32,9 +32,9 @@ public interface ClientDatanodeProtocol 
   public static final Log LOG = LogFactory.getLog(ClientDatanodeProtocol.class);
 
   /**
-   * 5: added getBlockInfo
+   * 4: never return null and always return a newly generated access token
    */
-  public static final long versionID = 5L;
+  public static final long versionID = 4L;
 
   /** Start generation-stamp recovery for specified block
    * @param block the specified block
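
Reverting versionID from 5 back to 4 matters because 0.20-era RPC treats protocol versions as an exact-match handshake: the client ships its compiled-in versionID, and any difference from the server's is fatal. A simplified sketch of that strict-equality check (an illustration, not Hadoop's actual RPC internals):

    /** Simplified version handshake; not Hadoop's real RPC code. */
    public class VersionCheck {
      static class VersionMismatchException extends RuntimeException {
        VersionMismatchException(long client, long server) {
          super("Protocol version mismatch: client = " + client
              + ", server = " + server);
        }
      }

      /** Server side: reject any client whose versionID differs at all. */
      static void checkVersion(long clientVersion, long serverVersion) {
        if (clientVersion != serverVersion) {
          throw new VersionMismatchException(clientVersion, serverVersion);
        }
      }

      public static void main(String[] args) {
        checkVersion(4L, 4L);  // reverted client vs. 0.20-security server: OK
        checkVersion(5L, 4L);  // append-branch numbering vs. security: throws
      }
    }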

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1166946&r1=1166945&r2=1166946&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Sep  9 00:24:45 2011
@@ -50,9 +50,12 @@ public interface ClientProtocol extends 
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 63: recoverLease return if the file is closed or not
+   * 61: Serialized format of BlockTokenIdentifier changed to contain
+   *     multiple blocks within a single BlockTokenIdentifier 
+   *     
+   *     (bumped to 61 to bring in line with trunk)
    */
-  public static final long versionID = 63L;
+  public static final long versionID = 61L;
   
   ///////////////////////////////////////
   // File contents
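
The change the new comment names, one BlockTokenIdentifier carrying multiple blocks, is a serialization change, which is why it forces a version bump even though no method signature moved. A hypothetical sketch of what a multi-block identifier's wire format could look like (a count followed by each block ID; this layout is an assumption for illustration, not the actual BlockTokenIdentifier):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    /** Hypothetical multi-block token identifier; not the Hadoop class. */
    public class MultiBlockTokenId {
      private final long[] blockIds;

      MultiBlockTokenId(long... blockIds) { this.blockIds = blockIds; }

      void write(DataOutput out) throws IOException {
        out.writeInt(blockIds.length);  // count first, then each block ID
        for (long id : blockIds) {
          out.writeLong(id);
        }
      }

      static MultiBlockTokenId read(DataInput in) throws IOException {
        long[] ids = new long[in.readInt()];
        for (int i = 0; i < ids.length; i++) {
          ids[i] = in.readLong();
        }
        return new MultiBlockTokenId(ids);
      }
    }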

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java?rev=1166946&r1=1166945&r2=1166946&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java Fri Sep  9 00:24:45 2011
@@ -36,10 +36,11 @@ public interface DataTransferProtocol {
    * when protocol changes. It is not very obvious. 
    */
   /*
-   * Version 19:
-   * A heartbeat is sent from the client to pipeline and then acked back
+   * Version 18:
+   *    Change the block packet ack protocol to include seqno,
+   *    numberOfReplies, reply0, reply1, ...
    */
-  public static final int DATA_TRANSFER_VERSION = 19;
+  public static final int DATA_TRANSFER_VERSION = 17;
 
   // Processed at datanode stream-handler
   public static final byte OP_WRITE_BLOCK = (byte) 80;
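
The version-18 ack layout named above packs the sequence number, the reply count, and one status per datanode in the pipeline into a single message. A hypothetical illustration of writing that layout (field widths here are assumptions, not the exact on-wire encoding):

    import java.io.DataOutput;
    import java.io.IOException;

    /** Hypothetical pipeline-ack writer; not the actual HDFS wire code. */
    public class PipelineAckSketch {
      static void writeAck(DataOutput out, long seqno, short[] replies)
          throws IOException {
        out.writeLong(seqno);            // which packet this ack answers
        out.writeShort(replies.length);  // one reply per pipeline datanode
        for (short reply : replies) {
          out.writeShort(reply);         // per-datanode status code
        }
      }
    }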

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1166946&r1=1166945&r2=1166946&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Fri Sep  9 00:24:45 2011
@@ -41,11 +41,13 @@ import org.apache.hadoop.security.Kerber
     clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
 public interface DatanodeProtocol extends VersionedProtocol {
   /**
-   * 26: nextGenerationStamp has a new parameter indicating if it is for
-   * NameNode initiated lease recovery or not
+   * 25: Serialized format of BlockTokenIdentifier changed to contain
+   *     multiple blocks within a single BlockTokenIdentifier
+   *     
+   *     (bumped to 25 to bring in line with trunk)
    */
-  public static final long versionID = 26L;
-
+  public static final long versionID = 25L;
+  
   // error code
   final static int NOTIFY = 0;
   final static int DISK_ERROR = 1; // there are still valid volumes on DN