Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2012/05/01 03:11:34 UTC

svn commit: r1332487 - in /hadoop/common/branches/branch-1: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/DFSClient.java src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java

Author: szetszwo
Date: Tue May  1 01:11:34 2012
New Revision: 1332487

URL: http://svn.apache.org/viewvc?rev=1332487&view=rev
Log:
HDFS-1041. DFSClient.getFileChecksum(..) should retry if connection to the first datanode fails.
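
For context, the user-visible entry point is FileSystem.getFileChecksum(Path), which drives DFSClient.getFileChecksum(..) underneath. A minimal sketch of that call, not part of this commit (assumptions: a reachable Hadoop 1.x deployment configured through the usual core-site.xml/hdfs-site.xml; the class name and path below are illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ChecksumExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();   // picks up core-site.xml / hdfs-site.xml
        FileSystem fs = FileSystem.get(conf);       // the configured (HDFS) file system
        // After HDFS-1041, this call no longer fails outright when the first
        // datanode of a block cannot be reached; another replica is tried.
        FileChecksum checksum = fs.getFileChecksum(new Path("/illustrative/path"));
        System.out.println("checksum = " + checksum);
        fs.close();
      }
    }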

Modified:
    hadoop/common/branches/branch-1/CHANGES.txt
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1332487&r1=1332486&r2=1332487&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue May  1 01:11:34 2012
@@ -296,6 +296,9 @@ Release 1.0.3 - unreleased
     HDFS-3316. The tar ball doesn't include jsvc any more.
     (Owen O'Malley via mattf)
 
+    HDFS-1041. DFSClient.getFileChecksum(..) should retry if connection to
+    the first datanode fails.  (szetszwo)
+
 Release 1.0.2 - 2012.03.24
 
   NEW FEATURES

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1332487&r1=1332486&r2=1332487&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Tue May  1 01:11:34 2012
@@ -559,7 +559,7 @@ public class DFSClient implements FSCons
     return hints;
   }
 
-  private static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
+  static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
       String src, long start, long length) throws IOException {
     try {
       return namenode.getBlockLocations(src, start, length);
@@ -1016,27 +1016,29 @@ public class DFSClient implements FSCons
      
       boolean done = false;
       for(int j = 0; !done && j < datanodes.length; j++) {
-        //connect to a datanode
-        final Socket sock = socketFactory.createSocket();
         final String dnName = datanodes[j].getName(connectToDnViaHostname);
-        LOG.debug("Connecting to " + dnName);
-        NetUtils.connect(sock, 
-                         NetUtils.createSocketAddr(dnName),
-                         timeout);
-        sock.setSoTimeout(timeout);
-
-        DataOutputStream out = new DataOutputStream(
-            new BufferedOutputStream(NetUtils.getOutputStream(sock), 
-                                     DataNode.SMALL_BUFFER_SIZE));
-        DataInputStream in = new DataInputStream(NetUtils.getInputStream(sock));
-
-        // get block MD5
+        Socket sock = null;
+        DataOutputStream out = null;
+        DataInputStream in = null;
+        
         try {
+          //connect to a datanode
+          sock = socketFactory.createSocket();
+          LOG.debug("Connecting to " + dnName);
+          NetUtils.connect(sock, NetUtils.createSocketAddr(dnName), timeout);
+          sock.setSoTimeout(timeout);
+
+          out = new DataOutputStream(
+              new BufferedOutputStream(NetUtils.getOutputStream(sock),
+                                       DataNode.SMALL_BUFFER_SIZE));
+          in = new DataInputStream(NetUtils.getInputStream(sock));
+
           if (LOG.isDebugEnabled()) {
             LOG.debug("write to " + dnName + ": "
-                + DataTransferProtocol.OP_BLOCK_CHECKSUM +
-                ", block=" + block);
+                + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
           }
+
+          // get block MD5
           out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
           out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
           out.writeLong(block.getBlockId());

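The hunk above only shows the setup half of the new try block; the matching catch/finally half lies outside the diff context. A hedged sketch of the per-datanode loop shape this refactoring enables (the log message text, the use of org.apache.hadoop.io.IOUtils, and the variable names below are assumptions; the exact code in DFSClient.java may differ in detail):

    boolean done = false;
    for (int j = 0; !done && j < datanodes.length; j++) {
      Socket sock = null;
      DataOutputStream out = null;
      DataInputStream in = null;
      try {
        // connect to datanodes[j], send OP_BLOCK_CHECKSUM, read the reply ...
        done = true;                       // only reached if this replica answered
      } catch (IOException ioe) {
        // log and fall through to the next replica instead of failing the call
        LOG.warn("src=" + src + ", datanodes[" + j + "]=" + datanodes[j], ioe);
      } finally {
        // release the per-attempt resources whether or not this replica worked
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
        IOUtils.closeSocket(sock);
      }
    }
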
Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1332487&r1=1332486&r2=1332487&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java Tue May  1 01:11:34 2012
@@ -478,4 +478,35 @@ public class TestDFSClientRetries extend
       server.stop();
     }
   }
+
+  public void testGetFileChecksum() throws Exception {
+    final String f = "/testGetFileChecksum";
+    final Path p = new Path(f);
+
+    final Configuration conf = new Configuration();
+    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    try {
+      cluster.waitActive();
+
+      //create a file
+      final FileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, p, 1L << 20, (short)3, 20100402L);
+
+      //get checksum
+      final FileChecksum cs1 = fs.getFileChecksum(p);
+      assertTrue(cs1 != null);
+
+      //stop the first datanode
+      final List<LocatedBlock> locatedblocks = DFSClient.callGetBlockLocations(
+          cluster.getNameNode(), f, 0, Long.MAX_VALUE).getLocatedBlocks();
+      final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
+      cluster.stopDataNode(first.getName());
+
+      //get checksum again
+      final FileChecksum cs2 = fs.getFileChecksum(p);
+      assertEquals(cs1, cs2);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }