Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2013/06/14 01:11:24 UTC

svn commit: r1492898 - in /hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/hadoop/hdfs/server/namenod...

Author: suresh
Date: Thu Jun 13 23:11:23 2013
New Revision: 1492898

URL: http://svn.apache.org/r1492898
Log:
Merge r1492894 for HDFS-3792, r1492892 for HDFS-3009, r1492890 for HDFS-2857 and r1492876 for HDFS-2572 from branch-2

Modified:
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1492898&r1=1492897&r2=1492898&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jun 13 23:11:23 2013
@@ -4,6 +4,8 @@ Release 2.1.0-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+    HDFS-4053. Increase the default block size. (eli)
+
     HDFS-4305. Add a configurable limit on number of blocks per file, and min
     block size. (Andrew Wang via atm)
 
@@ -128,11 +130,22 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4698. Provide client-side metrics for remote reads, local reads, and
     short-circuit reads. (Colin Patrick McCabe via atm)
 
+    HDFS-3498. Support replica removal in BlockPlacementPolicy and make
+    BlockPlacementPolicyDefault extensible for reusing code in subclasses.
+    (Junping Du via szetszwo)
+
     HDFS-4234. Use generic code for choosing datanode in Balancer.  (szetszwo)
 
     HDFS-4880. Print the image and edits file loaded by the namenode in the
     logs. (Arpit Agarwal via suresh)
 
+    HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
+
+    HDFS-2857. Cleanup BlockInfo class. (suresh)
+
+    HDFS-3009. Remove duplicate code in DFSClient#isLocalAddress by using 
+    NetUtils. (Hari Mankude via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -327,6 +340,8 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4586. TestDataDirs.testGetDataDirsFromURIs fails with all directories
     in dfs.datanode.data.dir are invalid. (Ivan Mitic via atm)
 
+    HDFS-3792. Fix two findbugs warnings introduced by HDFS-3695. (todd)
+
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
@@ -456,6 +471,9 @@ Release 2.1.0-beta - UNRELEASED
 
     HDFS-4677. Editlog should support synchronous writes. (ivanmi)
 
+    HDFS-4752. TestRBWBlockInvalidation fails on Windows due to file locking.
+    (Chris Nauroth via suresh)
+
   BREAKDOWN OF HDFS-2802 HDFS SNAPSHOT SUBTASKS AND RELATED JIRAS
 
     HDFS-4076. Support snapshot of single files.  (szetszwo)
@@ -1078,14 +1096,8 @@ Release 2.0.3-alpha - 2013-02-06
 
     HDFS-4456. Add concat to HttpFS and WebHDFS REST API docs. (plamenj2003 via tucu)
 
-    HDFS-4053. Increase the default block size. (eli)
-
     HDFS-3131. Improve TestStorageRestore. (Brandon Li via atm)
 
-    HDFS-3498. Support replica removal in BlockPlacementPolicy and make
-    BlockPlacementPolicyDefault extensible for reusing code in subclasses.
-    (Junping Du via szetszwo)
-
   OPTIMIZATIONS
 
     HDFS-3429. DataNode reads checksums even if client does not need them (todd)
@@ -4192,6 +4204,7 @@ Release 0.23.0 - 2011-11-01 
 
     HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
                HdfsConstants. (Harsh J Chouraria via atm)
+
     HDFS-2197. Refactor RPC call implementations out of NameNode class (todd)
 
     HDFS-2332. Add test for HADOOP-7629 (using an immutable FsPermission

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1492898&r1=1492897&r2=1492898&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Thu Jun 13 23:11:23 2013
@@ -819,18 +819,9 @@ public class DFSClient implements java.i
       }
       return cached;
     }
+    
+    boolean local = NetUtils.isLocalAddress(addr);
 
-    // Check if the address is any local or loop back
-    boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
-
-    // Check if the address is defined on any interface
-    if (!local) {
-      try {
-        local = NetworkInterface.getByInetAddress(addr) != null;
-      } catch (SocketException e) {
-        local = false;
-      }
-    }
     if (LOG.isTraceEnabled()) {
       LOG.trace("Address " + targetAddr +
                 (local ? " is local" : " is not local"));

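For context on HDFS-3009: the lines removed above are the local-address check that NetUtils.isLocalAddress now centralizes. Below is a self-contained sketch of that equivalent logic, reconstructed from the removed code; the actual NetUtils implementation in hadoop-common may differ in detail.

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;

public class LocalAddressCheck {
  // Sketch of the check NetUtils.isLocalAddress replaces in DFSClient:
  // an address is local if it is the wildcard or loopback address, or
  // if it is bound to one of this host's network interfaces.
  static boolean isLocalAddress(InetAddress addr) {
    boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
    if (!local) {
      try {
        local = NetworkInterface.getByInetAddress(addr) != null;
      } catch (SocketException e) {
        local = false;
      }
    }
    return local;
  }

  public static void main(String[] args) {
    // Loopback is always local; a remote host's address is not.
    System.out.println(isLocalAddress(InetAddress.getLoopbackAddress()));
  }
}

Caching the result per address, as DFSClient does just above this hunk, avoids repeating the interface scan on every read.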
Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java?rev=1492898&r1=1492897&r2=1492898&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java Thu Jun 13 23:11:23 2013
@@ -17,13 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.LinkedList;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
 
 /**
- * Internal class for block metadata.
+ * BlockInfo class maintains for a given block
+ * the {@link INodeFile} it is part of and datanodes where the replicas of 
+ * the block are stored.
  * BlockInfo class maintains for a given block
  * the {@link BlockCollection} it is part of and datanodes where the replicas of 
  * the block are stored.
@@ -38,12 +42,16 @@ public class BlockInfo extends Block imp
   private LightWeightGSet.LinkedElement nextLinkedElement;
 
   /**
-   * This array contains triplets of references.
-   * For each i-th datanode the block belongs to
-   * triplets[3*i] is the reference to the DatanodeDescriptor
-   * and triplets[3*i+1] and triplets[3*i+2] are references 
-   * to the previous and the next blocks, respectively, in the 
-   * list of blocks belonging to this data-node.
+   * This array contains triplets of references. For each i-th datanode the
+   * block belongs to triplets[3*i] is the reference to the DatanodeDescriptor
+   * and triplets[3*i+1] and triplets[3*i+2] are references to the previous and
+   * the next blocks, respectively, in the list of blocks belonging to this
+   * data-node.
+   * 
+   * Previous and next pointers are stored in the Object triplets, rather than
+   * in a {@link LinkedList}, to use memory efficiently. With LinkedList the
+   * cost per replica is 42 bytes (one LinkedList#Entry object per replica)
+   * versus 16 bytes with the triplets.
    */
   private Object[] triplets;
 
@@ -86,7 +94,7 @@ public class BlockInfo extends Block imp
     return (DatanodeDescriptor)triplets[index*3];
   }
 
-  BlockInfo getPrevious(int index) {
+  private BlockInfo getPrevious(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
     BlockInfo info = (BlockInfo)triplets[index*3+1];
@@ -106,22 +114,14 @@ public class BlockInfo extends Block imp
     return info;
   }
 
-  void setDatanode(int index, DatanodeDescriptor node) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-    triplets[index*3] = node;
-  }
-
-  void setPrevious(int index, BlockInfo to) {
+  private void setDatanode(int index, DatanodeDescriptor node, BlockInfo previous,
+      BlockInfo next) {
     assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-    triplets[index*3+1] = to;
-  }
-
-  void setNext(int index, BlockInfo to) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-    triplets[index*3+2] = to;
+    int i = index * 3;
+    assert index >= 0 && i+2 < triplets.length : "Index is out of bound";
+    triplets[i] = node;
+    triplets[i+1] = previous;
+    triplets[i+2] = next;
   }
 
   /**
@@ -132,7 +132,7 @@ public class BlockInfo extends Block imp
    * @param to - block to be set to previous on the list of blocks
    * @return current previous block on the list of blocks
    */
-  BlockInfo getSetPrevious(int index, BlockInfo to) {
+  private BlockInfo setPrevious(int index, BlockInfo to) {
 	assert this.triplets != null : "BlockInfo is not initialized";
 	assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
     BlockInfo info = (BlockInfo)triplets[index*3+1];
@@ -148,7 +148,7 @@ public class BlockInfo extends Block imp
    * @param to - block to be set to next on the list of blocks
   * @return current next block on the list of blocks
    */
-  BlockInfo getSetNext(int index, BlockInfo to) {
+  private BlockInfo setNext(int index, BlockInfo to) {
 	assert this.triplets != null : "BlockInfo is not initialized";
 	assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
     BlockInfo info = (BlockInfo)triplets[index*3+2];
@@ -200,9 +200,7 @@ public class BlockInfo extends Block imp
       return false;
     // find the last null node
     int lastNode = ensureCapacity(1);
-    setDatanode(lastNode, node);
-    setNext(lastNode, null);
-    setPrevious(lastNode, null);
+    setDatanode(lastNode, node, null, null);
     return true;
   }
 
@@ -218,13 +216,10 @@ public class BlockInfo extends Block imp
     // find the last not null node
     int lastNode = numNodes()-1; 
     // replace current node triplet by the lastNode one 
-    setDatanode(dnIndex, getDatanode(lastNode));
-    setNext(dnIndex, getNext(lastNode)); 
-    setPrevious(dnIndex, getPrevious(lastNode)); 
+    setDatanode(dnIndex, getDatanode(lastNode), getPrevious(lastNode),
+        getNext(lastNode));
     // set the last triplet to null
-    setDatanode(lastNode, null);
-    setNext(lastNode, null); 
-    setPrevious(lastNode, null); 
+    setDatanode(lastNode, null, null, null);
     return true;
   }
 
@@ -302,8 +297,8 @@ public class BlockInfo extends Block imp
     if (head == this) {
       return this;
     }
-    BlockInfo next = this.getSetNext(curIndex, head);
-    BlockInfo prev = this.getSetPrevious(curIndex, null);
+    BlockInfo next = this.setNext(curIndex, head);
+    BlockInfo prev = this.setPrevious(curIndex, null);
 
     head.setPrevious(headIndex, this);
     prev.setNext(prev.findDatanode(dn), next);
@@ -333,7 +328,6 @@ public class BlockInfo extends Block imp
 
   /**
    * Convert a complete block to an under construction block.
-   * 
    * @return BlockInfoUnderConstruction -  an under construction block.
    */
   public BlockInfoUnderConstruction convertToBlockUnderConstruction(

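The triplets javadoc above carries the key design reasoning for HDFS-2857. A minimal standalone sketch of that layout follows; the class and member names here are illustrative, not the actual HDFS types.

public class TripletsSketch {
  // One flat array instead of one LinkedList entry per replica: for the
  // i-th datanode, slot 3*i holds the datanode reference and slots
  // 3*i+1 and 3*i+2 hold the previous and next blocks in that
  // datanode's block list.
  private final Object[] triplets;

  TripletsSketch(int replication) {
    this.triplets = new Object[3 * replication];
  }

  Object getDatanode(int i) { return triplets[3 * i]; }
  Object getPrevious(int i) { return triplets[3 * i + 1]; }
  Object getNext(int i)     { return triplets[3 * i + 2]; }

  // Mirrors the consolidated setter introduced in the diff above: one
  // call writes all three adjacent slots for a replica.
  void setDatanode(int i, Object node, Object prev, Object next) {
    int base = 3 * i;
    triplets[base] = node;
    triplets[base + 1] = prev;
    triplets[base + 2] = next;
  }
}

Per the javadoc's figures, three adjacent array slots cost about 16 bytes per replica, while a java.util.LinkedList would pay about 42 bytes per Entry object (object header plus three references), which matters for a structure with one element per block replica in the cluster.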
Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1492898&r1=1492897&r2=1492898&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Thu Jun 13 23:11:23 2013
@@ -349,7 +349,7 @@ public class FSEditLog implements LogsPu
    * File-based journals are skipped, since they are formatted by the
    * Storage format code.
    */
-  void formatNonFileJournals(NamespaceInfo nsInfo) throws IOException {
+  synchronized void formatNonFileJournals(NamespaceInfo nsInfo) throws IOException {
     Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
         "Bad state: %s", state);
     
@@ -360,7 +360,7 @@ public class FSEditLog implements LogsPu
     }
   }
   
-  List<FormatConfirmable> getFormatConfirmables() {
+  synchronized List<FormatConfirmable> getFormatConfirmables() {
     Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
         "Bad state: %s", state);
 

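For context on the synchronization change above: both methods read the shared state field, which other synchronized FSEditLog methods mutate, so an unsynchronized caller could observe a stale or mid-transition value; the two findbugs warnings cited by HDFS-3792 are addressed by making these reads synchronized as well. A minimal illustration of the pattern follows; the names are illustrative, not the actual FSEditLog code.

public class StateGuardSketch {
  enum State { CLOSED, BETWEEN_LOG_SEGMENTS, IN_SEGMENT }

  private State state = State.CLOSED;

  synchronized void endLogSegment() {
    state = State.BETWEEN_LOG_SEGMENTS;
  }

  // Without 'synchronized' this precondition check could race with
  // endLogSegment() and read a stale value of 'state'.
  synchronized void formatJournals() {
    if (state != State.BETWEEN_LOG_SEGMENTS) {
      throw new IllegalStateException("Bad state: " + state);
    }
    // ... format the non-file journals ...
  }
}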
Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java?rev=1492898&r1=1492897&r2=1492898&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java Thu Jun 13 23:11:23 2013
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.bl
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.Closeable;
 import java.io.File;
@@ -68,6 +69,10 @@ public class TestRBWBlockInvalidation {
   @Test(timeout=60000)
   public void testBlockInvalidationWhenRBWReplicaMissedInDN()
       throws IOException, InterruptedException {
+    // This test cannot pass on Windows due to file locking enforcement, which
+    // rejects the attempt to delete the block file from the RBW folder.
+    assumeTrue(!Path.WINDOWS);
+
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
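A note on the assumeTrue guard added above: org.junit.Assume.assumeTrue causes JUnit to report the test as skipped rather than failed when the condition is false. A minimal standalone illustration follows, using os.name in place of the HDFS-specific Path.WINDOWS flag.

import static org.junit.Assume.assumeTrue;

import org.junit.Test;

public class WindowsSkipSketch {
  @Test
  public void skippedOnWindows() {
    // A false assumption raises AssumptionViolatedException, which JUnit
    // counts as a skipped test, not a failure.
    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
    // ... test body runs only where RBW block files can be deleted ...
  }
}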