Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2014/08/20 01:50:25 UTC

svn commit: r1619012 [30/35] - in /hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop...

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java Tue Aug 19 23:49:39 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.test.PathUtils;
@@ -101,6 +102,7 @@ public class TestReplicationPolicyConsid
     }
   }
 
+  private final double EPSILON = 0.0001;
   /**
    * Tests that chooseTarget with considerLoad set to true correctly calculates
    * load with decommissioned nodes.
@@ -109,14 +111,6 @@ public class TestReplicationPolicyConsid
   public void testChooseTargetWithDecomNodes() throws IOException {
     namenode.getNamesystem().writeLock();
     try {
-      // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
-      // returns false
-      for (int i = 0; i < 3; i++) {
-        DatanodeInfo d = dnManager.getDatanodeByXferAddr(
-            dnrList.get(i).getIpAddr(),
-            dnrList.get(i).getXferPort());
-        d.setDecommissioned();
-      }
       String blockPoolId = namenode.getNamesystem().getBlockPoolId();
       dnManager.handleHeartbeat(dnrList.get(3),
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
@@ -133,6 +127,20 @@ public class TestReplicationPolicyConsid
           blockPoolId, dataNodes[5].getCacheCapacity(),
           dataNodes[5].getCacheRemaining(),
           4, 0, 0);
+      // sum of the xceiver counts reported in the above heartbeats
+      final int load = 2 + 4 + 4;
+      
+      FSNamesystem fsn = namenode.getNamesystem();
+      assertEquals((double)load/6, fsn.getInServiceXceiverAverage(), EPSILON);
+      
+      // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
+      // returns false
+      for (int i = 0; i < 3; i++) {
+        DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
+        dnManager.startDecommission(d);
+        d.setDecommissioned();
+      }
+      assertEquals((double)load/3, fsn.getInServiceXceiverAverage(), EPSILON);
 
       // Call chooseTarget()
       DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
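
The two assertions above are simple arithmetic: the heartbeats report xceiver counts of 2, 4 and 4 for the three nodes heartbeated above, so with all six datanodes in service the average is 10/6 ~= 1.67, and once three idle nodes are decommissioned the same total is divided by the three remaining in-service nodes, giving 10/3 ~= 3.33. A standalone sketch of that computation (plain Java, no Hadoop APIs; the array contents are illustrative):

    public class XceiverAverage {
      public static void main(String[] args) {
        // per-datanode xceiver counts matching the heartbeats above
        int[] xceiverCounts = {0, 0, 0, 2, 4, 4};
        int load = 0;
        for (int c : xceiverCounts) {
          load += c;
        }
        int inService = xceiverCounts.length;           // all six nodes in service
        System.out.println((double) load / inService);  // 1.666...
        inService -= 3;                                 // three nodes decommissioned
        System.out.println((double) load / inService);  // 3.333...
      }
    }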

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java Tue Aug 19 23:49:39 2014
@@ -47,11 +47,13 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+
 public class TestReplicationPolicyWithNodeGroup {
   private static final int BLOCK_SIZE = 1024;
   private static final int NUM_OF_DATANODES = 8;
   private static final int NUM_OF_DATANODES_BOUNDARY = 6;
   private static final int NUM_OF_DATANODES_MORE_TARGETS = 12;
+  private static final int NUM_OF_DATANODES_FOR_DEPENDENCIES = 6;
   private final Configuration CONF = new HdfsConfiguration();
   private NetworkTopology cluster;
   private NameNode namenode;
@@ -113,7 +115,33 @@ public class TestReplicationPolicyWithNo
 
   private final static DatanodeDescriptor NODE = 
       new DatanodeDescriptor(DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d2/r4/n7"));
-
+  
+  private static final DatanodeStorageInfo[] storagesForDependencies;
+  private static final DatanodeDescriptor[]  dataNodesForDependencies;
+  static {
+    final String[] racksForDependencies = {
+        "/d1/r1/n1",
+        "/d1/r1/n1",
+        "/d1/r1/n2",
+        "/d1/r1/n2",
+        "/d1/r1/n3",
+        "/d1/r1/n4"
+    };
+    final String[] hostNamesForDependencies = {
+        "h1",
+        "h2",
+        "h3",
+        "h4",
+        "h5",
+        "h6"
+    };
+    
+    storagesForDependencies = DFSTestUtil.createDatanodeStorageInfos(
+        racksForDependencies, hostNamesForDependencies);
+    dataNodesForDependencies = DFSTestUtil.toDatanodeDescriptor(storagesForDependencies);
+    
+  }
+  
   @Before
   public void setUp() throws Exception {
     FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
@@ -563,51 +591,50 @@ public class TestReplicationPolicyWithNo
    */
   @Test
   public void testChooseReplicaToDelete() throws Exception {
-    List<DatanodeDescriptor> replicaNodeList = 
-        new ArrayList<DatanodeDescriptor>();
-    final Map<String, List<DatanodeDescriptor>> rackMap = 
-        new HashMap<String, List<DatanodeDescriptor>>();
+    List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
+    final Map<String, List<DatanodeStorageInfo>> rackMap
+        = new HashMap<String, List<DatanodeStorageInfo>>();
     dataNodes[0].setRemaining(4*1024*1024);
-    replicaNodeList.add(dataNodes[0]);
+    replicaList.add(storages[0]);
 
     dataNodes[1].setRemaining(3*1024*1024);
-    replicaNodeList.add(dataNodes[1]);
+    replicaList.add(storages[1]);
 
     dataNodes[2].setRemaining(2*1024*1024);
-    replicaNodeList.add(dataNodes[2]);
+    replicaList.add(storages[2]);
 
     dataNodes[5].setRemaining(1*1024*1024);
-    replicaNodeList.add(dataNodes[5]);
+    replicaList.add(storages[5]);
 
-    List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
-    List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
+    List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
+    List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
     replicator.splitNodesWithRack(
-        replicaNodeList, rackMap, first, second);
+        replicaList, rackMap, first, second);
     assertEquals(3, first.size());
     assertEquals(1, second.size());
-    DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
+    DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
         null, null, (short)3, first, second);
     // Within first set {dataNodes[0], dataNodes[1], dataNodes[2]}, 
     // dataNodes[0] and dataNodes[1] are in the same nodegroup, 
     // but dataNodes[1] is chosen because it has less free space
-    assertEquals(chosenNode, dataNodes[1]);
+    assertEquals(chosen, storages[1]);
 
-    replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
+    replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
     assertEquals(2, first.size());
     assertEquals(1, second.size());
     // Within first set {dataNodes[0], dataNodes[2]}, dataNodes[2] is chosen
     // as it has less free space
-    chosenNode = replicator.chooseReplicaToDelete(
+    chosen = replicator.chooseReplicaToDelete(
         null, null, (short)2, first, second);
-    assertEquals(chosenNode, dataNodes[2]);
+    assertEquals(chosen, storages[2]);
 
-    replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
+    replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
     assertEquals(0, first.size());
     assertEquals(2, second.size());
     // Within the second set, dataNodes[5] is chosen as it has the least free space
-    chosenNode = replicator.chooseReplicaToDelete(
+    chosen = replicator.chooseReplicaToDelete(
         null, null, (short)1, first, second);
-    assertEquals(chosenNode, dataNodes[5]);
+    assertEquals(chosen, storages[5]);
   }
   
   /**
@@ -720,5 +747,63 @@ public class TestReplicationPolicyWithNo
     assertEquals(targets.length, 6);
   }
 
-
+  @Test
+  public void testChooseTargetWithDependencies() throws Exception {
+    for(int i=0; i<NUM_OF_DATANODES; i++) {
+      cluster.remove(dataNodes[i]);
+    }
+    
+    for(int i=0; i<NUM_OF_DATANODES_MORE_TARGETS; i++) {
+      DatanodeDescriptor node = dataNodesInMoreTargetsCase[i];
+      if (cluster.contains(node)) {
+        cluster.remove(node);
+      }
+    }
+    
+    Host2NodesMap host2DatanodeMap = namenode.getNamesystem()
+        .getBlockManager()
+        .getDatanodeManager().getHost2DatanodeMap();
+    for(int i=0; i<NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
+      cluster.add(dataNodesForDependencies[i]);
+      host2DatanodeMap.add(dataNodesForDependencies[i]);
+    }
+    
+    //add dependencies (dataNodesForDependencies[1] <-> [2], and [3] <-> [4])
+    dataNodesForDependencies[1].addDependentHostName(
+        dataNodesForDependencies[2].getHostName());
+    dataNodesForDependencies[2].addDependentHostName(
+        dataNodesForDependencies[1].getHostName());
+    dataNodesForDependencies[3].addDependentHostName(
+        dataNodesForDependencies[4].getHostName());
+    dataNodesForDependencies[4].addDependentHostName(
+        dataNodesForDependencies[3].getHostName());
+    
+    //Update heartbeat
+    for(int i=0; i<NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
+      updateHeartbeatWithUsage(dataNodesForDependencies[i],
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+    }
+    
+    List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
+    
+    DatanodeStorageInfo[] targets;
+    Set<Node> excludedNodes = new HashSet<Node>();
+    excludedNodes.add(dataNodesForDependencies[5]);
+    
+    //try to select three targets as there are three node groups
+    targets = chooseTarget(3, dataNodesForDependencies[1], chosenNodes, excludedNodes);
+    
+    //Even though there are three node groups, verify that
+    //only two targets are selected due to dependencies
+    assertEquals(targets.length, 2);
+    assertEquals(targets[0], storagesForDependencies[1]);
+    assertTrue(targets[1].equals(storagesForDependencies[3]) || targets[1].equals(storagesForDependencies[4]));
+    
+    //verify that all data nodes are in the excluded list
+    assertEquals(excludedNodes.size(), NUM_OF_DATANODES_FOR_DEPENDENCIES);
+    for(int i=0; i<NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
+      assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
+    }
+  }
 }
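
Why only two targets survive: choosing a node rules out both the rest of its node group and its dependent host. Picking the writer h2 excludes its group-mate h1 and its dependency h3; picking h4 (or h5) then excludes the other half of that pair, and the last candidate, h6, was pre-excluded by the test. A toy model of that bookkeeping (the GROUP and DEPENDENCY tables mirror the test fixtures; they are illustrative, not Hadoop APIs):

    import java.util.*;

    public class DependencyExclusion {
      static final Map<String, String> GROUP = Map.of(
          "h1", "n1", "h2", "n1", "h3", "n2", "h4", "n2", "h5", "n3", "h6", "n4");
      static final Map<String, String> DEPENDENCY = Map.of(
          "h2", "h3", "h3", "h2", "h4", "h5", "h5", "h4");

      public static void main(String[] args) {
        Set<String> excluded = new HashSet<>(Set.of("h6")); // pre-excluded, as in the test
        List<String> targets = new ArrayList<>();
        // the writer h2 is considered first, mirroring chooseTarget's local preference
        for (String host : List.of("h2", "h1", "h3", "h4", "h5", "h6")) {
          if (excluded.contains(host)) {
            continue;
          }
          targets.add(host);
          for (Map.Entry<String, String> e : GROUP.entrySet()) {
            if (e.getValue().equals(GROUP.get(host))) {
              excluded.add(e.getKey()); // at most one replica per node group
            }
          }
          // a chosen node also drags its dependent host into the excluded set
          excluded.add(DEPENDENCY.getOrDefault(host, host));
        }
        System.out.println(targets);  // [h2, h4]: only two targets can be chosen
        System.out.println(excluded); // all six hosts, matching the final assertion
      }
    }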

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Tue Aug 19 23:49:39 2014
@@ -17,37 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.common;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-
-import javax.servlet.ServletContext;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.jsp.JspWriter;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.DataInputBuffer;
@@ -56,6 +30,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyServers;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -63,20 +39,20 @@ import org.apache.hadoop.security.token.
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-import org.xml.sax.InputSource;
-import org.xml.sax.SAXException;
 
-import com.google.common.base.Strings;
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 
 public class TestJspHelper {
 
   private final Configuration conf = new HdfsConfiguration();
-  private String jspWriterOutput = "";
 
   // allow user with TGT to run tests
   @BeforeClass
@@ -158,25 +134,6 @@ public class TestJspHelper {
         .next();
     Assert.assertEquals(expected, tokenInUgi.getService().toString());
   }
-  
-  
-  @Test
-  public void testDelegationTokenUrlParam() {
-    conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-    UserGroupInformation.setConfiguration(conf);
-    String tokenString = "xyzabc";
-    String delegationTokenParam = JspHelper
-        .getDelegationTokenUrlParam(tokenString);
-    //Security is enabled
-    Assert.assertEquals(JspHelper.SET_DELEGATION + "xyzabc",
-        delegationTokenParam);
-    conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "simple");
-    UserGroupInformation.setConfiguration(conf);
-    delegationTokenParam = JspHelper
-        .getDelegationTokenUrlParam(tokenString);
-    //Empty string must be returned because security is disabled.
-    Assert.assertEquals("", delegationTokenParam);
-  }
 
   @Test
   public void testGetUgiFromToken() throws IOException {
@@ -328,8 +285,10 @@ public class TestJspHelper {
     String user = "TheNurse";
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     
-    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER+realUser+".groups", "*");
-    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER+realUser+".hosts", "*");
+    conf.set(DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserGroupConfKey(realUser), "*");
+    conf.set(DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserIpConfKey(realUser), "*");
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     UserGroupInformation.setConfiguration(conf);
     UserGroupInformation ugi;
@@ -403,32 +362,6 @@ public class TestJspHelper {
     }
   }
 
-  @Test
-  public void testPrintGotoFormWritesValidXML() throws IOException,
-         ParserConfigurationException, SAXException {
-    JspWriter mockJspWriter = mock(JspWriter.class);
-    ArgumentCaptor<String> arg = ArgumentCaptor.forClass(String.class);
-    doAnswer(new Answer<Object>() {
-      @Override
-      public Object answer(InvocationOnMock invok) {
-        Object[] args = invok.getArguments();
-        jspWriterOutput += (String) args[0];
-        return null;
-      }
-    }).when(mockJspWriter).print(arg.capture());
-
-    jspWriterOutput = "";
-
-    JspHelper.printGotoForm(mockJspWriter, 424242, "a token string",
-            "foobar/file", "0.0.0.0");
-
-    DocumentBuilder parser =
-        DocumentBuilderFactory.newInstance().newDocumentBuilder();
-    InputSource is = new InputSource();
-    is.setCharacterStream(new StringReader(jspWriterOutput));
-    parser.parse(is);
-  }
-
   private HttpServletRequest getMockRequest(String remoteUser, String user, String doAs) {
     HttpServletRequest request = mock(HttpServletRequest.class);
     when(request.getParameter(UserParam.NAME)).thenReturn(user);
@@ -464,146 +397,6 @@ public class TestJspHelper {
   }
 
   @Test
-  public void testSortNodeByFields() throws Exception {
-    DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "datanode1",
-        1234, 2345, 3456, 4567);
-    DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "datanode2",
-        1235, 2346, 3457, 4568);
-
-    // Setup DatanodeDescriptors with one storage each.
-    DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1");
-    DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2");
-
-    // Update the DatanodeDescriptors with their attached storages.
-    BlockManagerTestUtil.updateStorage(dnDesc1, new DatanodeStorage("dnStorage1"));
-    BlockManagerTestUtil.updateStorage(dnDesc2, new DatanodeStorage("dnStorage2"));
-
-    DatanodeStorage dns1 = new DatanodeStorage("dnStorage1");
-    DatanodeStorage dns2 = new DatanodeStorage("dnStorage2");
-
-    StorageReport[] report1 = new StorageReport[] {
-        new StorageReport(dns1, false, 1024, 100, 924, 100)
-    };
-    StorageReport[] report2 = new StorageReport[] {
-        new StorageReport(dns2, false, 2500, 200, 1848, 200)
-    };
-    dnDesc1.updateHeartbeat(report1, 5l, 3l, 10, 2);
-    dnDesc2.updateHeartbeat(report2, 10l, 2l, 20, 1);
-
-    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    live.add(dnDesc1);
-    live.add(dnDesc2);
-
-    // Test sorting by failed volumes
-    JspHelper.sortNodeList(live, "volfails", "ASC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-    JspHelper.sortNodeList(live, "volfails", "DSC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-
-    // Test sorting by Blockpool used
-    JspHelper.sortNodeList(live, "bpused", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    JspHelper.sortNodeList(live, "bpused", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-
-    // Test sorting by Percentage Blockpool used
-    JspHelper.sortNodeList(live, "pcbpused", "ASC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-    JspHelper.sortNodeList(live, "pcbpused", "DSC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    //unexisted field comparition is d1.getHostName().compareTo(d2.getHostName());    
-    JspHelper.sortNodeList(live, "unexists", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "unexists", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));  
-    
-    // test sorting by capacity
-    JspHelper.sortNodeList(live, "capacity", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "capacity", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-
-    // test sorting by used
-    JspHelper.sortNodeList(live, "used", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "used", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1)); 
-    
-    // test sorting by nondfsused
-    JspHelper.sortNodeList(live, "nondfsused", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "nondfsused", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-   
-    // test sorting by remaining
-    JspHelper.sortNodeList(live, "remaining", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "remaining", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-  }
-  
-  @Test
-  public void testPrintMethods() throws IOException {
-    JspWriter out = mock(JspWriter.class);      
-    HttpServletRequest req = mock(HttpServletRequest.class);
-    
-    final StringBuffer buffer = new StringBuffer();
-    
-    ArgumentCaptor<String> arg = ArgumentCaptor.forClass(String.class);
-    doAnswer(new Answer<Object>() {      
-      @Override
-      public Object answer(InvocationOnMock invok) {
-        Object[] args = invok.getArguments();
-        buffer.append((String)args[0]);
-        return null;
-      }
-    }).when(out).print(arg.capture());
-    
-    
-    JspHelper.createTitle(out, req, "testfile.txt");
-    Mockito.verify(out, Mockito.times(1)).print(Mockito.anyString());
-    
-    JspHelper.addTableHeader(out);
-    Mockito.verify(out, Mockito.times(1 + 2)).print(Mockito.anyString());                  
-     
-    JspHelper.addTableRow(out, new String[] {" row11", "row12 "});
-    Mockito.verify(out, Mockito.times(1 + 2 + 4)).print(Mockito.anyString());      
-    
-    JspHelper.addTableRow(out, new String[] {" row11", "row12 "}, 3);
-    Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4)).print(Mockito.anyString());
-      
-    JspHelper.addTableRow(out, new String[] {" row21", "row22"});
-    Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4 + 4)).print(Mockito.anyString());      
-      
-    JspHelper.addTableFooter(out);
-    Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4 + 4 + 1)).print(Mockito.anyString());
-    
-    assertFalse(Strings.isNullOrEmpty(buffer.toString()));               
-  }
-  
-  @Test
   public void testReadWriteReplicaState() {
     try {
       DataOutputBuffer out = new DataOutputBuffer();
@@ -622,21 +415,6 @@ public class TestJspHelper {
       fail("testReadWrite ex error ReplicaState");
     }
   }
-
-  @Test 
-  public void testAuthority(){
-    DatanodeID dnWithIp = new DatanodeID("127.0.0.1", "hostName", null,
-        50020, 50075, 50076, 50010);
-    assertNotNull(JspHelper.Url.authority("http", dnWithIp));
-
-    DatanodeID dnWithNullIp = new DatanodeID(null, "hostName", null,
-        50020, 50075, 50076, 50010);
-    assertNotNull(JspHelper.Url.authority("http", dnWithNullIp));
-
-    DatanodeID dnWithEmptyIp = new DatanodeID("", "hostName", null,
-        50020, 50075, 50076, 50010);
-    assertNotNull(JspHelper.Url.authority("http", dnWithEmptyIp));
-  }
  
   private static String clientAddr = "1.1.1.1";
   private static String chainedClientAddr = clientAddr+", 2.2.2.2";
@@ -675,7 +453,7 @@ public class TestJspHelper {
       when(req.getRemoteAddr()).thenReturn(proxyAddr);
       when(req.getHeader("X-Forwarded-For")).thenReturn(clientAddr);
       if (trusted) {
-        conf.set(ProxyUsers.CONF_HADOOP_PROXYSERVERS, proxyAddr);
+        conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, proxyAddr);
       }
     }
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
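
Both replaced settings above resolve to the familiar hadoop.proxyuser.* keys; the new accessor methods just build the key names instead of string concatenation. A sketch of the equivalent raw configuration, assuming the standard key layout (the user name is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class ProxyUserConf {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        String realUser = "someProxyUser"; // illustrative name
        // what getProxySuperuserGroupConfKey(realUser) is expected to resolve to
        conf.set("hadoop.proxyuser." + realUser + ".groups", "*");
        // what getProxySuperuserIpConfKey(realUser) is expected to resolve to
        conf.set("hadoop.proxyuser." + realUser + ".hosts", "*");
        System.out.println(conf.get("hadoop.proxyuser." + realUser + ".groups")); // *
      }
    }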

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java Tue Aug 19 23:49:39 2014
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -171,9 +172,6 @@ public abstract class BlockReportTestBas
    * Utility routine to send block reports to the NN, either in a single call
    * or reporting one storage per call.
    *
-   * @param dnR
-   * @param poolId
-   * @param reports
    * @throws IOException
    */
   protected abstract void sendBlockReports(DatanodeRegistration dnR, String poolId,
@@ -327,7 +325,7 @@ public abstract class BlockReportTestBas
   public void blockReport_03() throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
-    ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
+    writeFile(METHOD_NAME, FILE_SIZE, filePath);
 
     // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N0);
@@ -366,7 +364,7 @@ public abstract class BlockReportTestBas
     // Create a bogus new block which will not be present on the namenode.
     ExtendedBlock b = new ExtendedBlock(
         poolId, rand.nextLong(), 1024L, rand.nextLong());
-    dn.getFSDataset().createRbw(b);
+    dn.getFSDataset().createRbw(StorageType.DEFAULT, b);
 
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
@@ -413,6 +411,7 @@ public abstract class BlockReportTestBas
    * The second datanode is started in the cluster.
    * As soon as the replication process is completed test finds a block from
    * the second DN and sets its GS to be < of original one.
+   * This is markBlockAsCorrupt case 3, so we expect one pending deletion.
   * Block report is forced and the check for # of corrupted blocks is performed.
    * Another block is chosen and its length is set to a lesser than original.
    * A check for another corrupted block is performed after yet another
@@ -439,20 +438,20 @@ public abstract class BlockReportTestBas
     printStats();
 
     assertThat("Wrong number of corrupt blocks",
-               cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
     assertThat("Wrong number of PendingDeletion blocks",
-               cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
     assertThat("Wrong number of PendingReplication blocks",
                cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
 
-    reports = getBlockReports(dn, poolId, true, true);
+    reports = getBlockReports(dn, poolId, false, true);
     sendBlockReports(dnR, poolId, reports);
     printStats();
 
     assertThat("Wrong number of corrupt blocks",
-               cluster.getNamesystem().getCorruptReplicaBlocks(), is(2L));
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
     assertThat("Wrong number of PendingDeletion blocks",
-               cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
     assertThat("Wrong number of PendingReplication blocks",
                cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
 

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Tue Aug 19 23:49:39 2014
@@ -116,7 +116,8 @@ public class DataNodeTestUtils {  
   
   public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
     BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b);
-    bpScanner.verifyBlock(b);
+    bpScanner.verifyBlock(new ExtendedBlock(b.getBlockPoolId(),
+        new BlockPoolSliceScanner.BlockScanInfo(b.getLocalBlock())));
   }
 
   private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn,

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Tue Aug 19 23:49:39 2014
@@ -18,10 +18,12 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
+import java.io.FileDescriptor;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -743,14 +745,14 @@ public class SimulatedFSDataset implemen
   }
 
   @Override // FsDatasetSpi
-  public synchronized ReplicaInPipelineInterface createRbw(ExtendedBlock b) 
-  throws IOException {
-    return createTemporary(b);
+  public synchronized ReplicaInPipelineInterface createRbw(
+      StorageType storageType, ExtendedBlock b) throws IOException {
+    return createTemporary(storageType, b);
   }
 
   @Override // FsDatasetSpi
-  public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
-      throws IOException {
+  public synchronized ReplicaInPipelineInterface createTemporary(
+      StorageType storageType, ExtendedBlock b) throws IOException {
     if (isValidBlock(b)) {
           throw new ReplicaAlreadyExistsException("Block " + b + 
               " is valid, and cannot be written to.");
@@ -833,8 +835,8 @@ public class SimulatedFSDataset implemen
     
     /**
      * An input stream of size l with repeated bytes
-     * @param l
-     * @param iRepeatedData
+     * @param l size of the stream
+     * @param iRepeatedData byte that is repeated in the stream
      */
     SimulatedInputStream(long l, byte iRepeatedData) {
       length = l;
@@ -843,17 +845,14 @@ public class SimulatedFSDataset implemen
     
     /**
     * An input stream of the supplied data
-     * 
-     * @param iData
+     * @param iData data to construct the stream
      */
     SimulatedInputStream(byte[] iData) {
       data = iData;
       length = data.length;
-      
     }
     
     /**
-     * 
     * @return the length of the input stream
      */
     long getLength() {
@@ -1085,6 +1084,11 @@ public class SimulatedFSDataset implemen
   }
 
   @Override
+  public void addVolumes(Collection<StorageLocation> volumes) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public DatanodeStorage getStorage(final String storageUuid) {
     return storageUuid.equals(storage.getStorageUuid()) ?
         storage.dnStorage :
@@ -1115,5 +1119,11 @@ public class SimulatedFSDataset implemen
   public FsVolumeSpi getVolume(ExtendedBlock b) {
     throw new UnsupportedOperationException();
   }
+
+  @Override
+  public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block,
+      FileDescriptor fd, long offset, long nbytes, int flags) {
+    throw new UnsupportedOperationException();
+  }
 }
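
The two new overrides follow the pattern SimulatedFSDataset uses throughout: implement the FsDatasetSpi method so the class still compiles, but fail fast if a test ever exercises a feature the simulation does not model. A self-contained sketch of the pattern (the interface here is illustrative, not the real FsDatasetSpi):

    public class StubPattern {
      interface Dataset {
        void addVolumes(java.util.Collection<String> volumes);
      }

      static class SimulatedDataset implements Dataset {
        @Override
        public void addVolumes(java.util.Collection<String> volumes) {
          // The simulated dataset keeps no real volumes, so any caller is a test bug.
          throw new UnsupportedOperationException();
        }
      }

      public static void main(String[] args) {
        try {
          new SimulatedDataset().addVolumes(java.util.Collections.emptyList());
        } catch (UnsupportedOperationException expected) {
          System.out.println("addVolumes is intentionally unsupported");
        }
      }
    }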
 

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Tue Aug 19 23:49:39 2014
@@ -325,13 +325,14 @@ public class TestBPOfferService {
       }
     }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
     BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
+    List<BPServiceActor> actors = bpos.getBPServiceActors();
+    assertEquals(2, actors.size());
     bpos.start();
     try {
       waitForInitialization(bpos);
-      List<BPServiceActor> actors = bpos.getBPServiceActors();
-      assertEquals(1, actors.size());
-      BPServiceActor actor = actors.get(0);
-      waitForBlockReport(actor.getNameNodeProxy());
+      // Even if one of the actors fails to initialize, the other one will
+      // still finish the block report.
+      waitForBlockReport(mockNN1, mockNN2);
     } finally {
       bpos.stop();
     }
@@ -342,7 +343,14 @@ public class TestBPOfferService {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
-        return bpos.countNameNodes() == 1;
+        List<BPServiceActor> actors = bpos.getBPServiceActors();
+        int failedcount = 0;
+        for (BPServiceActor actor : actors) {
+          if (!actor.isAlive()) {
+            failedcount++;
+          }
+        }
+        return failedcount == 1;
       }
     }, 100, 10000);
   }
@@ -400,10 +408,36 @@ public class TestBPOfferService {
       }
     }, 500, 10000);
   }
-  
+
+  private void waitForBlockReport(
+      final DatanodeProtocolClientSideTranslatorPB mockNN1,
+      final DatanodeProtocolClientSideTranslatorPB mockNN2)
+          throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return get(mockNN1) || get(mockNN2);
+      }
+
+      private Boolean get(DatanodeProtocolClientSideTranslatorPB mockNN) {
+        try {
+          Mockito.verify(mockNN).blockReport(
+                  Mockito.<DatanodeRegistration>anyObject(),
+                  Mockito.eq(FAKE_BPID),
+                  Mockito.<StorageBlockReport[]>anyObject());
+          return true;
+        } catch (Throwable t) {
+          LOG.info("waiting on block report: " + t.getMessage());
+          return false;
+        }
+      }
+    }, 500, 10000);
+  }
+
   private ReceivedDeletedBlockInfo[] waitForBlockReceived(
-      ExtendedBlock fakeBlock,
-      DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
+      final ExtendedBlock fakeBlock,
+      final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
+    final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
     final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
       ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -411,9 +445,9 @@ public class TestBPOfferService {
       @Override
       public Boolean get() {
         try {
-          Mockito.verify(mockNN1).blockReceivedAndDeleted(
+          Mockito.verify(mockNN).blockReceivedAndDeleted(
             Mockito.<DatanodeRegistration>anyObject(),
-            Mockito.eq(FAKE_BPID),
+            Mockito.eq(fakeBlockPoolId),
             captor.capture());
           return true;
         } catch (Throwable t) {
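
waitForBlockReport and waitForBlockReceived above share the same poll-until-verified idiom: wrap a Mockito.verify call that throws on failure in a Supplier<Boolean> and let GenericTestUtils.waitFor retry it until it passes or times out. A JDK-only sketch of the idiom (all names are illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class WaitFor {
      static void waitFor(java.util.function.BooleanSupplier check,
                          long intervalMs, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!check.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("timed out waiting for condition");
          }
          Thread.sleep(intervalMs);
        }
      }

      public static void main(String[] args) throws Exception {
        AtomicBoolean reported = new AtomicBoolean(false);
        new Thread(() -> { // stands in for the BPServiceActor sending its report
          try { Thread.sleep(200); } catch (InterruptedException ignored) { }
          reported.set(true);
        }).start();
        waitFor(reported::get, 50, 10_000);
        System.out.println("block report observed");
      }
    }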

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Tue Aug 19 23:49:39 2014
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -531,7 +532,7 @@ public class TestBlockRecovery {
     if(LOG.isDebugEnabled()) {
       LOG.debug("Running " + GenericTestUtils.getMethodName());
     }
-    dn.data.createRbw(block);
+    dn.data.createRbw(StorageType.DEFAULT, block);
     try {
       dn.syncBlock(rBlock, initBlockRecords(dn));
       fail("Sync should fail");
@@ -554,7 +555,8 @@ public class TestBlockRecovery {
     if(LOG.isDebugEnabled()) {
       LOG.debug("Running " + GenericTestUtils.getMethodName());
     }
-    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
+    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
+        StorageType.DEFAULT, block);
     ReplicaOutputStreams streams = null;
     try {
       streams = replicaInfo.createStreams(true,
@@ -588,7 +590,6 @@ public class TestBlockRecovery {
     Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleSingleNN(8020, 50070))
         .numDataNodes(1).build();
     try {
       cluster.waitClusterUp();

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Tue Aug 19 23:49:39 2014
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -264,14 +265,17 @@ public class TestBlockReplacement {
     sock.setKeepAlive(true);
     // sendRequest
     DataOutputStream out = new DataOutputStream(sock.getOutputStream());
-    new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
+    new Sender(out).replaceBlock(block, StorageType.DEFAULT,
+        BlockTokenSecretManager.DUMMY_TOKEN,
         source.getDatanodeUuid(), sourceProxy);
     out.flush();
     // receiveResponse
     DataInputStream reply = new DataInputStream(sock.getInputStream());
 
-    BlockOpResponseProto proto =
-      BlockOpResponseProto.parseDelimitedFrom(reply);
+    BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
+    while (proto.getStatus() == Status.IN_PROGRESS) {
+      proto = BlockOpResponseProto.parseDelimitedFrom(reply);
+    }
     return proto.getStatus() == Status.SUCCESS;
   }
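
The new loop matters because a long-running replaceBlock may stream back IN_PROGRESS keep-alive responses before the terminal SUCCESS or error, so the caller must drain those rather than judge the first response. A self-contained sketch of the drain loop (the enum and iterator stand in for BlockOpResponseProto.parseDelimitedFrom on the reply stream):

    import java.util.Iterator;
    import java.util.List;

    public class DrainInProgress {
      enum Status { IN_PROGRESS, SUCCESS, ERROR }

      static Status readFinalStatus(Iterator<Status> replies) {
        Status status = replies.next();
        while (status == Status.IN_PROGRESS) { // keep-alive, not the real answer
          status = replies.next();
        }
        return status;
      }

      public static void main(String[] args) {
        List<Status> wire = List.of(Status.IN_PROGRESS, Status.IN_PROGRESS, Status.SUCCESS);
        System.out.println(readFinalStatus(wire.iterator())); // SUCCESS
      }
    }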
 

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Tue Aug 19 23:49:39 2014
@@ -19,8 +19,10 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -30,11 +32,14 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
@@ -189,7 +194,7 @@ public class TestDataNodeMultipleRegistr
   }
   
   @Test
-  public void testClusterIdMismatch() throws IOException {
+  public void testClusterIdMismatch() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
         .build();
@@ -203,6 +208,7 @@ public class TestDataNodeMultipleRegistr
       
       // add another namenode
       cluster.addNameNode(conf, 9938);
+      Thread.sleep(500); // let's wait for the registration to happen
       bposs = dn.getAllBpOs(); 
       LOG.info("dn bpos len (should be 3):" + bposs.length);
       Assert.assertEquals("should've registered with three namenodes", bposs.length,3);
@@ -212,16 +218,90 @@ public class TestDataNodeMultipleRegistr
       cluster.addNameNode(conf, 9948);
       NameNode nn4 = cluster.getNameNode(3);
       assertNotNull("cannot create nn4", nn4);
-      
+
+      Thread.sleep(500); // let's wait for the registration to happen
       bposs = dn.getAllBpOs(); 
       LOG.info("dn bpos len (still should be 3):" + bposs.length);
       Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
     } finally {
+        cluster.shutdown();
+    }
+  }
+
+  @Test(timeout = 20000)
+  public void testClusterIdMismatchAtStartupWithHA() throws Exception {
+    MiniDFSNNTopology top = new MiniDFSNNTopology()
+      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+        .addNN(new MiniDFSNNTopology.NNConf("nn0"))
+        .addNN(new MiniDFSNNTopology.NNConf("nn1")))
+      .addNameservice(new MiniDFSNNTopology.NSConf("ns2")
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid"))
+        .addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid")));
+
+    top.setFederation(true);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
+        .numDataNodes(0).build();
+    
+    try {
+      cluster.startDataNodes(conf, 1, true, null, null);
+      // let the initialization be complete
+      Thread.sleep(10000);
+      DataNode dn = cluster.getDataNodes().get(0);
+      assertTrue("Datanode should be running", dn.isDatanodeUp());
+      assertEquals("Only one BPOfferService should be running", 1,
+          dn.getAllBpOs().length);
+    } finally {
       cluster.shutdown();
     }
   }
 
   @Test
+  public void testDNWithInvalidStorageWithHA() throws Exception {
+    MiniDFSNNTopology top = new MiniDFSNNTopology()
+      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+        .addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1"))
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));
+
+    top.setFederation(true);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
+        .numDataNodes(0).build();
+    try {
+      cluster.startDataNodes(conf, 1, true, null, null);
+      // let the initialization be complete
+      Thread.sleep(10000);
+      DataNode dn = cluster.getDataNodes().get(0);
+      assertTrue("Datanode should be running", dn.isDatanodeUp());
+      assertEquals("BPOfferService should be running", 1,
+          dn.getAllBpOs().length);
+      DataNodeProperties dnProp = cluster.stopDataNode(0);
+
+      cluster.getNameNode(0).stop();
+      cluster.getNameNode(1).stop();
+      Configuration nn1 = cluster.getConfiguration(0);
+      Configuration nn2 = cluster.getConfiguration(1);
+      // setting up invalid cluster
+      StartupOption.FORMAT.setClusterId("cluster-2");
+      DFSTestUtil.formatNameNode(nn1);
+      MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),
+          FSNamesystem.getNamespaceDirs(nn2), nn2);
+      cluster.restartNameNode(0, false);
+      cluster.restartNameNode(1, false);
+      cluster.restartDataNode(dnProp);
+      
+      // let the initialization be complete
+      Thread.sleep(10000);
+      dn = cluster.getDataNodes().get(0);
+      assertFalse("Datanode should have shutdown as only service failed",
+          dn.isDatanodeUp());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  
+  @Test
   public void testMiniDFSClusterWithMultipleNN() throws IOException {
     Configuration conf = new HdfsConfiguration();
     // start Federated cluster and add a node.
@@ -231,7 +311,6 @@ public class TestDataNodeMultipleRegistr
     
     // add a node
     try {
-      Assert.assertNotNull(cluster);
       cluster.waitActive();
       Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
 

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java Tue Aug 19 23:49:39 2014
@@ -27,11 +27,14 @@ import static org.junit.Assert.assertTru
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -67,6 +70,7 @@ public class TestDataNodeRollingUpgrade 
 
   private void startCluster() throws IOException {
     conf = new HdfsConfiguration();
+    conf.setInt("dfs.blocksize", 1024*1024);
     cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -243,4 +247,48 @@ public class TestDataNodeRollingUpgrade 
       shutdownCluster();
     }
   }
+  
+  @Test (timeout=600000)
+  // Test that the DataNode's xceiver server keeps correct peer-xceiver pairs for sending OOB messages
+  public void testDatanodePeersXceiver() throws Exception {
+    try {
+      startCluster();
+
+      // Create files in DFS.
+      String testFile1 = "/TestDataNodeXceiver1.dat";
+      String testFile2 = "/TestDataNodeXceiver2.dat";
+      String testFile3 = "/TestDataNodeXceiver3.dat";
+
+      DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
+      DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
+      DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);
+
+      DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
+      DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
+      DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);
+
+      byte[] toWrite = new byte[1024*1024*8];
+      Random rb = new Random(1111);
+      rb.nextBytes(toWrite);
+      s1.write(toWrite, 0, 1024*1024*8);
+      s1.flush();
+      s2.write(toWrite, 0, 1024*1024*8);
+      s2.flush();
+      s3.write(toWrite, 0, 1024*1024*8);
+      s3.flush();       
+
+      assertTrue(dn.getXferServer().getNumPeers() == dn.getXferServer()
+          .getNumPeersXceiver());
+      s1.close();
+      s2.close();
+      s3.close();
+      assertTrue(dn.getXferServer().getNumPeers() == dn.getXferServer()
+          .getNumPeersXceiver());
+      client1.close();
+      client2.close();
+      client3.close();      
+    } finally {
+      shutdownCluster();
+    }
+  }
 }
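
The peer/xceiver assertions above check that the xceiver server's two bookkeeping maps stay the same size: one xceiver thread per open peer, both while the three streams are open and after they close. A plain-Java sketch of that invariant (the maps are stand-ins, not the real DataXceiverServer fields):

    import java.util.HashMap;
    import java.util.Map;

    public class PeerXceiverPairs {
      private final Map<String, String> peers = new HashMap<>();        // peer -> xceiver
      private final Map<String, String> peersXceiver = new HashMap<>(); // xceiver -> peer

      void open(String peer, String xceiver) {
        peers.put(peer, xceiver);
        peersXceiver.put(xceiver, peer);
      }

      void close(String peer) {
        peersXceiver.remove(peers.remove(peer));
      }

      boolean paired() {
        return peers.size() == peersXceiver.size();
      }

      public static void main(String[] args) {
        PeerXceiverPairs server = new PeerXceiverPairs();
        server.open("client1", "xceiver1");
        server.open("client2", "xceiver2");
        System.out.println(server.paired()); // true while streams are open
        server.close("client1");
        System.out.println(server.paired()); // true after a stream closes
      }
    }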

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Tue Aug 19 23:49:39 2014
@@ -25,6 +25,7 @@ import java.io.FilenameFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -46,9 +47,11 @@ import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -58,6 +61,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -307,7 +311,8 @@ public class TestDataNodeVolumeFailure {
       setConfiguration(conf).
       setRemotePeerFactory(new RemotePeerFactory() {
         @Override
-        public Peer newConnectedPeer(InetSocketAddress addr)
+        public Peer newConnectedPeer(InetSocketAddress addr,
+            Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
             throws IOException {
           Peer peer = null;
           Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
@@ -380,7 +385,7 @@ public class TestDataNodeVolumeFailure {
           continue;
         }
       
-        String [] res = metaFilesInDir(dir);
+        List<File> res = MiniDFSCluster.getAllBlockMetadataFiles(dir);
         if(res == null) {
           System.out.println("res is null for dir = " + dir + " i=" + i + " and j=" + j);
           continue;
@@ -388,7 +393,8 @@ public class TestDataNodeVolumeFailure {
         //System.out.println("for dn" + i + "." + j + ": " + dir + "=" + res.length+ " files");
       
         //int ii = 0;
-        for(String s: res) {
+        for(File f: res) {
+          String s = f.getName();
           // cut off "blk_-" at the beginning and ".meta" at the end
           assertNotNull("Block file name should not be null", s);
           String bid = s.substring(s.indexOf("_")+1, s.lastIndexOf("_"));
@@ -404,25 +410,9 @@ public class TestDataNodeVolumeFailure {
         //System.out.println("dir1="+dir.getPath() + "blocks=" + res.length);
         //System.out.println("dir2="+dir2.getPath() + "blocks=" + res2.length);
 
-        total += res.length;
+        total += res.size();
       }
     }
     return total;
   }
-
-  /*
-   * count how many files *.meta are in the dir
-   */
-  private String [] metaFilesInDir(File dir) {
-    String [] res = dir.list(
-        new FilenameFilter() {
-          @Override
-          public boolean accept(File dir, String name) {
-            return name.startsWith("blk_") &&
-            name.endsWith(Block.METADATA_EXTENSION);
-          }
-        }
-    );
-    return res;
-  }
 }
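For reference, the replaced metaFilesInDir() helper only listed a single
directory level, while MiniDFSCluster.getAllBlockMetadataFiles() walks the
block-pool directory tree, which nests finalized blocks in subdirectories.
A minimal sketch of such a recursive scan (hypothetical helper, not the
actual MiniDFSCluster code) could look like:

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    public class MetaFileScanner {
      // Recursively collect "blk_*.meta" files under dir. Returns null if
      // dir cannot be listed, mirroring the null check in the test above.
      public static List<File> findBlockMetadataFiles(File dir) {
        File[] entries = dir.listFiles();
        if (entries == null) {
          return null;
        }
        List<File> result = new ArrayList<File>();
        for (File entry : entries) {
          if (entry.isDirectory()) {
            List<File> sub = findBlockMetadataFiles(entry);
            if (sub != null) {
              result.addAll(sub);
            }
          } else if (entry.getName().startsWith("blk_")
              && entry.getName().endsWith(".meta")) {
            result.add(entry);
          }
        }
        return result;
      }
    }

On a name such as "blk_7162739548153522810_1020.meta", the substring
between the first and last underscore recovers the block id
"7162739548153522810", which is what the test's parsing loop extracts.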

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Tue Aug 19 23:49:39 2014
@@ -103,9 +103,10 @@ public class TestDeleteBlockPool {
       fs1.delete(new Path("/alpha"), true);
       
       // Wait till all blocks are deleted from the dn2 for bpid1.
-      while ((MiniDFSCluster.getFinalizedDir(dn2StorageDir1, 
-          bpid1).list().length != 0) || (MiniDFSCluster.getFinalizedDir(
-              dn2StorageDir2, bpid1).list().length != 0)) {
+      File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1);
+      File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir2, bpid1);
+      while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) ||
+          (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
         try {
           Thread.sleep(3000);
         } catch (Exception ignored) {
@@ -160,7 +161,8 @@ public class TestDeleteBlockPool {
       conf.set(DFSConfigKeys.DFS_NAMESERVICES,
           "namesServerId1,namesServerId2");
       cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
+            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
         .numDataNodes(1).build();
 
       cluster.waitActive();
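The new wait condition depends on DatanodeUtil.dirNoFilesRecursive(),
which treats a directory as empty only when no regular file exists
anywhere beneath it; a plain list().length check misses files tucked into
nested subdirectories. A sketch of that predicate, assuming it is a
straightforward recursive walk (the real DatanodeUtil may differ in how
it reports unreadable directories):

    import java.io.File;

    public class DirCheck {
      // True iff no regular file exists anywhere under dir; empty
      // subdirectories do not count as content.
      public static boolean dirNoFilesRecursive(File dir) {
        File[] entries = dir.listFiles();
        if (entries == null) {
          return true; // unreadable or missing: nothing to wait for
        }
        for (File entry : entries) {
          if (entry.isFile()) {
            return false;
          }
          if (entry.isDirectory() && !dirNoFilesRecursive(entry)) {
            return false;
          }
        }
        return true;
      }
    }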

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Tue Aug 19 23:49:39 2014
@@ -18,16 +18,13 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataOutputStream;
 import java.io.File;
+import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.Socket;
-import java.net.SocketException;
-import java.net.SocketTimeoutException;
-import java.nio.channels.ClosedChannelException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -47,6 +45,7 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -149,9 +148,9 @@ public class TestDiskError {
 
     DataChecksum checksum = DataChecksum.newDataChecksum(
         DataChecksum.Type.CRC32, 512);
-    new Sender(out).writeBlock(block.getBlock(),
+    new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
         BlockTokenSecretManager.DUMMY_TOKEN, "",
-        new DatanodeInfo[0], null,
+        new DatanodeInfo[0], new StorageType[0], null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
         checksum, CachingStrategy.newDefaultStrategy());
     out.flush();
@@ -201,15 +200,23 @@ public class TestDiskError {
     }
   }
   
+  /**
+   * Checks that {@link DataNode#checkDiskErrorAsync()} is actually invoked;
+   * before the refactoring, that method was never called.
+   * @throws IOException
+   * @throws InterruptedException
+   */
   @Test
-  public void testNetworkErrorsIgnored() {
-    DataNode dn = cluster.getDataNodes().iterator().next();
-    
-    assertTrue(dn.isNetworkRelatedException(new SocketException()));
-    assertTrue(dn.isNetworkRelatedException(new SocketTimeoutException()));
-    assertTrue(dn.isNetworkRelatedException(new ClosedChannelException()));
-    assertTrue(dn.isNetworkRelatedException(new Exception("Broken pipe foo bar")));
-    assertFalse(dn.isNetworkRelatedException(new Exception()));
-    assertFalse(dn.isNetworkRelatedException(new Exception("random problem")));
+  public void testcheckDiskError() throws IOException, InterruptedException {
+    if (cluster.getDataNodes().isEmpty()) {
+      cluster.startDataNodes(conf, 1, true, null, null);
+      cluster.waitActive();
+    }
+    DataNode dataNode = cluster.getDataNodes().get(0);
+    long slackTime = dataNode.checkDiskErrorInterval/2;
+    // Trigger an asynchronous disk error check and give it one full
+    // interval to complete.
+    dataNode.checkDiskErrorAsync();
+    Thread.sleep(dataNode.checkDiskErrorInterval);
+    long lastDiskErrorCheck = dataNode.getLastDiskErrorCheck();
+    assertTrue("Disk error check was not performed within "
+        + dataNode.checkDiskErrorInterval + " ms",
+        (Time.monotonicNow() - lastDiskErrorCheck)
+            < (dataNode.checkDiskErrorInterval + slackTime));
   }
 }
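The timing logic in the test above relies on checkDiskErrorAsync()
recording a timestamp once the background check actually completes. A
minimal sketch of that pattern (hypothetical class and field names, using
a monotonic clock as the DataNode does):

    import java.util.concurrent.atomic.AtomicLong;

    public class AsyncDiskChecker {
      private final AtomicLong lastDiskErrorCheck = new AtomicLong(0);

      // Runs the disk scan on a daemon thread; the timestamp advances
      // only after the scan has finished, which is what the test's
      // interval-plus-slack bound verifies.
      public void checkDiskErrorAsync() {
        Thread t = new Thread(new Runnable() {
          @Override
          public void run() {
            // ... scan volumes for I/O errors here ...
            lastDiskErrorCheck.set(monotonicNow());
          }
        }, "disk-checker");
        t.setDaemon(true);
        t.start();
      }

      public long getLastDiskErrorCheck() {
        return lastDiskErrorCheck.get();
      }

      private static long monotonicNow() {
        return System.nanoTime() / 1000000L; // milliseconds
      }
    }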

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Tue Aug 19 23:49:39 2014
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.da
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -114,7 +114,6 @@ public class TestFsDatasetCache {
 
   @Before
   public void setUp() throws Exception {
-    assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
@@ -143,6 +142,9 @@ public class TestFsDatasetCache {
 
   @After
   public void tearDown() throws Exception {
+    // Verify that each test uncached whatever it cached.  This cleanup is
+    // required so that file descriptors are not leaked across tests.
+    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
     if (fs != null) {
       fs.close();
     }
@@ -205,9 +207,16 @@ public class TestFsDatasetCache {
       String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
       Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
       ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
-      FileChannel blockChannel =
-          ((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
-      sizes[i] = blockChannel.size();
+      FileInputStream blockInputStream = null;
+      FileChannel blockChannel = null;
+      try {
+        blockInputStream =
+          (FileInputStream)fsd.getBlockInputStream(extBlock, 0);
+        blockChannel = blockInputStream.getChannel();
+        sizes[i] = blockChannel.size();
+      } finally {
+        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+      }
     }
     return sizes;
   }
@@ -571,5 +580,7 @@ public class TestFsDatasetCache {
         return true;
       }
     }, 1000, 30000);
+
+    dfs.removeCacheDirective(shortCacheDirectiveId);
   }
 }
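The getBlockSizes() change above is the standard close-in-finally idiom:
org.apache.hadoop.io.IOUtils.cleanup() closes every non-null Closeable it
is handed and logs (rather than propagates) any close failure, so a size
query cannot leak the block file's descriptor even when size() throws.
The same idiom in isolation:

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.nio.channels.FileChannel;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.io.IOUtils;

    public class ChannelSizeExample {
      private static final Log LOG =
          LogFactory.getLog(ChannelSizeExample.class);

      // Returns the file length, guaranteeing both the stream and its
      // channel are closed on every path out of the method.
      public static long sizeOf(String path) throws IOException {
        FileInputStream in = null;
        FileChannel channel = null;
        try {
          in = new FileInputStream(path);
          channel = in.getChannel();
          return channel.size();
        } finally {
          IOUtils.cleanup(LOG, channel, in);
        }
      }
    }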

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java Tue Aug 19 23:49:39 2014
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.da
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.*;
 import org.junit.Test;
 
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.fail;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
 

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java Tue Aug 19 23:49:39 2014
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static junit.framework.Assert.assertFalse;
+import static org.junit.Assert.assertFalse;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.atLeastOnce;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Tue Aug 19 23:49:39 2014
@@ -29,6 +29,7 @@ import java.io.OutputStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -65,7 +66,8 @@ public class TestSimulatedFSDataset {
       ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
       // we pass expected len as zero, - fsdataset should use the sizeof actual
       // data written
-      ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
+      ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
+          StorageType.DEFAULT, b);
       ReplicaOutputStreams out = bInfo.createStreams(true,
           DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
       try {

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java Tue Aug 19 23:49:39 2014
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.DFS
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -251,10 +252,10 @@ public class TestAvailableSpaceVolumeCho
    */
   public void doRandomizedTest(float preferencePercent, int lowSpaceVolumes,
       int highSpaceVolumes) throws Exception {
-    @SuppressWarnings("unchecked")
-    final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy = 
-        ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
-    
+    Random random = new Random(123L);
+    final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
+        new AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi>(random);
+
     List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
     
     // Volumes with 1MB free space
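Constructing the policy with new Random(123L) instead of letting
ReflectionUtils build it makes the randomized test deterministic: the same
seed yields the same choice sequence on every run, so a failure can be
replayed exactly. A minimal illustration of the injection pattern
(hypothetical chooser class, not the HDFS policy itself):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Random;

    public class WeightedChooser<T> {
      private final Random random;

      // Production callers pass new Random(); tests pass a fixed seed.
      public WeightedChooser(Random random) {
        this.random = random;
      }

      public T choose(List<T> candidates) {
        return candidates.get(random.nextInt(candidates.size()));
      }

      public static void main(String[] args) {
        WeightedChooser<String> c =
            new WeightedChooser<String>(new Random(123L));
        // Prints the same volume on every run because the seed is fixed.
        System.out.println(c.choose(Arrays.asList("v1", "v2", "v3")));
      }
    }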

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java Tue Aug 19 23:49:39 2014
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
@@ -147,7 +148,7 @@ public class TestWriteToReplica {
     };
     
     ReplicaMap replicasMap = dataSet.volumeMap;
-    FsVolumeImpl vol = dataSet.volumes.getNextVolume(0);
+    FsVolumeImpl vol = dataSet.volumes.getNextVolume(StorageType.DEFAULT, 0);
     ReplicaInfo replicaInfo = new FinalizedReplica(
         blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
     replicasMap.add(bpid, replicaInfo);
@@ -357,7 +358,7 @@ public class TestWriteToReplica {
     }
  
     try {
-      dataSet.createRbw(blocks[FINALIZED]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[FINALIZED]);
       Assert.fail("Should not have created a replica that's already " +
       		"finalized " + blocks[FINALIZED]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -375,7 +376,7 @@ public class TestWriteToReplica {
     }
 
     try {
-      dataSet.createRbw(blocks[TEMPORARY]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[TEMPORARY]);
       Assert.fail("Should not have created a replica that had created as " +
       		"temporary " + blocks[TEMPORARY]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -385,7 +386,7 @@ public class TestWriteToReplica {
         0L, blocks[RBW].getNumBytes());  // expect to be successful
     
     try {
-      dataSet.createRbw(blocks[RBW]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[RBW]);
       Assert.fail("Should not have created a replica that had created as RBW " +
           blocks[RBW]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -401,7 +402,7 @@ public class TestWriteToReplica {
     }
 
     try {
-      dataSet.createRbw(blocks[RWR]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[RWR]);
       Assert.fail("Should not have created a replica that was waiting to be " +
       		"recovered " + blocks[RWR]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -417,7 +418,7 @@ public class TestWriteToReplica {
     }
 
     try {
-      dataSet.createRbw(blocks[RUR]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[RUR]);
       Assert.fail("Should not have created a replica that was under recovery " +
           blocks[RUR]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -434,45 +435,45 @@ public class TestWriteToReplica {
           e.getMessage().contains(ReplicaNotFoundException.NON_EXISTENT_REPLICA));
     }
     
-    dataSet.createRbw(blocks[NON_EXISTENT]);
+    dataSet.createRbw(StorageType.DEFAULT, blocks[NON_EXISTENT]);
   }
   
   private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException {
     try {
-      dataSet.createTemporary(blocks[FINALIZED]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[FINALIZED]);
       Assert.fail("Should not have created a temporary replica that was " +
       		"finalized " + blocks[FINALIZED]);
     } catch (ReplicaAlreadyExistsException e) {
     }
  
     try {
-      dataSet.createTemporary(blocks[TEMPORARY]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[TEMPORARY]);
       Assert.fail("Should not have created a replica that had created as" +
       		"temporary " + blocks[TEMPORARY]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
     try {
-      dataSet.createTemporary(blocks[RBW]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[RBW]);
       Assert.fail("Should not have created a replica that had created as RBW " +
           blocks[RBW]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
     try {
-      dataSet.createTemporary(blocks[RWR]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[RWR]);
       Assert.fail("Should not have created a replica that was waiting to be " +
       		"recovered " + blocks[RWR]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
     try {
-      dataSet.createTemporary(blocks[RUR]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[RUR]);
       Assert.fail("Should not have created a replica that was under recovery " +
           blocks[RUR]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
-    dataSet.createTemporary(blocks[NON_EXISTENT]);
+    dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
   }
 }
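Every createRbw()/createTemporary() call site above gains a leading
StorageType argument, with StorageType.DEFAULT preserving the old
behavior. Threading a default constant through an API this way widens the
signature without changing semantics for existing callers; a small sketch
of the shape (hypothetical dataset interface, not the FsDatasetImpl API):

    public class DatasetApiSketch {
      enum StorageType {
        DISK, SSD;
        static final StorageType DEFAULT = DISK;
      }

      interface Dataset {
        // New parameter first, matching the HDFS change above.
        void createRbw(StorageType type, long blockId);
      }

      static void legacyCaller(Dataset ds, long blockId) {
        // Old call sites are updated mechanically to DEFAULT, so the
        // replica lands on the same kind of volume as before.
        ds.createRbw(StorageType.DEFAULT, blockId);
      }
    }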

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java Tue Aug 19 23:49:39 2014
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.permission.A
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -150,6 +151,9 @@ public final class AclTestHelpers {
    */
   public static void assertPermission(FileSystem fs, Path pathToCheck,
       short perm) throws IOException {
-    assertEquals(perm, fs.getFileStatus(pathToCheck).getPermission().toShort());
+    short filteredPerm = (short)(perm & 01777);
+    FsPermission fsPermission = fs.getFileStatus(pathToCheck).getPermission();
+    assertEquals(filteredPerm, fsPermission.toShort());
+    assertEquals(((perm & (1 << 12)) != 0), fsPermission.getAclBit());
   }
 }
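The updated assertion splits the expected permission short into two
pieces: the POSIX bits under mask 01777 (the nine rwx bits plus the
sticky bit) and the ACL flag carried in bit 12. A worked example of the
arithmetic, assuming an expected value of 010755 (0755 with the ACL bit
set):

    public class AclBitExample {
      public static void main(String[] args) {
        short perm = (short) 010755;                // ACL bit + rwxr-xr-x
        short posixBits = (short) (perm & 01777);   // 0755
        boolean aclBit = (perm & (1 << 12)) != 0;   // true
        System.out.printf("posix=%o aclBit=%b%n", posixBits, aclBit);
      }
    }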

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Tue Aug 19 23:49:39 2014
@@ -83,7 +83,7 @@ public class CreateEditsLog {
 
       final INodeFile inode = new INodeFile(inodeId.nextValue(), null,
           p, 0L, 0L, blocks, replication, blockSize);
-      inode.toUnderConstruction("", "", null);
+      inode.toUnderConstruction("", "");
 
      // Append path to filename with information about blockIDs 
       String path = "_" + iF + "_B" + blocks[0].getBlockId() + 
@@ -98,7 +98,7 @@ public class CreateEditsLog {
       }
       INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
           p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
-      fileUc.toUnderConstruction("", "", null);
+      fileUc.toUnderConstruction("", "");
       editLog.logOpenFile(filePath, fileUc, false);
       editLog.logCloseFile(filePath, inode);
 
@@ -131,14 +131,10 @@ public class CreateEditsLog {
     printUsageExit();
   }
   /**
-   * @param args
+   * @param args command-line arguments
    * @throws IOException 
    */
-  public static void main(String[] args) 
-      throws IOException {
-
-
-
+  public static void main(String[] args) throws IOException {
     long startingBlockId = 1;
     int numFiles = 0;
     short replication = 1;