Posted to hdfs-commits@hadoop.apache.org by um...@apache.org on 2014/05/12 14:44:05 UTC

svn commit: r1593948 [4/4] - in /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-nfs/ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/org/apache/h...

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Mon May 12 12:43:59 2014
@@ -17,37 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.common;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-
-import javax.servlet.ServletContext;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.jsp.JspWriter;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.DataInputBuffer;
@@ -63,14 +37,15 @@ import org.apache.hadoop.security.token.
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-import org.xml.sax.InputSource;
-import org.xml.sax.SAXException;
 
-import com.google.common.base.Strings;
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 
 public class TestJspHelper {
@@ -158,25 +133,6 @@ public class TestJspHelper {
         .next();
     Assert.assertEquals(expected, tokenInUgi.getService().toString());
   }
-  
-  
-  @Test
-  public void testDelegationTokenUrlParam() {
-    conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-    UserGroupInformation.setConfiguration(conf);
-    String tokenString = "xyzabc";
-    String delegationTokenParam = JspHelper
-        .getDelegationTokenUrlParam(tokenString);
-    //Security is enabled
-    Assert.assertEquals(JspHelper.SET_DELEGATION + "xyzabc",
-        delegationTokenParam);
-    conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "simple");
-    UserGroupInformation.setConfiguration(conf);
-    delegationTokenParam = JspHelper
-        .getDelegationTokenUrlParam(tokenString);
-    //Empty string must be returned because security is disabled.
-    Assert.assertEquals("", delegationTokenParam);
-  }
 
   @Test
   public void testGetUgiFromToken() throws IOException {
@@ -328,8 +284,8 @@ public class TestJspHelper {
     String user = "TheNurse";
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     
-    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER+realUser+".groups", "*");
-    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER+realUser+".hosts", "*");
+    conf.set(ProxyUsers.getProxySuperuserGroupConfKey(realUser), "*");
+    conf.set(ProxyUsers.getProxySuperuserIpConfKey(realUser), "*");
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     UserGroupInformation.setConfiguration(conf);
     UserGroupInformation ugi;
@@ -403,32 +359,6 @@ public class TestJspHelper {
     }
   }
 
-  @Test
-  public void testPrintGotoFormWritesValidXML() throws IOException,
-         ParserConfigurationException, SAXException {
-    JspWriter mockJspWriter = mock(JspWriter.class);
-    ArgumentCaptor<String> arg = ArgumentCaptor.forClass(String.class);
-    doAnswer(new Answer<Object>() {
-      @Override
-      public Object answer(InvocationOnMock invok) {
-        Object[] args = invok.getArguments();
-        jspWriterOutput += (String) args[0];
-        return null;
-      }
-    }).when(mockJspWriter).print(arg.capture());
-
-    jspWriterOutput = "";
-
-    JspHelper.printGotoForm(mockJspWriter, 424242, "a token string",
-            "foobar/file", "0.0.0.0");
-
-    DocumentBuilder parser =
-        DocumentBuilderFactory.newInstance().newDocumentBuilder();
-    InputSource is = new InputSource();
-    is.setCharacterStream(new StringReader(jspWriterOutput));
-    parser.parse(is);
-  }
-
   private HttpServletRequest getMockRequest(String remoteUser, String user, String doAs) {
     HttpServletRequest request = mock(HttpServletRequest.class);
     when(request.getParameter(UserParam.NAME)).thenReturn(user);
@@ -464,146 +394,6 @@ public class TestJspHelper {
   }
 
   @Test
-  public void testSortNodeByFields() throws Exception {
-    DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "datanode1",
-        1234, 2345, 3456, 4567);
-    DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "datanode2",
-        1235, 2346, 3457, 4568);
-
-    // Setup DatanodeDescriptors with one storage each.
-    DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1");
-    DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2");
-
-    // Update the DatanodeDescriptors with their attached storages.
-    BlockManagerTestUtil.updateStorage(dnDesc1, new DatanodeStorage("dnStorage1"));
-    BlockManagerTestUtil.updateStorage(dnDesc2, new DatanodeStorage("dnStorage2"));
-
-    DatanodeStorage dns1 = new DatanodeStorage("dnStorage1");
-    DatanodeStorage dns2 = new DatanodeStorage("dnStorage2");
-
-    StorageReport[] report1 = new StorageReport[] {
-        new StorageReport(dns1, false, 1024, 100, 924, 100)
-    };
-    StorageReport[] report2 = new StorageReport[] {
-        new StorageReport(dns2, false, 2500, 200, 1848, 200)
-    };
-    dnDesc1.updateHeartbeat(report1, 5l, 3l, 10, 2);
-    dnDesc2.updateHeartbeat(report2, 10l, 2l, 20, 1);
-
-    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    live.add(dnDesc1);
-    live.add(dnDesc2);
-
-    // Test sorting by failed volumes
-    JspHelper.sortNodeList(live, "volfails", "ASC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-    JspHelper.sortNodeList(live, "volfails", "DSC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-
-    // Test sorting by Blockpool used
-    JspHelper.sortNodeList(live, "bpused", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    JspHelper.sortNodeList(live, "bpused", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-
-    // Test sorting by Percentage Blockpool used
-    JspHelper.sortNodeList(live, "pcbpused", "ASC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-    JspHelper.sortNodeList(live, "pcbpused", "DSC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    //unexisted field comparition is d1.getHostName().compareTo(d2.getHostName());    
-    JspHelper.sortNodeList(live, "unexists", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "unexists", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));  
-    
-    // test sorting by capacity
-    JspHelper.sortNodeList(live, "capacity", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "capacity", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-
-    // test sorting by used
-    JspHelper.sortNodeList(live, "used", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "used", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1)); 
-    
-    // test sorting by nondfsused
-    JspHelper.sortNodeList(live, "nondfsused", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "nondfsused", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-   
-    // test sorting by remaining
-    JspHelper.sortNodeList(live, "remaining", "ASC");
-    Assert.assertEquals(dnDesc1, live.get(0));
-    Assert.assertEquals(dnDesc2, live.get(1));
-    
-    JspHelper.sortNodeList(live, "remaining", "DSC");
-    Assert.assertEquals(dnDesc2, live.get(0));
-    Assert.assertEquals(dnDesc1, live.get(1));
-  }
-  
-  @Test
-  public void testPrintMethods() throws IOException {
-    JspWriter out = mock(JspWriter.class);      
-    HttpServletRequest req = mock(HttpServletRequest.class);
-    
-    final StringBuffer buffer = new StringBuffer();
-    
-    ArgumentCaptor<String> arg = ArgumentCaptor.forClass(String.class);
-    doAnswer(new Answer<Object>() {      
-      @Override
-      public Object answer(InvocationOnMock invok) {
-        Object[] args = invok.getArguments();
-        buffer.append((String)args[0]);
-        return null;
-      }
-    }).when(out).print(arg.capture());
-    
-    
-    JspHelper.createTitle(out, req, "testfile.txt");
-    Mockito.verify(out, Mockito.times(1)).print(Mockito.anyString());
-    
-    JspHelper.addTableHeader(out);
-    Mockito.verify(out, Mockito.times(1 + 2)).print(Mockito.anyString());                  
-     
-    JspHelper.addTableRow(out, new String[] {" row11", "row12 "});
-    Mockito.verify(out, Mockito.times(1 + 2 + 4)).print(Mockito.anyString());      
-    
-    JspHelper.addTableRow(out, new String[] {" row11", "row12 "}, 3);
-    Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4)).print(Mockito.anyString());
-      
-    JspHelper.addTableRow(out, new String[] {" row21", "row22"});
-    Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4 + 4)).print(Mockito.anyString());      
-      
-    JspHelper.addTableFooter(out);
-    Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4 + 4 + 1)).print(Mockito.anyString());
-    
-    assertFalse(Strings.isNullOrEmpty(buffer.toString()));               
-  }
-  
-  @Test
   public void testReadWriteReplicaState() {
     try {
       DataOutputBuffer out = new DataOutputBuffer();
@@ -622,21 +412,6 @@ public class TestJspHelper {
       fail("testReadWrite ex error ReplicaState");
     }
   }
-
-  @Test 
-  public void testAuthority(){
-    DatanodeID dnWithIp = new DatanodeID("127.0.0.1", "hostName", null,
-        50020, 50075, 50076, 50010);
-    assertNotNull(JspHelper.Url.authority("http", dnWithIp));
-
-    DatanodeID dnWithNullIp = new DatanodeID(null, "hostName", null,
-        50020, 50075, 50076, 50010);
-    assertNotNull(JspHelper.Url.authority("http", dnWithNullIp));
-
-    DatanodeID dnWithEmptyIp = new DatanodeID("", "hostName", null,
-        50020, 50075, 50076, 50010);
-    assertNotNull(JspHelper.Url.authority("http", dnWithEmptyIp));
-  }
  
   private static String clientAddr = "1.1.1.1";
   private static String chainedClientAddr = clientAddr+", 2.2.2.2";
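
A note on the ProxyUsers hunk above: the patch swaps manual key concatenation for helper methods. Assuming those helpers expand to Hadoop's standard proxy-user configuration keys (the literal strings below are that assumption, not something this patch shows), the change is equivalent to:

    // Assumed expansion of ProxyUsers.getProxySuperuserGroupConfKey() and
    // getProxySuperuserIpConfKey(); the key layout follows Hadoop's documented
    // hadoop.proxyuser.<user>.{groups,hosts} convention.
    conf.set("hadoop.proxyuser." + realUser + ".groups", "*");
    conf.set("hadoop.proxyuser." + realUser + ".hosts", "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);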

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Mon May 12 12:43:59 2014
@@ -329,7 +329,9 @@ public class TestBPOfferService {
     try {
       waitForInitialization(bpos);
       List<BPServiceActor> actors = bpos.getBPServiceActors();
-      assertEquals(1, actors.size());
+      // Even if one actor's initialization fails, the other keeps
+      // running until both have failed.
+      assertEquals(2, actors.size());
       BPServiceActor actor = actors.get(0);
       waitForBlockReport(actor.getNameNodeProxy());
     } finally {
@@ -342,7 +344,14 @@ public class TestBPOfferService {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
-        return bpos.countNameNodes() == 1;
+        List<BPServiceActor> actors = bpos.getBPServiceActors();
+        int failedcount = 0;
+        for (BPServiceActor actor : actors) {
+          if (!actor.isAlive()) {
+            failedcount++;
+          }
+        }
+        return failedcount == 1;
       }
     }, 100, 10000);
   }
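
The Supplier in the last hunk polls until exactly one of the two BPServiceActors has died, matching the new expectation that both actors run until both fail. The same check, extracted into a helper purely as a readability sketch (not part of the patch; it uses the same BPOfferService/BPServiceActor accessors as the hunk above):

    // Count the BPServiceActors that are no longer alive (sketch).
    private static int countDeadActors(BPOfferService bpos) {
      int dead = 0;
      for (BPServiceActor actor : bpos.getBPServiceActors()) {
        if (!actor.isAlive()) {
          dead++;
        }
      }
      return dead;
    }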

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Mon May 12 12:43:59 2014
@@ -19,8 +19,10 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -30,11 +32,14 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
@@ -189,7 +194,7 @@ public class TestDataNodeMultipleRegistr
   }
   
   @Test
-  public void testClusterIdMismatch() throws IOException {
+  public void testClusterIdMismatch() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
         .build();
@@ -203,6 +208,7 @@ public class TestDataNodeMultipleRegistr
       
       // add another namenode
       cluster.addNameNode(conf, 9938);
+      Thread.sleep(500); // wait for the registration to happen
       bposs = dn.getAllBpOs(); 
       LOG.info("dn bpos len (should be 3):" + bposs.length);
       Assert.assertEquals("should've registered with three namenodes", bposs.length,3);
@@ -212,16 +218,90 @@ public class TestDataNodeMultipleRegistr
       cluster.addNameNode(conf, 9948);
       NameNode nn4 = cluster.getNameNode(3);
       assertNotNull("cannot create nn4", nn4);
-      
+
+      Thread.sleep(500); // wait for the registration to happen
       bposs = dn.getAllBpOs(); 
       LOG.info("dn bpos len (still should be 3):" + bposs.length);
       Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
     } finally {
+      cluster.shutdown();
+    }
+  }
+
+  @Test(timeout = 20000)
+  public void testClusterIdMismatchAtStartupWithHA() throws Exception {
+    MiniDFSNNTopology top = new MiniDFSNNTopology()
+      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+        .addNN(new MiniDFSNNTopology.NNConf("nn0"))
+        .addNN(new MiniDFSNNTopology.NNConf("nn1")))
+      .addNameservice(new MiniDFSNNTopology.NSConf("ns2")
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid"))
+        .addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid")));
+
+    top.setFederation(true);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
+        .numDataNodes(0).build();
+    
+    try {
+      cluster.startDataNodes(conf, 1, true, null, null);
+      // wait for initialization to complete
+      Thread.sleep(10000);
+      DataNode dn = cluster.getDataNodes().get(0);
+      assertTrue("Datanode should be running", dn.isDatanodeUp());
+      assertEquals("Only one BPOfferService should be running", 1,
+          dn.getAllBpOs().length);
+    } finally {
       cluster.shutdown();
     }
   }
 
   @Test
+  public void testDNWithInvalidStorageWithHA() throws Exception {
+    MiniDFSNNTopology top = new MiniDFSNNTopology()
+      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+        .addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1"))
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));
+
+    top.setFederation(true);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
+        .numDataNodes(0).build();
+    try {
+      cluster.startDataNodes(conf, 1, true, null, null);
+      // wait for initialization to complete
+      Thread.sleep(10000);
+      DataNode dn = cluster.getDataNodes().get(0);
+      assertTrue("Datanode should be running", dn.isDatanodeUp());
+      assertEquals("BPOfferService should be running", 1,
+          dn.getAllBpOs().length);
+      DataNodeProperties dnProp = cluster.stopDataNode(0);
+
+      cluster.getNameNode(0).stop();
+      cluster.getNameNode(1).stop();
+      Configuration nn1 = cluster.getConfiguration(0);
+      Configuration nn2 = cluster.getConfiguration(1);
+      // set up a mismatched cluster ID to invalidate the DN's storage
+      StartupOption.FORMAT.setClusterId("cluster-2");
+      DFSTestUtil.formatNameNode(nn1);
+      MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),
+          FSNamesystem.getNamespaceDirs(nn2), nn2);
+      cluster.restartNameNode(0, false);
+      cluster.restartNameNode(1, false);
+      cluster.restartDataNode(dnProp);
+      
+      // wait for initialization to complete
+      Thread.sleep(10000);
+      dn = cluster.getDataNodes().get(0);
+      assertFalse("Datanode should have shutdown as only service failed",
+          dn.isDatanodeUp());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+
+  @Test
   public void testMiniDFSClusterWithMultipleNN() throws IOException {
     Configuration conf = new HdfsConfiguration();
     // start Federated cluster and add a node.
@@ -231,7 +311,6 @@ public class TestDataNodeMultipleRegistr
     
     // add a node
     try {
-      Assert.assertNotNull(cluster);
       cluster.waitActive();
       Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
 

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java Mon May 12 12:43:59 2014
@@ -1306,7 +1306,7 @@ public abstract class FSAclBaseTest {
    */
   private static void assertAclFeature(Path pathToCheck,
       boolean expectAclFeature) throws IOException {
-    INode inode = cluster.getNamesystem().getFSDirectory().getRoot()
+    INode inode = cluster.getNamesystem().getFSDirectory()
       .getNode(pathToCheck.toUri().getPath(), false);
     assertNotNull(inode);
     AclFeature aclFeature = inode.getAclFeature();

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java Mon May 12 12:43:59 2014
@@ -91,6 +91,9 @@ public class TestAuditLogs {
       "perm=.*?");
   static final Pattern successPattern = Pattern.compile(
       ".*allowed=true.*");
+  static final Pattern webOpenPattern = Pattern.compile(
+      ".*cmd=open.*proto=webhdfs.*");
+
   static final String username = "bob";
   static final String[] groups = { "group1" };
   static final String fileName = "/srcdat";
@@ -240,6 +243,22 @@ public class TestAuditLogs {
     verifyAuditLogsRepeat(false, 2);
   }
 
+  /** Test that an open via webhdfs puts a proper entry in the audit log. */
+  @Test
+  public void testAuditWebHdfsOpen() throws Exception {
+    final Path file = new Path(fnames[0]);
+
+    fs.setPermission(file, new FsPermission((short)0644));
+    fs.setOwner(file, "root", null);
+
+    setupAuditLogs();
+
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
+    webfs.open(file);
+
+    verifyAuditLogsCheckPattern(true, 3, webOpenPattern);
+  }
+
   /** Sets up log4j logger for auditlogs */
   private void setupAuditLogs() throws IOException {
     Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
@@ -303,4 +322,38 @@ public class TestAuditLogs {
       reader.close();
     }
   }
+
+  // Ensure the audit log has exactly ndupe entries, at least one matching the given pattern
+  private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Pattern pattern)
+      throws IOException {
+    // Turn off the logs
+    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    logger.setLevel(Level.OFF);
+
+    // Close the appenders and force all logs to be flushed
+    Enumeration<?> appenders = logger.getAllAppenders();
+    while (appenders.hasMoreElements()) {
+      Appender appender = (Appender)appenders.nextElement();
+      appender.close();
+    }
+
+    BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
+    String line = null;
+    boolean ret = true;
+    boolean patternMatches = false;
+
+    try {
+      for (int i = 0; i < ndupe; i++) {
+        line = reader.readLine();
+        assertNotNull(line);
+        patternMatches |= pattern.matcher(line).matches();
+        ret &= successPattern.matcher(line).matches();
+      }
+      assertNull("Unexpected event in audit log", reader.readLine());
+      assertTrue("Expected audit event not found in audit log", patternMatches);
+      assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
+    } finally {
+      reader.close();
+    }
+  }
 }
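
For reference, webOpenPattern is intended to match audit entries of roughly the shape below; the sample line is illustrative, with invented field values, since the exact format belongs to the audit logger:

    // Illustrative audit line (values invented) and the check applied to it.
    String sample = "allowed=true\tugi=bob (auth:SIMPLE)\tip=/127.0.0.1\t"
        + "cmd=open\tsrc=/srcdat\tdst=null\tperm=null\tproto=webhdfs";
    assertTrue(webOpenPattern.matcher(sample).matches());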

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Mon May 12 12:43:59 2014
@@ -21,12 +21,15 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
 
+import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -35,12 +38,15 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -167,7 +173,51 @@ public class TestDecommissioningStatus {
     assertEquals(decommNode.decommissioningStatus
         .getUnderReplicatedInOpenFiles(), expectedUnderRepInOpenFiles);
   }
-  
+
+  private void checkDFSAdminDecommissionStatus(
+      List<DatanodeDescriptor> expectedDecomm, DistributedFileSystem dfs,
+      DFSAdmin admin) throws IOException {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream ps = new PrintStream(baos);
+    PrintStream oldOut = System.out;
+    System.setOut(ps);
+    try {
+      // Parse the DFSAdmin report output just to check the count
+      admin.report(new String[] {"-decommissioning"}, 0);
+      String[] lines = baos.toString().split("\n");
+      Integer num = null;
+      int count = 0;
+      for (String line: lines) {
+        if (line.startsWith("Decommissioning datanodes")) {
+          // Pull out the "(num)" and parse it into an int
+          String temp = line.split(" ")[2];
+          num =
+              Integer.parseInt(temp.substring(1, temp.length() - 2));
+        }
+        if (line.contains("Decommission in progress")) {
+          count++;
+        }
+      }
+      assertTrue("No decommissioning output", num != null);
+      assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
+          num.intValue());
+      assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
+          count);
+
+      // Check Java API for correct contents
+      List<DatanodeInfo> decomming =
+          new ArrayList<DatanodeInfo>(Arrays.asList(dfs
+              .getDataNodeStats(DatanodeReportType.DECOMMISSIONING)));
+      assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
+          decomming.size());
+      for (DatanodeID id : expectedDecomm) {
+        assertTrue("Did not find expected decomming DN " + id,
+            decomming.contains(id));
+      }
+    } finally {
+      System.setOut(oldOut);
+    }
+  }
   /**
    * Tests Decommissioning Status in DFS.
    */
@@ -179,7 +229,8 @@ public class TestDecommissioningStatus {
     DFSClient client = new DFSClient(addr, conf);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
     assertEquals("Number of Datanodes ", 2, info.length);
-    FileSystem fileSys = cluster.getFileSystem();
+    DistributedFileSystem fileSys = cluster.getFileSystem();
+    DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
 
     short replicas = 2;
     //
@@ -205,12 +256,16 @@ public class TestDecommissioningStatus {
         assertEquals(decommissioningNodes.size(), 1);
         DatanodeDescriptor decommNode = decommissioningNodes.get(0);
         checkDecommissionStatus(decommNode, 4, 0, 2);
+        checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1),
+            fileSys, admin);
       } else {
         assertEquals(decommissioningNodes.size(), 2);
         DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
         DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
         checkDecommissionStatus(decommNode1, 4, 4, 2);
         checkDecommissionStatus(decommNode2, 4, 4, 2);
+        checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2),
+            fileSys, admin);
       }
     }
     // Call refreshNodes on FSNamesystem with empty exclude file.
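
The substring arithmetic in checkDFSAdminDecommissionStatus assumes a report header of the form "Decommissioning datanodes (N):". A regex makes that assumption explicit; this is a sketch of an alternative (assuming java.util.regex imports), not what the patch does:

    // Extract N from a header like "Decommissioning datanodes (2):" (sketch).
    Matcher m = Pattern.compile("Decommissioning datanodes \\((\\d+)\\):")
        .matcher(line);
    if (m.find()) {
      num = Integer.valueOf(m.group(1));
    }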

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java Mon May 12 12:43:59 2014
@@ -26,6 +26,7 @@ import static org.junit.Assert.*;
 import java.io.IOException;
 import java.util.Arrays;
 
+import org.apache.hadoop.conf.Configuration;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -38,7 +39,10 @@ import org.apache.hadoop.fs.permission.P
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
+import static org.mockito.Mockito.*;
 /**
  * Unit tests covering FSPermissionChecker.  All tests in this suite have been
  * cross-validated against Linux setfacl/getfacl to check for consistency of the
@@ -56,14 +60,24 @@ public class TestFSPermissionChecker {
   private static final UserGroupInformation CLARK =
     UserGroupInformation.createUserForTesting("clark", new String[] { "execs" });
 
+  private FSDirectory dir;
   private INodeDirectory inodeRoot;
 
   @Before
   public void setUp() {
-    PermissionStatus permStatus = PermissionStatus.createImmutable(SUPERUSER,
-      SUPERGROUP, FsPermission.createImmutable((short)0755));
-    inodeRoot = new INodeDirectory(INodeId.ROOT_INODE_ID,
-      INodeDirectory.ROOT_NAME, permStatus, 0L);
+    Configuration conf = new Configuration();
+    FSNamesystem fsn = mock(FSNamesystem.class);
+    doAnswer(new Answer() {
+      @Override
+      public Object answer(InvocationOnMock invocation) throws Throwable {
+        Object[] args = invocation.getArguments();
+        FsPermission perm = (FsPermission) args[0];
+        return new PermissionStatus(SUPERUSER, SUPERGROUP, perm);
+      }
+    }).when(fsn).createFsOwnerPermissions(any(FsPermission.class));
+    FSImage image = mock(FSImage.class);
+    dir = new FSDirectory(image, fsn, conf);
+    inodeRoot = dir.getRoot();
   }
 
   @Test
@@ -379,14 +393,14 @@ public class TestFSPermissionChecker {
   private void assertPermissionGranted(UserGroupInformation user, String path,
       FsAction access) throws IOException {
     new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
-      inodeRoot, false, null, null, access, null, true);
+      dir, false, null, null, access, null, true);
   }
 
   private void assertPermissionDenied(UserGroupInformation user, String path,
       FsAction access) throws IOException {
     try {
       new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
-        inodeRoot, false, null, null, access, null, true);
+        dir, false, null, null, access, null, true);
       fail("expected AccessControlException for user + " + user + ", path = " +
         path + ", access = " + access);
     } catch (AccessControlException e) {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Mon May 12 12:43:59 2014
@@ -68,6 +68,8 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
@@ -99,13 +101,13 @@ public class TestFsck {
       "ugi=.*?\\s" + 
       "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
       "cmd=fsck\\ssrc=\\/\\sdst=null\\s" + 
-      "perm=null");
+      "perm=null\\s" + "proto=.*");
   static final Pattern getfileinfoPattern = Pattern.compile(
       "allowed=.*?\\s" +
       "ugi=.*?\\s" + 
       "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
       "cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" + 
-      "perm=null");
+      "perm=null\\s" + "proto=.*");
   
   static final Pattern numCorruptBlocksPattern = Pattern.compile(
       ".*Corrupt blocks:\t\t([0123456789]*).*");
@@ -699,7 +701,7 @@ public class TestFsck {
       DFSTestUtil.waitReplication(fs, filePath, (short)1);
       
       // intentionally corrupt NN data structure
-      INodeFile node = (INodeFile)cluster.getNamesystem().dir.rootDir.getNode(
+      INodeFile node = (INodeFile)cluster.getNamesystem().dir.getNode(
           fileName, true);
       final BlockInfo[] blocks = node.getBlocks(); 
       assertEquals(blocks.length, 1);
@@ -981,10 +983,15 @@ public class TestFsck {
     PrintWriter out = new PrintWriter(result, true);
     InetAddress remoteAddress = InetAddress.getLocalHost();
     FSNamesystem fsName = mock(FSNamesystem.class);
+    BlockManager blockManager = mock(BlockManager.class);
+    DatanodeManager dnManager = mock(DatanodeManager.class);
+
     when(namenode.getNamesystem()).thenReturn(fsName);
     when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
         anyBoolean(), anyBoolean(), anyBoolean())).
         thenThrow(new FileNotFoundException()) ;
+    when(fsName.getBlockManager()).thenReturn(blockManager);
+    when(blockManager.getDatanodeManager()).thenReturn(dnManager);
 
     NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
         NUM_REPLICAS, (short)1, remoteAddress);
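
The two-level stubbing above (fsName returns blockManager, which returns dnManager) can also be written with Mockito deep stubs; this is an alternative sketch, not what the patch uses:

    // With RETURNS_DEEP_STUBS, intermediate getters return auto-created
    // mocks, so fsName.getBlockManager().getDatanodeManager() needs no
    // explicit when/thenReturn wiring.
    FSNamesystem fsName = mock(FSNamesystem.class, Mockito.RETURNS_DEEP_STUBS);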

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java Mon May 12 12:43:59 2014
@@ -19,10 +19,8 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.junit.Assert.assertTrue;
 
-import java.net.InetSocketAddress;
-import java.net.URL;
+import java.lang.management.ManagementFactory;
 
-import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,9 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.junit.Test;
 
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
 /**
  * DFS_HOSTS and DFS_HOSTS_EXCLUDE tests
  * 
@@ -73,7 +74,7 @@ public class TestHostsFiles {
   }
 
   @Test
-  public void testHostsExcludeDfshealthJsp() throws Exception {
+  public void testHostsExcludeInUI() throws Exception {
     Configuration conf = getConf();
     short REPLICATION_FACTOR = 2;
     final Path filePath = new Path("/testFile");
@@ -117,17 +118,13 @@ public class TestHostsFiles {
 
       // Check the block still has sufficient # replicas across racks
       DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
-      
-      InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
-      LOG.info("nnaddr = '" + nnHttpAddress + "'");
-      String nnHostName = nnHttpAddress.getHostName();
-      URL nnjsp = new URL("http://" + nnHostName + ":" + nnHttpAddress.getPort() + "/dfshealth.jsp");
-      LOG.info("fetching " + nnjsp);
-      String dfshealthPage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(nnjsp));
-      LOG.info("got " + dfshealthPage);
-      assertTrue("dfshealth should contain " + nnHostName + ", got:" + dfshealthPage,
-          dfshealthPage.contains(nnHostName));
 
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+              "Hadoop:service=NameNode,name=NameNodeInfo");
+      String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
+      assertTrue("Live nodes should contain the decommissioned node",
+              nodes.contains("Decommissioned"));
     } finally {
       cluster.shutdown();
     }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Mon May 12 12:43:59 2014
@@ -468,8 +468,8 @@ public class TestINodeFile {
     }
   }
 
-  @Test
-  public void testWriteToRenamedFile() throws IOException {
+  @Test(timeout=120000)
+  public void testWriteToDeletedFile() throws IOException {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .build();
@@ -486,18 +486,16 @@ public class TestINodeFile {
     Path filePath = new Path("/test1/file");
     FSDataOutputStream fos = fs.create(filePath);
 
-    // Rename /test1 to test2, and recreate /test1/file
-    Path renamedPath = new Path("/test2");
-    fs.rename(path, renamedPath);
-    fs.create(filePath, (short) 1);
+    // Delete the file
+    fs.delete(filePath, false);
 
-    // Add new block should fail since /test1/file has a different fileId
+    // Add new block should fail since /test1/file has been deleted.
     try {
       fos.write(data, 0, data.length);
       // make sure addBlock() request gets to NN immediately
       fos.hflush();
 
-      fail("Write should fail after rename");
+      fail("Write should fail after delete");
     } catch (Exception e) {
       /* Ignore */
     } finally {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java Mon May 12 12:43:59 2014
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -92,8 +93,9 @@ public class TestMetaSave {
     try {
       reader = new BufferedReader(new InputStreamReader(in));
       String line = reader.readLine();
-      assertTrue(line.equals(
-          "3 files and directories, 2 blocks = 5 total filesystem objects"));
+      Assert.assertEquals(
+          "3 files and directories, 2 blocks = 5 total filesystem objects",
+          line);
       line = reader.readLine();
       assertTrue(line.equals("Live Datanodes: 1"));
       line = reader.readLine();

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java Mon May 12 12:43:59 2014
@@ -78,18 +78,4 @@ public class TestSecondaryWebUi {
     Assert.assertArrayEquals(checkpointEditlogDir,
             snn.getCheckpointEditlogDirectories());
   }
-
-  @Test
-  public void testSecondaryWebUiJsp()
-          throws IOException, MalformedObjectNameException,
-                 AttributeNotFoundException, MBeanException,
-                 ReflectionException, InstanceNotFoundException {
-    String pageContents = DFSTestUtil.urlGet(new URL("http://localhost:" +
-        SecondaryNameNode.getHttpAddress(conf).getPort() + "/status.jsp"));
-    Assert.assertTrue("Didn't find \"Last Checkpoint\"",
-        pageContents.contains("Last Checkpoint"));
-    Assert.assertTrue("Didn't find Checkpoint Transactions: 500",
-        pageContents.contains("Checkpoint Transactions: 500"));
-
-  }
 }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java Mon May 12 12:43:59 2014
@@ -36,6 +36,7 @@ import org.apache.commons.logging.impl.L
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -819,10 +820,13 @@ public class TestHASafeMode {
           null);
       create.write(testData.getBytes());
       create.hflush();
+      long fileId = ((DFSOutputStream)create.
+          getWrappedStream()).getFileId();
+      FileStatus fileStatus = dfs.getFileStatus(filePath);
       DFSClient client = DFSClientAdapter.getClient(dfs);
       // add one dummy block at NN, but not write to DataNode
-      ExtendedBlock previousBlock = DFSClientAdapter.getPreviousBlock(client,
-          pathString);
+      ExtendedBlock previousBlock =
+          DFSClientAdapter.getPreviousBlock(client, fileId);
       DFSClientAdapter.getNamenode(client).addBlock(
           pathString,
           client.getClientName(),

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Mon May 12 12:43:59 2014
@@ -120,6 +120,11 @@ public class TestOfflineImageViewer {
         }
       }
 
+      // Create an empty directory
+      Path emptydir = new Path("/emptydir");
+      hdfs.mkdirs(emptydir);
+      writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
+
       // Get delegation tokens so we log the delegation token op
       Token<?>[] delegationTokens = hdfs
           .addDelegationTokens(TEST_RENEWER, null);
@@ -205,8 +210,8 @@ public class TestOfflineImageViewer {
     matcher = p.matcher(output.getBuffer());
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalDirs = Integer.parseInt(matcher.group(1));
-    // totalDirs includes root directory
-    assertEquals(NUM_DIRS + 1, totalDirs);
+    // totalDirs includes root directory and empty directory
+    assertEquals(NUM_DIRS + 2, totalDirs);
 
     FileStatus maxFile = Collections.max(writtenFiles.values(),
         new Comparator<FileStatus>() {
@@ -259,7 +264,7 @@ public class TestOfflineImageViewer {
 
       // verify the number of directories
       FileStatus[] statuses = webhdfs.listStatus(new Path("/"));
-      assertEquals(NUM_DIRS, statuses.length);
+      assertEquals(NUM_DIRS + 1, statuses.length); // contains empty directory
 
       // verify the number of files in the directory
       statuses = webhdfs.listStatus(new Path("/dir0"));
@@ -270,6 +275,10 @@ public class TestOfflineImageViewer {
       FileStatus expected = writtenFiles.get("/dir0/file0");
       compareFile(expected, status);
 
+      // LISTSTATUS operation to an empty directory
+      statuses = webhdfs.listStatus(new Path("/emptydir"));
+      assertEquals(0, statuses.length);
+
       // LISTSTATUS operation to a invalid path
       URL url = new URL("http://localhost:" + port +
                     "/webhdfs/v1/invalid/?op=LISTSTATUS");

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java Mon May 12 12:43:59 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
@@ -37,6 +38,7 @@ import org.junit.Test;
 import java.io.IOException;
 import java.net.URI;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 
@@ -119,6 +121,8 @@ public class TestWebHDFSForHA {
   @Test
   public void testFailoverAfterOpen() throws IOException {
     Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
+        "://" + LOGICAL_NAME);
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     final Path p = new Path("/test");
@@ -152,4 +156,30 @@ public class TestWebHDFSForHA {
       }
     }
   }
-}
\ No newline at end of file
+
+  @Test
+  public void testMultipleNamespacesConfigured() throws Exception {
+    Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    MiniDFSCluster cluster = null;
+    WebHdfsFileSystem fs = null;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+              .numDataNodes(1).build();
+
+      HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+
+      cluster.waitActive();
+      DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
+      DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
+
+      fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
+      Assert.assertEquals(2, fs.getResolvedNNAddr().length);
+    } finally {
+      IOUtils.cleanup(null, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml Mon May 12 12:43:59 2014
@@ -903,7 +903,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^drwxr-xr-x\+( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir2</expected-output>
+          <expected-output>^drwxr-xr-x\+( )*-( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir2</expected-output>
         </comparator>
       </comparators>
     </test>

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Mon May 12 12:43:59 2014
@@ -15220,7 +15220,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-report:( |\t)*Reports basic filesystem information and statistics.( )*</expected-output>
+          <expected-output>^-report \[-live\] \[-dead\] \[-decommissioning\]:(.)*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^[ \t]*Reports basic filesystem information and statistics.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -15900,9 +15904,9 @@
           <type>RegexpComparator</type>
           <expected-output>DFS Used\%: [0-9\.]+%</expected-output>
         </comparator>
-	<comparator>
+        <comparator>
           <type>RegexpComparator</type>
-          <expected-output>Datanodes available: [0-9]+ \([0-9]+ total, [0-9]+ dead\)</expected-output>
+          <expected-output>Live datanodes \([0-9]+\):</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -15930,7 +15934,7 @@
         </comparator>
         <comparator>
           <type>TokenComparator</type>
-          <expected-output>Live datanodes:</expected-output>
+          <expected-output>Live datanodes</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16020,10 +16024,6 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>Datanodes available: [0-9]+ \([0-9]+ total, [0-9]+ dead\)</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
           <expected-output>Name: [0-9\.:]+ \([-.a-zA-z0-9\.]+\)</expected-output>
         </comparator>
         <comparator>
@@ -16048,7 +16048,7 @@
         </comparator>
 	<comparator>
           <type>TokenComparator</type>
-          <expected-output>Live datanodes:</expected-output>
+          <expected-output>Live datanodes</expected-output>
         </comparator>
       </comparators>
     </test>
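
The comparator updates above track the new dfsadmin report format: the usage line gains "-report [-live] [-dead] [-decommissioning]" and the node listing now starts with "Live datanodes (N):". A minimal way to exercise the new flags from Java, in the spirit of the checkDFSAdminDecommissionStatus helper added to TestDecommissioningStatus in this commit (sketch):

    // Run a filtered report through the Tool interface (sketch; output goes
    // to System.out, which the test helper above redirects and parses).
    DFSAdmin admin = new DFSAdmin(conf);
    admin.run(new String[] {"-report", "-live", "-decommissioning"});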