Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2014/08/20 01:50:25 UTC

svn commit: r1619012 [31/35] - in /hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop...

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java Tue Aug 19 23:49:39 2014
@@ -34,10 +34,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.AclException;
+import org.apache.hadoop.hdfs.protocol.FsAclPermission;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -118,7 +120,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0750);
+    assertPermission((short)010750);
     assertAclFeature(true);
   }
 
@@ -140,7 +142,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
       aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
-    assertPermission((short)0750);
+    assertPermission((short)010750);
     assertAclFeature(true);
   }
 
@@ -161,7 +163,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0750);
+    assertPermission((short)010750);
     assertAclFeature(true);
   }
 
@@ -177,7 +179,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", READ_WRITE),
       aclEntry(ACCESS, GROUP, READ) }, returned);
-    assertPermission((short)0660);
+    assertPermission((short)010660);
     assertAclFeature(true);
   }
 
@@ -195,7 +197,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, USER, ALL),
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0750);
+    assertPermission((short)010750);
     assertAclFeature(true);
   }
 
@@ -212,7 +214,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", ALL),
       aclEntry(ACCESS, GROUP, READ) }, returned);
-    assertPermission((short)0600);
+    assertPermission((short)010600);
     assertAclFeature(true);
   }
 
@@ -240,7 +242,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)01750);
+    assertPermission((short)011750);
     assertAclFeature(true);
   }
 
@@ -286,7 +288,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0750);
+    assertPermission((short)010750);
     assertAclFeature(true);
   }
 
@@ -309,7 +311,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "bar", READ_WRITE),
       aclEntry(ACCESS, GROUP, READ_WRITE) }, returned);
-    assertPermission((short)0760);
+    assertPermission((short)010760);
     assertAclFeature(true);
   }
 
@@ -334,7 +336,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0750);
+    assertPermission((short)010750);
     assertAclFeature(true);
   }
 
@@ -382,7 +384,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, USER, ALL),
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0750);
+    assertPermission((short)010750);
     assertAclFeature(true);
   }
 
@@ -408,7 +410,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)01750);
+    assertPermission((short)011750);
     assertAclFeature(true);
   }
 
@@ -436,7 +438,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", ALL),
       aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
-    assertPermission((short)0770);
+    assertPermission((short)010770);
     assertAclFeature(true);
   }
 
@@ -456,7 +458,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", ALL),
       aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
-    assertPermission((short)0770);
+    assertPermission((short)010770);
     assertAclFeature(true);
   }
 
@@ -501,7 +503,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", ALL),
       aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
-    assertPermission((short)01770);
+    assertPermission((short)011770);
     assertAclFeature(true);
   }
 
@@ -602,7 +604,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, ALL),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0770);
+    assertPermission((short)010770);
     assertAclFeature(true);
   }
 
@@ -621,7 +623,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", READ),
       aclEntry(ACCESS, GROUP, READ) }, returned);
-    assertPermission((short)0640);
+    assertPermission((short)010640);
     assertAclFeature(true);
   }
 
@@ -639,7 +641,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, ALL),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0750);
+    assertPermission((short)010750);
     assertAclFeature(true);
   }
 
@@ -679,7 +681,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, USER, ALL),
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0750);
+    assertPermission((short)010750);
     assertAclFeature(true);
   }
 
@@ -699,7 +701,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", READ),
       aclEntry(ACCESS, GROUP, READ) }, returned);
-    assertPermission((short)0670);
+    assertPermission((short)010670);
     assertAclFeature(true);
   }
 
@@ -723,7 +725,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, ALL),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)01770);
+    assertPermission((short)011770);
     assertAclFeature(true);
   }
 
@@ -768,7 +770,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, ALL),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0700);
+    assertPermission((short)010700);
     assertAclFeature(true);
   }
 
@@ -788,7 +790,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", READ),
       aclEntry(ACCESS, GROUP, READ) }, returned);
-    assertPermission((short)0600);
+    assertPermission((short)010600);
     assertAclFeature(true);
   }
 
@@ -810,11 +812,28 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, ALL),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission((short)0700);
+    assertPermission((short)010700);
     assertAclFeature(true);
   }
 
   @Test
+  public void testSetPermissionCannotSetAclBit() throws IOException {
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+    fs.setPermission(path, FsPermission.createImmutable((short)0700));
+    assertPermission((short)0700);
+    fs.setPermission(path,
+      new FsAclPermission(FsPermission.createImmutable((short)0755)));
+    INode inode = cluster.getNamesystem().getFSDirectory().getNode(
+      path.toUri().getPath(), false);
+    assertNotNull(inode);
+    FsPermission perm = inode.getFsPermission();
+    assertNotNull(perm);
+    assertEquals(0755, perm.toShort());
+    assertEquals(0755, perm.toExtendedShort());
+    assertAclFeature(false);
+  }
+
+  @Test
   public void testDefaultAclNewFile() throws Exception {
     FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
     List<AclEntry> aclSpec = Lists.newArrayList(
@@ -827,7 +846,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", ALL),
       aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
-    assertPermission(filePath, (short)0640);
+    assertPermission(filePath, (short)010640);
     assertAclFeature(filePath, true);
   }
 
@@ -881,7 +900,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, ALL),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission(dirPath, (short)0750);
+    assertPermission(dirPath, (short)010750);
     assertAclFeature(dirPath, true);
   }
 
@@ -916,7 +935,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, USER, ALL),
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, OTHER, NONE) }, returned);
-    assertPermission(dirPath, (short)0750);
+    assertPermission(dirPath, (short)010750);
     assertAclFeature(dirPath, true);
   }
 
@@ -940,7 +959,7 @@ public abstract class FSAclBaseTest {
     AclStatus s = fs.getAclStatus(dirPath);
     AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
     assertArrayEquals(expected, returned);
-    assertPermission(dirPath, (short)0750);
+    assertPermission(dirPath, (short)010750);
     assertAclFeature(dirPath, true);
     expected = new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", ALL),
@@ -948,7 +967,7 @@ public abstract class FSAclBaseTest {
     s = fs.getAclStatus(filePath);
     returned = s.getEntries().toArray(new AclEntry[0]);
     assertArrayEquals(expected, returned);
-    assertPermission(filePath, (short)0640);
+    assertPermission(filePath, (short)010640);
     assertAclFeature(filePath, true);
   }
 
@@ -972,12 +991,12 @@ public abstract class FSAclBaseTest {
     AclStatus s = fs.getAclStatus(dirPath);
     AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
     assertArrayEquals(expected, returned);
-    assertPermission(dirPath, (short)0750);
+    assertPermission(dirPath, (short)010750);
     assertAclFeature(dirPath, true);
     s = fs.getAclStatus(subdirPath);
     returned = s.getEntries().toArray(new AclEntry[0]);
     assertArrayEquals(expected, returned);
-    assertPermission(subdirPath, (short)0750);
+    assertPermission(subdirPath, (short)010750);
     assertAclFeature(subdirPath, true);
   }
 
@@ -1004,7 +1023,7 @@ public abstract class FSAclBaseTest {
     AclStatus s = fs.getAclStatus(dirPath);
     AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
     assertArrayEquals(expected, returned);
-    assertPermission(dirPath, (short)0750);
+    assertPermission(dirPath, (short)010750);
     assertAclFeature(dirPath, true);
     expected = new AclEntry[] { };
     s = fs.getAclStatus(linkPath);
@@ -1037,7 +1056,7 @@ public abstract class FSAclBaseTest {
     assertArrayEquals(new AclEntry[] {
       aclEntry(ACCESS, USER, "foo", ALL),
       aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
-    assertPermission(filePath, (short)0740);
+    assertPermission(filePath, (short)010740);
     assertAclFeature(filePath, true);
   }
 
@@ -1059,7 +1078,7 @@ public abstract class FSAclBaseTest {
       aclEntry(DEFAULT, GROUP, READ_EXECUTE),
       aclEntry(DEFAULT, MASK, ALL),
       aclEntry(DEFAULT, OTHER, READ_EXECUTE) }, returned);
-    assertPermission(dirPath, (short)0740);
+    assertPermission(dirPath, (short)010740);
     assertAclFeature(dirPath, true);
   }
 
@@ -1238,6 +1257,33 @@ public abstract class FSAclBaseTest {
     fsAsDiana.getAclStatus(bruceFile);
   }
 
+  @Test
+  public void testAccess() throws IOException, InterruptedException {
+    Path p1 = new Path("/p1");
+    fs.mkdirs(p1);
+    fs.setOwner(p1, BRUCE.getShortUserName(), "groupX");
+    fsAsBruce.setAcl(p1, Lists.newArrayList(
+        aclEntry(ACCESS, USER, READ),
+        aclEntry(ACCESS, USER, "bruce", READ),
+        aclEntry(ACCESS, GROUP, NONE),
+        aclEntry(ACCESS, OTHER, NONE)));
+    fsAsBruce.access(p1, FsAction.READ);
+    try {
+      fsAsBruce.access(p1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
+    Path badPath = new Path("/bad/bad");
+    try {
+      fsAsBruce.access(badPath, FsAction.READ);
+      fail("The access call should have failed");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
+
   /**
    * Creates a FileSystem for the super-user.
    *
@@ -1306,7 +1352,7 @@ public abstract class FSAclBaseTest {
    */
   private static void assertAclFeature(Path pathToCheck,
       boolean expectAclFeature) throws IOException {
-    INode inode = cluster.getNamesystem().getFSDirectory().getRoot()
+    INode inode = cluster.getNamesystem().getFSDirectory()
       .getNode(pathToCheck.toUri().getPath(), false);
     assertNotNull(inode);
     AclFeature aclFeature = inode.getAclFeature();

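The mechanical change running through this file is that every expected permission gains a leading 01 octal digit whenever an ACL is attached: the extended short form of the permission apparently carries an ACL indicator bit above the 12 classic mode bits, so 0750 becomes 010750 and the sticky-bit case 01750 becomes 011750. A minimal sketch of that bit arithmetic, assuming a hypothetical ACL_BIT constant (the real flag is internal to FsPermission/FsAclPermission):

    public class AclBitDemo {
      // Octal 010000 (decimal 4096): one bit above the 12 classic mode bits.
      static final short ACL_BIT = (short) 010000;

      static short withAclBit(short perm) {
        return (short) (perm | ACL_BIT);
      }

      static boolean hasAclBit(short extendedPerm) {
        return (extendedPerm & ACL_BIT) != 0;
      }

      public static void main(String[] args) {
        System.out.println(Integer.toOctalString(withAclBit((short) 0750)));  // 10750
        System.out.println(Integer.toOctalString(withAclBit((short) 01750))); // 11750
        System.out.println(hasAclBit((short) 0750));                          // false
      }
    }

The new testSetPermissionCannotSetAclBit then checks the other direction: handing setPermission an FsAclPermission wrapper must not flip that bit on, so toShort() and toExtendedShort() both stay at 0755 and no AclFeature appears.
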
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Tue Aug 19 23:49:39 2014
@@ -202,7 +202,7 @@ public class NNThroughputBenchmark imple
      * {@link #executeOp(int, int, String)}, which can have different meanings
      * depending on the operation performed.
      * 
-     * @param daemonId
+     * @param daemonId id of the daemon calling this method
      * @return the argument
      */
     abstract String getExecutionArgument(int daemonId);
@@ -322,11 +322,10 @@ public class NNThroughputBenchmark imple
     /**
      * Parse first 2 arguments, corresponding to the "-op" option.
      * 
-     * @param args
+     * @param args argument list
      * @return true if operation is all, which means that options not related
      * to this operation should be ignored, or false otherwise, meaning
      * that usage should be printed when an unrelated option is encountered.
-     * @throws IOException
      */
     protected boolean verifyOpArgument(List<String> args) {
       if(args.size() < 2 || ! args.get(0).startsWith("-op"))

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Tue Aug 19 23:49:39 2014
@@ -45,6 +45,7 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
 import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
 
 /**
  * This is a utility class to expose NameNode functionality for unit tests.
@@ -177,8 +178,9 @@ public class NameNodeAdapter {
   }
 
   public static FSImage spyOnFsImage(NameNode nn1) {
-    FSImage spy = Mockito.spy(nn1.getNamesystem().dir.fsImage);
-    nn1.getNamesystem().dir.fsImage = spy;
+    FSNamesystem fsn = nn1.getNamesystem();
+    FSImage spy = Mockito.spy(fsn.getFSImage());
+    Whitebox.setInternalState(fsn, "fsImage", spy);
     return spy;
   }
   

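spyOnFsImage no longer reaches into the package-private dir.fsImage field by direct assignment; it wraps the FSImage in a Mockito spy and injects it through Whitebox.setInternalState. As a point of reference, a minimal sketch of what such reflective injection amounts to, using only java.lang.reflect (names here are assumptions, not the Whitebox implementation):

    import java.lang.reflect.Field;

    final class FieldInjector {
      // Walk the class hierarchy, find the named field, and overwrite it.
      static void setInternalState(Object target, String name, Object value) {
        for (Class<?> c = target.getClass(); c != null; c = c.getSuperclass()) {
          try {
            Field f = c.getDeclaredField(name);
            f.setAccessible(true);  // bypass private access for test injection
            f.set(target, value);
            return;
          } catch (NoSuchFieldException e) {
            // Not declared on this class; keep walking up.
          } catch (IllegalAccessException e) {
            throw new IllegalStateException(e);
          }
        }
        throw new IllegalArgumentException(
            "No field '" + name + "' on " + target.getClass().getName());
      }
    }

Injecting reflectively keeps the test working even if the field loses package-private visibility, which is presumably why the direct assignment was replaced.
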
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java Tue Aug 19 23:49:39 2014
@@ -304,7 +304,7 @@ public class TestAclTransformation {
       .add(aclEntry(DEFAULT, MASK, ALL))
       .add(aclEntry(DEFAULT, OTHER, READ))
       .build();
-    List<AclEntry> aclSpec = Lists.<AclEntry>newArrayList();
+    List<AclEntry> aclSpec = Lists.newArrayList();
     assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec));
   }
 
@@ -705,7 +705,7 @@ public class TestAclTransformation {
       .add(aclEntry(DEFAULT, MASK, ALL))
       .add(aclEntry(DEFAULT, OTHER, READ))
       .build();
-    List<AclEntry> aclSpec = Lists.<AclEntry>newArrayList();
+    List<AclEntry> aclSpec = Lists.newArrayList();
     assertEquals(existing, mergeAclEntries(existing, aclSpec));
   }
 

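Both hunks in this file only drop a redundant explicit type witness: the compiler infers the element type from the assignment target, so Lists.<AclEntry>newArrayList() and Lists.newArrayList() are equivalent here. A self-contained illustration (using String for brevity):

    import java.util.List;
    import com.google.common.collect.Lists;

    class InferenceDemo {
      static void demo() {
        // Equivalent declarations: the element type is inferred from the
        // assignment target, so the explicit witness adds nothing.
        List<String> explicit = Lists.<String>newArrayList();
        List<String> inferred = Lists.newArrayList();
      }
    }
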
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java Tue Aug 19 23:49:39 2014
@@ -24,18 +24,24 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.net.HttpURLConnection;
 import java.net.InetAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.authorize.ProxyServers;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
@@ -45,6 +51,16 @@ public class TestAuditLogger {
 
   private static final short TEST_PERMISSION = (short) 0654;
 
+  @Before
+  public void setup() {
+    DummyAuditLogger.initialized = false;
+    DummyAuditLogger.logCount = 0;
+    DummyAuditLogger.remoteAddr = null;
+
+    Configuration conf = new HdfsConfiguration();
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);    
+  }
+
   /**
    * Tests that AuditLogger works as expected.
    */
@@ -69,6 +85,57 @@ public class TestAuditLogger {
     }
   }
 
+  @Test
+  public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
+        DummyAuditLogger.class.getName());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    
+    GetOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
+    try {
+      cluster.waitClusterUp();
+      assertTrue(DummyAuditLogger.initialized);      
+      URI uri = new URI(
+          "http",
+          NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),
+          "/webhdfs/v1/", op.toQueryString(), null);
+      
+      // non-proxy request
+      HttpURLConnection conn = (HttpURLConnection) uri.toURL().openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.connect();
+      assertEquals(200, conn.getResponseCode());
+      conn.disconnect();
+      assertEquals(1, DummyAuditLogger.logCount);
+      assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
+      
+      // non-trusted proxied request
+      conn = (HttpURLConnection) uri.toURL().openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
+      conn.connect();
+      assertEquals(200, conn.getResponseCode());
+      conn.disconnect();
+      assertEquals(2, DummyAuditLogger.logCount);
+      assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
+      
+      // trusted proxied request
+      conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, "127.0.0.1");
+      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+      conn = (HttpURLConnection) uri.toURL().openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
+      conn.connect();
+      assertEquals(200, conn.getResponseCode());
+      conn.disconnect();
+      assertEquals(3, DummyAuditLogger.logCount);
+      assertEquals("1.1.1.1", DummyAuditLogger.remoteAddr);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Minor test related to HADOOP-9155. Verify that during a
    * FileSystem.setPermission() operation, the stat passed in during the
@@ -128,7 +195,8 @@ public class TestAuditLogger {
     static boolean initialized;
     static int logCount;
     static short foundPermission;
-
+    static String remoteAddr;
+    
     public void initialize(Configuration conf) {
       initialized = true;
     }
@@ -140,6 +208,7 @@ public class TestAuditLogger {
     public void logAuditEvent(boolean succeeded, String userName,
         InetAddress addr, String cmd, String src, String dst,
         FileStatus stat) {
+      remoteAddr = addr.getHostAddress();
       logCount++;
       if (stat != null) {
         foundPermission = stat.getPermission().toShort();

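The new testWebHdfsAuditLogger pins down how the audit logger picks the client address behind a proxy: an X-Forwarded-For header is honored only when the directly connected peer is listed as a trusted proxy server. A minimal sketch of that rule under those assumptions (the real check lives in the ProxyServers/ProxyUsers machinery, and a real X-Forwarded-For value may be a comma-separated chain):

    import java.util.Set;

    final class ClientAddrResolver {
      // Honor X-Forwarded-For only when the direct peer is a trusted proxy;
      // otherwise audit the address of the peer itself.
      static String effectiveClientAddr(String remoteAddr, String xForwardedFor,
          Set<String> trustedProxies) {
        if (xForwardedFor != null && trustedProxies.contains(remoteAddr)) {
          return xForwardedFor;
        }
        return remoteAddr;
      }
    }

Run against the three requests in the test: no header logs 127.0.0.1, a header from an untrusted peer still logs 127.0.0.1, and the same header arriving via the trusted 127.0.0.1 logs 1.1.1.1.
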
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java Tue Aug 19 23:49:39 2014
@@ -91,6 +91,9 @@ public class TestAuditLogs {
       "perm=.*?");
   static final Pattern successPattern = Pattern.compile(
       ".*allowed=true.*");
+  static final Pattern webOpenPattern = Pattern.compile(
+      ".*cmd=open.*proto=webhdfs.*");
+
   static final String username = "bob";
   static final String[] groups = { "group1" };
   static final String fileName = "/srcdat";
@@ -240,6 +243,22 @@ public class TestAuditLogs {
     verifyAuditLogsRepeat(false, 2);
   }
 
+  /** test that open via webhdfs puts proper entry in audit log */
+  @Test
+  public void testAuditWebHdfsOpen() throws Exception {
+    final Path file = new Path(fnames[0]);
+
+    fs.setPermission(file, new FsPermission((short)0644));
+    fs.setOwner(file, "root", null);
+
+    setupAuditLogs();
+
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
+    webfs.open(file);
+
+    verifyAuditLogsCheckPattern(true, 3, webOpenPattern);
+  }
+
   /** Sets up log4j logger for auditlogs */
   private void setupAuditLogs() throws IOException {
     Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
@@ -303,4 +322,38 @@ public class TestAuditLogs {
       reader.close();
     }
   }
+
+  // Ensure the audit log has exactly ndupe entries whose success flags match
+  // expectSuccess, and that at least one entry matches the given pattern
+  private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Pattern pattern)
+      throws IOException {
+    // Turn off the logs
+    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    logger.setLevel(Level.OFF);
+
+    // Close the appenders and force all logs to be flushed
+    Enumeration<?> appenders = logger.getAllAppenders();
+    while (appenders.hasMoreElements()) {
+      Appender appender = (Appender)appenders.nextElement();
+      appender.close();
+    }
+
+    BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
+    String line = null;
+    boolean ret = true;
+    boolean patternMatches = false;
+
+    try {
+      for (int i = 0; i < ndupe; i++) {
+        line = reader.readLine();
+        assertNotNull(line);
+        patternMatches |= pattern.matcher(line).matches();
+        ret &= successPattern.matcher(line).matches();
+      }
+      assertNull("Unexpected event in audit log", reader.readLine());
+      assertTrue("Expected audit event not found in audit log", patternMatches);
+      assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
+    } finally {
+      reader.close();
+    }
+  }
 }

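webOpenPattern keys on two substrings appearing in order, cmd=open then proto=webhdfs. A quick self-contained check of the regex against an illustrative audit line (the exact fields of the sample line are an assumption for the demo, not the verbatim log format):

    import java.util.regex.Pattern;

    public class AuditPatternDemo {
      public static void main(String[] args) {
        Pattern webOpen = Pattern.compile(".*cmd=open.*proto=webhdfs.*");
        String line = "allowed=true\tugi=bob\tip=/127.0.0.1\tcmd=open\t"
            + "src=/srcdat\tdst=null\tperm=null\tproto=webhdfs";
        System.out.println(webOpen.matcher(line).matches()); // true
      }
    }
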
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Tue Aug 19 23:49:39 2014
@@ -34,6 +34,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Date;
 import java.util.EnumSet;
 import java.util.Iterator;
@@ -72,7 +73,9 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -477,6 +480,12 @@ public class TestCacheDirectives {
     iter = dfs.listCacheDirectives(
         new CacheDirectiveInfo.Builder().setPool("pool2").build());
     validateListAll(iter, betaId);
+    iter = dfs.listCacheDirectives(
+        new CacheDirectiveInfo.Builder().setId(alphaId2).build());
+    validateListAll(iter, alphaId2);
+    iter = dfs.listCacheDirectives(
+        new CacheDirectiveInfo.Builder().setId(relativeId).build());
+    validateListAll(iter, relativeId);
 
     dfs.removeCacheDirective(betaId);
     iter = dfs.listCacheDirectives(
@@ -674,6 +683,12 @@ public class TestCacheDirectives {
         } finally {
           namesystem.readUnlock();
         }
+
+        LOG.info(logString + " cached blocks: have " + numCachedBlocks +
+            " / " + expectedCachedBlocks + ".  " +
+            "cached replicas: have " + numCachedReplicas +
+            " / " + expectedCachedReplicas);
+
         if (expectedCachedBlocks == -1 ||
             numCachedBlocks == expectedCachedBlocks) {
           if (expectedCachedReplicas == -1 ||
@@ -681,10 +696,6 @@ public class TestCacheDirectives {
             return true;
           }
         }
-        LOG.info(logString + " cached blocks: have " + numCachedBlocks +
-            " / " + expectedCachedBlocks + ".  " +
-            "cached replicas: have " + numCachedReplicas +
-            " / " + expectedCachedReplicas);
         return false;
       }
     }, 500, 60000);
@@ -1395,6 +1406,28 @@ public class TestCacheDirectives {
         .build());
   }
 
+  /**
+   * Check that the NameNode is not attempting to cache anything.
+   */
+  private void checkPendingCachedEmpty(MiniDFSCluster cluster)
+      throws Exception {
+    cluster.getNamesystem().readLock();
+    try {
+      final DatanodeManager datanodeManager =
+          cluster.getNamesystem().getBlockManager().getDatanodeManager();
+      for (DataNode dn : cluster.getDataNodes()) {
+        DatanodeDescriptor descriptor =
+            datanodeManager.getDatanode(dn.getDatanodeId());
+        Assert.assertTrue("Pending cached list of " + descriptor +
+                " is not empty, "
+                + Arrays.toString(descriptor.getPendingCached().toArray()), 
+            descriptor.getPendingCached().isEmpty());
+      }
+    } finally {
+      cluster.getNamesystem().readUnlock();
+    }
+  }
+
   @Test(timeout=60000)
   public void testExceedsCapacity() throws Exception {
     // Create a giant file
@@ -1403,30 +1436,21 @@ public class TestCacheDirectives {
     int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
     DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
         0xFADED);
-    // Set up a log appender watcher
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
     dfs.addCachePool(new CachePoolInfo("pool"));
     dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
         .setPath(fileName).setReplication((short) 1).build());
     waitForCachedBlocks(namenode, -1, numCachedReplicas,
         "testExceeds:1");
-    // Check that no DNs saw an excess CACHE message
-    int lines = appender.countLinesWithMessage(
-        "more bytes in the cache: " +
-        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
-    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    checkPendingCachedEmpty(cluster);
+    Thread.sleep(1000);
+    checkPendingCachedEmpty(cluster);
+
     // Try creating a file with giant-sized blocks that exceed cache capacity
     dfs.delete(fileName, false);
     DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
         (short) 1, 0xFADED);
-    // Nothing will get cached, so just force sleep for a bit
-    Thread.sleep(4000);
-    // Still should not see any excess commands
-    lines = appender.countLinesWithMessage(
-        "more bytes in the cache: " +
-        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
-    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    checkPendingCachedEmpty(cluster);
+    Thread.sleep(1000);
+    checkPendingCachedEmpty(cluster);
   }
 }

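testExceedsCapacity used to scrape a root-logger appender for excess CACHE messages; it now inspects NameNode state directly, and does so twice with a sleep in between so a late CacheReplicationMonitor pass cannot slip a command past a single check. That check-sleep-check idiom for asserting that a condition holds and keeps holding, as a sketch:

    import java.util.concurrent.Callable;
    import static org.junit.Assert.assertTrue;

    final class NegativeAssert {
      // Verify a condition, give background work time to violate it, verify again.
      static void assertStaysTrue(Callable<Boolean> condition, long settleMillis)
          throws Exception {
        assertTrue(condition.call());
        Thread.sleep(settleMillis);
        assertTrue(condition.call());
      }
    }
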
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Tue Aug 19 23:49:39 2014
@@ -27,6 +27,7 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -43,6 +44,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
+import com.google.common.io.Files;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -87,7 +89,6 @@ import org.apache.hadoop.util.ExitUtil.E
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentMatcher;
@@ -622,11 +623,11 @@ public class TestCheckpoint {
   }
 
   private File filePathContaining(final String substring) {
-    return Mockito.<File>argThat(
+    return Mockito.argThat(
         new ArgumentMatcher<File>() {
           @Override
           public boolean matches(Object argument) {
-            String path = ((File)argument).getAbsolutePath();
+            String path = ((File) argument).getAbsolutePath();
             return path.contains(substring);
           }
         });
@@ -1084,7 +1085,7 @@ public class TestCheckpoint {
       
       FSDirectory secondaryFsDir = secondary.getFSNamesystem().dir;
       INode rootInMap = secondaryFsDir.getInode(secondaryFsDir.rootDir.getId());
-      Assert.assertSame(rootInMap, secondaryFsDir.rootDir);
+      assertSame(rootInMap, secondaryFsDir.rootDir);
       
       fileSys.delete(tmpDir, true);
       fileSys.mkdirs(tmpDir);
@@ -1333,7 +1334,8 @@ public class TestCheckpoint {
     SecondaryNameNode secondary2 = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf)
-          .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+          .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
+              conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
           .build();
       Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
       Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
@@ -2404,6 +2406,46 @@ public class TestCheckpoint {
     }
   }
 
+  @Test
+  public void testLegacyOivImage() throws Exception {
+    MiniDFSCluster cluster = null;
+    SecondaryNameNode secondary = null;
+    File tmpDir = Files.createTempDir();
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY,
+        tmpDir.getAbsolutePath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY,
+        "2");
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+              .format(true).build();
+
+      secondary = startSecondaryNameNode(conf);
+
+      // Checkpoint once
+      secondary.doCheckpoint();
+      String files1[] = tmpDir.list();
+      assertEquals("Only one file is expected", 1, files1.length);
+
+      // Perform more checkpoints and check whether retention management
+      // is working.
+      secondary.doCheckpoint();
+      secondary.doCheckpoint();
+      String files2[] = tmpDir.list();
+      assertEquals("Two files are expected", 2, files2.length);
+
+      // Verify that the first file is deleted.
+      for (String fName : files2) {
+        assertFalse(fName.equals(files1[0]));
+      }
+    } finally {
+      cleanup(secondary);
+      cleanup(cluster);
+      tmpDir.delete();
+    }
+  }
+
   private static void cleanup(SecondaryNameNode snn) {
     if (snn != null) {
       try {
@@ -2441,8 +2483,8 @@ public class TestCheckpoint {
   
   private static List<File> getCheckpointCurrentDirs(SecondaryNameNode secondary) {
     List<File> ret = Lists.newArrayList();
-    for (URI u : secondary.getCheckpointDirs()) {
-      File checkpointDir = new File(u.getPath());
+    for (String u : secondary.getCheckpointDirectories()) {
+      File checkpointDir = new File(URI.create(u).getPath());
       ret.add(new File(checkpointDir, "current"));
     }
     return ret;

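testLegacyOivImage checkpoints three times with DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY set to 2 and expects the oldest legacy OIV image to be purged. The retention rule it verifies, reduced to a sketch over plain files (names here are assumptions, not the NameNode's purge code):

    import java.io.File;
    import java.util.Arrays;
    import java.util.Comparator;

    final class RetentionDemo {
      // Keep only the newest numToRetain files in dir; delete the rest.
      static void retainNewest(File dir, int numToRetain) {
        File[] images = dir.listFiles();
        if (images == null || images.length <= numToRetain) {
          return;
        }
        Arrays.sort(images, new Comparator<File>() {
          @Override
          public int compare(File a, File b) {
            return Long.compare(b.lastModified(), a.lastModified()); // newest first
          }
        });
        for (int i = numToRetain; i < images.length; i++) {
          images[i].delete();
        }
      }
    }
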
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java Tue Aug 19 23:49:39 2014
@@ -32,7 +32,6 @@ import java.io.IOException;
 
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Mockito.*;
 
 /**
@@ -50,6 +49,18 @@ public class TestCommitBlockSynchronizat
     final DatanodeStorageInfo[] targets = {};
 
     FSNamesystem namesystem = new FSNamesystem(conf, image);
+    namesystem.setImageLoaded(true);
+
+    // set file's parent as root and put the file to inodeMap, so
+    // FSNamesystem's isFileDeleted() method will return false on this file
+    if (file.getParent() == null) {
+      INodeDirectory parent = mock(INodeDirectory.class);
+      parent.setLocalName(new byte[0]);
+      parent.addChild(file);
+      file.setParent(parent);
+    }
+    namesystem.dir.getINodeMap().put(file);
+
     FSNamesystem namesystemSpy = spy(namesystem);
     BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
         block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
@@ -62,8 +73,6 @@ public class TestCommitBlockSynchronizat
     doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
     doReturn("").when(namesystemSpy).closeFileCommitBlocks(
         any(INodeFile.class), any(BlockInfo.class));
-    doReturn("").when(namesystemSpy).persistBlocks(
-        any(INodeFile.class), anyBoolean());
     doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
 
     return namesystemSpy;

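The new setup wires the file into a live-looking namespace so FSNamesystem's isFileDeleted() returns false. One subtlety worth flagging for readers of the hunk: parent is a Mockito mock, so calls such as parent.addChild(file) are recorded no-ops; the state that actually takes effect is what is set on real objects, namely file.setParent(parent) and the inode-map entry. A tiny demo of that mock behavior:

    import static org.mockito.Mockito.*;
    import java.util.List;

    public class MockDemo {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        List<String> mockList = mock(List.class);
        mockList.add("x");                    // recorded, but nothing is stored
        System.out.println(mockList.size());  // 0: the default answer for int
        verify(mockList).add("x");            // the invocation itself is visible
      }
    }
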
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Tue Aug 19 23:49:39 2014
@@ -21,26 +21,37 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
 
+import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -83,6 +94,8 @@ public class TestDecommissioningStatus {
         4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
+
     writeConfigFile(localFileSys, excludeFile, null);
     writeConfigFile(localFileSys, includeFile, null);
 
@@ -93,6 +106,7 @@ public class TestDecommissioningStatus {
 
   @AfterClass
   public static void tearDown() throws Exception {
+    if (localFileSys != null) cleanupFile(localFileSys, dir);
     if(fileSys != null) fileSys.close();
     if(cluster != null) cluster.shutdown();
   }
@@ -132,7 +146,8 @@ public class TestDecommissioningStatus {
     return stm;
   }
   
-  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+  static private void cleanupFile(FileSystem fileSys, Path name)
+      throws IOException {
     assertTrue(fileSys.exists(name));
     fileSys.delete(name, true);
     assertTrue(!fileSys.exists(name));
@@ -141,19 +156,26 @@ public class TestDecommissioningStatus {
   /*
    * Decommissions the node at the given index
    */
-  private String decommissionNode(FSNamesystem namesystem,
-      DFSClient client, FileSystem localFileSys, int nodeIndex)
-      throws IOException {
+  private String decommissionNode(FSNamesystem namesystem, DFSClient client,
+      FileSystem localFileSys, int nodeIndex) throws IOException {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
 
     String nodename = info[nodeIndex].getXferAddr();
-    System.out.println("Decommissioning node: " + nodename);
+    decommissionNode(namesystem, localFileSys, nodename);
+    return nodename;
+  }
+
+  /*
+   * Decommissions the node by name
+   */
+  private void decommissionNode(FSNamesystem namesystem,
+      FileSystem localFileSys, String dnName) throws IOException {
+    System.out.println("Decommissioning node: " + dnName);
 
     // write nodename into the exclude file.
     ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
-    nodes.add(nodename);
+    nodes.add(dnName);
     writeConfigFile(localFileSys, excludeFile, nodes);
-    return nodename;
   }
 
   private void checkDecommissionStatus(DatanodeDescriptor decommNode,
@@ -167,7 +189,51 @@ public class TestDecommissioningStatus {
     assertEquals(decommNode.decommissioningStatus
         .getUnderReplicatedInOpenFiles(), expectedUnderRepInOpenFiles);
   }
-  
+
+  private void checkDFSAdminDecommissionStatus(
+      List<DatanodeDescriptor> expectedDecomm, DistributedFileSystem dfs,
+      DFSAdmin admin) throws IOException {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream ps = new PrintStream(baos);
+    PrintStream oldOut = System.out;
+    System.setOut(ps);
+    try {
+      // Parse DFSAdmin just to check the count
+      admin.report(new String[] {"-decommissioning"}, 0);
+      String[] lines = baos.toString().split("\n");
+      Integer num = null;
+      int count = 0;
+      for (String line: lines) {
+        if (line.startsWith("Decommissioning datanodes")) {
+          // Pull out the "(num)" and parse it into an int
+          String temp = line.split(" ")[2];
+          num =
+              Integer.parseInt((String) temp.subSequence(1, temp.length() - 2));
+        }
+        if (line.contains("Decommission in progress")) {
+          count++;
+        }
+      }
+      assertTrue("No decommissioning output", num != null);
+      assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
+          num.intValue());
+      assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
+          count);
+
+      // Check Java API for correct contents
+      List<DatanodeInfo> decomming =
+          new ArrayList<DatanodeInfo>(Arrays.asList(dfs
+              .getDataNodeStats(DatanodeReportType.DECOMMISSIONING)));
+      assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
+          decomming.size());
+      for (DatanodeID id : expectedDecomm) {
+        assertTrue("Did not find expected decomming DN " + id,
+            decomming.contains(id));
+      }
+    } finally {
+      System.setOut(oldOut);
+    }
+  }
   /**
    * Tests Decommissioning Status in DFS.
    */
@@ -179,7 +245,8 @@ public class TestDecommissioningStatus {
     DFSClient client = new DFSClient(addr, conf);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
     assertEquals("Number of Datanodes ", 2, info.length);
-    FileSystem fileSys = cluster.getFileSystem();
+    DistributedFileSystem fileSys = cluster.getFileSystem();
+    DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
 
     short replicas = 2;
     //
@@ -205,12 +272,16 @@ public class TestDecommissioningStatus {
         assertEquals(decommissioningNodes.size(), 1);
         DatanodeDescriptor decommNode = decommissioningNodes.get(0);
         checkDecommissionStatus(decommNode, 4, 0, 2);
+        checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1),
+            fileSys, admin);
       } else {
         assertEquals(decommissioningNodes.size(), 2);
         DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
         DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
         checkDecommissionStatus(decommNode1, 4, 4, 2);
         checkDecommissionStatus(decommNode2, 4, 4, 2);
+        checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2),
+            fileSys, admin);
       }
     }
     // Call refreshNodes on FSNamesystem with empty exclude file.
@@ -221,6 +292,69 @@ public class TestDecommissioningStatus {
     st1.close();
     cleanupFile(fileSys, file1);
     cleanupFile(fileSys, file2);
-    cleanupFile(localFileSys, dir);
+  }
+
+  /**
+   * Verify a DN remains in DECOMMISSION_INPROGRESS state if it is marked
+   * as dead before decommission has completed. That will allow DN to resume
+   * the replication process after it rejoins the cluster.
+   */
+  @Test(timeout=120000)
+  public void testDecommissionStatusAfterDNRestart()
+      throws IOException, InterruptedException {
+    DistributedFileSystem fileSys =
+        (DistributedFileSystem)cluster.getFileSystem();
+
+    // Create a file with one block. That block has one replica.
+    Path f = new Path("decommission.dat");
+    DFSTestUtil.createFile(fileSys, f, fileSize, fileSize, fileSize,
+        (short)1, seed);
+
+    // Find the DN that owns the only replica.
+    RemoteIterator<LocatedFileStatus> fileList = fileSys.listLocatedStatus(f);
+    BlockLocation[] blockLocations = fileList.next().getBlockLocations();
+    String dnName = blockLocations[0].getNames()[0];
+
+    // Decommission the DN.
+    FSNamesystem fsn = cluster.getNamesystem();
+    final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
+    decommissionNode(fsn, localFileSys, dnName);
+    dm.refreshNodes(conf);
+
+    // Stop the DN when decommission is in progress.
+    // Given that DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY is set to 1 and
+    // given the size of the block, the decommission will take much longer
+    // than the test timeout value to complete. So when stopDataNode is
+    // called, decommission should be in progress.
+    DataNodeProperties dataNodeProperties = cluster.stopDataNode(dnName);
+    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+    while (true) {
+      dm.fetchDatanodes(null, dead, false);
+      if (dead.size() == 1) {
+        break;
+      }
+      Thread.sleep(1000);
+    }
+
+    // Force removal of the dead node's blocks.
+    BlockManagerTestUtil.checkHeartbeat(fsn.getBlockManager());
+
+    // Force DatanodeManager to check decommission state.
+    BlockManagerTestUtil.checkDecommissionState(dm, dead.get(0));
+
+    // Verify that the DN remains in DECOMMISSION_INPROGRESS state.
+    assertTrue("the node is in decommissioned state ",
+        !dead.get(0).isDecommissioned());
+
+    // Add the node back
+    cluster.restartDataNode(dataNodeProperties, true);
+    cluster.waitActive();
+
+    // Call refreshNodes on FSNamesystem with empty exclude file.
+    // This will remove the datanodes from decommissioning list and
+    // make them available again.
+    writeConfigFile(localFileSys, excludeFile, null);
+    dm.refreshNodes(conf);
+    cleanupFile(fileSys, f);
   }
 }

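checkDFSAdminDecommissionStatus validates the human-readable dfsadmin report by temporarily swapping System.out for an in-memory stream, parsing the captured text, and restoring the stream in a finally block. That capture pattern in isolation, as a sketch:

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    final class StdoutCapture {
      // Run a command while System.out is redirected; restore it afterwards.
      static String capture(Runnable command) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream old = System.out;
        System.setOut(new PrintStream(baos));
        try {
          command.run();
        } finally {
          System.setOut(old);  // always restore, even if the command throws
        }
        return baos.toString();
      }
    }
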
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Tue Aug 19 23:49:39 2014
@@ -195,7 +195,7 @@ public class TestEditLog {
       for (int i = 0; i < numTransactions; i++) {
         INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
             p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
-        inode.toUnderConstruction("", "", null);
+        inode.toUnderConstruction("", "");
 
         editLog.logOpenFile("/filename" + (startIndex + i), inode, false);
         editLog.logCloseFile("/filename" + (startIndex + i), inode);

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java Tue Aug 19 23:49:39 2014
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.net.BindException;
+import java.util.Random;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,6 +48,9 @@ public class TestEditLogAutoroll {
   private NameNode nn0;
   private FileSystem fs;
   private FSEditLog editLog;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestEditLogAutoroll.class);
 
   @Before
   public void setUp() throws Exception {
@@ -54,24 +62,35 @@ public class TestEditLogAutoroll {
     conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
     conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);
 
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-      .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
-      .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
-
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
-    cluster.waitActive();
-
-    nn0 = cluster.getNameNode(0);
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
-
-    cluster.transitionToActive(0);
-
-    fs = cluster.getFileSystem(0);
-    editLog = nn0.getNamesystem().getEditLog();
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+                .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+                .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
+
+        cluster = new MiniDFSCluster.Builder(conf)
+            .nnTopology(topology)
+            .numDataNodes(0)
+            .build();
+        cluster.waitActive();
+
+        nn0 = cluster.getNameNode(0);
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+        cluster.transitionToActive(0);
+
+        fs = cluster.getFileSystem(0);
+        editLog = nn0.getNamesystem().getEditLog();
+        break;
+      } catch (BindException e) {
+        ++retryCount;
+        LOG.info("Set up of MiniDFSCluster failed due to port conflicts; "
+            + "retrying (attempt " + retryCount + ")");
+      }
+    }
   }
 
   @After

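Note on the retry loop added above: randomizing the NameNode HTTP ports and retrying on BindException is the usual way to keep MiniDFSCluster tests from failing when a fixed port is already taken on a shared build host. A minimal self-contained sketch of the same pattern outside MiniDFSCluster (the port range and helper class are illustrative, not part of this patch):

    import java.net.BindException;
    import java.net.ServerSocket;
    import java.util.Random;

    public class BindRetryDemo {
      // Try to bind a server socket, picking a fresh random even port on
      // each BindException, mirroring the setUp() loop above.
      public static ServerSocket bindWithRetry(int maxAttempts)
          throws Exception {
        Random random = new Random();
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          int port = 10060 + random.nextInt(100) * 2;
          try {
            return new ServerSocket(port);
          } catch (BindException e) {
            System.out.println("Port " + port + " in use, attempt " + attempt);
          }
        }
        throw new BindException("No free port after " + maxAttempts
            + " attempts");
      }
    }
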
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java Tue Aug 19 23:49:39 2014
@@ -22,22 +22,35 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.StringReader;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Random;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.Lists;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 /**
  * Test {@link FSDirectory}, the in-memory namespace tree.
  */
@@ -67,9 +80,14 @@ public class TestFSDirectory {
 
   private DistributedFileSystem hdfs;
 
+  private static final int numGeneratedXAttrs = 256;
+  private static final ImmutableList<XAttr> generatedXAttrs =
+      ImmutableList.copyOf(generateXAttrs(numGeneratedXAttrs));
+
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(REPLICATION)
       .build();
@@ -111,25 +129,16 @@ public class TestFSDirectory {
     for(; (line = in.readLine()) != null; ) {
       line = line.trim();
       if (!line.isEmpty() && !line.contains("snapshot")) {
-        Assert.assertTrue("line=" + line,
+        assertTrue("line=" + line,
             line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
-            || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
+                || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM)
+        );
         checkClassName(line);
       }
     }
   }
   
   @Test
-  public void testReset() throws Exception {
-    fsdir.reset();
-    Assert.assertFalse(fsdir.isReady());
-    final INodeDirectory root = (INodeDirectory) fsdir.getINode("/");
-    Assert.assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
-    fsdir.imageLoadComplete();
-    Assert.assertTrue(fsdir.isReady());
-  }
-
-  @Test
   public void testSkipQuotaCheck() throws Exception {
     try {
       // set quota. nsQuota of 1 means no files can be created
@@ -168,7 +177,202 @@ public class TestFSDirectory {
     int i = line.lastIndexOf('(');
     int j = line.lastIndexOf('@');
     final String classname = line.substring(i+1, j);
-    Assert.assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
+    assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
         || classname.startsWith(INodeDirectory.class.getSimpleName()));
   }
+  
+  @Test
+  public void testINodeXAttrsLimit() throws Exception {
+    List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(2);
+    XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
+    XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a2").setValue(new byte[]{0x31, 0x31, 0x31}).build();
+    existingXAttrs.add(xAttr1);
+    existingXAttrs.add(xAttr2);
+    
+    // Adding a system-namespace xAttr isn't affected by the inode xAttr limit.
+    XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).
+        setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
+    List<XAttr> newXAttrs = Lists.newArrayListWithCapacity(1);
+    newXAttrs.add(newXAttr);
+    List<XAttr> xAttrs = fsdir.setINodeXAttrs(existingXAttrs, newXAttrs,
+        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+    assertEquals(3, xAttrs.size());
+    
+    // Adding a trusted-namespace xAttr is affected by the inode xAttr limit.
+    XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(
+        XAttr.NameSpace.TRUSTED).setName("a4").
+        setValue(new byte[]{0x34, 0x34, 0x34}).build();
+    newXAttrs.set(0, newXAttr1);
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, newXAttrs,
+          EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+      fail("Setting user visible xattr on inode should fail if " +
+          "reaching limit.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " +
+          "to inode, would exceed limit", e);
+    }
+  }
+
+  /**
+   * Verify that the first <i>num</i> generatedXAttrs are present in
+   * newXAttrs.
+   */
+  private static void verifyXAttrsPresent(List<XAttr> newXAttrs,
+      final int num) {
+    assertEquals("Unexpected number of XAttrs after multiset", num,
+        newXAttrs.size());
+    for (int i=0; i<num; i++) {
+      XAttr search = generatedXAttrs.get(i);
+      assertTrue("Did not find set XAttr " + search + " + after multiset",
+          newXAttrs.contains(search));
+    }
+  }
+
+  private static List<XAttr> generateXAttrs(final int numXAttrs) {
+    List<XAttr> generatedXAttrs = Lists.newArrayListWithCapacity(numXAttrs);
+    for (int i=0; i<numXAttrs; i++) {
+      XAttr xAttr = (new XAttr.Builder())
+          .setNameSpace(XAttr.NameSpace.SYSTEM)
+          .setName("a" + i)
+          .setValue(new byte[] { (byte) i, (byte) (i + 1), (byte) (i + 2) })
+          .build();
+      generatedXAttrs.add(xAttr);
+    }
+    return generatedXAttrs;
+  }
+
+  /**
+   * Test setting and removing multiple xattrs via single operations
+   */
+  @Test(timeout=300000)
+  public void testXAttrMultiSetRemove() throws Exception {
+    List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(0);
+
+    // Keep adding a random number of xattrs and verifying until exhausted
+    final Random rand = new Random(0xFEEDA);
+    int numExpectedXAttrs = 0;
+    while (numExpectedXAttrs < numGeneratedXAttrs) {
+      LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
+      final int numToAdd = rand.nextInt(5)+1;
+
+      List<XAttr> toAdd = Lists.newArrayListWithCapacity(numToAdd);
+      for (int i = 0; i < numToAdd; i++) {
+        if (numExpectedXAttrs >= numGeneratedXAttrs) {
+          break;
+        }
+        toAdd.add(generatedXAttrs.get(numExpectedXAttrs));
+        numExpectedXAttrs++;
+      }
+      LOG.info("Attempting to add " + toAdd.size() + " XAttrs");
+      for (int i = 0; i < toAdd.size(); i++) {
+        LOG.info("Will add XAttr " + toAdd.get(i));
+      }
+      List<XAttr> newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+          EnumSet.of(XAttrSetFlag.CREATE));
+      verifyXAttrsPresent(newXAttrs, numExpectedXAttrs);
+      existingXAttrs = newXAttrs;
+    }
+
+    // Keep removing a random number of xattrs and verifying until all gone
+    while (numExpectedXAttrs > 0) {
+      LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
+      final int numToRemove = rand.nextInt(5)+1;
+      List<XAttr> toRemove = Lists.newArrayListWithCapacity(numToRemove);
+      for (int i = 0; i < numToRemove; i++) {
+        if (numExpectedXAttrs == 0) {
+          break;
+        }
+        toRemove.add(generatedXAttrs.get(numExpectedXAttrs-1));
+        numExpectedXAttrs--;
+      }
+      final int expectedNumToRemove = toRemove.size();
+      LOG.info("Attempting to remove " + expectedNumToRemove + " XAttrs");
+      List<XAttr> removedXAttrs = Lists.newArrayList();
+      List<XAttr> newXAttrs = fsdir.filterINodeXAttrs(existingXAttrs,
+          toRemove, removedXAttrs);
+      assertEquals("Unexpected number of removed XAttrs",
+          expectedNumToRemove, removedXAttrs.size());
+      verifyXAttrsPresent(newXAttrs, numExpectedXAttrs);
+      existingXAttrs = newXAttrs;
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testXAttrMultiAddRemoveErrors() throws Exception {
+
+    // Test that the same XAttr cannot be set twice in a single multiset call
+    List<XAttr> existingXAttrs = Lists.newArrayList();
+    List<XAttr> toAdd = Lists.newArrayList();
+    toAdd.add(generatedXAttrs.get(0));
+    toAdd.add(generatedXAttrs.get(1));
+    toAdd.add(generatedXAttrs.get(2));
+    toAdd.add(generatedXAttrs.get(0));
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+          EnumSet.of(XAttrSetFlag.CREATE));
+      fail("Specified the same xattr to be set twice");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot specify the same " +
+          "XAttr to be set", e);
+    }
+
+    // Test that CREATE and REPLACE flags are obeyed
+    toAdd.remove(generatedXAttrs.get(0));
+    existingXAttrs.add(generatedXAttrs.get(0));
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+          EnumSet.of(XAttrSetFlag.CREATE));
+      fail("Set XAttr that is already set without REPLACE flag");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("already exists", e);
+    }
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+          EnumSet.of(XAttrSetFlag.REPLACE));
+      fail("Set XAttr that does not exist without the CREATE flag");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("does not exist", e);
+    }
+
+    // Sanity test for CREATE
+    toAdd.remove(generatedXAttrs.get(0));
+    List<XAttr> newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+        EnumSet.of(XAttrSetFlag.CREATE));
+    assertEquals("Unexpected toAdd size", 2, toAdd.size());
+    for (XAttr x : toAdd) {
+      assertTrue("Did not find added XAttr " + x, newXAttrs.contains(x));
+    }
+    existingXAttrs = newXAttrs;
+
+    // Sanity test for REPLACE
+    toAdd = Lists.newArrayList();
+    for (int i=0; i<3; i++) {
+      XAttr xAttr = (new XAttr.Builder())
+          .setNameSpace(XAttr.NameSpace.SYSTEM)
+          .setName("a" + i)
+          .setValue(new byte[] { (byte) (i*2) })
+          .build();
+      toAdd.add(xAttr);
+    }
+    newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+        EnumSet.of(XAttrSetFlag.REPLACE));
+    assertEquals("Unexpected number of new XAttrs", 3, newXAttrs.size());
+    for (int i=0; i<3; i++) {
+      assertArrayEquals("Unexpected XAttr value",
+          new byte[] {(byte)(i*2)}, newXAttrs.get(i).getValue());
+    }
+    existingXAttrs = newXAttrs;
+
+    // Sanity test for CREATE+REPLACE
+    toAdd = Lists.newArrayList();
+    for (int i=0; i<4; i++) {
+      toAdd.add(generatedXAttrs.get(i));
+    }
+    newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+    verifyXAttrsPresent(newXAttrs, 4);
+  }
 }

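Note on the XAttr flag tests above: setINodeXAttrs is internal to FSDirectory, but the CREATE/REPLACE semantics it enforces are the same ones exposed through the public FileSystem API. A small sketch under that assumption (the path is illustrative):

    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.XAttrSetFlag;

    public class XAttrFlagDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/xattr-demo");  // illustrative path
        // CREATE fails if the xattr already exists.
        fs.setXAttr(p, "user.a1", new byte[]{0x31},
            EnumSet.of(XAttrSetFlag.CREATE));
        // REPLACE fails if the xattr does not exist yet.
        fs.setXAttr(p, "user.a1", new byte[]{0x32},
            EnumSet.of(XAttrSetFlag.REPLACE));
        // Both flags together behave as create-or-replace.
        fs.setXAttr(p, "user.a2", new byte[]{0x33},
            EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
      }
    }
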
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java Tue Aug 19 23:49:39 2014
@@ -24,7 +24,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.EnumSet;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java Tue Aug 19 23:49:39 2014
@@ -142,7 +142,7 @@ public class TestFSImageWithAcl {
     AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
     Assert.assertArrayEquals(subdirExpected, subdirReturned);
-    assertPermission(fs, subdirPath, (short)0755);
+    assertPermission(fs, subdirPath, (short)010755);
 
     restart(fs, persistNamespace);
 
@@ -152,7 +152,7 @@ public class TestFSImageWithAcl {
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
     Assert.assertArrayEquals(subdirExpected, subdirReturned);
-    assertPermission(fs, subdirPath, (short)0755);
+    assertPermission(fs, subdirPath, (short)010755);
 
     aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE));
     fs.modifyAclEntries(dirPath, aclSpec);
@@ -163,7 +163,7 @@ public class TestFSImageWithAcl {
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
     Assert.assertArrayEquals(subdirExpected, subdirReturned);
-    assertPermission(fs, subdirPath, (short)0755);
+    assertPermission(fs, subdirPath, (short)010755);
 
     restart(fs, persistNamespace);
 
@@ -173,7 +173,7 @@ public class TestFSImageWithAcl {
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
     Assert.assertArrayEquals(subdirExpected, subdirReturned);
-    assertPermission(fs, subdirPath, (short)0755);
+    assertPermission(fs, subdirPath, (short)010755);
 
     fs.removeAcl(dirPath);
 
@@ -183,7 +183,7 @@ public class TestFSImageWithAcl {
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
     Assert.assertArrayEquals(subdirExpected, subdirReturned);
-    assertPermission(fs, subdirPath, (short)0755);
+    assertPermission(fs, subdirPath, (short)010755);
 
     restart(fs, persistNamespace);
 
@@ -193,7 +193,7 @@ public class TestFSImageWithAcl {
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
     Assert.assertArrayEquals(subdirExpected, subdirReturned);
-    assertPermission(fs, subdirPath, (short)0755);
+    assertPermission(fs, subdirPath, (short)010755);
   }
 
   @Test

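Note on the (short)0755 -> (short)010755 changes above: the expected permissions now carry an extra high-order bit (octal 010000) beyond the twelve classic mode bits, evidently marking inodes that have an ACL attached. A sketch of decoding such a value; the mask is inferred from the constants in this patch rather than from a documented public API:

    public class AclBitDemo {
      // Assumption: octal 010000 is the ACL marker bit seen in the
      // expected values above; 07777 masks the classic mode bits.
      private static final short ACL_BIT = (short) 010000;

      public static void main(String[] args) {
        short perm = (short) 010755;
        boolean hasAcl = (perm & ACL_BIT) != 0;
        int classicBits = perm & 07777;
        System.out.printf("hasAcl=%b classic=%o%n", hasAcl, classicBits);
      }
    }
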
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java Tue Aug 19 23:49:39 2014
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.util.Canceler;
@@ -194,8 +193,8 @@ public class TestFSImageWithSnapshot {
     fsn = cluster.getNamesystem();
     hdfs = cluster.getFileSystem();
     
-    INodeDirectorySnapshottable rootNode = 
-        (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
+    INodeDirectory rootNode = fsn.dir.getINode4Write(root.toString())
+        .asDirectory();
     assertTrue("The children list of root should be empty", 
         rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
     // one snapshot on root: s1

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java Tue Aug 19 23:49:39 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.junit.After;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -194,4 +195,22 @@ public class TestFSNamesystem {
     assertFalse(rwLock.isWriteLockedByCurrentThread());
     assertEquals(0, rwLock.getWriteHoldCount());
   }
+
+  @Test
+  public void testReset() throws Exception {
+    Configuration conf = new Configuration();
+    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
+    FSImage fsImage = Mockito.mock(FSImage.class);
+    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
+    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+    fsn.imageLoadComplete();
+    assertTrue(fsn.isImageLoaded());
+    fsn.clear();
+    assertFalse(fsn.isImageLoaded());
+    final INodeDirectory root = (INodeDirectory) fsn.getFSDirectory()
+            .getINode("/");
+    assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
+    fsn.imageLoadComplete();
+    assertTrue(fsn.isImageLoaded());
+  }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java Tue Aug 19 23:49:39 2014
@@ -35,6 +35,89 @@ import org.mortbay.util.ajax.JSON;
  */
 public class TestFSNamesystemMBean {
 
+  /**
+   * MBeanClient tries to access FSNamesystem/FSNamesystemState/NameNodeInfo
+   * JMX properties. If it can access all the properties, the test is
+   * considered successful.
+   */
+  private static class MBeanClient extends Thread {
+    private boolean succeeded = false;
+    @Override
+    public void run() {
+      try {
+        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+
+        // Metrics that belong to "FSNamesystem": these come from the
+        // Hadoop metrics framework for the FSNamesystem class.
+        ObjectName mxbeanNamefsn = new ObjectName(
+            "Hadoop:service=NameNode,name=FSNamesystem");
+        Integer blockCapacity = (Integer) (mbs.getAttribute(mxbeanNamefsn,
+            "BlockCapacity"));
+
+        // Metrics that belong to "FSNamesystemState".
+        // These are metrics that FSNamesystem registers directly with MBeanServer.
+        ObjectName mxbeanNameFsns = new ObjectName(
+            "Hadoop:service=NameNode,name=FSNamesystemState");
+        String FSState = (String) (mbs.getAttribute(mxbeanNameFsns,
+            "FSState"));
+        Long blocksTotal = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "BlocksTotal"));
+        Long capacityTotal = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "CapacityTotal"));
+        Long capacityRemaining = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "CapacityRemaining"));
+        Long capacityUsed = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "CapacityUsed"));
+        Long filesTotal = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "FilesTotal"));
+        Long pendingReplicationBlocks = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "PendingReplicationBlocks"));
+        Long underReplicatedBlocks = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "UnderReplicatedBlocks"));
+        Long scheduledReplicationBlocks = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "ScheduledReplicationBlocks"));
+        Integer totalLoad = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "TotalLoad"));
+        Integer numLiveDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumLiveDataNodes"));
+        Integer numDeadDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+           "NumDeadDataNodes"));
+        Integer numStaleDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumStaleDataNodes"));
+        Integer numDecomLiveDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumDecomLiveDataNodes"));
+        Integer numDecomDeadDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumDecomDeadDataNodes"));
+        Integer numDecommissioningDataNodes = (Integer) (mbs.getAttribute(mxbeanNameFsns,
+            "NumDecommissioningDataNodes"));
+        String snapshotStats = (String) (mbs.getAttribute(mxbeanNameFsns,
+            "SnapshotStats"));
+        Long MaxObjects = (Long) (mbs.getAttribute(mxbeanNameFsns,
+            "MaxObjects"));
+        Integer numStaleStorages = (Integer) (mbs.getAttribute(
+            mxbeanNameFsns, "NumStaleStorages"));
+
+        // Metrics that belong to "NameNodeInfo".
+        // These are metrics that FSNamesystem registers directly with MBeanServer.
+        ObjectName mxbeanNameNni = new ObjectName(
+            "Hadoop:service=NameNode,name=NameNodeInfo");
+        String safemode = (String) (mbs.getAttribute(mxbeanNameNni,
+            "Safemode"));
+        String liveNodes = (String) (mbs.getAttribute(mxbeanNameNni,
+            "LiveNodes"));
+        String deadNodes = (String) (mbs.getAttribute(mxbeanNameNni,
+            "DeadNodes"));
+        String decomNodes = (String) (mbs.getAttribute(mxbeanNameNni,
+            "DecomNodes"));
+        String corruptFiles = (String) (mbs.getAttribute(mxbeanNameNni,
+            "CorruptFiles"));
+
+        succeeded = true;
+      } catch (Exception e) { // succeeded stays false; the test will fail
+      }
+    }
+  }
+
   @Test
   public void test() throws Exception {
     Configuration conf = new Configuration();
@@ -73,4 +156,35 @@ public class TestFSNamesystemMBean {
       }
     }
   }
-}
+
+  // This test makes sure a JMX request can be processed even while the
+  // namesystem's write lock is held by another thread.
+  @Test
+  public void testWithFSNamesystemWriteLock() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    FSNamesystem fsn = null;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+
+      fsn = cluster.getNameNode().namesystem;
+      fsn.writeLock();
+
+      MBeanClient client = new MBeanClient();
+      client.start();
+      client.join(20000);
+      assertTrue("JMX calls are blocked when FSNamesystem's writerlock" +
+          "is owned by another thread", client.succeeded);
+      client.interrupt();
+    } finally {
+      if (fsn != null && fsn.hasWriteLock()) {
+        fsn.writeUnlock();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
\ No newline at end of file

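Note on MBeanClient above: it reads the NameNode beans through the in-process platform MBeanServer. The same attributes can be read from outside the JVM over a JMX connector; a sketch, assuming the NameNode JVM was started with the standard com.sun.management.jmxremote options and an illustrative port:

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class JmxReadDemo {
      public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://localhost:8004/jmxrmi");
        try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
          MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
          ObjectName fsns = new ObjectName(
              "Hadoop:service=NameNode,name=FSNamesystemState");
          // Same attribute the test reads in-process.
          Object blocksTotal = mbsc.getAttribute(fsns, "BlocksTotal");
          System.out.println("BlocksTotal = " + blocksTotal);
        }
      }
    }
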
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java Tue Aug 19 23:49:39 2014
@@ -26,6 +26,7 @@ import static org.junit.Assert.*;
 import java.io.IOException;
 import java.util.Arrays;
 
+import org.apache.hadoop.conf.Configuration;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -38,7 +39,10 @@ import org.apache.hadoop.fs.permission.P
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
+import static org.mockito.Mockito.*;
 /**
  * Unit tests covering FSPermissionChecker.  All tests in this suite have been
  * cross-validated against Linux setfacl/getfacl to check for consistency of the
@@ -56,14 +60,23 @@ public class TestFSPermissionChecker {
   private static final UserGroupInformation CLARK =
     UserGroupInformation.createUserForTesting("clark", new String[] { "execs" });
 
+  private FSDirectory dir;
   private INodeDirectory inodeRoot;
 
   @Before
   public void setUp() {
-    PermissionStatus permStatus = PermissionStatus.createImmutable(SUPERUSER,
-      SUPERGROUP, FsPermission.createImmutable((short)0755));
-    inodeRoot = new INodeDirectory(INodeId.ROOT_INODE_ID,
-      INodeDirectory.ROOT_NAME, permStatus, 0L);
+    Configuration conf = new Configuration();
+    FSNamesystem fsn = mock(FSNamesystem.class);
+    doAnswer(new Answer() {
+      @Override
+      public Object answer(InvocationOnMock invocation) throws Throwable {
+        Object[] args = invocation.getArguments();
+        FsPermission perm = (FsPermission) args[0];
+        return new PermissionStatus(SUPERUSER, SUPERGROUP, perm);
+      }
+    }).when(fsn).createFsOwnerPermissions(any(FsPermission.class));
+    dir = new FSDirectory(fsn, conf);
+    inodeRoot = dir.getRoot();
   }
 
   @Test
@@ -379,14 +392,14 @@ public class TestFSPermissionChecker {
   private void assertPermissionGranted(UserGroupInformation user, String path,
       FsAction access) throws IOException {
     new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
-      inodeRoot, false, null, null, access, null, true);
+      dir, false, null, null, access, null, false, true);
   }
 
   private void assertPermissionDenied(UserGroupInformation user, String path,
       FsAction access) throws IOException {
     try {
       new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
-        inodeRoot, false, null, null, access, null, true);
+        dir, false, null, null, access, null, false, true);
       fail("expected AccessControlException for user + " + user + ", path = " +
         path + ", access = " + access);
     } catch (AccessControlException e) {