Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2014/08/20 01:50:25 UTC

svn commit: r1619012 [34/35] - in /hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop...

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java Tue Aug 19 23:49:39 2014
@@ -143,10 +143,10 @@ public class TestSnapshotFileLength {
 
     // Make sure we can read the entire file via its non-snapshot path.
     fileStatus = hdfs.getFileStatus(file1);
-    assertEquals(fileStatus.getLen(), BLOCKSIZE * 2);
+    assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
     fis = hdfs.open(file1);
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals(bytesRead, BLOCKSIZE * 2);
+    assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
     fis.close();
 
     Path file1snap1 =
@@ -156,21 +156,23 @@ public class TestSnapshotFileLength {
     assertEquals(fileStatus.getLen(), BLOCKSIZE);
     // Make sure we can only read up to the snapshot length.
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals(bytesRead, BLOCKSIZE);
+    assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
     fis.close();
 
-    PrintStream psBackup = System.out;
+    PrintStream outBackup = System.out;
+    PrintStream errBackup = System.err;
     ByteArrayOutputStream bao = new ByteArrayOutputStream();
     System.setOut(new PrintStream(bao));
     System.setErr(new PrintStream(bao));
     // Make sure we can cat the file up to the snapshot length
     FsShell shell = new FsShell();
-    try{
+    try {
       ToolRunner.run(conf, shell, new String[] { "-cat",
       "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
-      assertEquals(bao.size(), BLOCKSIZE);
-    }finally{
-      System.setOut(psBackup);
+      assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
+    } finally {
+      System.setOut(outBackup);
+      System.setErr(errBackup);
     }
   }
 }
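
The hunk above does two things: the assertEquals calls now pass the expected value before the actual one, matching the JUnit contract assertEquals(message, expected, actual), and System.err is restored alongside System.out. A minimal, self-contained sketch of that capture-and-restore pattern (hypothetical names, not the committed code):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    public class StreamCaptureSketch {
      static String captureOutput(Runnable action) {
        PrintStream outBackup = System.out;
        PrintStream errBackup = System.err;
        ByteArrayOutputStream bao = new ByteArrayOutputStream();
        System.setOut(new PrintStream(bao));
        System.setErr(new PrintStream(bao));
        try {
          action.run();
        } finally {
          // Restore both streams; restoring only System.out would leave
          // System.err pointing at the byte buffer for every later test.
          System.setOut(outBackup);
          System.setErr(errBackup);
        }
        return bao.toString();
      }
    }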

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java Tue Aug 19 23:49:39 2014
@@ -18,13 +18,19 @@
 
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
 import java.util.ArrayList;
 
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.junit.*;
-import static org.mockito.Mockito.*;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.junit.Assert;
+import org.junit.Test;
 
 
 /**
@@ -40,7 +46,7 @@ public class TestSnapshotManager {
   public void testSnapshotLimits() throws Exception {
     // Setup mock objects for SnapshotManager.createSnapshot.
     //
-    INodeDirectorySnapshottable ids = mock(INodeDirectorySnapshottable.class);
+    INodeDirectory ids = mock(INodeDirectory.class);
     FSDirectory fsdir = mock(FSDirectory.class);
 
     SnapshotManager sm = spy(new SnapshotManager(fsdir));
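
With INodeDirectorySnapshottable gone, the test now mocks the plain INodeDirectory instead. As a reminder of the mock/spy distinction the test relies on, a generic, self-contained Mockito sketch (java.util.List stands in for the Hadoop classes):

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.spy;

    import java.util.ArrayList;
    import java.util.List;

    public class MockVsSpySketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        // mock(): every method is canned; unstubbed calls return defaults.
        List<String> mocked = mock(List.class);
        doReturn(1).when(mocked).size();

        // spy(): wraps a real instance; unstubbed calls run the real code.
        List<String> spied = spy(new ArrayList<String>());
        spied.add("real element");        // real behavior
        doReturn(99).when(spied).size();  // selective stub; real size() not called
        System.out.println(mocked.size() + " " + spied.size()); // prints "1 99"
      }
    }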

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java Tue Aug 19 23:49:39 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.ipc.RemoteException;
@@ -88,12 +89,13 @@ public class TestSnapshotRename {
   public ExpectedException exception = ExpectedException.none();
   
   /**
-   * Check the correctness of snapshot list within
-   * {@link INodeDirectorySnapshottable}
+   * Check the correctness of the snapshot list within a snapshottable directory
    */
-  private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,
+  private void checkSnapshotList(INodeDirectory srcRoot,
       String[] sortedNames, String[] names) {
-    ReadOnlyList<Snapshot> listByName = srcRoot.getSnapshotsByNames();
+    assertTrue(srcRoot.isSnapshottable());
+    ReadOnlyList<Snapshot> listByName = srcRoot
+        .getDirectorySnapshottableFeature().getSnapshotList();
     assertEquals(sortedNames.length, listByName.size());
     for (int i = 0; i < listByName.size(); i++) {
       assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
@@ -101,7 +103,8 @@ public class TestSnapshotRename {
     List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
     assertEquals(names.length, listByTime.size());
     for (int i = 0; i < listByTime.size(); i++) {
-      Snapshot s = srcRoot.getSnapshotById(listByTime.get(i).getSnapshotId());
+      Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
+          listByTime.get(i).getSnapshotId());
       assertEquals(names[i], s.getRoot().getLocalName());
     }
   }
@@ -121,8 +124,7 @@ public class TestSnapshotRename {
     // Rename s3 to s22
     hdfs.renameSnapshot(sub1, "s3", "s22");
     // Check the snapshots list
-    INodeDirectorySnapshottable srcRoot = INodeDirectorySnapshottable.valueOf(
-        fsdir.getINode(sub1.toString()), sub1.toString());
+    INodeDirectory srcRoot = fsdir.getINode(sub1.toString()).asDirectory();
     checkSnapshotList(srcRoot, new String[] { "s1", "s2", "s22" },
         new String[] { "s1", "s2", "s22" });
     
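
The rename test reflects the same refactoring: INodeDirectorySnapshottable was folded into INodeDirectory plus a DirectorySnapshottableFeature, so a class cast gives way to a feature query. In outline (a NameNode-internal sketch mirroring the hunks above, not compilable on its own):

    // Old: resolve a dedicated subclass.
    // INodeDirectorySnapshottable srcRoot = INodeDirectorySnapshottable
    //     .valueOf(fsdir.getINode(path), path);

    // New: any INodeDirectory may carry the snapshottable feature.
    INodeDirectory srcRoot = fsdir.getINode(path).asDirectory();
    assertTrue(srcRoot.isSnapshottable());
    ReadOnlyList<Snapshot> byName =
        srcRoot.getDirectorySnapshottableFeature().getSnapshotList();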

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java Tue Aug 19 23:49:39 2014
@@ -92,7 +92,7 @@ public class TestWebHdfsDataLocality {
 
           //The chosen datanode must be the same as the client address
           final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
-              namenode, f, PutOpParam.Op.CREATE, -1L, blocksize);
+              namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, null);
           Assert.assertEquals(ipAddr, chosen.getIpAddr());
         }
       }
@@ -117,23 +117,104 @@ public class TestWebHdfsDataLocality {
 
       { //test GETFILECHECKSUM
         final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
-            namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize);
+            namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, null);
         Assert.assertEquals(expected, chosen);
       }
   
       { //test OPEN
         final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
-            namenode, f, GetOpParam.Op.OPEN, 0, blocksize);
+            namenode, f, GetOpParam.Op.OPEN, 0, blocksize, null);
         Assert.assertEquals(expected, chosen);
       }
 
       { //test APPEND
         final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
-            namenode, f, PostOpParam.Op.APPEND, -1L, blocksize);
+            namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, null);
         Assert.assertEquals(expected, chosen);
       }
     } finally {
       cluster.shutdown();
     }
   }
+  
+  @Test
+  public void testExcludeDataNodes() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
+    final String[] hosts = {"DataNode1", "DataNode2", "DataNode3",
+        "DataNode4", "DataNode5", "DataNode6"};
+    final int nDataNodes = hosts.length;
+    LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks)
+        + ", hosts=" + Arrays.asList(hosts));
+
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
+    
+    try {
+      cluster.waitActive();
+
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final NameNode namenode = cluster.getNameNode();
+      final DatanodeManager dm = namenode.getNamesystem().getBlockManager(
+          ).getDatanodeManager();
+      LOG.info("dm=" + dm);
+  
+      final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+      final String f = "/foo";
+      
+      //create a file with three replicas.
+      final Path p = new Path(f);
+      final FSDataOutputStream out = dfs.create(p, (short)3);
+      out.write(1);
+      out.close(); 
+      
+      //get replica location.
+      final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
+          namenode, f, 0, 1);
+      final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
+      Assert.assertEquals(1, lb.size());
+      final DatanodeInfo[] locations = lb.get(0).getLocations();
+      Assert.assertEquals(3, locations.length);
+      
+      
+      //For GETFILECHECKSUM, OPEN and APPEND,
+      //the chosen datanode must be different from the excluded nodes.
+
+      StringBuffer sb = new StringBuffer();
+      for (int i = 0; i < 2; i++) {
+        sb.append(locations[i].getXferAddr());
+        { // test GETFILECHECKSUM
+          final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+              namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize,
+              sb.toString());
+          for (int j = 0; j <= i; j++) {
+            Assert.assertNotEquals(locations[j].getHostName(),
+                chosen.getHostName());
+          }
+        }
+
+        { // test OPEN
+          final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+              namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString());
+          for (int j = 0; j <= i; j++) {
+            Assert.assertNotEquals(locations[j].getHostName(),
+                chosen.getHostName());
+          }
+        }
+  
+        { // test APPEND
+          final DatanodeInfo chosen = NamenodeWebHdfsMethods
+              .chooseDatanode(namenode, f, PostOpParam.Op.APPEND, -1L,
+                  blocksize, sb.toString());
+          for (int j = 0; j <= i; j++) {
+            Assert.assertNotEquals(locations[j].getHostName(),
+                chosen.getHostName());
+          }
+        }
+        
+        sb.append(",");
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
\ No newline at end of file
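
chooseDatanode gains a trailing excludeDatanodes argument, a comma-separated list of transfer addresses; each loop iteration above grows that list by one node. A self-contained sketch of building such a list (a StringBuilder also works here and, unlike the test's StringBuffer, skips needless synchronization; the addresses are hypothetical stand-ins for DatanodeInfo.getXferAddr()):

    public class ExcludeListSketch {
      public static void main(String[] args) {
        String[] xferAddrs = {"127.0.0.1:50010", "127.0.0.2:50010"};
        StringBuilder sb = new StringBuilder();
        for (String addr : xferAddrs) {
          if (sb.length() > 0) {
            sb.append(',');  // separator only between entries, no trailing comma
          }
          sb.append(addr);
          // Each round excludes one more datanode than the previous one.
          System.out.println("excludeDatanodes=" + sb);
        }
      }
    }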

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java Tue Aug 19 23:49:39 2014
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY;
 import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.DataOutputStream;
@@ -30,7 +31,9 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
 
 import org.apache.commons.lang.mutable.MutableBoolean;
@@ -197,11 +200,12 @@ public class TestShortCircuitCache {
   @Test(timeout=60000)
   public void testExpiry() throws Exception {
     final ShortCircuitCache cache =
-        new ShortCircuitCache(2, 1, 1, 10000000, 1, 10000, 0);
+        new ShortCircuitCache(2, 1, 1, 10000000, 1, 10000000, 0);
     final TestFileDescriptorPair pair = new TestFileDescriptorPair();
     ShortCircuitReplicaInfo replicaInfo1 =
       cache.fetchOrCreate(
-        new ExtendedBlockId(123, "test_bp1"), new SimpleReplicaCreator(123, cache, pair));
+        new ExtendedBlockId(123, "test_bp1"),
+          new SimpleReplicaCreator(123, cache, pair));
     Preconditions.checkNotNull(replicaInfo1.getReplica());
     Preconditions.checkState(replicaInfo1.getInvalidTokenException() == null);
     pair.compareWith(replicaInfo1.getReplica().getDataStream(),
@@ -461,6 +465,7 @@ public class TestShortCircuitCache {
       }
     }, 10, 60000);
     cluster.shutdown();
+    sockDir.close();
   }
 
   @Test(timeout=60000)
@@ -515,4 +520,98 @@ public class TestShortCircuitCache {
     });
     cluster.shutdown();
   }
+
+  /**
+   * Test unlinking a file whose blocks we are caching in the DFSClient.
+   * The DataNode will notify the DFSClient that the replica is stale via the
+   * ShortCircuitShm.
+   */
+  @Test(timeout=60000)
+  public void testUnlinkingReplicasInFileDescriptorCache() throws Exception {
+    BlockReaderTestUtil.enableShortCircuitShmTracing();
+    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
+    Configuration conf = createShortCircuitConf(
+        "testUnlinkingReplicasInFileDescriptorCache", sockDir);
+    // We don't want the CacheCleaner to time out short-circuit shared memory
+    // segments during the test, so set the timeout really high.
+    conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
+        1000000000L);
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    final ShortCircuitCache cache =
+        fs.getClient().getClientContext().getShortCircuitCache();
+    cache.getDfsClientShmManager().visit(new Visitor() {
+      @Override
+      public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
+          throws IOException {
+        // The ClientShmManager starts off empty.
+        Assert.assertEquals(0, info.size());
+      }
+    });
+    final Path TEST_PATH = new Path("/test_file");
+    final int TEST_FILE_LEN = 8193;
+    final int SEED = 0xFADE0;
+    DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LEN,
+        (short)1, SEED);
+    byte contents[] = DFSTestUtil.readFileBuffer(fs, TEST_PATH);
+    byte expected[] = DFSTestUtil.
+        calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
+    Assert.assertTrue(Arrays.equals(contents, expected));
+    // Loading this file brought the ShortCircuitReplica into our local
+    // replica cache.
+    final DatanodeInfo datanode =
+        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    cache.getDfsClientShmManager().visit(new Visitor() {
+      @Override
+      public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
+          throws IOException {
+        Assert.assertTrue(info.get(datanode).full.isEmpty());
+        Assert.assertFalse(info.get(datanode).disabled);
+        Assert.assertEquals(1, info.get(datanode).notFull.values().size());
+        DfsClientShm shm =
+            info.get(datanode).notFull.values().iterator().next();
+        Assert.assertFalse(shm.isDisconnected());
+      }
+    });
+    // Remove the file whose blocks we just read.
+    fs.delete(TEST_PATH, false);
+
+    // Wait for the replica to be purged from the DFSClient's cache.
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      MutableBoolean done = new MutableBoolean(true);
+      @Override
+      public Boolean get() {
+        try {
+          done.setValue(true);
+          cache.getDfsClientShmManager().visit(new Visitor() {
+            @Override
+            public void visit(HashMap<DatanodeInfo,
+                  PerDatanodeVisitorInfo> info) throws IOException {
+              Assert.assertTrue(info.get(datanode).full.isEmpty());
+              Assert.assertFalse(info.get(datanode).disabled);
+              Assert.assertEquals(1,
+                  info.get(datanode).notFull.values().size());
+              DfsClientShm shm = info.get(datanode).notFull.values().
+                  iterator().next();
+              // Check that all slots have been invalidated.
+              for (Iterator<Slot> iter = shm.slotIterator();
+                   iter.hasNext(); ) {
+                Slot slot = iter.next();
+                if (slot.isValid()) {
+                  done.setValue(false);
+                }
+              }
+            }
+          });
+        } catch (IOException e) {
+          LOG.error("error running visitor", e);
+        }
+        return done.booleanValue();
+      }
+    }, 10, 60000);
+    cluster.shutdown();
+    sockDir.close();
+  }
 }
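
Both new shutdown paths now close the TemporarySocketDirectory, and the unlink test polls with GenericTestUtils.waitFor until every shared-memory slot is invalidated. The helper's contract is: evaluate a Supplier<Boolean> every checkEveryMillis until it returns true or waitForMillis elapses. A simplified, self-contained stand-in (not the Hadoop implementation):

    import com.google.common.base.Supplier;

    public class WaitForSketch {
      // Simplified stand-in for GenericTestUtils.waitFor(check, interval, timeout).
      static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
          int waitForMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + waitForMillis;
        while (!check.get()) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("Timed out waiting for condition");
          }
          Thread.sleep(checkEveryMillis);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        final long start = System.currentTimeMillis();
        waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            // The condition flips to true once 50 ms have elapsed.
            return System.currentTimeMillis() - start > 50;
          }
        }, 10, 60000);
        System.out.println("condition met");
      }
    }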

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java Tue Aug 19 23:49:39 2014
@@ -291,17 +291,17 @@ public class TestShortCircuitLocalRead {
     }
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testFileLocalReadNoChecksum() throws Exception {
     doTestShortCircuitRead(true, 3*blockSize+100, 0);
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testFileLocalReadChecksum() throws Exception {
     doTestShortCircuitRead(false, 3*blockSize+100, 0);
   }
   
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testSmallFileLocalRead() throws Exception {
     doTestShortCircuitRead(false, 13, 0);
     doTestShortCircuitRead(false, 13, 5);
@@ -309,7 +309,7 @@ public class TestShortCircuitLocalRead {
     doTestShortCircuitRead(true, 13, 5);
   }
   
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testLocalReadLegacy() throws Exception {
     doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(),
         getCurrentUser(), false);
@@ -320,18 +320,18 @@ public class TestShortCircuitLocalRead {
    * to use short circuit. The test ensures reader falls back to non
    * shortcircuit reads when shortcircuit is disallowed.
    */
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testLocalReadFallback() throws Exception {
     doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(), "notallowed", true);
   }
   
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testReadFromAnOffset() throws Exception {
     doTestShortCircuitRead(false, 3*blockSize+100, 777);
     doTestShortCircuitRead(true, 3*blockSize+100, 777);
   }
   
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testLongFile() throws Exception {
     doTestShortCircuitRead(false, 10*blockSize+100, 777);
     doTestShortCircuitRead(true, 10*blockSize+100, 777);
@@ -578,6 +578,7 @@ public class TestShortCircuitLocalRead {
     fs.delete(file1, false);
   }
 
+  @Test(timeout=60000)
   public void testReadWithRemoteBlockReader() throws IOException, InterruptedException {
     doTestShortCircuitReadWithRemoteBlockReader(true, 3*blockSize+100, getCurrentUser(), 0, false);
   }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java Tue Aug 19 23:49:39 2014
@@ -176,6 +176,7 @@ public class TestDFSHAAdmin {
   
   @Test
   public void testTransitionToActive() throws Exception {
+    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     assertEquals(0, runTool("-transitionToActive", "nn1"));
     Mockito.verify(mockProtocol).transitionToActive(
         reqInfoCaptor.capture());
@@ -414,6 +415,6 @@ public class TestDFSHAAdmin {
   }
   
   private StateChangeRequestInfo anyReqInfo() {
-    return Mockito.<StateChangeRequestInfo>any();
+    return Mockito.any();
   }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java Tue Aug 19 23:49:39 2014
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAAdmin;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
@@ -204,6 +205,70 @@ public class TestDFSHAAdminMiniCluster {
     assertEquals(0, runTool("-checkHealth", "nn2"));
   }
   
+  /**
+   * Test case to check that both namenodes cannot be active at the same time
+   * @throws Exception
+   */
+  @Test
+  public void testTransitionToActiveWhenOtherNamenodeIsActive()
+      throws Exception {
+    NameNode nn1 = cluster.getNameNode(0);
+    NameNode nn2 = cluster.getNameNode(1);
+    if (nn1.getState() != null
+        && !nn1.getState().equals(HAServiceState.STANDBY.name())) {
+      cluster.transitionToStandby(0);
+    }
+    if (nn2.getState() != null
+        && !nn2.getState().equals(HAServiceState.STANDBY.name())) {
+      cluster.transitionToStandby(1);
+    }
+    //Making sure both the namenodes are in standby state
+    assertTrue(nn1.isStandbyState());
+    assertTrue(nn2.isStandbyState());
+    // Triggering the transition for both namenode to Active
+    runTool("-transitionToActive", "nn1");
+    runTool("-transitionToActive", "nn2");
+
+    assertFalse("Both namenodes cannot be active", nn1.isActiveState() 
+        && nn2.isActiveState());
+   
+    /*  This test case verifies that nn2 cannot transition to active, even
+        with the forceActive switch, since nn1 is already active  */
+    if (nn1.getState() != null
+        && !nn1.getState().equals(HAServiceState.STANDBY.name())) {
+      cluster.transitionToStandby(0);
+    }
+    if (nn2.getState() != null
+        && !nn2.getState().equals(HAServiceState.STANDBY.name())) {
+      cluster.transitionToStandby(1);
+    }
+    //Making sure both the namenodes are in standby state
+    assertTrue(nn1.isStandbyState());
+    assertTrue(nn2.isStandbyState());
+    
+    runTool("-transitionToActive", "nn1");
+    runTool("-transitionToActive", "nn2","--forceactive");
+    
+    assertFalse("Both namenodes cannot be active even with forceActive",
+        nn1.isActiveState() && nn2.isActiveState());
+
+    /*  In this test case, we deliberately shut down nn1. This causes
+        HAAdmin#isOtherTargetNodeActive to throw an exception, and
+        transitionToActive for nn2 with the forceActive switch succeeds
+        despite that exception.  */
+    cluster.shutdownNameNode(0);
+    if (nn2.getState() != null
+        && !nn2.getState().equals(HAServiceState.STANDBY.name())) {
+      cluster.transitionToStandby(1);
+    }
+    //Making sure the remaining namenode (nn2) is in standby state
+    assertTrue(nn2.isStandbyState());
+    assertFalse(cluster.isNameNodeUp(0));
+    
+    runTool("-transitionToActive", "nn2", "--forceactive");
+    assertTrue("Namenode nn2 should be active", nn2.isActiveState());
+  }
+  
   private int runTool(String ... args) throws Exception {
     errOutBytes.reset();
     LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
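
The new test repeats the same ensure-standby guard before each scenario; a hypothetical helper consolidating it (names assumed, not part of this commit; cluster is the test's existing field) could read:

    // Hypothetical helper: force a namenode back to standby unless it is
    // already there, then verify. Mirrors the guard repeated above.
    private void ensureStandby(NameNode nn, int index) throws Exception {
      if (nn.getState() != null
          && !nn.getState().equals(HAServiceState.STANDBY.name())) {
        cluster.transitionToStandby(index);
      }
      assertTrue(nn.isStandbyState());
    }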

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java Tue Aug 19 23:49:39 2014
@@ -56,8 +56,7 @@ public class TestOfflineEditsViewer {
 
   @SuppressWarnings("deprecation")
   private static ImmutableSet<FSEditLogOpCodes> skippedOps() {
-    ImmutableSet.Builder<FSEditLogOpCodes> b = ImmutableSet
-        .<FSEditLogOpCodes> builder();
+    ImmutableSet.Builder<FSEditLogOpCodes> b = ImmutableSet.builder();
 
     // Deprecated opcodes
     b.add(FSEditLogOpCodes.OP_DATANODE_ADD)
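
The explicit type witness in ImmutableSet.<FSEditLogOpCodes>builder() is redundant: the builder's type parameter is inferred from the assignment target, which is what the simplification above relies on. A self-contained illustration:

    import com.google.common.collect.ImmutableSet;

    public class BuilderInferenceSketch {
      public static void main(String[] args) {
        // Explicit type witness: legal but noisy.
        ImmutableSet.Builder<String> verbose = ImmutableSet.<String>builder();
        // The type parameter is inferred from the assignment target.
        ImmutableSet.Builder<String> inferred = ImmutableSet.builder();
        System.out.println(verbose.add("a").build());   // [a]
        System.out.println(inferred.add("a").build());  // [a]
      }
    }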

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Tue Aug 19 23:49:39 2014
@@ -120,6 +120,11 @@ public class TestOfflineImageViewer {
         }
       }
 
+      // Create an empty directory
+      Path emptydir = new Path("/emptydir");
+      hdfs.mkdirs(emptydir);
+      writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
+
       // Get delegation tokens so we log the delegation token op
       Token<?>[] delegationTokens = hdfs
           .addDelegationTokens(TEST_RENEWER, null);
@@ -133,6 +138,15 @@ public class TestOfflineImageViewer {
       hdfs.mkdirs(new Path("/snapshot/1"));
       hdfs.delete(snapshot, true);
 
+      // Set XAttrs so the fsimage contains XAttr ops
+      final Path xattr = new Path("/xattr");
+      hdfs.mkdirs(xattr);
+      hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
+      hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
+      // OIV should be able to handle empty value XAttrs
+      hdfs.setXAttr(xattr, "user.a3", null);
+      writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
+
       // Write results to the fsimage file
       hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
       hdfs.saveNamespace();
@@ -205,8 +219,8 @@ public class TestOfflineImageViewer {
     matcher = p.matcher(output.getBuffer());
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalDirs = Integer.parseInt(matcher.group(1));
-    // totalDirs includes root directory
-    assertEquals(NUM_DIRS + 1, totalDirs);
+    // totalDirs includes root directory, empty directory, and xattr directory
+    assertEquals(NUM_DIRS + 3, totalDirs);
 
     FileStatus maxFile = Collections.max(writtenFiles.values(),
         new Comparator<FileStatus>() {
@@ -259,7 +273,7 @@ public class TestOfflineImageViewer {
 
       // verify the number of directories
       FileStatus[] statuses = webhdfs.listStatus(new Path("/"));
-      assertEquals(NUM_DIRS, statuses.length);
+      assertEquals(NUM_DIRS + 2, statuses.length); // contains empty and xattr directory
 
       // verify the number of files in the directory
       statuses = webhdfs.listStatus(new Path("/dir0"));
@@ -270,6 +284,10 @@ public class TestOfflineImageViewer {
       FileStatus expected = writtenFiles.get("/dir0/file0");
       compareFile(expected, status);
 
+      // LISTSTATUS operation to an empty directory
+      statuses = webhdfs.listStatus(new Path("/emptydir"));
+      assertEquals(0, statuses.length);
+
       // LISTSTATUS operation to a invalid path
       URL url = new URL("http://localhost:" + port +
                     "/webhdfs/v1/invalid/?op=LISTSTATUS");

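The fsimage under test now contains an empty directory and a directory carrying XAttrs, including one set with a null value. The FileSystem XAttr calls used above, in isolation (a sketch assuming a configuration that points at HDFS; /xattr-demo is a hypothetical path):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class XAttrSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/xattr-demo");
        fs.mkdirs(dir);
        // Names must be namespaced (user., trusted., ...); values are raw bytes.
        fs.setXAttr(dir, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
        // A null value is permitted; the attribute is stored with an empty value.
        fs.setXAttr(dir, "user.a3", null);
        System.out.println(fs.getXAttr(dir, "user.a1").length);  // 3
      }
    }
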
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Tue Aug 19 23:49:39 2014
@@ -22,16 +22,22 @@ import static org.apache.hadoop.fs.permi
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 
+import java.io.IOException;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
@@ -186,6 +192,60 @@ public class TestJsonUtil {
         JsonUtil.toJsonString(aclStatusBuilder.build()));
 
   }
+  
+  @Test
+  public void testToJsonFromXAttrs() throws IOException {
+    String jsonString = 
+        "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
+        "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
+    XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
+    XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build();
+    List<XAttr> xAttrs = Lists.newArrayList();
+    xAttrs.add(xAttr1);
+    xAttrs.add(xAttr2);
+    
+    Assert.assertEquals(jsonString, JsonUtil.toJsonString(xAttrs, 
+        XAttrCodec.HEX));
+  }
+  
+  @Test
+  public void testToXAttrMap() throws IOException {
+    String jsonString = 
+        "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
+        "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
+    Map<?, ?> json = (Map<?, ?>)JSON.parse(jsonString);
+    XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
+    XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build();
+    List<XAttr> xAttrs = Lists.newArrayList();
+    xAttrs.add(xAttr1);
+    xAttrs.add(xAttr2);
+    Map<String, byte[]> xAttrMap = XAttrHelper.buildXAttrMap(xAttrs);
+    Map<String, byte[]> parsedXAttrMap = JsonUtil.toXAttrs(json);
+    
+    Assert.assertEquals(xAttrMap.size(), parsedXAttrMap.size());
+    Iterator<Entry<String, byte[]>> iter = xAttrMap.entrySet().iterator();
+    while(iter.hasNext()) {
+      Entry<String, byte[]> entry = iter.next();
+      Assert.assertArrayEquals(entry.getValue(), 
+          parsedXAttrMap.get(entry.getKey()));
+    }
+  }
+  
+  @Test
+  public void testGetXAttrFromJson() throws IOException {
+    String jsonString = 
+        "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
+        "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
+    Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
+    
+    // Get xattr: user.a2
+    byte[] value = JsonUtil.getXAttr(json, "user.a2");
+    Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);
+  }
 
   private void checkDecodeFailure(Map<String, Object> map) {
     try {
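
The three XAttr tests above round-trip values through XAttrCodec's hex form: "0x313233" is the encoding of the ASCII bytes '1' '2' '3'. A short sketch of the codec calls the tests rely on:

    import org.apache.hadoop.fs.XAttrCodec;

    public class XAttrCodecSketch {
      public static void main(String[] args) throws Exception {
        // Decode the hex form used in the JSON fixtures above.
        byte[] raw = XAttrCodec.decodeValue("0x313233");
        System.out.println(new String(raw, "UTF-8"));                     // 123
        // Encoding back with the HEX codec reproduces the JSON value.
        System.out.println(XAttrCodec.encodeValue(raw, XAttrCodec.HEX));  // 0x313233
      }
    }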

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java Tue Aug 19 23:49:39 2014
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
@@ -34,15 +36,21 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 /** Test WebHDFS */
 public class TestWebHDFS {
@@ -326,4 +334,152 @@ public class TestWebHDFS {
     Assert.assertTrue(conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
         false));
   }
+
+  /**
+   * Test snapshot creation through WebHdfs
+   */
+  @Test
+  public void testWebHdfsCreateSnapshot() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsFileSystem.SCHEME);
+
+      final Path foo = new Path("/foo");
+      dfs.mkdirs(foo);
+
+      try {
+        webHdfs.createSnapshot(foo);
+        fail("Cannot create snapshot on a non-snapshottable directory");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains(
+            "Directory is not a snapshottable directory", e);
+      }
+
+      // allow snapshots on /foo
+      dfs.allowSnapshot(foo);
+      // create snapshots on foo using WebHdfs
+      webHdfs.createSnapshot(foo, "s1");
+      // create snapshot without specifying name
+      final Path spath = webHdfs.createSnapshot(foo, null);
+
+      Assert.assertTrue(webHdfs.exists(spath));
+      final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
+      Assert.assertTrue(webHdfs.exists(s1path));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test snapshot deletion through WebHdfs
+   */
+  @Test
+  public void testWebHdfsDeleteSnapshot() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsFileSystem.SCHEME);
+
+      final Path foo = new Path("/foo");
+      dfs.mkdirs(foo);
+      dfs.allowSnapshot(foo);
+
+      webHdfs.createSnapshot(foo, "s1");
+      final Path spath = webHdfs.createSnapshot(foo, null);
+      Assert.assertTrue(webHdfs.exists(spath));
+      final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
+      Assert.assertTrue(webHdfs.exists(s1path));
+
+      // delete the two snapshots
+      webHdfs.deleteSnapshot(foo, "s1");
+      Assert.assertFalse(webHdfs.exists(s1path));
+      webHdfs.deleteSnapshot(foo, spath.getName());
+      Assert.assertFalse(webHdfs.exists(spath));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test snapshot rename through WebHdfs
+   */
+  @Test
+  public void testWebHdfsRenameSnapshot() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsFileSystem.SCHEME);
+
+      final Path foo = new Path("/foo");
+      dfs.mkdirs(foo);
+      dfs.allowSnapshot(foo);
+
+      webHdfs.createSnapshot(foo, "s1");
+      final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
+      Assert.assertTrue(webHdfs.exists(s1path));
+
+      // rename s1 to s2
+      webHdfs.renameSnapshot(foo, "s1", "s2");
+      Assert.assertFalse(webHdfs.exists(s1path));
+      final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
+      Assert.assertTrue(webHdfs.exists(s2path));
+
+      webHdfs.deleteSnapshot(foo, "s2");
+      Assert.assertFalse(webHdfs.exists(s2path));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Make sure a RetriableException is thrown when rpcServer is null in
+   * NamenodeWebHdfsMethods.
+   */
+  @Test
+  public void testRaceWhileNNStartup() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final NameNode namenode = cluster.getNameNode();
+      final NamenodeProtocols rpcServer = namenode.getRpcServer();
+      Whitebox.setInternalState(namenode, "rpcServer", null);
+
+      final Path foo = new Path("/foo");
+      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsFileSystem.SCHEME);
+      try {
+        webHdfs.mkdirs(foo);
+        fail("Expected RetriableException");
+      } catch (RetriableException e) {
+        GenericTestUtils.assertExceptionContains("Namenode is in startup mode",
+            e);
+      }
+      Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
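
The three snapshot tests drive the generic FileSystem snapshot API over WebHDFS. Reduced to the client-side call sequence (a sketch; the cluster setup is elided, /demo is a hypothetical path, and the handles are assumed to come from a running cluster as in the tests above):

    void snapshotLifecycle(DistributedFileSystem dfs, FileSystem webHdfs)
        throws Exception {
      Path dir = new Path("/demo");
      dfs.mkdirs(dir);
      dfs.allowSnapshot(dir);             // admin step; DistributedFileSystem only
      webHdfs.createSnapshot(dir, "s1");
      webHdfs.renameSnapshot(dir, "s1", "s2");
      webHdfs.deleteSnapshot(dir, "s2");
    }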

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java Tue Aug 19 23:49:39 2014
@@ -18,6 +18,15 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -28,17 +37,15 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Test;
-
-import java.io.IOException;
-import java.net.URI;
-
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
+import org.mockito.internal.util.reflection.Whitebox;
 
 public class TestWebHDFSForHA {
   private static final String LOGICAL_NAME = "minidfs";
@@ -119,6 +126,8 @@ public class TestWebHDFSForHA {
   @Test
   public void testFailoverAfterOpen() throws IOException {
     Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
+        "://" + LOGICAL_NAME);
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     final Path p = new Path("/test");
@@ -152,4 +161,87 @@ public class TestWebHDFSForHA {
       }
     }
   }
-}
\ No newline at end of file
+
+  @Test
+  public void testMultipleNamespacesConfigured() throws Exception {
+    Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    MiniDFSCluster cluster = null;
+    WebHdfsFileSystem fs = null;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+              .numDataNodes(1).build();
+
+      HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+
+      cluster.waitActive();
+      DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
+      DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
+
+      fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
+      Assert.assertEquals(2, fs.getResolvedNNAddr().length);
+    } finally {
+      IOUtils.cleanup(null, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Make sure the WebHdfsFileSystem will retry based on RetriableException when
+   * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
+   */
+  @Test (timeout=120000)
+  public void testRetryWhileNNStartup() throws Exception {
+    final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    MiniDFSCluster cluster = null;
+    final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+          .numDataNodes(0).build();
+      HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      final NameNode namenode = cluster.getNameNode(0);
+      final NamenodeProtocols rpcServer = namenode.getRpcServer();
+      Whitebox.setInternalState(namenode, "rpcServer", null);
+
+      new Thread() {
+        @Override
+        public void run() {
+          boolean result = false;
+          FileSystem fs = null;
+          try {
+            fs = FileSystem.get(WEBHDFS_URI, conf);
+            final Path dir = new Path("/test");
+            result = fs.mkdirs(dir);
+          } catch (IOException e) {
+            result = false;
+          } finally {
+            IOUtils.cleanup(null, fs);
+          }
+          synchronized (TestWebHDFSForHA.this) {
+            resultMap.put("mkdirs", result);
+            TestWebHDFSForHA.this.notifyAll();
+          }
+        }
+      }.start();
+
+      Thread.sleep(1000);
+      Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+      synchronized (this) {
+        while (!resultMap.containsKey("mkdirs")) {
+          this.wait();
+        }
+        Assert.assertTrue(resultMap.get("mkdirs"));
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
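
testRetryWhileNNStartup hands the mkdirs result back through a map guarded by wait/notifyAll. The same hand-off can be written more compactly with a FutureTask; an alternative, self-contained sketch (not the committed pattern):

    import java.util.concurrent.Callable;
    import java.util.concurrent.FutureTask;

    public class HandOffSketch {
      public static void main(String[] args) throws Exception {
        FutureTask<Boolean> mkdirs = new FutureTask<Boolean>(
            new Callable<Boolean>() {
              @Override
              public Boolean call() {
                // Stand-in for fs.mkdirs(dir), retried while the NN starts up.
                return true;
              }
            });
        new Thread(mkdirs).start();
        // get() blocks until the worker finishes, replacing the synchronized
        // map plus wait/notifyAll bookkeeping.
        System.out.println(mkdirs.get());
      }
    }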

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Tue Aug 19 23:49:39 2014
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -49,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Assert;
+import org.junit.Test;
 
 public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
   private static final Configuration conf = new Configuration();
@@ -530,4 +532,35 @@ public class TestWebHdfsFileSystemContra
       }
     }
   }
+
+  @Test
+  public void testAccess() throws IOException, InterruptedException {
+    Path p1 = new Path("/pathX");
+    try {
+      UserGroupInformation ugi = UserGroupInformation.createUserForTesting("alpha",
+          new String[]{"beta"});
+      WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf,
+          WebHdfsFileSystem.SCHEME);
+
+      fs.mkdirs(p1);
+      fs.setPermission(p1, new FsPermission((short) 0444));
+      fs.access(p1, FsAction.READ);
+      try {
+        fs.access(p1, FsAction.WRITE);
+        fail("The access call should have failed.");
+      } catch (AccessControlException e) {
+        // expected
+      }
+
+      Path badPath = new Path("/bad");
+      try {
+        fs.access(badPath, FsAction.READ);
+        fail("The access call should have failed");
+      } catch (FileNotFoundException e) {
+        // expected
+      }
+    } finally {
+      fs.delete(p1, true);
+    }
+  }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java Tue Aug 19 23:49:39 2014
@@ -19,47 +19,63 @@
 package org.apache.hadoop.hdfs.web;
 
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
+import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.SIMPLE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
+import static org.mockito.Matchers.*;
+import static org.mockito.Mockito.*;
 
+import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetSocketAddress;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.internal.util.reflection.Whitebox;
 
 public class TestWebHdfsTokens {
   private static Configuration conf;
+  URI uri = null;
 
   @BeforeClass
   public static void setUp() {
     conf = new Configuration();
     SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);    
+    UserGroupInformation.setLoginUser(
+        UserGroupInformation.createUserForTesting(
+            "LoginUser", new String[]{"supergroup"}));
   }
 
   private WebHdfsFileSystem spyWebhdfsInSecureSetup() throws IOException {
     WebHdfsFileSystem fsOrig = new WebHdfsFileSystem();
     fsOrig.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
     WebHdfsFileSystem fs = spy(fsOrig);
-    Whitebox.setInternalState(fsOrig.tokenAspect, "fs", fs);
     return fs;
   }
 
@@ -89,7 +105,7 @@ public class TestWebHdfsTokens {
   }
 
   @Test(timeout = 5000)
-  public void testNoTokenForCanclToken() throws IOException {
+  public void testNoTokenForRenewToken() throws IOException {
     checkNoTokenForOperation(PutOpParam.Op.RENEWDELEGATIONTOKEN);
   }
 
@@ -139,4 +155,277 @@ public class TestWebHdfsTokens {
       assertFalse(op.getRequireAuth());
     }
   }
+  
+  @SuppressWarnings("unchecked") // for any(Token.class)
+  @Test
+  public void testLazyTokenFetchForWebhdfs() throws Exception {
+    MiniDFSCluster cluster = null;
+    WebHdfsFileSystem fs = null;
+    try {
+      final Configuration clusterConf = new HdfsConfiguration(conf);
+      SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf);
+      clusterConf.setBoolean(DFSConfigKeys
+          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+      // trick the NN into thinking security is enabled w/o it trying
+      // to login from a keytab
+      UserGroupInformation.setConfiguration(clusterConf);
+      cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build();
+      cluster.waitActive();
+      SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf);
+      UserGroupInformation.setConfiguration(clusterConf);
+      
+      uri = DFSUtil.createUri(
+          "webhdfs", cluster.getNameNode().getHttpAddress());
+      validateLazyTokenFetch(clusterConf);
+    } finally {
+      IOUtils.cleanup(null, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  @SuppressWarnings("unchecked") // for any(Token.class)
+  @Test
+  public void testLazyTokenFetchForSWebhdfs() throws Exception {
+    MiniDFSCluster cluster = null;
+    SWebHdfsFileSystem fs = null;
+    try {
+      final Configuration clusterConf = new HdfsConfiguration(conf);
+      SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf);
+      clusterConf.setBoolean(DFSConfigKeys
+          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+      String BASEDIR = System.getProperty("test.build.dir",
+          "target/test-dir") + "/" + TestWebHdfsTokens.class.getSimpleName();
+      String keystoresDir;
+      String sslConfDir;
+
+      clusterConf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+      clusterConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+      clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+      clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+
+      File base = new File(BASEDIR);
+      FileUtil.fullyDelete(base);
+      base.mkdirs();
+      keystoresDir = new File(BASEDIR).getAbsolutePath();
+      sslConfDir = KeyStoreTestUtil.getClasspathDir(TestWebHdfsTokens.class);
+      KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, clusterConf, false);
+
+      // trick the NN into thinking security is enabled w/o it trying
+      // to login from a keytab
+      UserGroupInformation.setConfiguration(clusterConf);
+      cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build();
+      cluster.waitActive();
+      InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
+      String nnAddr = NetUtils.getHostPortString(addr);
+      clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
+      SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf);
+      UserGroupInformation.setConfiguration(clusterConf);
+      
+      uri = DFSUtil.createUri(
+        "swebhdfs", cluster.getNameNode().getHttpsAddress());
+      validateLazyTokenFetch(clusterConf);
+    } finally {
+      IOUtils.cleanup(null, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  @SuppressWarnings("unchecked")
+  private void validateLazyTokenFetch(final Configuration clusterConf) throws Exception {
+    final String testUser = "DummyUser";
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+        testUser, new String[]{"supergroup"});
+    WebHdfsFileSystem fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
+      @Override
+      public WebHdfsFileSystem run() throws IOException {
+        return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
+      }
+    });
+    // verify token ops don't get a token
+    Assert.assertNull(fs.getRenewToken());
+    Token<?> token = fs.getDelegationToken(null);
+    fs.renewDelegationToken(token);
+    fs.cancelDelegationToken(token);
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    Assert.assertNull(fs.getRenewToken());
+    reset(fs);
+
+    // verify first non-token op gets a token
+    final Path p = new Path("/f");
+    fs.create(p, (short)1).close();
+    verify(fs, times(1)).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, times(1)).getDelegationToken(anyString());
+    verify(fs, times(1)).setDelegationToken(any(Token.class));
+    token = fs.getRenewToken();
+    Assert.assertNotNull(token);
+    Assert.assertEquals(testUser, getTokenOwner(token));
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    reset(fs);
+
+    // verify prior token is reused
+    fs.getFileStatus(p);
+    verify(fs, times(1)).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    Token<?> token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify renew of expired token fails w/o getting a new token
+    token = fs.getRenewToken();
+    fs.cancelDelegationToken(token);
+    try {
+      fs.renewDelegationToken(token);
+      Assert.fail("should have failed");
+    } catch (InvalidToken it) {
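+      // expected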
+    } catch (Exception ex) {
+      Assert.fail("wrong exception: " + ex);
+    }
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify cancel of expired token fails w/o getting a new token
+    try {
+      fs.cancelDelegationToken(token);
+      Assert.fail("should have failed");
+    } catch (InvalidToken it) {
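+      // expected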
+    } catch (Exception ex) {
+      Assert.fail("wrong exception: " + ex);
+    }
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify an expired token is replaced with a new token
+    fs.open(p).close();
+    verify(fs, times(2)).getDelegationToken(); // first bad, then good
+    verify(fs, times(1)).replaceExpiredDelegationToken();
+    verify(fs, times(1)).getDelegationToken(null);
+    verify(fs, times(1)).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertNotSame(token, token2);
+    Assert.assertEquals(fs.getTokenKind(), token2.getKind());
+    Assert.assertEquals(testUser, getTokenOwner(token2));
+    reset(fs);
+
+    // verify with open because it's a little different in how it
+    // opens connections
+    fs.cancelDelegationToken(fs.getRenewToken());
+    InputStream is = fs.open(p);
+    is.read();
+    is.close();
+    verify(fs, times(2)).getDelegationToken(); // first bad, then good
+    verify(fs, times(1)).replaceExpiredDelegationToken();
+    verify(fs, times(1)).getDelegationToken(null);
+    verify(fs, times(1)).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertNotSame(token, token2);
+    Assert.assertEquals(fs.getTokenKind(), token2.getKind());
+    Assert.assertEquals(testUser, getTokenOwner(token2));
+    reset(fs);
+
+    // verify fs close cancels the token
+    fs.close();
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    verify(fs, times(1)).cancelDelegationToken(eq(token2));
+
+    // add a token to ugi for a new fs, verify it uses that token
+    token = fs.getDelegationToken(null);
+    ugi.addToken(token);
+    fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
+      @Override
+      public WebHdfsFileSystem run() throws IOException {
+        return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
+      }
+    });
+    Assert.assertNull(fs.getRenewToken());
+    fs.getFileStatus(new Path("/"));
+    verify(fs, times(1)).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, times(1)).setDelegationToken(eq(token));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify it reuses the prior ugi token
+    fs.getFileStatus(new Path("/"));
+    verify(fs, times(1)).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify an expired ugi token is NOT replaced with a new token
+    fs.cancelDelegationToken(token);
+    for (int i = 0; i < 2; i++) {
+      try {
+        fs.getFileStatus(new Path("/"));
+        Assert.fail("didn't fail");
+      } catch (InvalidToken it) {
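+        // expected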
+      } catch (Exception ex) {
+        Assert.fail("wrong exception: " + ex);
+      }
+      verify(fs, times(1)).getDelegationToken();
+      verify(fs, times(1)).replaceExpiredDelegationToken();
+      verify(fs, never()).getDelegationToken(anyString());
+      verify(fs, never()).setDelegationToken(any(Token.class));
+      token2 = fs.getRenewToken();
+      Assert.assertNotNull(token2);
+      Assert.assertEquals(fs.getTokenKind(), token.getKind());
+      Assert.assertSame(token, token2);
+      reset(fs);
+    }
+    
+    // verify fs close does NOT cancel the ugi token
+    fs.close();
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    verify(fs, never()).cancelDelegationToken(any(Token.class));
+  }
+
+  private String getTokenOwner(Token<?> token) throws IOException {
+    // webhdfs doesn't register properly with the class loader
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    Token<?> clone = new Token(token);
+    clone.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+    return clone.decodeIdentifier().getUser().getUserName();
+  }
 }

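For context on what the new tests above exercise: WebHdfsFileSystem no
longer fetches a delegation token during initialize(); the first
operation that needs one triggers the fetch, and close() only cancels
tokens the filesystem obtained for itself. A minimal client-side sketch
of that contract follows (not part of this commit; the NameNode address
and path are placeholders):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class LazyTokenFetchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // No token is fetched here; initialization is token-free.
        FileSystem fs = FileSystem.newInstance(
            URI.create("webhdfs://namenode.example.com:50070"), conf);
        // The first non-token operation triggers the lazy fetch;
        // subsequent operations reuse the cached token.
        fs.create(new Path("/tmp/f"), (short) 1).close();
        // close() cancels a self-fetched token, but never one that was
        // supplied through the caller's UGI credentials.
        fs.close();
      }
    }

The spy-based assertions above pin down exactly these points: no fetch
at initialization, one fetch on first use, and cancellation only of
self-fetched tokens.
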
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java Tue Aug 19 23:49:39 2014
@@ -31,6 +31,7 @@ import java.util.Arrays;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.hdfs.web.resources.FsActionParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -283,6 +285,28 @@ public class TestWebHdfsUrl {
         },
         fileStatusUrl);    
   }
+
+  @Test(timeout=60000)
+  public void testCheckAccessUrl() throws IOException {
+    Configuration conf = new Configuration();
+
+    UserGroupInformation ugi =
+        UserGroupInformation.createRemoteUser("test-user");
+    UserGroupInformation.setLoginUser(ugi);
+
+    WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
+    Path fsPath = new Path("/p1");
+
+    URL checkAccessUrl = webhdfs.toUrl(GetOpParam.Op.CHECKACCESS,
+        fsPath, new FsActionParam(FsAction.READ_WRITE));
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.CHECKACCESS.toQueryString(),
+            new UserParam(ugi.getShortUserName()).toString(),
+            FsActionParam.NAME + "=" + FsAction.READ_WRITE.SYMBOL
+        },
+        checkAccessUrl);
+  }
   
   private void checkQueryParams(String[] expected, URL url) {
     Arrays.sort(expected);

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java Tue Aug 19 23:49:39 2014
@@ -94,10 +94,4 @@ public class WebHdfsTestUtil {
     Assert.assertEquals(expectedResponseCode, conn.getResponseCode());
     return WebHdfsFileSystem.jsonParse(conn, false);
   }
-
-  public static FSDataOutputStream write(final WebHdfsFileSystem webhdfs,
-      final HttpOpParam.Op op, final HttpURLConnection conn,
-      final int bufferSize) throws IOException {
-    return webhdfs.write(op, conn, bufferSize);
-  }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java Tue Aug 19 23:49:39 2014
@@ -20,14 +20,19 @@ package org.apache.hadoop.hdfs.web.resou
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
+import java.io.IOException;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -346,4 +351,52 @@ public class TestParam {
     }
   }
  
+  @Test
+  public void testXAttrNameParam() {
+    final XAttrNameParam p = new XAttrNameParam("user.a1");
+    Assert.assertEquals("user.a1", p.getXAttrName());
+  }
+  
+  @Test
+  public void testXAttrValueParam() throws IOException {
+    final XAttrValueParam p = new XAttrValueParam("0x313233");
+    Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313233"),
+        p.getXAttrValue());
+  }
+  
+  @Test
+  public void testXAttrEncodingParam() {
+    final XAttrEncodingParam p = new XAttrEncodingParam(XAttrCodec.BASE64);
+    Assert.assertEquals(XAttrCodec.BASE64, p.getEncoding());
+    final XAttrEncodingParam p1 = new XAttrEncodingParam(p.getValueString());
+    Assert.assertEquals(XAttrCodec.BASE64, p1.getEncoding());
+  }
+  
+  @Test
+  public void testXAttrSetFlagParam() {
+    EnumSet<XAttrSetFlag> flag = EnumSet.of(
+        XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
+    final XAttrSetFlagParam p = new XAttrSetFlagParam(flag);
+    Assert.assertEquals(flag, p.getFlag());
+    final XAttrSetFlagParam p1 = new XAttrSetFlagParam(p.getValueString());
+    Assert.assertEquals(flag, p1.getFlag());
+  }
+  
+  @Test
+  public void testRenameOptionSetParam() {
+    final RenameOptionSetParam p = new RenameOptionSetParam(
+        Options.Rename.OVERWRITE, Options.Rename.NONE);
+    final RenameOptionSetParam p1 = new RenameOptionSetParam(
+        p.getValueString());
+    Assert.assertEquals(EnumSet.of(
+        Options.Rename.OVERWRITE, Options.Rename.NONE), p1.getValue());
+  }
+
+  @Test
+  public void testSnapshotNameParam() {
+    final OldSnapshotNameParam s1 = new OldSnapshotNameParam("s1");
+    final SnapshotNameParam s2 = new SnapshotNameParam("s2");
+    Assert.assertEquals("s1", s1.getValue());
+    Assert.assertEquals("s2", s2.getValue());
+  }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Tue Aug 19 23:49:39 2014
@@ -54,11 +54,26 @@ public class TestNetworkTopology {
         DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
-        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3")
+        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d3/r1"),
+        DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"),
+        DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"),
+        DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"),
+        DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2"),
+        DFSTestUtil.getDatanodeDescriptor("14.14.14.14", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("15.15.15.15", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("16.16.16.16", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("17.17.17.17", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("18.18.18.18", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("19.19.19.19", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("20.20.20.20", "/d4/r1")
     };
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
     }
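+    // Mark dataNodes[9] and dataNodes[10] (10.10.10.10, 11.11.11.11) as
+    // decommissioned; testSortByDistance expects them to sort last.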
+    dataNodes[9].setDecommissioned();
+    dataNodes[10].setDecommissioned();
   }
   
   @Test
@@ -99,7 +114,7 @@ public class TestNetworkTopology {
 
   @Test
   public void testRacks() throws Exception {
-    assertEquals(cluster.getNumOfRacks(), 3);
+    assertEquals(cluster.getNumOfRacks(), 6);
     assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
     assertFalse(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
     assertTrue(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
@@ -117,23 +132,40 @@ public class TestNetworkTopology {
   }
 
   @Test
-  public void testPseudoSortByDistance() throws Exception {
+  public void testSortByDistance() throws Exception {
     DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
     
     // array contains both local node & local rack node
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
 
+    // array contains both local node & local rack node & decommissioned node
+    DatanodeDescriptor[] dtestNodes = new DatanodeDescriptor[5];
+    dtestNodes[0] = dataNodes[8];
+    dtestNodes[1] = dataNodes[12];
+    dtestNodes[2] = dataNodes[11];
+    dtestNodes[3] = dataNodes[9];
+    dtestNodes[4] = dataNodes[10];
+    cluster.sortByDistance(dataNodes[8], dtestNodes,
+        dtestNodes.length - 2, 0xDEADBEEF, false);
+    assertTrue(dtestNodes[0] == dataNodes[8]);
+    assertTrue(dtestNodes[1] == dataNodes[11]);
+    assertTrue(dtestNodes[2] == dataNodes[12]);
+    assertTrue(dtestNodes[3] == dataNodes[9]);
+    assertTrue(dtestNodes[4] == dataNodes[10]);
+
     // array contains local node
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[3]);
@@ -142,21 +174,74 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[5];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[1];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
-    
+
     // array contains local rack node which happens to be in position 0
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[5];
     testNodes[2] = dataNodes[3];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
-    // peudoSortByDistance does not take the "data center" layer into consideration 
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEADBEEF, false);
+    assertTrue(testNodes[0] == dataNodes[1]);
+    assertTrue(testNodes[1] == dataNodes[3]);
+    assertTrue(testNodes[2] == dataNodes[5]);
+
+    // Same as previous, but with a different random seed to test randomization
+    testNodes[0] = dataNodes[1];
+    testNodes[1] = dataNodes[5];
+    testNodes[2] = dataNodes[3];
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEAD, false);
+    // sortByDistance does not take the "data center" layer into consideration
     // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[5]);
     assertTrue(testNodes[2] == dataNodes[3]);
+
+    // Array is just local rack nodes
+    // Expect a random first node depending on the seed (normally the block ID).
+    DatanodeDescriptor first = null;
+    boolean foundRandom = false;
+    for (int i = 5; i <= 7; i++) {
+      testNodes[0] = dataNodes[5];
+      testNodes[1] = dataNodes[6];
+      testNodes[2] = dataNodes[7];
+      cluster.sortByDistance(dataNodes[i], testNodes,
+          testNodes.length, 0xBEADED + i, false);
+      if (first == null) {
+        first = testNodes[0];
+      } else {
+        if (first != testNodes[0]) {
+          foundRandom = true;
+          break;
+        }
+      }
+    }
+    assertTrue("Expected to find a different first location", foundRandom);
+    // Array of rack-local nodes with randomizeBlockLocationsPerBlock set
+    // to true. Expect a random order of block locations for the same block.
+    first = null;
+    foundRandom = false;
+    for (int i = 1; i <= 4; i++) {
+      testNodes[0] = dataNodes[13];
+      testNodes[1] = dataNodes[14];
+      testNodes[2] = dataNodes[15];
+      cluster.sortByDistance(dataNodes[15 + i], testNodes, testNodes.length,
+          0xBEADED, true);
+      if (first == null) {
+        first = testNodes[0];
+      } else {
+        if (first != testNodes[0]) {
+          foundRandom = true;
+          break;
+        }
+      }
+    }
+    assertTrue("Expected to find a different first location", foundRandom);
   }
   
   @Test

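The randomization checks above rely only on sortByDistance using the
caller-supplied seed (normally the block ID) to shuffle equally-distant
replicas: the order is deterministic for one seed but may differ across
seeds. A standalone illustration of that property (a sketch of the
idea, not the NetworkTopology implementation):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    public class SeededShuffleSketch {
      public static void main(String[] args) {
        // Three equally-distant (rack-local) replicas, as in the test.
        List<String> replicas = Arrays.asList("6.6.6.6", "7.7.7.7", "8.8.8.8");
        // A fixed seed gives a deterministic order: the same block
        // always sees the same replica ordering.
        Collections.shuffle(replicas, new Random(0xDEADBEEF));
        System.out.println(replicas);
        // A different seed (a different block) may promote a different
        // first replica, which is what the foundRandom loops assert.
        Collections.shuffle(replicas, new Random(0xBEADED));
        System.out.println(replicas);
      }
    }
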
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java Tue Aug 19 23:49:39 2014
@@ -27,6 +27,7 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.io.FileNotFoundException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 
@@ -39,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestWrapper;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -393,4 +395,37 @@ public class TestPermissionSymlinks {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
   }
+
+  @Test
+  public void testAccess() throws Exception {
+    fs.setPermission(target, new FsPermission((short) 0002));
+    fs.setAcl(target, Arrays.asList(
+        aclEntry(ACCESS, USER, ALL),
+        aclEntry(ACCESS, GROUP, NONE),
+        aclEntry(ACCESS, USER, user.getShortUserName(), WRITE),
+        aclEntry(ACCESS, OTHER, WRITE)));
+    FileContext myfc = user.doAs(new PrivilegedExceptionAction<FileContext>() {
+      @Override
+      public FileContext run() throws IOException {
+        return FileContext.getFileContext(conf);
+      }
+    });
+
+    // Path to targetChild via symlink
+    myfc.access(link, FsAction.WRITE);
+    try {
+      myfc.access(link, FsAction.ALL);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
+    Path badPath = new Path(link, "bad");
+    try {
+      myfc.access(badPath, FsAction.READ);
+      fail("The access call should have failed");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java Tue Aug 19 23:49:39 2014
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.junit.After;
 import org.junit.Before;
@@ -150,8 +151,10 @@ public class TestRefreshUserMappings {
     final String [] GROUP_NAMES2 = new String [] {"gr3" , "gr4"};
     
     //keys in conf
-    String userKeyGroups = ProxyUsers.getProxySuperuserGroupConfKey(SUPER_USER);
-    String userKeyHosts = ProxyUsers.getProxySuperuserIpConfKey (SUPER_USER);
+    String userKeyGroups = DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserGroupConfKey(SUPER_USER);
+    String userKeyHosts = DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserIpConfKey (SUPER_USER);
     
     config.set(userKeyGroups, "gr3,gr4,gr5"); // superuser can proxy for this group
     config.set(userKeyHosts,"127.0.0.1");

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
Binary files - no diff available.