Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2014/07/07 22:44:09 UTC

svn commit: r1608603 [6/6] - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-...

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java Mon Jul  7 20:43:56 2014
@@ -26,6 +26,7 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.S
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -57,6 +59,7 @@ public class TestXAttrWithSnapshot {
   private static int pathCount = 0;
   private static Path path, snapshotPath;
   private static String snapshotName;
+  private final int SUCCESS = 0;
   // XAttrs
   private static final String name1 = "user.a1";
   private static final byte[] value1 = { 0x31, 0x32, 0x33 };
@@ -269,6 +272,18 @@ public class TestXAttrWithSnapshot {
   }
 
   /**
+   * Assert that removing an xattr on a read-only snapshot throws an exception.
+   */
+  @Test
+  public void testRemoveXAttrSnapshotPath() throws Exception {
+    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
+    hdfs.setXAttr(path, name1, value1);
+    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
+    exception.expect(SnapshotAccessControlException.class);
+    hdfs.removeXAttr(snapshotPath, name1);
+  }
+
+  /**
    * Assert that setting an xattr throws an exception when it would exceed the quota.
    */
   @Test
@@ -340,6 +355,26 @@ public class TestXAttrWithSnapshot {
   }
 
   /**
+   * Test that users can copy a snapshot while preserving its xattrs.
+   */
+  @Test (timeout = 120000)
+  public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
+    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
+    hdfs.setXAttr(path, name1, value1);
+    hdfs.setXAttr(path, name2, value2);
+    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
+    Path snapshotCopy = new Path(path.toString() + "-copy");
+    String[] argv = new String[] { "-cp", "-px", snapshotPath.toUri().toString(),
+        snapshotCopy.toUri().toString() };
+    int ret = ToolRunner.run(new FsShell(conf), argv);
+    assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
+
+    Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotCopy);
+    assertArrayEquals(value1, xattrs.get(name1));
+    assertArrayEquals(value2, xattrs.get(name2));
+  }
+
+  /**
    * Initialize the cluster, wait for it to become active, and get FileSystem
    * instances for our test users.
    * 
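
For reference, the operation the new testCopySnapshotShouldPreserveXAttrs drives can be sketched as below; the paths are hypothetical placeholders, while FsShell, ToolRunner, and the "-cp -px" arguments are the same ones the test passes:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class SnapshotCopyExample {
      public static void main(String[] args) throws Exception {
        // "-px" preserves permissions and xattrs; snapshot paths live under
        // <dir>/.snapshot/<name> and are read-only sources.
        int ret = ToolRunner.run(new FsShell(new Configuration()), new String[] {
            "-cp", "-px", "/dir/.snapshot/snap1", "/dir-copy" });
        System.exit(ret); // 0 on success, matching the test's SUCCESS constant
      }
    }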

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java Mon Jul  7 20:43:56 2014
@@ -197,11 +197,12 @@ public class TestShortCircuitCache {
   @Test(timeout=60000)
   public void testExpiry() throws Exception {
     final ShortCircuitCache cache =
-        new ShortCircuitCache(2, 1, 1, 10000000, 1, 10000, 0);
+        new ShortCircuitCache(2, 1, 1, 10000000, 1, 10000000, 0);
     final TestFileDescriptorPair pair = new TestFileDescriptorPair();
     ShortCircuitReplicaInfo replicaInfo1 =
       cache.fetchOrCreate(
-        new ExtendedBlockId(123, "test_bp1"), new SimpleReplicaCreator(123, cache, pair));
+        new ExtendedBlockId(123, "test_bp1"),
+          new SimpleReplicaCreator(123, cache, pair));
     Preconditions.checkNotNull(replicaInfo1.getReplica());
     Preconditions.checkState(replicaInfo1.getInvalidTokenException() == null);
     pair.compareWith(replicaInfo1.getReplica().getDataStream(),

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java Mon Jul  7 20:43:56 2014
@@ -291,17 +291,17 @@ public class TestShortCircuitLocalRead {
     }
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testFileLocalReadNoChecksum() throws Exception {
     doTestShortCircuitRead(true, 3*blockSize+100, 0);
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testFileLocalReadChecksum() throws Exception {
     doTestShortCircuitRead(false, 3*blockSize+100, 0);
   }
   
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testSmallFileLocalRead() throws Exception {
     doTestShortCircuitRead(false, 13, 0);
     doTestShortCircuitRead(false, 13, 5);
@@ -309,7 +309,7 @@ public class TestShortCircuitLocalRead {
     doTestShortCircuitRead(true, 13, 5);
   }
   
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testLocalReadLegacy() throws Exception {
     doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(),
         getCurrentUser(), false);
@@ -320,18 +320,18 @@ public class TestShortCircuitLocalRead {
   * to use short circuit. The test ensures the reader falls back to
   * non-shortcircuit reads when shortcircuit is disallowed.
    */
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testLocalReadFallback() throws Exception {
     doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(), "notallowed", true);
   }
   
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testReadFromAnOffset() throws Exception {
     doTestShortCircuitRead(false, 3*blockSize+100, 777);
     doTestShortCircuitRead(true, 3*blockSize+100, 777);
   }
   
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testLongFile() throws Exception {
     doTestShortCircuitRead(false, 10*blockSize+100, 777);
     doTestShortCircuitRead(true, 10*blockSize+100, 777);
@@ -578,6 +578,7 @@ public class TestShortCircuitLocalRead {
     fs.delete(file1, false);
   }
 
+  @Test(timeout=60000)
   public void testReadWithRemoteBlockReader() throws IOException, InterruptedException {
     doTestShortCircuitReadWithRemoteBlockReader(true, 3*blockSize+100, getCurrentUser(), 0, false);
   }
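
The fallback and checksum variants above are driven purely by client-side configuration. A minimal sketch of the settings involved, assuming the standard HDFS key names and a placeholder domain-socket path:

    import org.apache.hadoop.conf.Configuration;

    public class ShortCircuitClientConf {
      public static Configuration create() {
        Configuration conf = new Configuration();
        // Read block data directly from local files over a shared domain
        // socket instead of streaming it through the DataNode.
        conf.setBoolean("dfs.client.read.shortcircuit", true);
        conf.set("dfs.domain.socket.path", "/var/run/hdfs/dn_socket"); // placeholder
        // The "NoChecksum" tests above exercise the equivalent of setting
        // this to true.
        conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
        return conf;
      }
    }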

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Mon Jul  7 20:43:56 2014
@@ -143,6 +143,8 @@ public class TestOfflineImageViewer {
       hdfs.mkdirs(xattr);
       hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
       hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
+      // OIV should be able to handle empty value XAttrs
+      hdfs.setXAttr(xattr, "user.a3", null);
       writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
 
       // Write results to the fsimage file
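
A minimal sketch of the empty-value round trip the image viewer has to tolerate, assuming the FileSystem xattr API treats a null value as empty:

    // "hdfs" is the test cluster's FileSystem, as set up above.
    static void writeEmptyValueXAttr(org.apache.hadoop.fs.FileSystem hdfs)
        throws java.io.IOException {
      org.apache.hadoop.fs.Path p = new org.apache.hadoop.fs.Path("/xattr");
      // A null value is stored as an empty value; the saved fsimage then
      // carries a zero-length XAttr entry that OIV must render cleanly.
      hdfs.setXAttr(p, "user.a3", null);
      byte[] v = hdfs.getXAttrs(p).get("user.a3"); // expected: zero-length
    }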

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java Mon Jul  7 20:43:56 2014
@@ -19,47 +19,63 @@
 package org.apache.hadoop.hdfs.web;
 
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
+import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.SIMPLE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
+import static org.mockito.Matchers.*;
+import static org.mockito.Mockito.*;
 
+import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetSocketAddress;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.internal.util.reflection.Whitebox;
 
 public class TestWebHdfsTokens {
   private static Configuration conf;
+  URI uri = null;
 
   @BeforeClass
   public static void setUp() {
     conf = new Configuration();
     SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);    
+    UserGroupInformation.setLoginUser(
+        UserGroupInformation.createUserForTesting(
+            "LoginUser", new String[]{"supergroup"}));
   }
 
   private WebHdfsFileSystem spyWebhdfsInSecureSetup() throws IOException {
     WebHdfsFileSystem fsOrig = new WebHdfsFileSystem();
     fsOrig.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
     WebHdfsFileSystem fs = spy(fsOrig);
-    Whitebox.setInternalState(fsOrig.tokenAspect, "fs", fs);
     return fs;
   }
 
@@ -89,7 +105,7 @@ public class TestWebHdfsTokens {
   }
 
   @Test(timeout = 5000)
-  public void testNoTokenForCanclToken() throws IOException {
+  public void testNoTokenForRenewToken() throws IOException {
     checkNoTokenForOperation(PutOpParam.Op.RENEWDELEGATIONTOKEN);
   }
 
@@ -139,4 +155,277 @@ public class TestWebHdfsTokens {
       assertFalse(op.getRequireAuth());
     }
   }
+  
+  @SuppressWarnings("unchecked") // for any(Token.class)
+  @Test
+  public void testLazyTokenFetchForWebhdfs() throws Exception {
+    MiniDFSCluster cluster = null;
+    WebHdfsFileSystem fs = null;
+    try {
+      final Configuration clusterConf = new HdfsConfiguration(conf);
+      SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf);
+      clusterConf.setBoolean(DFSConfigKeys
+          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+      // trick the NN into thinking security is enabled without it
+      // trying to log in from a keytab
+      UserGroupInformation.setConfiguration(clusterConf);
+      cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build();
+      cluster.waitActive();
+      SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf);
+      UserGroupInformation.setConfiguration(clusterConf);
+      
+      uri = DFSUtil.createUri(
+          "webhdfs", cluster.getNameNode().getHttpAddress());
+      validateLazyTokenFetch(clusterConf);
+    } finally {
+      IOUtils.cleanup(null, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  @SuppressWarnings("unchecked") // for any(Token.class)
+  @Test
+  public void testLazyTokenFetchForSWebhdfs() throws Exception {
+    MiniDFSCluster cluster = null;
+    SWebHdfsFileSystem fs = null;
+    try {
+      final Configuration clusterConf = new HdfsConfiguration(conf);
+      SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf);
+      clusterConf.setBoolean(DFSConfigKeys
+          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+      String BASEDIR = System.getProperty("test.build.dir",
+          "target/test-dir") + "/" + TestWebHdfsTokens.class.getSimpleName();
+      String keystoresDir;
+      String sslConfDir;
+
+      clusterConf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+      clusterConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+      clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+      clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+	  
+      File base = new File(BASEDIR);
+      FileUtil.fullyDelete(base);
+      base.mkdirs();
+      keystoresDir = new File(BASEDIR).getAbsolutePath();
+      sslConfDir = KeyStoreTestUtil.getClasspathDir(TestWebHdfsTokens.class);
+      KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, clusterConf, false);
+	  
+      // trick the NN into thinking security is enabled without it
+      // trying to log in from a keytab
+      UserGroupInformation.setConfiguration(clusterConf);
+      cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build();
+      cluster.waitActive();
+      InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
+      String nnAddr = NetUtils.getHostPortString(addr);
+      clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
+      SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf);
+      UserGroupInformation.setConfiguration(clusterConf);
+      
+      uri = DFSUtil.createUri(
+          "swebhdfs", cluster.getNameNode().getHttpsAddress());
+      validateLazyTokenFetch(clusterConf);
+    } finally {
+      IOUtils.cleanup(null, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  @SuppressWarnings("unchecked")
+  private void validateLazyTokenFetch(final Configuration clusterConf) throws Exception {
+    final String testUser = "DummyUser";
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+        testUser, new String[]{"supergroup"});
+    WebHdfsFileSystem fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
+      @Override
+      public WebHdfsFileSystem run() throws IOException {
+        return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
+      }
+    });
+    // verify token ops don't get a token
+    Assert.assertNull(fs.getRenewToken());
+    Token<?> token = fs.getDelegationToken(null);
+    fs.renewDelegationToken(token);
+    fs.cancelDelegationToken(token);
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    Assert.assertNull(fs.getRenewToken());
+    reset(fs);
+
+    // verify first non-token op gets a token
+    final Path p = new Path("/f");
+    fs.create(p, (short)1).close();
+    verify(fs, times(1)).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, times(1)).getDelegationToken(anyString());
+    verify(fs, times(1)).setDelegationToken(any(Token.class));
+    token = fs.getRenewToken();
+    Assert.assertNotNull(token);      
+    Assert.assertEquals(testUser, getTokenOwner(token));
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    reset(fs);
+
+    // verify prior token is reused
+    fs.getFileStatus(p);
+    verify(fs, times(1)).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    Token<?> token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify renew of expired token fails w/o getting a new token
+    token = fs.getRenewToken();
+    fs.cancelDelegationToken(token);
+    try {
+      fs.renewDelegationToken(token);
+      Assert.fail("should have failed");
+    } catch (InvalidToken it) {
+    } catch (Exception ex) {
+      Assert.fail("wrong exception:"+ex);
+    }
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify cancel of expired token fails w/o getting a new token
+    try {
+      fs.cancelDelegationToken(token);
+      Assert.fail("should have failed");
+    } catch (InvalidToken it) {
+    } catch (Exception ex) {
+      Assert.fail("wrong exception:"+ex);
+    }
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify an expired token is replaced with a new token
+    fs.open(p).close();
+    verify(fs, times(2)).getDelegationToken(); // first bad, then good
+    verify(fs, times(1)).replaceExpiredDelegationToken();
+    verify(fs, times(1)).getDelegationToken(null);
+    verify(fs, times(1)).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertNotSame(token, token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertEquals(testUser, getTokenOwner(token2));
+    reset(fs);
+
+    // verify with open because it's a little different in how it
+    // opens connections
+    fs.cancelDelegationToken(fs.getRenewToken());
+    InputStream is = fs.open(p);
+    is.read();
+    is.close();
+    verify(fs, times(2)).getDelegationToken(); // first bad, then good
+    verify(fs, times(1)).replaceExpiredDelegationToken();
+    verify(fs, times(1)).getDelegationToken(null);
+    verify(fs, times(1)).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertNotSame(token, token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertEquals(testUser, getTokenOwner(token2));
+    reset(fs);
+
+    // verify fs close cancels the token
+    fs.close();
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    verify(fs, times(1)).cancelDelegationToken(eq(token2));
+
+    // add a token to ugi for a new fs, verify it uses that token
+    token = fs.getDelegationToken(null);
+    ugi.addToken(token);
+    fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
+      @Override
+      public WebHdfsFileSystem run() throws IOException {
+        return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
+      }
+    });
+    Assert.assertNull(fs.getRenewToken());
+    fs.getFileStatus(new Path("/"));
+    verify(fs, times(1)).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, times(1)).setDelegationToken(eq(token));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify it reuses the prior ugi token
+    fs.getFileStatus(new Path("/"));
+    verify(fs, times(1)).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    token2 = fs.getRenewToken();
+    Assert.assertNotNull(token2);
+    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    Assert.assertSame(token, token2);
+    reset(fs);
+
+    // verify an expired ugi token is NOT replaced with a new token
+    fs.cancelDelegationToken(token);
+    for (int i=0; i<2; i++) {
+      try {
+        fs.getFileStatus(new Path("/"));
+        Assert.fail("didn't fail");
+      } catch (InvalidToken it) {
+      } catch (Exception ex) {
+        Assert.fail("wrong exception:"+ex);
+      }
+      verify(fs, times(1)).getDelegationToken();
+      verify(fs, times(1)).replaceExpiredDelegationToken();
+      verify(fs, never()).getDelegationToken(anyString());
+      verify(fs, never()).setDelegationToken(any(Token.class));
+      token2 = fs.getRenewToken();
+      Assert.assertNotNull(token2);
+      Assert.assertEquals(fs.getTokenKind(), token.getKind());
+      Assert.assertSame(token, token2);
+      reset(fs);
+    }
+    
+    // verify fs close does NOT cancel the ugi token
+    fs.close();
+    verify(fs, never()).getDelegationToken();
+    verify(fs, never()).replaceExpiredDelegationToken();
+    verify(fs, never()).getDelegationToken(anyString());
+    verify(fs, never()).setDelegationToken(any(Token.class));
+    verify(fs, never()).cancelDelegationToken(any(Token.class));
+  } 
+  
+  private String getTokenOwner(Token<?> token) throws IOException {
+    // webhdfs doesn't register properly with the class loader
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    Token<?> clone = new Token(token);
+    clone.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+    return clone.decodeIdentifier().getUser().getUserName();
+  }
 }
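
The lazy-fetch contract validated above: explicit token operations never trigger an implicit fetch, the first regular filesystem operation does, and a token supplied via the UGI is used as-is and is never replaced or cancelled on close. A minimal sketch of the explicit calls, with a placeholder NameNode address:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
    import org.apache.hadoop.security.token.Token;

    public class ExplicitTokenOps {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(
            URI.create("webhdfs://nn.example.com:50070"), conf); // placeholder NN
        // Explicit token operations; per the test, none of these make the
        // filesystem fetch or install a token for its own use.
        Token<?> token = fs.getDelegationToken(null); // null renewer, as above
        long expires = fs.renewDelegationToken(token);
        fs.cancelDelegationToken(token);
        // An ordinary call such as fs.getFileStatus(...) is what triggers
        // the lazy fetch on a secure cluster.
        fs.close();
      }
    }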

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Mon Jul  7 20:43:56 2014
@@ -15714,8 +15714,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>ExactComparator</type>
-          <expected-output></expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>Refresh service acl successful(\n)*</expected-output>
         </comparator>
       </comparators>
     </test><!--
@@ -15951,8 +15951,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>ExactComparator</type>
-          <expected-output></expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>Save namespace successful(\n)*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16367,8 +16367,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>ExactComparator</type>
-          <expected-output></expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>Refresh user to groups mapping successful(\n)*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16383,8 +16383,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>ExactComparator</type>
-          <expected-output></expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>Refresh super user groups configuration successful(\n)*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16453,8 +16453,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>ExactComparator</type>
-          <expected-output></expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>Balancer bandwidth is set to 104857600(\n)*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16469,8 +16469,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>ExactComparator</type>
-          <expected-output></expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>Finalize upgrade successful</expected-output>
         </comparator>
       </comparators>
     </test>