Posted to hdfs-commits@hadoop.apache.org by cu...@apache.org on 2014/08/20 03:34:47 UTC

svn commit: r1619019 [1/11] - in /hadoop/common/branches/YARN-1051/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/main/ja...

Author: curino
Date: Wed Aug 20 01:34:29 2014
New Revision: 1619019

URL: http://svn.apache.org/r1619019
Log:
Merge trunk

Added:
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/WhitelistBasedTrustedChannelResolver.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/WhitelistBasedTrustedChannelResolver.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Matcher.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Matcher.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/MovedBlocks.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/MovedBlocks.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/
      - copied from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/
      - copied from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/
      - copied from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/
      - copied from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt
      - copied unchanged from r1619017, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt
Removed:
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSPseudoAuthenticator.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenIdentifier.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManagerException.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/ProxyUser.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/DelegationTokenManagerService.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/ProxyUserService.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSCustomUserName.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java
Modified:
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml

Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1613508-1619017

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java Wed Aug 20 01:34:29 2014
@@ -39,12 +39,14 @@ import org.apache.hadoop.fs.permission.A
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.lib.wsrs.EnumSetParam;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -67,7 +69,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
@@ -75,7 +76,6 @@ import java.security.PrivilegedException
 import java.text.MessageFormat;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.concurrent.Callable;
 
 /**
  * HttpFSServer implementation of the FileSystemAccess FileSystem.
@@ -217,34 +217,15 @@ public class HttpFSFileSystem extends Fi
 
   }
 
-
-  private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
+  private DelegationTokenAuthenticatedURL authURL;
+  private DelegationTokenAuthenticatedURL.Token authToken =
+      new DelegationTokenAuthenticatedURL.Token();
   private URI uri;
-  private InetSocketAddress httpFSAddr;
   private Path workingDir;
   private UserGroupInformation realUser;
   private String doAs;
-  private Token<?> delegationToken;
 
-  //This method enables handling UGI doAs with SPNEGO, we have to
-  //fallback to the realuser who logged in with Kerberos credentials
-  private <T> T doAsRealUserIfNecessary(final Callable<T> callable)
-    throws IOException {
-    try {
-      if (realUser.getShortUserName().equals(doAs)) {
-        return callable.call();
-      } else {
-        return realUser.doAs(new PrivilegedExceptionAction<T>() {
-          @Override
-          public T run() throws Exception {
-            return callable.call();
-          }
-        });
-      }
-    } catch (Exception ex) {
-      throw new IOException(ex.toString(), ex);
-    }
-  }
+
 
   /**
    * Convenience method that creates a <code>HttpURLConnection</code> for the
@@ -291,20 +272,26 @@ public class HttpFSFileSystem extends Fi
   private HttpURLConnection getConnection(final String method,
       Map<String, String> params, Map<String, List<String>> multiValuedParams,
       Path path, boolean makeQualified) throws IOException {
-    if (!realUser.getShortUserName().equals(doAs)) {
-      params.put(DO_AS_PARAM, doAs);
-    }
-    HttpFSKerberosAuthenticator.injectDelegationToken(params, delegationToken);
     if (makeQualified) {
       path = makeQualified(path);
     }
     final URL url = HttpFSUtils.createURL(path, params, multiValuedParams);
-    return doAsRealUserIfNecessary(new Callable<HttpURLConnection>() {
-      @Override
-      public HttpURLConnection call() throws Exception {
-        return getConnection(url, method);
+    try {
+      return UserGroupInformation.getCurrentUser().doAs(
+          new PrivilegedExceptionAction<HttpURLConnection>() {
+            @Override
+            public HttpURLConnection run() throws Exception {
+              return getConnection(url, method);
+            }
+          }
+      );
+    } catch (Exception ex) {
+      if (ex instanceof IOException) {
+        throw (IOException) ex;
+      } else {
+        throw new IOException(ex);
       }
-    });
+    }
   }
 
   /**
@@ -321,12 +308,8 @@ public class HttpFSFileSystem extends Fi
    * @throws IOException thrown if an IO error occurrs.
    */
   private HttpURLConnection getConnection(URL url, String method) throws IOException {
-    Class<? extends Authenticator> klass =
-      getConf().getClass("httpfs.authenticator.class",
-                         HttpFSKerberosAuthenticator.class, Authenticator.class);
-    Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
     try {
-      HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken);
+      HttpURLConnection conn = authURL.openConnection(url, authToken);
       conn.setRequestMethod(method);
       if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
         conn.setDoOutput(true);
@@ -357,10 +340,17 @@ public class HttpFSFileSystem extends Fi
     super.initialize(name, conf);
     try {
       uri = new URI(name.getScheme() + "://" + name.getAuthority());
-      httpFSAddr = NetUtils.createSocketAddr(getCanonicalUri().toString());
     } catch (URISyntaxException ex) {
       throw new IOException(ex);
     }
+
+    Class<? extends DelegationTokenAuthenticator> klass =
+        getConf().getClass("httpfs.authenticator.class",
+            KerberosDelegationTokenAuthenticator.class,
+            DelegationTokenAuthenticator.class);
+    DelegationTokenAuthenticator authenticator =
+        ReflectionUtils.newInstance(klass, getConf());
+    authURL = new DelegationTokenAuthenticatedURL(authenticator);
   }
 
   @Override
@@ -1059,38 +1049,57 @@ public class HttpFSFileSystem extends Fi
   @Override
   public Token<?> getDelegationToken(final String renewer)
     throws IOException {
-    return doAsRealUserIfNecessary(new Callable<Token<?>>() {
-      @Override
-      public Token<?> call() throws Exception {
-        return HttpFSKerberosAuthenticator.
-          getDelegationToken(uri, httpFSAddr, authToken, renewer);
+    try {
+      return UserGroupInformation.getCurrentUser().doAs(
+          new PrivilegedExceptionAction<Token<?>>() {
+            @Override
+            public Token<?> run() throws Exception {
+              return authURL.getDelegationToken(uri.toURL(), authToken,
+                  renewer);
+            }
+          }
+      );
+    } catch (Exception ex) {
+      if (ex instanceof IOException) {
+        throw (IOException) ex;
+      } else {
+        throw new IOException(ex);
       }
-    });
+    }
   }
 
   public long renewDelegationToken(final Token<?> token) throws IOException {
-    return doAsRealUserIfNecessary(new Callable<Long>() {
-      @Override
-      public Long call() throws Exception {
-        return HttpFSKerberosAuthenticator.
-          renewDelegationToken(uri,  authToken, token);
+    try {
+      return UserGroupInformation.getCurrentUser().doAs(
+          new PrivilegedExceptionAction<Long>() {
+            @Override
+            public Long run() throws Exception {
+              return authURL.renewDelegationToken(uri.toURL(), authToken);
+            }
+          }
+      );
+    } catch (Exception ex) {
+      if (ex instanceof IOException) {
+        throw (IOException) ex;
+      } else {
+        throw new IOException(ex);
       }
-    });
+    }
   }
 
   public void cancelDelegationToken(final Token<?> token) throws IOException {
-    HttpFSKerberosAuthenticator.
-      cancelDelegationToken(uri, authToken, token);
+    authURL.cancelDelegationToken(uri.toURL(), authToken);
   }
 
   @Override
   public Token<?> getRenewToken() {
-    return delegationToken;
+    return null; //TODO : for renewer
   }
 
   @Override
+  @SuppressWarnings("unchecked")
   public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
-    delegationToken = token;
+    //TODO : for renewer
   }
 
   @Override
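
[Editor's note, not part of the commit] The HttpFSFileSystem.java hunks above drop the HttpFS-specific HttpFSKerberosAuthenticator, the cached delegationToken field, and the doAsRealUserIfNecessary()/DO_AS_PARAM plumbing, and switch the client to the shared hadoop-auth classes (DelegationTokenAuthenticatedURL, KerberosDelegationTokenAuthenticator) with calls wrapped in UserGroupInformation.doAs(). The following is a minimal, illustrative sketch of that resulting client-side pattern; the class names from the hadoop-auth/Hadoop APIs and the "httpfs.authenticator.class" key are taken from the diff, while the standalone wrapper class itself (HttpFSClientAuthSketch) is an assumed name introduced only for this example.

  import java.io.IOException;
  import java.net.HttpURLConnection;
  import java.net.URL;
  import java.security.PrivilegedExceptionAction;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.security.token.Token;
  import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
  import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
  import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
  import org.apache.hadoop.util.ReflectionUtils;

  // Sketch only: mirrors the pattern the diff moves HttpFSFileSystem to.
  public class HttpFSClientAuthSketch {
    private final DelegationTokenAuthenticatedURL authURL;
    private final DelegationTokenAuthenticatedURL.Token authToken =
        new DelegationTokenAuthenticatedURL.Token();

    public HttpFSClientAuthSketch(Configuration conf) {
      // Same pluggability knob the diff keeps, now defaulting to the shared
      // Kerberos/SPNEGO delegation-token authenticator.
      Class<? extends DelegationTokenAuthenticator> klass =
          conf.getClass("httpfs.authenticator.class",
              KerberosDelegationTokenAuthenticator.class,
              DelegationTokenAuthenticator.class);
      DelegationTokenAuthenticator authenticator =
          ReflectionUtils.newInstance(klass, conf);
      authURL = new DelegationTokenAuthenticatedURL(authenticator);
    }

    // Open a connection as the current UGI; proxy-user handling is delegated to
    // the authenticated URL, so no explicit doAs parameter is injected anymore.
    public HttpURLConnection open(final URL url, final String method)
        throws IOException {
      try {
        return UserGroupInformation.getCurrentUser().doAs(
            new PrivilegedExceptionAction<HttpURLConnection>() {
              @Override
              public HttpURLConnection run() throws Exception {
                HttpURLConnection conn = authURL.openConnection(url, authToken);
                conn.setRequestMethod(method);
                return conn;
              }
            });
      } catch (Exception ex) {
        if (ex instanceof IOException) {
          throw (IOException) ex;
        }
        throw new IOException(ex);
      }
    }

    // Fetch a delegation token through the same authenticated URL.
    public Token<?> getDelegationToken(final URL base, final String renewer)
        throws IOException {
      try {
        return UserGroupInformation.getCurrentUser().doAs(
            new PrivilegedExceptionAction<Token<?>>() {
              @Override
              public Token<?> run() throws Exception {
                return authURL.getDelegationToken(base, authToken, renewer);
              }
            });
      } catch (Exception ex) {
        if (ex instanceof IOException) {
          throw (IOException) ex;
        }
        throw new IOException(ex);
      }
    }
  }

The practical effect, as the diff shows, is that token acquisition, renewal, and SPNEGO fallback all go through one common code path instead of HttpFS-specific authenticator classes.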

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java Wed Aug 20 01:34:29 2014
@@ -30,8 +30,6 @@ import org.apache.hadoop.lib.wsrs.Param;
 import org.apache.hadoop.lib.wsrs.ParametersProvider;
 import org.apache.hadoop.lib.wsrs.ShortParam;
 import org.apache.hadoop.lib.wsrs.StringParam;
-import org.apache.hadoop.lib.wsrs.UserProvider;
-import org.slf4j.MDC;
 
 import javax.ws.rs.ext.Provider;
 import java.util.HashMap;
@@ -53,57 +51,44 @@ public class HttpFSParametersProvider ex
 
   static {
     PARAMS_DEF.put(Operation.OPEN,
-      new Class[]{DoAsParam.class, OffsetParam.class, LenParam.class});
-    PARAMS_DEF.put(Operation.GETFILESTATUS, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.LISTSTATUS,
-      new Class[]{DoAsParam.class, FilterParam.class});
-    PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS,
-      new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.GETACLSTATUS, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.APPEND,
-      new Class[]{DoAsParam.class, DataParam.class});
+        new Class[]{OffsetParam.class, LenParam.class});
+    PARAMS_DEF.put(Operation.GETFILESTATUS, new Class[]{});
+    PARAMS_DEF.put(Operation.LISTSTATUS, new Class[]{FilterParam.class});
+    PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{});
+    PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{});
+    PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{});
+    PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS, new Class[]{});
+    PARAMS_DEF.put(Operation.GETACLSTATUS, new Class[]{});
+    PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{});
+    PARAMS_DEF.put(Operation.APPEND, new Class[]{DataParam.class});
     PARAMS_DEF.put(Operation.CONCAT, new Class[]{SourcesParam.class});
     PARAMS_DEF.put(Operation.CREATE,
-      new Class[]{DoAsParam.class, PermissionParam.class, OverwriteParam.class,
+      new Class[]{PermissionParam.class, OverwriteParam.class,
                   ReplicationParam.class, BlockSizeParam.class, DataParam.class});
-    PARAMS_DEF.put(Operation.MKDIRS,
-      new Class[]{DoAsParam.class, PermissionParam.class});
-    PARAMS_DEF.put(Operation.RENAME,
-      new Class[]{DoAsParam.class, DestinationParam.class});
+    PARAMS_DEF.put(Operation.MKDIRS, new Class[]{PermissionParam.class});
+    PARAMS_DEF.put(Operation.RENAME, new Class[]{DestinationParam.class});
     PARAMS_DEF.put(Operation.SETOWNER,
-      new Class[]{DoAsParam.class, OwnerParam.class, GroupParam.class});
-    PARAMS_DEF.put(Operation.SETPERMISSION,
-      new Class[]{DoAsParam.class, PermissionParam.class});
+        new Class[]{OwnerParam.class, GroupParam.class});
+    PARAMS_DEF.put(Operation.SETPERMISSION, new Class[]{PermissionParam.class});
     PARAMS_DEF.put(Operation.SETREPLICATION,
-      new Class[]{DoAsParam.class, ReplicationParam.class});
+        new Class[]{ReplicationParam.class});
     PARAMS_DEF.put(Operation.SETTIMES,
-      new Class[]{DoAsParam.class, ModifiedTimeParam.class,
-                  AccessTimeParam.class});
-    PARAMS_DEF.put(Operation.DELETE,
-      new Class[]{DoAsParam.class, RecursiveParam.class});
-    PARAMS_DEF.put(Operation.SETACL,
-            new Class[]{DoAsParam.class, AclPermissionParam.class});
-    PARAMS_DEF.put(Operation.REMOVEACL,
-            new Class[]{DoAsParam.class});
+        new Class[]{ModifiedTimeParam.class, AccessTimeParam.class});
+    PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class});
+    PARAMS_DEF.put(Operation.SETACL, new Class[]{AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEACL, new Class[]{});
     PARAMS_DEF.put(Operation.MODIFYACLENTRIES,
-            new Class[]{DoAsParam.class, AclPermissionParam.class});
+        new Class[]{AclPermissionParam.class});
     PARAMS_DEF.put(Operation.REMOVEACLENTRIES,
-            new Class[]{DoAsParam.class, AclPermissionParam.class});
-    PARAMS_DEF.put(Operation.REMOVEDEFAULTACL,
-            new Class[]{DoAsParam.class});
+        new Class[]{AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEDEFAULTACL, new Class[]{});
     PARAMS_DEF.put(Operation.SETXATTR,
-      new Class[]{DoAsParam.class, XAttrNameParam.class, XAttrValueParam.class, 
+        new Class[]{XAttrNameParam.class, XAttrValueParam.class,
                   XAttrSetFlagParam.class});
-    PARAMS_DEF.put(Operation.REMOVEXATTR, 
-      new Class[]{DoAsParam.class, XAttrNameParam.class});
+    PARAMS_DEF.put(Operation.REMOVEXATTR, new Class[]{XAttrNameParam.class});
     PARAMS_DEF.put(Operation.GETXATTRS, 
-      new Class[]{DoAsParam.class, XAttrNameParam.class, XAttrEncodingParam.class});
-    PARAMS_DEF.put(Operation.LISTXATTRS,
-      new Class[]{DoAsParam.class});
+        new Class[]{XAttrNameParam.class, XAttrEncodingParam.class});
+    PARAMS_DEF.put(Operation.LISTXATTRS, new Class[]{});
   }
 
   public HttpFSParametersProvider() {
@@ -206,41 +191,6 @@ public class HttpFSParametersProvider ex
   }
 
   /**
-   * Class for do-as parameter.
-   */
-  @InterfaceAudience.Private
-  public static class DoAsParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
-
-    /**
-     * Constructor.
-     */
-    public DoAsParam() {
-      super(NAME, null, UserProvider.getUserPattern());
-    }
-
-    /**
-     * Delegates to parent and then adds do-as user to
-     * MDC context for logging purposes.
-     *
-     *
-     * @param str parameter value.
-     *
-     * @return parsed parameter
-     */
-    @Override
-    public String parseParam(String str) {
-      String doAs = super.parseParam(str);
-      MDC.put(getName(), (doAs != null) ? doAs : "-");
-      return doAs;
-    }
-  }
-
-  /**
    * Class for filter parameter.
    */
   @InterfaceAudience.Private
@@ -275,7 +225,7 @@ public class HttpFSParametersProvider ex
      * Constructor.
      */
     public GroupParam() {
-      super(NAME, null, UserProvider.getUserPattern());
+      super(NAME, null);
     }
 
   }
@@ -371,7 +321,7 @@ public class HttpFSParametersProvider ex
      * Constructor.
      */
     public OwnerParam() {
-      super(NAME, null, UserProvider.getUserPattern());
+      super(NAME, null);
     }
 
   }
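
[Editor's note, not part of the commit] The HttpFSParametersProvider.java hunks above remove DoAsParam from every operation, and the HttpFSServer.java diff that follows changes fsExecute()/createFileSystem() to take a UserGroupInformation and imports HttpUserGroupInformation. A minimal sketch of the server-side counterpart is below; the HttpUserGroupInformation class comes from the diff's imports, but the tiny handler helper itself is an assumption added only to illustrate where the doAs resolution now happens.

  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;

  // Sketch only: the delegation-token filter in front of the JAX-RS layer has
  // already enforced proxy-user (doAs) rules, so a handler just asks for the
  // resulting UGI instead of parsing a doAs query parameter.
  public class HttpFSHandlerSketch {
    static String effectiveUser() {
      UserGroupInformation ugi = HttpUserGroupInformation.get();
      return ugi.getShortUserName();
    }
  }

This is why getEffectiveUser() and the ProxyUser service disappear from HttpFSServer in the hunks below: impersonation is validated once in the filter chain rather than per operation.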

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java Wed Aug 20 01:34:29 2014
@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.http.server.
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
-import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
@@ -50,12 +49,11 @@ import org.apache.hadoop.lib.service.Fil
 import org.apache.hadoop.lib.service.FileSystemAccessException;
 import org.apache.hadoop.lib.service.Groups;
 import org.apache.hadoop.lib.service.Instrumentation;
-import org.apache.hadoop.lib.service.ProxyUser;
 import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
-import org.apache.hadoop.lib.servlet.HostnameFilter;
 import org.apache.hadoop.lib.wsrs.InputStreamEntity;
 import org.apache.hadoop.lib.wsrs.Parameters;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
 import org.json.simple.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -79,7 +77,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.security.AccessControlException;
-import java.security.Principal;
 import java.text.MessageFormat;
 import java.util.EnumSet;
 import java.util.List;
@@ -97,48 +94,10 @@ public class HttpFSServer {
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
 
   /**
-   * Resolves the effective user that will be used to request a FileSystemAccess filesystem.
-   * <p/>
-   * If the doAs-user is NULL or the same as the user, it returns the user.
-   * <p/>
-   * Otherwise it uses proxyuser rules (see {@link ProxyUser} to determine if the
-   * current user can impersonate the doAs-user.
-   * <p/>
-   * If the current user cannot impersonate the doAs-user an
-   * <code>AccessControlException</code> will be thrown.
-   *
-   * @param user principal for whom the filesystem instance is.
-   * @param doAs do-as user, if any.
-   *
-   * @return the effective user.
-   *
-   * @throws IOException thrown if an IO error occurrs.
-   * @throws AccessControlException thrown if the current user cannot impersonate
-   * the doAs-user.
-   */
-  private String getEffectiveUser(Principal user, String doAs) throws IOException {
-    String effectiveUser = user.getName();
-    if (doAs != null && !doAs.equals(user.getName())) {
-      ProxyUser proxyUser = HttpFSServerWebApp.get().get(ProxyUser.class);
-      String proxyUserName;
-      if (user instanceof AuthenticationToken) {
-        proxyUserName = ((AuthenticationToken)user).getUserName();
-      } else {
-        proxyUserName = user.getName();
-      }
-      proxyUser.validate(proxyUserName, HostnameFilter.get(), doAs);
-      effectiveUser = doAs;
-      AUDIT_LOG.info("Proxy user [{}] DoAs user [{}]", proxyUserName, doAs);
-    }
-    return effectiveUser;
-  }
-
-  /**
    * Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem for the effective
    * user.
    *
-   * @param user principal making the request.
-   * @param doAs do-as user, if any.
+   * @param ugi user making the request.
    * @param executor FileSystemExecutor to execute.
    *
    * @return FileSystemExecutor response
@@ -147,12 +106,11 @@ public class HttpFSServer {
    * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
    * exceptions are handled by {@link HttpFSExceptionProvider}.
    */
-  private <T> T fsExecute(Principal user, String doAs, FileSystemAccess.FileSystemExecutor<T> executor)
+  private <T> T fsExecute(UserGroupInformation ugi, FileSystemAccess.FileSystemExecutor<T> executor)
     throws IOException, FileSystemAccessException {
-    String hadoopUser = getEffectiveUser(user, doAs);
     FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
     Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
-    return fsAccess.execute(hadoopUser, conf, executor);
+    return fsAccess.execute(ugi.getShortUserName(), conf, executor);
   }
 
   /**
@@ -162,8 +120,7 @@ public class HttpFSServer {
    * If a do-as user is specified, the current user must be a valid proxyuser, otherwise an
    * <code>AccessControlException</code> will be thrown.
    *
-   * @param user principal for whom the filesystem instance is.
-   * @param doAs do-as user, if any.
+   * @param ugi principal for whom the filesystem instance is.
    *
    * @return a filesystem for the specified user or do-as user.
    *
@@ -172,8 +129,9 @@ public class HttpFSServer {
    * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
    * exceptions are handled by {@link HttpFSExceptionProvider}.
    */
-  private FileSystem createFileSystem(Principal user, String doAs) throws IOException, FileSystemAccessException {
-    String hadoopUser = getEffectiveUser(user, doAs);
+  private FileSystem createFileSystem(UserGroupInformation ugi)
+      throws IOException, FileSystemAccessException {
+    String hadoopUser = ugi.getShortUserName();
     FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
     Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
     FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf);
@@ -192,7 +150,6 @@ public class HttpFSServer {
   /**
    * Special binding for '/' as it is not handled by the wildcard binding.
    *
-   * @param user the principal of the user making the request.
    * @param op the HttpFS operation of the request.
    * @param params the HttpFS parameters of the request.
    *
@@ -206,11 +163,10 @@ public class HttpFSServer {
    */
   @GET
   @Produces(MediaType.APPLICATION_JSON)
-  public Response getRoot(@Context Principal user,
-                          @QueryParam(OperationParam.NAME) OperationParam op,
+  public Response getRoot(@QueryParam(OperationParam.NAME) OperationParam op,
                           @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    return get(user, "", op, params);
+    return get("", op, params);
   }
 
   private String makeAbsolute(String path) {
@@ -220,7 +176,6 @@ public class HttpFSServer {
   /**
    * Binding to handle GET requests, supported operations are
    *
-   * @param user the principal of the user making the request.
    * @param path the path for operation.
    * @param op the HttpFS operation of the request.
    * @param params the HttpFS parameters of the request.
@@ -236,21 +191,20 @@ public class HttpFSServer {
   @GET
   @Path("{path:.*}")
   @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
-  public Response get(@Context Principal user,
-                      @PathParam("path") String path,
+  public Response get(@PathParam("path") String path,
                       @QueryParam(OperationParam.NAME) OperationParam op,
                       @Context Parameters params)
     throws IOException, FileSystemAccessException {
+    UserGroupInformation user = HttpUserGroupInformation.get();
     Response response;
     path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
       case OPEN: {
         //Invoking the command directly using an unmanaged FileSystem that is
         // released by the FileSystemReleaseFilter
         FSOperations.FSOpen command = new FSOperations.FSOpen(path);
-        FileSystem fs = createFileSystem(user, doAs);
+        FileSystem fs = createFileSystem(user);
         InputStream is = command.execute(fs);
         Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
         Long len = params.get(LenParam.NAME, LenParam.class);
@@ -264,7 +218,7 @@ public class HttpFSServer {
       case GETFILESTATUS: {
         FSOperations.FSFileStatus command =
           new FSOperations.FSFileStatus(path);
-        Map json = fsExecute(user, doAs, command);
+        Map json = fsExecute(user, command);
         AUDIT_LOG.info("[{}]", path);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
@@ -273,7 +227,7 @@ public class HttpFSServer {
         String filter = params.get(FilterParam.NAME, FilterParam.class);
         FSOperations.FSListStatus command = new FSOperations.FSListStatus(
           path, filter);
-        Map json = fsExecute(user, doAs, command);
+        Map json = fsExecute(user, command);
         AUDIT_LOG.info("[{}] filter [{}]", path,
                        (filter != null) ? filter : "-");
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
@@ -282,7 +236,7 @@ public class HttpFSServer {
       case GETHOMEDIRECTORY: {
         enforceRootPath(op.value(), path);
         FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
-        JSONObject json = fsExecute(user, doAs, command);
+        JSONObject json = fsExecute(user, command);
         AUDIT_LOG.info("");
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
@@ -290,7 +244,7 @@ public class HttpFSServer {
       case INSTRUMENTATION: {
         enforceRootPath(op.value(), path);
         Groups groups = HttpFSServerWebApp.get().get(Groups.class);
-        List<String> userGroups = groups.getGroups(user.getName());
+        List<String> userGroups = groups.getGroups(user.getShortUserName());
         if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
           throw new AccessControlException(
             "User not in HttpFSServer admin group");
@@ -304,7 +258,7 @@ public class HttpFSServer {
       case GETCONTENTSUMMARY: {
         FSOperations.FSContentSummary command =
           new FSOperations.FSContentSummary(path);
-        Map json = fsExecute(user, doAs, command);
+        Map json = fsExecute(user, command);
         AUDIT_LOG.info("[{}]", path);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
@@ -312,7 +266,7 @@ public class HttpFSServer {
       case GETFILECHECKSUM: {
         FSOperations.FSFileChecksum command =
           new FSOperations.FSFileChecksum(path);
-        Map json = fsExecute(user, doAs, command);
+        Map json = fsExecute(user, command);
         AUDIT_LOG.info("[{}]", path);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
@@ -324,7 +278,7 @@ public class HttpFSServer {
       case GETACLSTATUS: {
         FSOperations.FSAclStatus command =
                 new FSOperations.FSAclStatus(path);
-        Map json = fsExecute(user, doAs, command);
+        Map json = fsExecute(user, command);
         AUDIT_LOG.info("ACL status for [{}]", path);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
@@ -337,7 +291,7 @@ public class HttpFSServer {
         FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, 
             xattrNames, encoding);
         @SuppressWarnings("rawtypes")
-        Map json = fsExecute(user, doAs, command);
+        Map json = fsExecute(user, command);
         AUDIT_LOG.info("XAttrs for [{}]", path);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
@@ -345,7 +299,7 @@ public class HttpFSServer {
       case LISTXATTRS: {
         FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
         @SuppressWarnings("rawtypes")
-        Map json = fsExecute(user, doAs, command);
+        Map json = fsExecute(user, command);
         AUDIT_LOG.info("XAttr names for [{}]", path);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
@@ -363,7 +317,6 @@ public class HttpFSServer {
   /**
    * Binding to handle DELETE requests.
    *
-   * @param user the principal of the user making the request.
    * @param path the path for operation.
    * @param op the HttpFS operation of the request.
    * @param params the HttpFS parameters of the request.
@@ -379,15 +332,14 @@ public class HttpFSServer {
   @DELETE
   @Path("{path:.*}")
   @Produces(MediaType.APPLICATION_JSON)
-  public Response delete(@Context Principal user,
-                      @PathParam("path") String path,
-                      @QueryParam(OperationParam.NAME) OperationParam op,
-                      @Context Parameters params)
+  public Response delete(@PathParam("path") String path,
+                         @QueryParam(OperationParam.NAME) OperationParam op,
+                         @Context Parameters params)
     throws IOException, FileSystemAccessException {
+    UserGroupInformation user = HttpUserGroupInformation.get();
     Response response;
     path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
       case DELETE: {
         Boolean recursive =
@@ -395,7 +347,7 @@ public class HttpFSServer {
         AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
         FSOperations.FSDelete command =
           new FSOperations.FSDelete(path, recursive);
-        JSONObject json = fsExecute(user, doAs, command);
+        JSONObject json = fsExecute(user, command);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
@@ -412,7 +364,6 @@ public class HttpFSServer {
    * Binding to handle POST requests.
    *
    * @param is the inputstream for the request payload.
-   * @param user the principal of the user making the request.
    * @param uriInfo the of the request.
    * @param path the path for operation.
    * @param op the HttpFS operation of the request.
@@ -431,18 +382,17 @@ public class HttpFSServer {
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
   public Response post(InputStream is,
-                       @Context Principal user,
                        @Context UriInfo uriInfo,
                        @PathParam("path") String path,
                        @QueryParam(OperationParam.NAME) OperationParam op,
                        @Context Parameters params)
     throws IOException, FileSystemAccessException {
+    UserGroupInformation user = HttpUserGroupInformation.get();
     Response response;
     path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
     switch (op.value()) {
       case APPEND: {
-        String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
         Boolean hasData = params.get(DataParam.NAME, DataParam.class);
         if (!hasData) {
           response = Response.temporaryRedirect(
@@ -451,7 +401,7 @@ public class HttpFSServer {
         } else {
           FSOperations.FSAppend command =
             new FSOperations.FSAppend(is, path);
-          fsExecute(user, doAs, command);
+          fsExecute(user, command);
           AUDIT_LOG.info("[{}]", path);
           response = Response.ok().type(MediaType.APPLICATION_JSON).build();
         }
@@ -463,7 +413,7 @@ public class HttpFSServer {
 
         FSOperations.FSConcat command =
             new FSOperations.FSConcat(path, sources.split(","));
-        fsExecute(user, null, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}]", path);
         System.out.println("SENT RESPONSE");
         response = Response.ok().build();
@@ -498,7 +448,6 @@ public class HttpFSServer {
    * Binding to handle PUT requests.
    *
    * @param is the inputstream for the request payload.
-   * @param user the principal of the user making the request.
    * @param uriInfo the of the request.
    * @param path the path for operation.
    * @param op the HttpFS operation of the request.
@@ -517,16 +466,15 @@ public class HttpFSServer {
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
   public Response put(InputStream is,
-                       @Context Principal user,
                        @Context UriInfo uriInfo,
                        @PathParam("path") String path,
                        @QueryParam(OperationParam.NAME) OperationParam op,
                        @Context Parameters params)
     throws IOException, FileSystemAccessException {
+    UserGroupInformation user = HttpUserGroupInformation.get();
     Response response;
     path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
       case CREATE: {
         Boolean hasData = params.get(DataParam.NAME, DataParam.class);
@@ -546,7 +494,7 @@ public class HttpFSServer {
           FSOperations.FSCreate command =
             new FSOperations.FSCreate(is, path, permission, override,
                                       replication, blockSize);
-          fsExecute(user, doAs, command);
+          fsExecute(user, command);
           AUDIT_LOG.info(
             "[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
             new Object[]{path, permission, override, replication, blockSize});
@@ -564,7 +512,7 @@ public class HttpFSServer {
 
         FSOperations.FSSetXAttr command = new FSOperations.FSSetXAttr(
             path, xattrName, xattrValue, flag);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] to xAttr [{}]", path, xattrName);
         response = Response.ok().build();
         break;
@@ -573,7 +521,7 @@ public class HttpFSServer {
         String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
         FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr(
             path, xattrName);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] removed xAttr [{}]", path, xattrName);
         response = Response.ok().build();
         break;
@@ -583,7 +531,7 @@ public class HttpFSServer {
                                        PermissionParam.class);
         FSOperations.FSMkdirs command =
           new FSOperations.FSMkdirs(path, permission);
-        JSONObject json = fsExecute(user, doAs, command);
+        JSONObject json = fsExecute(user, command);
         AUDIT_LOG.info("[{}] permission [{}]", path, permission);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
@@ -592,7 +540,7 @@ public class HttpFSServer {
         String toPath = params.get(DestinationParam.NAME, DestinationParam.class);
         FSOperations.FSRename command =
           new FSOperations.FSRename(path, toPath);
-        JSONObject json = fsExecute(user, doAs, command);
+        JSONObject json = fsExecute(user, command);
         AUDIT_LOG.info("[{}] to [{}]", path, toPath);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
@@ -602,7 +550,7 @@ public class HttpFSServer {
         String group = params.get(GroupParam.NAME, GroupParam.class);
         FSOperations.FSSetOwner command =
           new FSOperations.FSSetOwner(path, owner, group);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
         response = Response.ok().build();
         break;
@@ -612,7 +560,7 @@ public class HttpFSServer {
                                       PermissionParam.class);
         FSOperations.FSSetPermission command =
           new FSOperations.FSSetPermission(path, permission);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] to [{}]", path, permission);
         response = Response.ok().build();
         break;
@@ -622,7 +570,7 @@ public class HttpFSServer {
                                        ReplicationParam.class);
         FSOperations.FSSetReplication command =
           new FSOperations.FSSetReplication(path, replication);
-        JSONObject json = fsExecute(user, doAs, command);
+        JSONObject json = fsExecute(user, command);
         AUDIT_LOG.info("[{}] to [{}]", path, replication);
         response = Response.ok(json).build();
         break;
@@ -634,7 +582,7 @@ public class HttpFSServer {
                                      AccessTimeParam.class);
         FSOperations.FSSetTimes command =
           new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] to (M/A)[{}]", path,
                        modifiedTime + ":" + accessTime);
         response = Response.ok().build();
@@ -645,7 +593,7 @@ public class HttpFSServer {
                 AclPermissionParam.class);
         FSOperations.FSSetAcl command =
                 new FSOperations.FSSetAcl(path, aclSpec);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] to acl [{}]", path, aclSpec);
         response = Response.ok().build();
         break;
@@ -653,7 +601,7 @@ public class HttpFSServer {
       case REMOVEACL: {
         FSOperations.FSRemoveAcl command =
                 new FSOperations.FSRemoveAcl(path);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] removed acl", path);
         response = Response.ok().build();
         break;
@@ -663,7 +611,7 @@ public class HttpFSServer {
                 AclPermissionParam.class);
         FSOperations.FSModifyAclEntries command =
                 new FSOperations.FSModifyAclEntries(path, aclSpec);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] modify acl entry with [{}]", path, aclSpec);
         response = Response.ok().build();
         break;
@@ -673,7 +621,7 @@ public class HttpFSServer {
                 AclPermissionParam.class);
         FSOperations.FSRemoveAclEntries command =
                 new FSOperations.FSRemoveAclEntries(path, aclSpec);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] remove acl entry [{}]", path, aclSpec);
         response = Response.ok().build();
         break;
@@ -681,7 +629,7 @@ public class HttpFSServer {
       case REMOVEDEFAULTACL: {
         FSOperations.FSRemoveDefaultAcl command =
                 new FSOperations.FSRemoveDefaultAcl(path);
-        fsExecute(user, doAs, command);
+        fsExecute(user, command);
         AUDIT_LOG.info("[{}] remove default acl", path);
         response = Response.ok().build();
         break;

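The HttpFSServer hunks above all make the same change: the injected Principal and the doAs query parameter give way to a single UserGroupInformation obtained from HttpUserGroupInformation.get(), leaving proxy-user (doAs) resolution to the authentication filter added in this merge. Below is a minimal sketch of the resulting pattern, built only from the calls visible in the diff; the class name UgiExecutorSketch is made up for illustration and is not part of the commit.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.lib.service.FileSystemAccess;
    import org.apache.hadoop.lib.service.FileSystemAccessException;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;

    public class UgiExecutorSketch {

      // Runs an executor as the already-authenticated caller. The handler no
      // longer resolves doAs itself; the UGI supplied by the filter already
      // reflects any impersonation that was authorized upstream.
      public static <T> T execute(FileSystemAccess fsAccess, Configuration conf,
          FileSystemAccess.FileSystemExecutor<T> executor)
          throws IOException, FileSystemAccessException {
        UserGroupInformation ugi = HttpUserGroupInformation.get();
        return fsAccess.execute(ugi.getShortUserName(), conf, executor);
      }
    }
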
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java Wed Aug 20 01:34:29 2014
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.lib.server.ServerException;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.lib.servlet.ServerWebApp;
-import org.apache.hadoop.lib.wsrs.UserProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,9 +102,6 @@ public class HttpFSServerWebApp extends 
     LOG.info("Connects to Namenode [{}]",
              get().get(FileSystemAccess.class).getFileSystemConfiguration().
                get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
-    String userPattern = getConfig().get(UserProvider.USER_PATTERN_KEY, 
-      UserProvider.USER_PATTERN_DEFAULT);
-    UserProvider.setUserPattern(userPattern);
   }
 
   /**

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml Wed Aug 20 01:34:29 2014
@@ -34,8 +34,6 @@
       org.apache.hadoop.lib.service.instrumentation.InstrumentationService,
       org.apache.hadoop.lib.service.scheduler.SchedulerService,
       org.apache.hadoop.lib.service.security.GroupsService,
-      org.apache.hadoop.lib.service.security.ProxyUserService,
-      org.apache.hadoop.lib.service.security.DelegationTokenManagerService,
       org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
     </value>
     <description>
@@ -119,6 +117,10 @@
   </property>
 
   <!-- HttpFSServer proxy user Configuration -->
+<!--
+
+  The following 2 properties within this comment are provided as an
+  example to facilitate configuring HttpFS proxyusers.
 
   <property>
     <name>httpfs.proxyuser.#USER#.hosts</name>
@@ -153,6 +155,7 @@
       in the property name.
     </description>
   </property>
+-->
 
   <!-- HttpFS Delegation Token configuration -->
 
@@ -226,12 +229,4 @@
     </description>
   </property>
 
-  <property>
-    <name>httpfs.user.provider.user.pattern</name>
-    <value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
-    <description>
-      Valid pattern for user and group names, it must be a valid java regex.
-    </description>
-  </property>
-
 </configuration>

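With ProxyUserService and DelegationTokenManagerService removed from the default services list, the per-user proxy settings in httpfs-default.xml survive only as the commented template above. A site that needs HttpFS proxying would copy those properties into its own configuration and replace #USER# with the proxying user's name, roughly as in the sketch below; the user name "oozie", the host and group values, and the .groups key (assumed to be the second property referred to by the comment, mirroring the .hosts entry shown above) are illustrative, not part of the commit.

    <!-- Example only: enable proxying for a hypothetical user "oozie". -->
    <property>
      <name>httpfs.proxyuser.oozie.hosts</name>
      <value>oozie-host1,oozie-host2</value>
    </property>
    <property>
      <name>httpfs.proxyuser.oozie.groups</name>
      <value>etl,reporting</value>
    </property>
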
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java Wed Aug 20 01:34:29 2014
@@ -17,15 +17,19 @@
  */
 package org.apache.hadoop.fs.http.server;
 
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
+
 import javax.servlet.ServletException;
 import java.util.Properties;
 
 public class HttpFSKerberosAuthenticationHandlerForTesting
-  extends HttpFSKerberosAuthenticationHandler {
+  extends KerberosDelegationTokenAuthenticationHandler {
 
   @Override
   public void init(Properties config) throws ServletException {
     //NOP overwrite to avoid Kerberos initialization
+    config.setProperty(TOKEN_KIND, "t");
+    initTokenManager(config);
   }
 
   @Override

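The testing handler now builds on the generic KerberosDelegationTokenAuthenticationHandler instead of the removed HttpFS-specific one: its init() skips real Kerberos setup but still sets a dummy TOKEN_KIND and wires up the delegation-token manager. A rough usage sketch, assuming only the classes shown in this diff (the wrapper class and the harness comment are illustrative):

    import java.util.Properties;

    import javax.servlet.ServletException;

    import org.apache.hadoop.fs.http.server.HttpFSKerberosAuthenticationHandlerForTesting;

    public class TestingHandlerUsageSketch {
      public static void main(String[] args) throws ServletException {
        // init() sets TOKEN_KIND to "t" and calls initTokenManager(), so the
        // handler can issue and verify delegation tokens without a KDC.
        HttpFSKerberosAuthenticationHandlerForTesting handler =
            new HttpFSKerberosAuthenticationHandlerForTesting();
        handler.init(new Properties());
        // ... exercise the handler from a servlet test harness here ...
        handler.destroy();
      }
    }
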
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java Wed Aug 20 01:34:29 2014
@@ -18,6 +18,8 @@
 package org.apache.hadoop.fs.http.server;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
 import org.json.simple.JSONArray;
 import org.junit.Assert;
 
@@ -43,7 +45,6 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrCodec;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
 import org.apache.hadoop.lib.server.Service;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.Groups;
@@ -65,6 +66,7 @@ import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 
 import com.google.common.collect.Maps;
+import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
 
 public class TestHttpFSServer extends HFSTestCase {
 
@@ -681,9 +683,9 @@ public class TestHttpFSServer extends HF
 
     AuthenticationToken token =
       new AuthenticationToken("u", "p",
-        HttpFSKerberosAuthenticationHandlerForTesting.TYPE);
+          new KerberosDelegationTokenAuthenticationHandler().getType());
     token.setExpires(System.currentTimeMillis() + 100000000);
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     String tokenSigned = signer.sign(token.toString());
 
     url = new URL(TestJettyHelper.getJettyURL(),
@@ -705,9 +707,9 @@ public class TestHttpFSServer extends HF
     JSONObject json = (JSONObject)
       new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
     json = (JSONObject)
-      json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+      json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
     String tokenStr = (String)
-        json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+        json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
 
     url = new URL(TestJettyHelper.getJettyURL(),
                   "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);

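On the test side, the signed authentication token is now produced through a SignerSecretProvider rather than a raw byte[] secret, and the delegation-token JSON field names come from DelegationTokenAuthenticator instead of the removed HttpFSKerberosAuthenticator. The token-signing portion, condensed into a standalone sketch that reuses only the calls visible in the hunk above (the class and method names are illustrative):

    import org.apache.hadoop.security.authentication.server.AuthenticationToken;
    import org.apache.hadoop.security.authentication.util.Signer;
    import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
    import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;

    public class SignedCookieSketch {
      // Builds the signed value the test sends as its authentication cookie:
      // an AuthenticationToken of the handler's type, signed with the shared
      // "secret" string via a StringSignerSecretProvider.
      public static String signedToken() {
        AuthenticationToken token = new AuthenticationToken("u", "p",
            new KerberosDelegationTokenAuthenticationHandler().getType());
        token.setExpires(System.currentTimeMillis() + 100000000);
        Signer signer = new Signer(new StringSignerSecretProvider("secret"));
        return signer.sign(token.toString());
      }
    }
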
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java?rev=1619019&r1=1619018&r2=1619019&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java Wed Aug 20 01:34:29 2014
@@ -23,11 +23,11 @@ import org.apache.hadoop.fs.DelegationTo
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import org.apache.hadoop.test.HFSTestCase;
 import org.apache.hadoop.test.KerberosTestUtils;
 import org.apache.hadoop.test.TestDir;
@@ -166,9 +166,9 @@ public class TestHttpFSWithKerberos exte
           .parse(new InputStreamReader(conn.getInputStream()));
         json =
           (JSONObject) json
-            .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+            .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
         String tokenStr = (String) json
-          .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+          .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
 
         //access httpfs using the delegation token
         url = new URL(TestJettyHelper.getJettyURL(),