Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2014/08/21 07:22:16 UTC

svn commit: r1619293 [1/4] - in /hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/client/ sr...

Author: szetszwo
Date: Thu Aug 21 05:22:10 2014
New Revision: 1619293

URL: http://svn.apache.org/r1619293
Log:
Merge r1609845 through r1619277 from trunk.

Added:
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/
      - copied from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
      - copied unchanged from r1619277, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
Modified:
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs:r1594376-1619194
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1618764-1619277

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Aug 21 05:22:10 2014
@@ -278,6 +278,97 @@ Trunk (Unreleased)
     HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
    (Vinayakumar B via wheat9)
 
+  BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+    HDFS-6387. HDFS CLI admin tool for creating & deleting an
+    encryption zone. (clamb)
+
+    HDFS-6386. HDFS Encryption Zones (clamb)
+
+    HDFS-6388. HDFS integration with KeyProvider. (clamb)
+
+    HDFS-6473. Protocol and API for Encryption Zones (clamb)
+
+    HDFS-6392. Wire crypto streams for encrypted files in
+    DFSClient. (clamb and yliu)
+
+    HDFS-6476. Print out the KeyProvider after finding KP successfully on
+    startup. (Juan Yu via wang)
+
+    HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
+    DFSClient. (Charles Lamb and wang)
+
+    HDFS-6389. Rename restrictions for encryption zones. (clamb)
+
+    HDFS-6605. Client server negotiation of cipher suite. (wang)
+
+    HDFS-6625. Remove the Delete Encryption Zone function (clamb)
+
+    HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
+
+    HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
+
+    HDFS-6635. Refactor encryption zone functionality into new
+    EncryptionZoneManager class. (wang)
+
+    HDFS-6474. Namenode needs to get the actual keys and iv from the
+    KeyProvider. (wang)
+
+    HDFS-6619. Clean up encryption-related tests. (wang)
+
+    HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
+
+    HDFS-6490. Fix the keyid format for generated keys in
+    FSNamesystem.createEncryptionZone (clamb)
+
+    HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
+    (wang)
+
+    HDFS-6718. Remove EncryptionZoneManager lock. (wang)
+
+    HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
+
+    HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
+    EZManager#createEncryptionZone. (clamb)
+
+    HDFS-6724. Decrypt EDEK before creating
+    CryptoInputStream/CryptoOutputStream. (wang)
+
+    HDFS-6509. Create a special /.reserved/raw directory for raw access to
+    encrypted data. (clamb via wang)
+
+    HDFS-6771. Require specification of an encryption key when creating
+    an encryption zone. (wang)
+
+    HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
+
+    HDFS-6692. Add more HDFS encryption tests. (wang)
+
+    HDFS-6780. Batch the encryption zones listing API. (wang)
+
+    HDFS-6394. HDFS encryption documentation. (wang)
+
+    HDFS-6834. Improve the configuration guidance in DFSClient when there 
+    are no Codec classes found in configs. (umamahesh)
+
+    HDFS-6546. Add non-superuser capability to get the encryption zone
+    for a specific path. (clamb)
+
+    HDFS-6733. Creating encryption zone results in NPE when
+    KeyProvider is null. (clamb)
+
+    HDFS-6785. Should not be able to create encryption zone using path
+    to a non-directory file. (clamb)
+
+    HDFS-6807. Fix TestReservedRawPaths. (clamb)
+
+    HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
+    as boolean. (umamahesh)
+
+    HDFS-6817. Fix findbugs and other warnings. (yliu)
+
+    HDFS-6839. Fix TestCLI to expect new output. (clamb)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -428,6 +519,15 @@ Release 2.6.0 - UNRELEASED
     HDFS-6188. An ip whitelist based implementation of TrustedChannelResolver.
     (Benoy Antony via Arpit Agarwal)
 
+    HDFS-6858. Allow dfs.data.transfer.saslproperties.resolver.class default to
+    hadoop.security.saslproperties.resolver.class. (Benoy Antony via cnauroth)
+
+    HDFS-6878. Change MiniDFSCluster to support StorageType configuration
+    for individual directories. (Arpit Agarwal)
+
+    HDFS-6758. block writer should pass the expected block size to
+    DataXceiverServer. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -543,7 +643,13 @@ Release 2.6.0 - UNRELEASED
     HDFS-6569. OOB message can't be sent to the client when DataNode shuts down for upgrade
     (brandonli)
 
-Release 2.5.0 - UNRELEASED
+    HDFS-6868. portmap and nfs3 are documented as hadoop commands instead of hdfs
+    (brandonli)
+
+    HDFS-6870. Blocks and INodes could leak for Rename with overwrite flag. (Yi
+    Liu via jing9)
+
+Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES
 

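The transparent-encryption subtasks listed above add up to one user-visible workflow. A minimal sketch of that workflow, assuming a KMS/KeyProvider is configured; the key name and zone path are hypothetical, and the flag spellings follow the CryptoAdmin tool added by this merge (see the TransparentEncryption doc added above for the authoritative syntax):

    # create an encryption key in the configured KeyProvider
    hadoop key create myEZkey

    # create an encryption zone rooted at an (empty) directory
    hdfs dfs -mkdir /secure
    hdfs crypto -createZone -keyName myEZkey -path /secure

    # list the encryption zones visible to the caller (batched per HDFS-6780)
    hdfs crypto -listZones
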
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/pom.xml Thu Aug 21 05:22:10 2014
@@ -304,6 +304,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
                   <include>datatransfer.proto</include>
                   <include>fsimage.proto</include>
                   <include>hdfs.proto</include>
+                  <include>encryption.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh Thu Aug 21 05:22:10 2014
@@ -57,9 +57,9 @@ excludeFilenameRemote=$("$HADOOP_PREFIX/
 
 if [ "$excludeFilenameRemote" = '' ] ; then
   echo \
-    "Error: hdfs getconf -excludeFile returned empty string, " \
-    "please setup dfs.hosts.exclude in hdfs-site.xml in local cluster " \
-    "configuration and on all namenodes"
+  "Error: hdfs getconf -excludeFile returned empty string, " \
+  "please setup dfs.hosts.exclude in hdfs-site.xml in local cluster " \
+  "configuration and on all namenodes"
   exit 1
 fi
 

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs Thu Aug 21 05:22:10 2014
@@ -15,250 +15,241 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Environment Variables
-#
-#   JSVC_HOME  home directory of jsvc binary.  Required for starting secure
-#              datanode.
-#
-#   JSVC_OUTFILE  path to jsvc output file.  Defaults to
-#                 $HADOOP_LOG_DIR/jsvc.out.
-#
-#   JSVC_ERRFILE  path to jsvc error file.  Defaults to $HADOOP_LOG_DIR/jsvc.err.
-
-bin=`which $0`
-bin=`dirname ${bin}`
-bin=`cd "$bin" > /dev/null; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
-
-function print_usage(){
-  echo "Usage: hdfs [--config confdir] COMMAND"
+function hadoop_usage
+{
+  echo "Usage: hdfs [--config confdir] [--daemon (start|stop|status)] COMMAND"
   echo "       where COMMAND is one of:"
-  echo "  dfs                  run a filesystem command on the file systems supported in Hadoop."
-  echo "  namenode -format     format the DFS filesystem"
-  echo "  secondarynamenode    run the DFS secondary namenode"
-  echo "  namenode             run the DFS namenode"
-  echo "  journalnode          run the DFS journalnode"
-  echo "  zkfc                 run the ZK Failover Controller daemon"
+  echo "  balancer             run a cluster balancing utility"
+  echo "  cacheadmin           configure the HDFS cache"
+  echo "  classpath            prints the class path needed to get the"
+  echo "                       Hadoop jar and the required libraries"
   echo "  datanode             run a DFS datanode"
+  echo "  dfs                  run a filesystem command on the file system"
   echo "  dfsadmin             run a DFS admin client"
-  echo "  haadmin              run a DFS HA admin client"
-  echo "  fsck                 run a DFS filesystem checking utility"
-  echo "  balancer             run a cluster balancing utility"
-  echo "  jmxget               get JMX exported values from NameNode or DataNode."
-  echo "  oiv                  apply the offline fsimage viewer to an fsimage"
-  echo "  oiv_legacy           apply the offline fsimage viewer to an legacy fsimage"
-  echo "  oev                  apply the offline edits viewer to an edits file"
   echo "  fetchdt              fetch a delegation token from the NameNode"
+  echo "  fsck                 run a DFS filesystem checking utility"
   echo "  getconf              get config values from configuration"
   echo "  groups               get the groups which users belong to"
-  echo "  snapshotDiff         diff two snapshots of a directory or diff the"
-  echo "                       current directory contents with a snapshot"
+  echo "  haadmin              run a DFS HA admin client"
+  echo "  jmxget               get JMX exported values from NameNode or DataNode."
+  echo "  journalnode          run the DFS journalnode"
   echo "  lsSnapshottableDir   list all snapshottable dirs owned by the current user"
-  echo "						Use -help to see options"
-  echo "  portmap              run a portmap service"
+  echo "                               Use -help to see options"
+  echo "  namenode             run the DFS namenode"
+  echo "                               Use -format to initialize the DFS filesystem"
   echo "  nfs3                 run an NFS version 3 gateway"
-  echo "  cacheadmin           configure the HDFS cache"
+  echo "  oev                  apply the offline edits viewer to an edits file"
+  echo "  oiv                  apply the offline fsimage viewer to an fsimage"
+  echo "  oiv_legacy           apply the offline fsimage viewer to a legacy fsimage"
+  echo "  portmap              run a portmap service"
+  echo "  secondarynamenode    run the DFS secondary namenode"
+  echo "  snapshotDiff         diff two snapshots of a directory or diff the"
+  echo "                       current directory contents with a snapshot"
+  echo "  zkfc                 run the ZK Failover Controller daemon"
+  echo "  crypto               configure HDFS encryption zones"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
 
-if [ $# = 0 ]; then
-  print_usage
-  exit
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
+
+if [[ $# = 0 ]]; then
+  hadoop_exit_with_usage 1
 fi
 
 COMMAND=$1
 shift
 
-case $COMMAND in
-  # usage flags
-  --help|-help|-h)
-    print_usage
+case ${COMMAND} in
+  balancer)
+    CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}"
+  ;;
+  cacheadmin)
+    CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+  ;;
+  classpath)
+    hadoop_finalize
+    echo "${CLASSPATH}"
     exit
-    ;;
-esac
-
-# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
-if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  if [ -n "$JSVC_HOME" ]; then
-    if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
-      HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
-    fi
-  
-    if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
-      HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
-      HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
-    fi
-   
-    HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
-    HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
-    starting_secure_dn="true"
-  else
-    echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
-      "isn't set. Falling back to starting insecure DN."
-  fi
-fi
-
-# Determine if we're starting a privileged NFS daemon, and if so, redefine appropriate variables
-if [ "$COMMAND" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
-  if [ -n "$JSVC_HOME" ]; then
-    if [ -n "$HADOOP_PRIVILEGED_NFS_PID_DIR" ]; then
-      HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
+  ;;
+  crypto)
+    CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
+  ;;
+  datanode)
+    daemon="true"
+    # Determine if we're starting a secure datanode, and
+    # if so, redefine appropriate variables
+    if [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
+      secure_service="true"
+      secure_user="${HADOOP_SECURE_DN_USER}"
+      
+      # backward compatiblity
+      HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}"
+      HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}"
+      
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS} ${HADOOP_DATANODE_OPTS}"
+      CLASS="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
+    else
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}"
+      CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
     fi
-  
-    if [ -n "$HADOOP_PRIVILEGED_NFS_LOG_DIR" ]; then
-      HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
-      HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+  ;;
+  dfs)
+    CLASS=org.apache.hadoop.fs.FsShell
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  ;;
+  dfsadmin)
+    CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  ;;
+  fetchdt)
+    CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+  ;;
+  fsck)
+    CLASS=org.apache.hadoop.hdfs.tools.DFSck
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  ;;
+  getconf)
+    CLASS=org.apache.hadoop.hdfs.tools.GetConf
+  ;;
+  groups)
+    CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+  ;;
+  haadmin)
+    CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+    CLASSPATH="${CLASSPATH}:${TOOL_PATH}"
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  ;;
+  journalnode)
+    daemon="true"
+    CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}"
+  ;;
+  jmxget)
+    CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+  ;;
+  lsSnapshottableDir)
+    CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+  ;;
+  namenode)
+    daemon="true"
+    CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}"
+  ;;
+  nfs3)
+    daemon="true"
+    if [[ -n "${HADOOP_PRIVILEGED_NFS_USER}" ]]; then
+      secure_service="true"
+      secure_user="${HADOOP_PRIVILEGED_NFS_USER}"
+      
+      # backward compatiblity
+      HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}"
+      HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}"
+      
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS} ${HADOOP_NFS3_OPTS}"
+      CLASS=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
+    else
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}"
+      CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
     fi
-   
-    HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
-    HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
-    starting_privileged_nfs="true"
-  else
-    echo "It looks like you're trying to start a privileged NFS server, but"\
-      "\$JSVC_HOME isn't set. Falling back to starting unprivileged NFS server."
-  fi
-fi
+  ;;
+  oev)
+    CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+  ;;
+  oiv)
+    CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+  ;;
+  oiv_legacy)
+    CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
+  ;;
+  portmap)
+    daemon="true"
+    CLASS=org.apache.hadoop.portmap.Portmap
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}"
+  ;;
+  secondarynamenode)
+    daemon="true"
+    CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}"
+  ;;
+  snapshotDiff)
+    CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+  ;;
+  zkfc)
+    daemon="true"
+    CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}"
+  ;;
+  -*)
+    hadoop_exit_with_usage 1
+  ;;
+  *)
+    CLASS="${COMMAND}"
+  ;;
+esac
 
-if [ "$COMMAND" = "namenode" ] ; then
-  CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
-elif [ "$COMMAND" = "zkfc" ] ; then
-  CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS"
-elif [ "$COMMAND" = "secondarynamenode" ] ; then
-  CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
-elif [ "$COMMAND" = "datanode" ] ; then
-  CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
-  if [ "$starting_secure_dn" = "true" ]; then
-    HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
-  else
-    HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
+if [[ -n "${secure_service}" ]]; then
+  HADOOP_SECURE_USER="${secure_user}"
+  if hadoop_verify_secure_prereq; then
+    hadoop_setup_secure_service
+    priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.out"
+    priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.err"
+    priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.pid"
+    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
+    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
   fi
-elif [ "$COMMAND" = "journalnode" ] ; then
-  CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOURNALNODE_OPTS"
-elif [ "$COMMAND" = "dfs" ] ; then
-  CLASS=org.apache.hadoop.fs.FsShell
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
-elif [ "$COMMAND" = "dfsadmin" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
-elif [ "$COMMAND" = "haadmin" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
-  CLASSPATH=${CLASSPATH}:${TOOL_PATH}
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
-elif [ "$COMMAND" = "fsck" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.DFSck
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
-elif [ "$COMMAND" = "balancer" ] ; then
-  CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
-elif [ "$COMMAND" = "jmxget" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.JMXGet
-elif [ "$COMMAND" = "oiv" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
-elif [ "$COMMAND" = "oiv_legacy" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
-elif [ "$COMMAND" = "oev" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
-elif [ "$COMMAND" = "fetchdt" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
-elif [ "$COMMAND" = "getconf" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.GetConf
-elif [ "$COMMAND" = "groups" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.GetGroups
-elif [ "$COMMAND" = "snapshotDiff" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
-elif [ "$COMMAND" = "lsSnapshottableDir" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
-elif [ "$COMMAND" = "portmap" ] ; then
-  CLASS=org.apache.hadoop.portmap.Portmap
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_PORTMAP_OPTS"
-elif [ "$COMMAND" = "nfs3" ] ; then
-  CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS"
-elif [ "$COMMAND" = "cacheadmin" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
 else
-  CLASS="$COMMAND"
+  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
+  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
 fi
 
-export CLASSPATH=$CLASSPATH
-
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
-
-# Check to see if we should start a secure datanode
-if [ "$starting_secure_dn" = "true" ]; then
-  if [ "$HADOOP_PID_DIR" = "" ]; then
-    HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
+if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
+  # shellcheck disable=SC2034
+  HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
+  if [[ -n "${secure_service}" ]]; then
+    # shellcheck disable=SC2034
+    HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
   else
-    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
-  fi
-
-  JSVC=$JSVC_HOME/jsvc
-  if [ ! -f $JSVC ]; then
-    echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run secure datanodes. "
-    echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
-      "and set JSVC_HOME to the directory containing the jsvc binary."
-    exit
+    # shellcheck disable=SC2034
+    HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
   fi
+fi
 
-  if [[ ! $JSVC_OUTFILE ]]; then
-    JSVC_OUTFILE="$HADOOP_LOG_DIR/jsvc.out"
-  fi
+hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
+hadoop_finalize
 
-  if [[ ! $JSVC_ERRFILE ]]; then
-    JSVC_ERRFILE="$HADOOP_LOG_DIR/jsvc.err"
-  fi
+export CLASSPATH
 
-  exec "$JSVC" \
-           -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
-           -errfile "$JSVC_ERRFILE" \
-           -pidfile "$HADOOP_SECURE_DN_PID" \
-           -nodetach \
-           -user "$HADOOP_SECURE_DN_USER" \
-            -cp "$CLASSPATH" \
-           $JAVA_HEAP_MAX $HADOOP_OPTS \
-           org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
-elif [ "$starting_privileged_nfs" = "true" ] ; then
-  if [ "$HADOOP_PID_DIR" = "" ]; then
-    HADOOP_PRIVILEGED_NFS_PID="/tmp/hadoop_privileged_nfs3.pid"
+if [[ -n "${daemon}" ]]; then
+  if [[ -n "${secure_service}" ]]; then
+    hadoop_secure_daemon_handler \
+    "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\
+    "${daemon_pidfile}" "${daemon_outfile}" \
+    "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@"
   else
-    HADOOP_PRIVILEGED_NFS_PID="$HADOOP_PID_DIR/hadoop_privileged_nfs3.pid"
-  fi
-
-  JSVC=$JSVC_HOME/jsvc
-  if [ ! -f $JSVC ]; then
-    echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run privileged NFS gateways. "
-    echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
-      "and set JSVC_HOME to the directory containing the jsvc binary."
-    exit
-  fi
-
-  if [[ ! $JSVC_OUTFILE ]]; then
-    JSVC_OUTFILE="$HADOOP_LOG_DIR/nfs3_jsvc.out"
+    hadoop_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\
+    "${daemon_pidfile}" "${daemon_outfile}" "$@"
   fi
-
-  if [[ ! $JSVC_ERRFILE ]]; then
-    JSVC_ERRFILE="$HADOOP_LOG_DIR/nfs3_jsvc.err"
-  fi
-
-  exec "$JSVC" \
-           -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
-           -errfile "$JSVC_ERRFILE" \
-           -pidfile "$HADOOP_PRIVILEGED_NFS_PID" \
-           -nodetach \
-           -user "$HADOOP_PRIVILEGED_NFS_USER" \
-           -cp "$CLASSPATH" \
-           $JAVA_HEAP_MAX $HADOOP_OPTS \
-           org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter "$@"
+  exit $?
 else
-  # run it
-  exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+  # shellcheck disable=SC2086
+  hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
 fi
-

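The rewritten hdfs launcher above trades the long if/elif chain for a case dispatch and delegates pid/log handling to the shared hadoop_daemon_handler and hadoop_secure_daemon_handler functions. A sketch of invocations under the new interface, per the usage text above; the confdir path is illustrative:

    # client commands work as before
    hdfs dfs -ls /

    # daemons can now be driven directly through the launcher
    hdfs --config /etc/hadoop/conf --daemon start namenode
    hdfs --daemon status datanode
    hdfs --daemon stop datanode
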
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh Thu Aug 21 05:22:10 2014
@@ -18,19 +18,67 @@
 # included in all the hdfs scripts with source command
 # should not be executed directly
 
-bin=`which "$0"`
-bin=`dirname "${bin}"`
-bin=`cd "$bin"; pwd`
+function hadoop_subproject_init
+{
+  if [ -e "${HADOOP_CONF_DIR}/hdfs-env.sh" ]; then
+    . "${HADOOP_CONF_DIR}/hdfs-env.sh"
+  fi
+  
+  # at some point in time, someone thought it would be a good idea to
+  # create separate vars for every subproject.  *sigh*
+  # let's perform some overrides and setup some defaults for bw compat
+  # this way the common hadoop var's == subproject vars and can be
+  # used interchangeable from here on out
+  # ...
+  # this should get deprecated at some point.
+  HADOOP_LOG_DIR="${HADOOP_HDFS_LOG_DIR:-$HADOOP_LOG_DIR}"
+  HADOOP_HDFS_LOG_DIR="${HADOOP_LOG_DIR}"
+  
+  HADOOP_LOGFILE="${HADOOP_HDFS_LOGFILE:-$HADOOP_LOGFILE}"
+  HADOOP_HDFS_LOGFILE="${HADOOP_LOGFILE}"
+  
+  HADOOP_NICENESS=${HADOOP_HDFS_NICENESS:-$HADOOP_NICENESS}
+  HADOOP_HDFS_NICENESS="${HADOOP_NICENESS}"
+  
+  HADOOP_STOP_TIMEOUT=${HADOOP_HDFS_STOP_TIMEOUT:-$HADOOP_STOP_TIMEOUT}
+  HADOOP_HDFS_STOP_TIMEOUT="${HADOOP_STOP_TIMEOUT}"
+  
+  HADOOP_PID_DIR="${HADOOP_HDFS_PID_DIR:-$HADOOP_PID_DIR}"
+  HADOOP_HDFS_PID_DIR="${HADOOP_PID_DIR}"
+  
+  HADOOP_ROOT_LOGGER=${HADOOP_HDFS_ROOT_LOGGER:-$HADOOP_ROOT_LOGGER}
+  HADOOP_HDFS_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}"
+  
+  HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME_DIR}"
+  
+  HADOOP_IDENT_STRING="${HADOOP_HDFS_IDENT_STRING:-$HADOOP_IDENT_STRING}"
+  HADOOP_HDFS_IDENT_STRING="${HADOOP_IDENT_STRING}"
+  
+  # turn on the defaults
+  
+  export HADOOP_NAMENODE_OPTS=${HADOOP_NAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"}
+  export HADOOP_SECONDARYNAMENODE_OPTS=${HADOOP_SECONDARYNAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"}
+  export HADOOP_DATANODE_OPTS=${HADOOP_DATANODE_OPTS:-"-Dhadoop.security.logger=ERROR,RFAS"}
+  export HADOOP_DN_SECURE_EXTRA_OPTS=${HADOOP_DN_SECURE_EXTRA_OPTS:-"-jvm server"}
+  export HADOOP_NFS3_SECURE_EXTRA_OPTS=${HADOOP_NFS3_SECURE_EXTRA_OPTS:-"-jvm server"}
+  export HADOOP_PORTMAP_OPTS=${HADOOP_PORTMAP_OPTS:-"-Xmx512m"}
+  
+  
+}
+
+if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
+  _hd_this="${BASH_SOURCE-$0}"
+  HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P)
+fi
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
 if [ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]; then
-  . ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
 elif [ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]; then
-  . "$HADOOP_COMMON_HOME"/libexec/hadoop-config.sh
+  . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
 elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
-  . "$HADOOP_HOME"/libexec/hadoop-config.sh
+  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
 else
-  echo "Hadoop common not found."
-  exit
+  echo "ERROR: Hadoop common not found." 2>&1
+  exit 1
 fi
+

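hadoop_subproject_init above folds each HADOOP_HDFS_* override back into the corresponding common HADOOP_* variable and sources ${HADOOP_CONF_DIR}/hdfs-env.sh if present. A sketch of what such an hdfs-env.sh might contain; the values are illustrative:

    # hdfs-env.sh -- HDFS-specific settings, reconciled with the common
    # HADOOP_* variables by hadoop_subproject_init
    export HADOOP_HDFS_LOG_DIR=/var/log/hadoop-hdfs
    export HADOOP_HDFS_PID_DIR=/var/run/hadoop-hdfs
    export HADOOP_HDFS_ROOT_LOGGER=INFO,RFA
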
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh Thu Aug 21 05:22:10 2014
@@ -20,24 +20,40 @@
 # This script refreshes all namenodes, it's a simple wrapper
 # for dfsadmin to support multiple namenodes.
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
-namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -nnRpcAddresses)
-if [ "$?" != '0' ] ; then errorFlag='1' ; 
+namenodes=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -nnRpcAddresses)
+if [[ "$?" != '0' ]] ; then
+  errorFlag='1' ;
 else
-  for namenode in $namenodes ; do
-    echo "Refreshing namenode [$namenode]"
-    "$HADOOP_PREFIX/bin/hdfs" dfsadmin -fs hdfs://$namenode -refreshNodes
-    if [ "$?" != '0' ] ; then errorFlag='1' ; fi
+  for namenode in ${namenodes} ; do
+    echo "Refreshing namenode [${namenode}]"
+    "${HADOOP_HDFS_HOME}/bin/hdfs" dfsadmin \
+    -fs hdfs://${namenode} -refreshNodes
+    if [[ "$?" != '0' ]]; then
+      errorFlag='1'
+    fi
   done
 fi
 
-if [ "$errorFlag" = '1' ] ; then
+if [[ "${errorFlag}" = '1' ]] ; then
   echo "Error: refresh of namenodes failed, see error messages above."
   exit 1
 else

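The loop above is equivalent to resolving the configured NameNode RPC addresses and refreshing each one by hand; the hostname and port below are hypothetical:

    hdfs getconf -nnRpcAddresses
    hdfs dfsadmin -fs hdfs://nn1.example.com:8020 -refreshNodes
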
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh Thu Aug 21 05:22:10 2014
@@ -15,13 +15,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function usage
+{
+  echo "Usage: start-balancer.sh [--config confdir]  [-policy <policy>] [-threshold <threshold>]"
+}
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
 # Start balancer daemon.
 
-"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@
+exec "${bin}/hadoop-daemon.sh" --config "${HADOOP_CONF_DIR}" start balancer "$@"

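The balancer is now started through hadoop-daemon.sh with the script's own arguments passed straight through, so a run with a custom threshold looks like the following; the value is illustrative:

    start-balancer.sh -threshold 5
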
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh Thu Aug 21 05:22:10 2014
@@ -20,98 +20,128 @@
 # Optinally upgrade or rollback dfs state.
 # Run this on master node.
 
-usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]"
+function hadoop_usage
+{
+  echo "Usage: start-dfs.sh [-upgrade|-rollback] [-clusterId]"
+}
+
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
 
 # get arguments
-if [ $# -ge 1 ]; then
-	nameStartOpt="$1"
-	shift
-	case "$nameStartOpt" in
-	  (-upgrade)
-	  	;;
-	  (-rollback) 
-	  	dataStartOpt="$nameStartOpt"
-	  	;;
-	  (*)
-		  echo $usage
-		  exit 1
-	    ;;
-	esac
+if [[ $# -ge 1 ]]; then
+  nameStartOpt="$1"
+  shift
+  case "$nameStartOpt" in
+    -upgrade)
+    ;;
+    -rollback)
+      dataStartOpt="$nameStartOpt"
+    ;;
+    *)
+      hadoop_exit_with_usage 1
+    ;;
+  esac
 fi
 
+
 #Add other possible options
 nameStartOpt="$nameStartOpt $@"
 
 #---------------------------------------------------------
 # namenodes
 
-NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
+NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)
+
+if [[ -z "${NAMENODES}" ]]; then
+  NAMENODES=$(hostname)
+fi
 
 echo "Starting namenodes on [$NAMENODES]"
 
-"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-  --config "$HADOOP_CONF_DIR" \
-  --hostnames "$NAMENODES" \
-  --script "$bin/hdfs" start namenode $nameStartOpt
+"${bin}/hadoop-daemons.sh" \
+--config "${HADOOP_CONF_DIR}" \
+--hostnames "${NAMENODES}" \
+start namenode ${nameStartOpt}
 
 #---------------------------------------------------------
 # datanodes (using default slaves file)
 
-if [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  echo \
-    "Attempting to start secure cluster, skipping datanodes. " \
-    "Run start-secure-dns.sh as root to complete startup."
+if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
+[[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
+  echo "ERROR: Attempting to start secure cluster, skipping datanodes. "
+  echo "Run start-secure-dns.sh as root or configure "
+  echo "\${HADOOP_SECURE_COMMAND} to complete startup."
 else
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --script "$bin/hdfs" start datanode $dataStartOpt
+  
+  echo "Starting datanodes"
+  
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  start datanode ${dataStartOpt}
 fi
 
 #---------------------------------------------------------
 # secondary namenodes (if any)
 
-SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
+SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
 
-if [ -n "$SECONDARY_NAMENODES" ]; then
-  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
+  SECONDARY_NAMENODES=$(hostname)
+fi
 
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-      --config "$HADOOP_CONF_DIR" \
-      --hostnames "$SECONDARY_NAMENODES" \
-      --script "$bin/hdfs" start secondarynamenode
+if [[ -n "${SECONDARY_NAMENODES}" ]]; then
+  echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
+  
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${SECONDARY_NAMENODES}" \
+  start secondarynamenode
 fi
 
 #---------------------------------------------------------
 # quorumjournal nodes (if any)
 
-SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
 
-case "$SHARED_EDITS_DIR" in
-qjournal://*)
-  JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
-  echo "Starting journal nodes [$JOURNAL_NODES]"
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-      --config "$HADOOP_CONF_DIR" \
-      --hostnames "$JOURNAL_NODES" \
-      --script "$bin/hdfs" start journalnode ;;
+case "${SHARED_EDITS_DIR}" in
+  qjournal://*)
+    JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+    echo "Starting journal nodes [${JOURNAL_NODES}]"
+    "${bin}/hadoop-daemons.sh" \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${JOURNAL_NODES}" \
+    start journalnode
+  ;;
 esac
 
 #---------------------------------------------------------
 # ZK Failover controllers, if auto-HA is enabled
-AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
-if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
-  echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --hostnames "$NAMENODES" \
-    --script "$bin/hdfs" start zkfc
+AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
+if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
+  echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${NAMENODES}" \
+  start zkfc
 fi
 
 # eof

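The restructured start-dfs.sh keeps the original bring-up order (namenodes, datanodes, secondary namenodes, journal nodes, then ZK failover controllers), with the HA stages gated on configuration. The same gates the script checks can be inspected by hand:

    # journal nodes start only when shared edits use a qjournal:// URI
    hdfs getconf -confKey dfs.namenode.shared.edits.dir

    # ZKFCs start only when automatic failover is enabled
    hdfs getconf -confKey dfs.ha.automatic-failover.enabled
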
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh Thu Aug 21 05:22:10 2014
@@ -17,17 +17,33 @@
 
 # Run as root to start secure datanodes in a security-enabled cluster.
 
-usage="Usage (run as root in order to start secure datanodes): start-secure-dns.sh"
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function hadoop_usage {
+  echo "Usage: start-secure-dns.sh"
+}
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
 
-if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
-  echo $usage
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
+
+if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
+  exec "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" start datanode "${dataStartOpt}"
+else
+  hadoop_exit_with_usage 1
 fi
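
The guard above only starts datanodes when the script runs as root with
HADOOP_SECURE_DN_USER set; anything else falls through to the usage message.
A minimal invocation sketch (the user name is illustrative):

    # run as root; HADOOP_SECURE_DN_USER names the unprivileged DN account
    export HADOOP_SECURE_DN_USER=hdfs
    ./start-secure-dns.sh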

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh Thu Aug 21 05:22:10 2014
@@ -15,14 +15,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function hadoop_usage
+{
+  echo "Usage: stop-balancer.sh [--config confdir]"
+}
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
 # Stop balancer daemon.
 # Run this on the machine where the balancer is running
 
-"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer
+"${bin}/hadoop-daemon.sh" --config "${HADOOP_CONF_DIR}" stop balancer

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh Thu Aug 21 05:22:10 2014
@@ -15,75 +15,100 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function hadoop_usage
+{
+  echo "Usage: start-balancer.sh [--config confdir]  [-policy <policy>] [-threshold <threshold>]"
+}
+
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
 #---------------------------------------------------------
 # namenodes
 
-NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
+NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes)
 
 echo "Stopping namenodes on [$NAMENODES]"
 
-"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-  --config "$HADOOP_CONF_DIR" \
-  --hostnames "$NAMENODES" \
-  --script "$bin/hdfs" stop namenode
+"${bin}/hadoop-daemons.sh" \
+--config "${HADOOP_CONF_DIR}" \
+--hostnames "${NAMENODES}" \
+stop namenode
 
 #---------------------------------------------------------
 # datanodes (using default slaves file)
 
-if [ -n "$HADOOP_SECURE_DN_USER" ]; then
+if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
+[[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
   echo \
-    "Attempting to stop secure cluster, skipping datanodes. " \
-    "Run stop-secure-dns.sh as root to complete shutdown."
+  "ERROR: Attempting to stop secure cluster, skipping datanodes. " \
+  "Run stop-secure-dns.sh as root to complete shutdown."
 else
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --script "$bin/hdfs" stop datanode
+
+  echo "Stopping datanodes"
+  
+  "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" stop datanode
 fi
 
 #---------------------------------------------------------
 # secondary namenodes (if any)
 
-SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
+SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
 
-if [ -n "$SECONDARY_NAMENODES" ]; then
-  echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
+  SECONDARY_NAMENODES=$(hostname)
+fi
 
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-      --config "$HADOOP_CONF_DIR" \
-      --hostnames "$SECONDARY_NAMENODES" \
-      --script "$bin/hdfs" stop secondarynamenode
+if [[ -n "${SECONDARY_NAMENODES}" ]]; then
+  echo "Stopping secondary namenodes [${SECONDARY_NAMENODES}]"
+
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${SECONDARY_NAMENODES}" \
+  stop secondarynamenode
 fi
 
 #---------------------------------------------------------
 # quorumjournal nodes (if any)
 
-SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
 
-case "$SHARED_EDITS_DIR" in
-qjournal://*)
-  JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
-  echo "Stopping journal nodes [$JOURNAL_NODES]"
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-      --config "$HADOOP_CONF_DIR" \
-      --hostnames "$JOURNAL_NODES" \
-      --script "$bin/hdfs" stop journalnode ;;
+case "${SHARED_EDITS_DIR}" in
+  qjournal://*)
+    JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+    echo "Stopping journal nodes [${JOURNAL_NODES}]"
+    "${bin}/hadoop-daemons.sh" \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${JOURNAL_NODES}" \
+    stop journalnode
+  ;;
 esac
 
 #---------------------------------------------------------
 # ZK Failover controllers, if auto-HA is enabled
-AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
-if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
-  echo "Stopping ZK Failover Controllers on NN hosts [$NAMENODES]"
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --hostnames "$NAMENODES" \
-    --script "$bin/hdfs" stop zkfc
+AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
+if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
+  echo "Stopping ZK Failover Controllers on NN hosts [${NAMENODES}]"
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${NAMENODES}" \
+  stop zkfc
 fi
 # eof

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh Thu Aug 21 05:22:10 2014
@@ -17,17 +17,33 @@
 
 # Run as root to stop secure datanodes in a security-enabled cluster.
 
-usage="Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh"
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function hadoop_usage {
+  echo "Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh"
+}
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
 
-if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
-  echo $usage
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
+
+if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
+  "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" stop datanode
+else
+  hadoop_exit_with_usage 1
 fi

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1594376-1619194
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1618764-1619277

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java Thu Aug 21 05:22:10 2014
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
@@ -31,6 +30,7 @@ import java.util.NoSuchElementException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CryptoCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -38,6 +38,8 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -59,6 +61,7 @@ import org.apache.hadoop.util.Progressab
 public class Hdfs extends AbstractFileSystem {
 
   DFSClient dfs;
+  final CryptoCodec factory;
   private boolean verifyChecksum = true;
 
   static {
@@ -85,6 +88,7 @@ public class Hdfs extends AbstractFileSy
     }
 
     this.dfs = new DFSClient(theUri, conf, getStatistics());
+    this.factory = CryptoCodec.getInstance(conf);
   }
 
   @Override
@@ -97,9 +101,12 @@ public class Hdfs extends AbstractFileSy
       EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
       int bufferSize, short replication, long blockSize, Progressable progress,
       ChecksumOpt checksumOpt, boolean createParent) throws IOException {
-    return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
-        absolutePermission, createFlag, createParent, replication, blockSize,
-        progress, bufferSize, checksumOpt), getStatistics());
+
+    final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
+      absolutePermission, createFlag, createParent, replication, blockSize,
+      progress, bufferSize, checksumOpt);
+    return dfs.createWrappedOutputStream(dfsos, statistics,
+        dfsos.getInitialLen());
   }
 
   @Override
@@ -308,8 +315,9 @@ public class Hdfs extends AbstractFileSy
   @Override
   public HdfsDataInputStream open(Path f, int bufferSize) 
       throws IOException, UnresolvedLinkException {
-    return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
-        bufferSize, verifyChecksum));
+    final DFSInputStream dfsis = dfs.open(getUriPath(f),
+      bufferSize, verifyChecksum);
+    return dfs.createWrappedInputStream(dfsis);
   }
 
   @Override
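
With open() and createInternal() now routed through the DFSClient wrapper
methods, FileContext callers get encryption handling without code changes. A
sketch of such a read under that assumption (the path is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class FileContextReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // FileContext resolves hdfs paths to the Hdfs AbstractFileSystem, so
        // the stream below decrypts transparently when the file carries a
        // FileEncryptionInfo, and passes through unchanged otherwise.
        FileContext fc = FileContext.getFileContext(conf);
        FSDataInputStream in = fc.open(new Path("/zone/file"));
        in.close();
      }
    }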

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java Thu Aug 21 05:22:10 2014
@@ -26,8 +26,8 @@ import org.apache.hadoop.classification.
 /**
  * XAttr is the POSIX Extended Attribute model similar to that found in
  * traditional Operating Systems.  Extended Attributes consist of one
- * or more name/value pairs associated with a file or directory. Four
- * namespaces are defined: user, trusted, security and system.
+ * or more name/value pairs associated with a file or directory. Five
+ * namespaces are defined: user, trusted, security, system and raw.
  *   1) USER namespace attributes may be used by any user to store
  *   arbitrary information. Access permissions in this namespace are
  *   defined by a file directory's permission bits. For sticky directories,
@@ -43,6 +43,12 @@ import org.apache.hadoop.classification.
  * <br>
  *   4) SECURITY namespace attributes are used by the fs kernel for
  *   security features. It is not visible to users.
+ * <br>
+ *   5) RAW namespace attributes are used for internal system attributes that
+ *   sometimes need to be exposed. Like SYSTEM namespace attributes, they are
+ *   not visible to the user except when getXAttr/getXAttrs is called on a file
+ *   or directory in the /.reserved/raw HDFS directory hierarchy.  These
+ *   attributes can only be accessed by the superuser.
  * <p/>
  * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
  * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
@@ -55,7 +61,8 @@ public class XAttr {
     USER,
     TRUSTED,
     SECURITY,
-    SYSTEM;
+    SYSTEM,
+    RAW;
   }
   
   private final NameSpace ns;
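
Per the Javadoc above, raw.* attributes surface only through the
/.reserved/raw hierarchy and only to the superuser. A minimal sketch of
reading one through the public FileSystem xattr API (the path and attribute
name are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RawXAttrSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // The same file addressed outside /.reserved/raw would not expose
        // raw.* names; only the superuser may read them at all.
        Path raw = new Path("/.reserved/raw/user/alice/file");
        byte[] value = fs.getXAttr(raw, "raw.example.attr");
        System.out.println("raw.example.attr: " + value.length + " bytes");
      }
    }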

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Thu Aug 21 05:22:10 2014
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
+import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension
+    .EncryptedKeyVersion;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
@@ -76,6 +81,7 @@ import java.net.Socket;
 import java.net.SocketAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
+import java.security.GeneralSecurityException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
@@ -95,6 +101,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.crypto.CryptoOutputStream;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStorageLocation;
 import org.apache.hadoop.fs.CacheFlag;
@@ -102,6 +113,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
@@ -140,6 +152,9 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -249,7 +264,11 @@ public class DFSClient implements java.i
   private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
       new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
-  
+  private final CryptoCodec codec;
+  @VisibleForTesting
+  List<CipherSuite> cipherSuites;
+  @VisibleForTesting
+  KeyProviderCryptoExtension provider;
   /**
    * DFSClient configuration 
    */
@@ -581,7 +600,17 @@ public class DFSClient implements java.i
     this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
     this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + 
         DFSUtil.getRandom().nextInt()  + "_" + Thread.currentThread().getId();
-    
+    this.codec = CryptoCodec.getInstance(conf);
+    this.cipherSuites = Lists.newArrayListWithCapacity(1);
+    if (codec != null) {
+      cipherSuites.add(codec.getCipherSuite());
+    }
+    provider = DFSUtil.createKeyProviderCryptoExtension(conf);
+    if (provider == null) {
+      LOG.info("No KeyProvider found.");
+    } else {
+      LOG.info("Found KeyProvider: " + provider.toString());
+    }
     int numResponseToDrop = conf.getInt(
         DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
         DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
@@ -1280,7 +1309,93 @@ public class DFSClient implements java.i
 
     return volumeBlockLocations;
   }
-  
+
+  /**
+   * Decrypts an EDEK by consulting the KeyProvider.
+   */
+  private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo
+      feInfo) throws IOException {
+    if (provider == null) {
+      throw new IOException("No KeyProvider is configured, cannot access" +
+          " an encrypted file");
+    }
+    EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
+        feInfo.getEzKeyVersionName(), feInfo.getIV(),
+        feInfo.getEncryptedDataEncryptionKey());
+    try {
+      return provider.decryptEncryptedKey(ekv);
+    } catch (GeneralSecurityException e) {
+      throw new IOException(e);
+    }
+  }
+
+  /**
+   * Wraps the stream in a CryptoInputStream if the underlying file is
+   * encrypted.
+   */
+  public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis)
+      throws IOException {
+    final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo();
+    if (feInfo != null) {
+      // File is encrypted, wrap the stream in a crypto stream.
+      KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
+      CryptoCodec codec = CryptoCodec
+          .getInstance(conf, feInfo.getCipherSuite());
+      if (codec == null) {
+        throw new IOException("No configuration found for the cipher suite "
+            + feInfo.getCipherSuite().getConfigSuffix() + " prefixed with "
+            + HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+            + ". Please see the example configuration "
+            + "hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE "
+            + "at core-default.xml for details.");
+      }
+      final CryptoInputStream cryptoIn =
+          new CryptoInputStream(dfsis, codec, decrypted.getMaterial(),
+              feInfo.getIV());
+      return new HdfsDataInputStream(cryptoIn);
+    } else {
+      // No FileEncryptionInfo so no encryption.
+      return new HdfsDataInputStream(dfsis);
+    }
+  }
+
+  /**
+   * Wraps the stream in a CryptoOutputStream if the underlying file is
+   * encrypted.
+   */
+  public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
+      FileSystem.Statistics statistics) throws IOException {
+    return createWrappedOutputStream(dfsos, statistics, 0);
+  }
+
+  /**
+   * Wraps the stream in a CryptoOutputStream if the underlying file is
+   * encrypted.
+   */
+  public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
+      FileSystem.Statistics statistics, long startPos) throws IOException {
+    final FileEncryptionInfo feInfo = dfsos.getFileEncryptionInfo();
+    if (feInfo != null) {
+      if (codec == null) {
+        throw new IOException("No configuration found for the cipher suite "
+            + HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY + " value prefixed with "
+            + HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+            + ". Please see the example configuration "
+            + "hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE "
+            + "at core-default.xml for details.");
+      }
+      // File is encrypted, wrap the stream in a crypto stream.
+      KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
+      final CryptoOutputStream cryptoOut =
+          new CryptoOutputStream(dfsos, codec,
+              decrypted.getMaterial(), feInfo.getIV(), startPos);
+      return new HdfsDataOutputStream(cryptoOut, statistics, startPos);
+    } else {
+      // No FileEncryptionInfo present so no encryption.
+      return new HdfsDataOutputStream(dfsos, statistics, startPos);
+    }
+  }
+
   public DFSInputStream open(String src) 
       throws IOException, UnresolvedLinkException {
     return open(src, dfsClientConf.ioBufferSize, true, null);
@@ -1483,7 +1598,8 @@ public class DFSClient implements java.i
     }
     final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
         src, masked, flag, createParent, replication, blockSize, progress,
-        buffersize, dfsClientConf.createChecksum(checksumOpt), favoredNodeStrs);
+        buffersize, dfsClientConf.createChecksum(checksumOpt),
+        favoredNodeStrs, cipherSuites);
     beginFileLease(result.getFileId(), result);
     return result;
   }
@@ -1530,7 +1646,7 @@ public class DFSClient implements java.i
       DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
       result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
           flag, createParent, replication, blockSize, progress, buffersize,
-          checksum);
+          checksum, null, cipherSuites);
     }
     beginFileLease(result.getFileId(), result);
     return result;
@@ -1608,7 +1724,7 @@ public class DFSClient implements java.i
       final Progressable progress, final FileSystem.Statistics statistics
       ) throws IOException {
     final DFSOutputStream out = append(src, buffersize, progress);
-    return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
+    return createWrappedOutputStream(out, statistics, out.getInitialLen());
   }
 
   private DFSOutputStream append(String src, int buffersize, Progressable progress) 
@@ -2772,6 +2888,36 @@ public class DFSClient implements java.i
     }
   }
   
+  public void createEncryptionZone(String src, String keyName)
+    throws IOException {
+    checkOpen();
+    try {
+      namenode.createEncryptionZone(src, keyName);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+                                     SafeModeException.class,
+                                     UnresolvedPathException.class);
+    }
+  }
+
+  public EncryptionZone getEZForPath(String src)
+      throws IOException {
+    checkOpen();
+    try {
+      final EncryptionZoneWithId ezi = namenode.getEZForPath(src);
+      return (ezi.getId() < 0) ? null : ezi;
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+                                     UnresolvedPathException.class);
+    }
+  }
+
+  public RemoteIterator<EncryptionZone> listEncryptionZones()
+      throws IOException {
+    checkOpen();
+    return new EncryptionZoneIterator(namenode);
+  }
+
   public void setXAttr(String src, String name, byte[] value, 
       EnumSet<XAttrSetFlag> flag) throws IOException {
     checkOpen();
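
Taken together, the additions above give DFSClient a symmetric pattern: open
or create the raw stream, then wrap it so a CryptoInputStream or
CryptoOutputStream is layered in only when the file carries a
FileEncryptionInfo. A condensed sketch of the read side, assuming an
already-constructed client (error handling elided):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.DFSInputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

    public class WrappedReadSketch {
      // The wrapper consults the KeyProvider to decrypt the file's EDEK and
      // returns a plain HdfsDataInputStream when the file is not encrypted.
      static HdfsDataInputStream openMaybeDecrypting(DFSClient client,
          String src) throws IOException {
        DFSInputStream dfsis = client.open(src);
        return client.createWrappedInputStream(dfsis);
      }
    }

The new createEncryptionZone(), getEZForPath() and listEncryptionZones()
calls round out the client-side API, with listEncryptionZones() returning a
RemoteIterator backed by the EncryptionZoneIterator added in this merge.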

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Aug 21 05:22:10 2014
@@ -582,7 +582,9 @@ public class DFSConfigKeys extends Commo
   public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
   public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
   public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
-  
+  public static final int    DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
+  public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
+
   // Journal-node related configs. These are read on the JN side.
   public static final String  DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";
   public static final String  DFS_JOURNALNODE_EDITS_DIR_DEFAULT = "/tmp/hadoop/dfs/journalnode/";
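
The new pair of constants caps how many encryption zones the NameNode returns
per listEncryptionZones batch. A sketch of how a consumer might resolve the
value (the surrounding class is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class EZBatchSizeSketch {
      static int batchSize(Configuration conf) {
        // Falls back to the default of 100 added above.
        return conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
            DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT);
      }
    }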

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Thu Aug 21 05:22:10 2014
@@ -56,6 +56,7 @@ import org.apache.hadoop.fs.UnresolvedLi
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
@@ -92,6 +93,7 @@ implements ByteBufferReadable, CanSetDro
   private final boolean verifyChecksum;
   private LocatedBlocks locatedBlocks = null;
   private long lastBlockBeingWrittenLength = 0;
+  private FileEncryptionInfo fileEncryptionInfo = null;
   private DatanodeInfo currentNode = null;
   private LocatedBlock currentLocatedBlock = null;
   private long pos = 0;
@@ -301,6 +303,8 @@ implements ByteBufferReadable, CanSetDro
       }
     }
 
+    fileEncryptionInfo = locatedBlocks.getFileEncryptionInfo();
+
     currentNode = null;
     return lastBlockBeingWrittenLength;
   }
@@ -1525,6 +1529,10 @@ implements ByteBufferReadable, CanSetDro
     return new ReadStatistics(readStatistics);
   }
 
+  public synchronized FileEncryptionInfo getFileEncryptionInfo() {
+    return fileEncryptionInfo;
+  }
+
   private synchronized void closeCurrentBlockReader() {
     if (blockReader == null) return;
     // Close the current block reader so that the new caching settings can