Posted to hdfs-commits@hadoop.apache.org by cn...@apache.org on 2014/07/14 20:28:03 UTC
svn commit: r1610479 [1/2] - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/net/
src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ src/main/ja...
Author: cnauroth
Date: Mon Jul 14 18:28:02 2014
New Revision: 1610479
URL: http://svn.apache.org/r1610479
Log:
HDFS-2856. Merging change r1610474 from trunk to branch-2.
Added:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/InvalidMagicNumberException.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java
Removed:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1610479&r1=1610478&r2=1610479&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Jul 14 18:28:02 2014
@@ -34,6 +34,9 @@ Release 2.6.0 - UNRELEASED
HDFS-3851. DFSOutputStream class code cleanup. (Jing Zhao via suresh)
+ HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc.
+ (cnauroth)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1610479&r1=1610478&r2=1610479&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml Mon Jul 14 18:28:02 2014
@@ -146,6 +146,11 @@ http://maven.apache.org/xsd/maven-4.0.0.
<scope>test</scope>
</dependency>
<dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-minikdc</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<scope>test</scope>
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java?rev=1610479&r1=1610478&r2=1610479&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java Mon Jul 14 18:28:02 2014
@@ -744,7 +744,8 @@ public class BlockReaderFactory implemen
}
}
try {
- Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress);
+ Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress, token,
+ datanode);
if (LOG.isTraceEnabled()) {
LOG.trace("nextTcpPeer: created newConnectedPeer " + peer);
}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1610479&r1=1610478&r2=1610479&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Mon Jul 14 18:28:02 2014
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
@@ -93,7 +95,6 @@ import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
@@ -136,6 +137,7 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -153,16 +155,19 @@ import org.apache.hadoop.hdfs.protocol.S
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -209,7 +214,8 @@ import com.google.common.net.InetAddress
*
********************************************************/
@InterfaceAudience.Private
-public class DFSClient implements java.io.Closeable, RemotePeerFactory {
+public class DFSClient implements java.io.Closeable, RemotePeerFactory,
+ DataEncryptionKeyFactory {
public static final Log LOG = LogFactory.getLog(DFSClient.class);
public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
@@ -233,7 +239,7 @@ public class DFSClient implements java.i
private final Random r = new Random();
private SocketAddress[] localInterfaceAddrs;
private DataEncryptionKey encryptionKey;
- final TrustedChannelResolver trustedChannelResolver;
+ final SaslDataTransferClient saslClient;
private final CachingStrategy defaultReadCachingStrategy;
private final CachingStrategy defaultWriteCachingStrategy;
private final ClientContext clientContext;
@@ -635,7 +641,12 @@ public class DFSClient implements java.i
if (numThreads > 0) {
this.initThreadsNumForHedgedReads(numThreads);
}
- this.trustedChannelResolver = TrustedChannelResolver.getInstance(getConfiguration());
+ this.saslClient = new SaslDataTransferClient(
+ DataTransferSaslUtil.getSaslPropertiesResolver(conf),
+ TrustedChannelResolver.getInstance(conf),
+ conf.getBoolean(
+ IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
+ IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
}
/**
@@ -1805,23 +1816,6 @@ public class DFSClient implements java.i
UnresolvedPathException.class);
}
}
-
- /**
- * Get the checksum of the whole file of a range of the file. Note that the
- * range always starts from the beginning of the file.
- * @param src The file path
- * @param length The length of the range
- * @return The checksum
- * @see DistributedFileSystem#getFileChecksum(Path)
- */
- public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
- throws IOException {
- checkOpen();
- Preconditions.checkArgument(length >= 0);
- return getFileChecksum(src, length, clientName, namenode,
- socketFactory, dfsClientConf.socketTimeout, getDataEncryptionKey(),
- dfsClientConf.connectToDnViaHostname);
- }
@InterfaceAudience.Private
public void clearDataEncryptionKey() {
@@ -1841,11 +1835,9 @@ public class DFSClient implements java.i
return d == null ? false : d.getEncryptDataTransfer();
}
- @InterfaceAudience.Private
- public DataEncryptionKey getDataEncryptionKey()
- throws IOException {
- if (shouldEncryptData() &&
- !this.trustedChannelResolver.isTrusted()) {
+ @Override
+ public DataEncryptionKey newDataEncryptionKey() throws IOException {
+ if (shouldEncryptData()) {
synchronized (this) {
if (encryptionKey == null ||
encryptionKey.expiryDate < Time.now()) {
@@ -1860,22 +1852,17 @@ public class DFSClient implements java.i
}
/**
- * Get the checksum of the whole file or a range of the file.
+ * Get the checksum of the whole file or a range of the file. Note that the
+ * range always starts from the beginning of the file.
* @param src The file path
* @param length the length of the range, i.e., the range is [0, length]
- * @param clientName the name of the client requesting the checksum.
- * @param namenode the RPC proxy for the namenode
- * @param socketFactory to create sockets to connect to DNs
- * @param socketTimeout timeout to use when connecting and waiting for a response
- * @param encryptionKey the key needed to communicate with DNs in this cluster
- * @param connectToDnViaHostname whether the client should use hostnames instead of IPs
* @return The checksum
+ * @see DistributedFileSystem#getFileChecksum(Path)
*/
- private static MD5MD5CRC32FileChecksum getFileChecksum(String src,
- long length, String clientName, ClientProtocol namenode,
- SocketFactory socketFactory, int socketTimeout,
- DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+ public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
throws IOException {
+ checkOpen();
+ Preconditions.checkArgument(length >= 0);
//get block locations for the file range
LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0,
length);
@@ -1910,7 +1897,7 @@ public class DFSClient implements java.i
final DatanodeInfo[] datanodes = lb.getLocations();
//try each datanode location of the block
- final int timeout = 3000 * datanodes.length + socketTimeout;
+ final int timeout = 3000 * datanodes.length + dfsClientConf.socketTimeout;
boolean done = false;
for(int j = 0; !done && j < datanodes.length; j++) {
DataOutputStream out = null;
@@ -1918,8 +1905,7 @@ public class DFSClient implements java.i
try {
//connect to a datanode
- IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
- encryptionKey, datanodes[j], timeout);
+ IOStreamPair pair = connectToDN(datanodes[j], timeout, lb);
out = new DataOutputStream(new BufferedOutputStream(pair.out,
HdfsConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(pair.in);
@@ -1975,9 +1961,7 @@ public class DFSClient implements java.i
} else {
LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
"inferring checksum by reading first byte");
- ct = inferChecksumTypeByReading(
- clientName, socketFactory, socketTimeout, lb, datanodes[j],
- encryptionKey, connectToDnViaHostname);
+ ct = inferChecksumTypeByReading(lb, datanodes[j]);
}
if (i == 0) { // first block
@@ -2051,16 +2035,13 @@ public class DFSClient implements java.i
* Connect to the given datanode's data transfer port, and return
* the resulting IOStreamPair. This includes encryption wrapping, etc.
*/
- private static IOStreamPair connectToDN(
- SocketFactory socketFactory, boolean connectToDnViaHostname,
- DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
- throws IOException
- {
+ private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
+ LocatedBlock lb) throws IOException {
boolean success = false;
Socket sock = null;
try {
sock = socketFactory.createSocket();
- String dnAddr = dn.getXferAddr(connectToDnViaHostname);
+ String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr);
}
@@ -2069,13 +2050,8 @@ public class DFSClient implements java.i
OutputStream unbufOut = NetUtils.getOutputStream(sock);
InputStream unbufIn = NetUtils.getInputStream(sock);
- IOStreamPair ret;
- if (encryptionKey != null) {
- ret = DataTransferEncryptor.getEncryptedStreams(
- unbufOut, unbufIn, encryptionKey);
- } else {
- ret = new IOStreamPair(unbufIn, unbufOut);
- }
+ IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
+ lb.getBlockToken(), dn);
success = true;
return ret;
} finally {
@@ -2091,21 +2067,14 @@ public class DFSClient implements java.i
* with older HDFS versions which did not include the checksum type in
* OpBlockChecksumResponseProto.
*
- * @param in input stream from datanode
- * @param out output stream to datanode
* @param lb the located block
- * @param clientName the name of the DFSClient requesting the checksum
* @param dn the connected datanode
* @return the inferred checksum type
* @throws IOException if an error occurs
*/
- private static Type inferChecksumTypeByReading(
- String clientName, SocketFactory socketFactory, int socketTimeout,
- LocatedBlock lb, DatanodeInfo dn,
- DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+ private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
throws IOException {
- IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
- encryptionKey, dn, socketTimeout);
+ IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);
try {
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
@@ -2858,7 +2827,9 @@ public class DFSClient implements java.i
}
@Override // RemotePeerFactory
- public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {
+ public Peer newConnectedPeer(InetSocketAddress addr,
+ Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+ throws IOException {
Peer peer = null;
boolean success = false;
Socket sock = null;
@@ -2867,8 +2838,8 @@ public class DFSClient implements java.i
NetUtils.connect(sock, addr,
getRandomLocalInterfaceAddr(),
dfsClientConf.socketTimeout);
- peer = TcpPeerServer.peerFromSocketAndKey(sock,
- getDataEncryptionKey());
+ peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
+ blockToken, datanodeId);
success = true;
return peer;
} finally {
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1610479&r1=1610478&r2=1610479&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Mon Jul 14 18:28:02 2014
@@ -565,6 +565,8 @@ public class DFSConfigKeys extends Commo
public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
+ public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
+ public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
// Journal-node related configs. These are read on the JN side.
public static final String DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";
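The two keys added above are the knobs that enable SASL on DataTransferProtocol. A minimal sketch of wiring them programmatically (not part of this commit; the class and resolver names are hypothetical, and the QOP vocabulary is the one getSaslPropertiesResolver() in this patch copies into hadoop.rpc.protection):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class DataTransferProtectionConfSketch {
      public static Configuration configure() {
        Configuration conf = new Configuration();
        // Request a SASL QOP for DataTransferProtocol; valid values are the
        // hadoop.rpc.protection vocabulary: authentication, integrity, privacy.
        conf.set(DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
        // Optional, and hypothetical here: plug in a custom resolver subclass.
        // conf.set(DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY,
        //     MyQopResolver.class.getName());
        return conf;
      }
    }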
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1610479&r1=1610478&r2=1610479&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Mon Jul 14 18:28:02 2014
@@ -61,7 +61,6 @@ import org.apache.hadoop.hdfs.protocol.N
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
@@ -1047,14 +1046,10 @@ public class DFSOutputStream extends FSO
OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(sock);
- if (dfsClient.shouldEncryptData() &&
- !dfsClient.trustedChannelResolver.isTrusted(sock.getInetAddress())) {
- IOStreamPair encryptedStreams =
- DataTransferEncryptor.getEncryptedStreams(
- unbufOut, unbufIn, dfsClient.getDataEncryptionKey());
- unbufOut = encryptedStreams.out;
- unbufIn = encryptedStreams.in;
- }
+ IOStreamPair saslStreams = dfsClient.saslClient.socketSend(sock,
+ unbufOut, unbufIn, dfsClient, blockToken, src);
+ unbufOut = saslStreams.out;
+ unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
HdfsConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(unbufIn);
@@ -1325,14 +1320,10 @@ public class DFSOutputStream extends FSO
OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(s);
- if (dfsClient.shouldEncryptData() &&
- !dfsClient.trustedChannelResolver.isTrusted(s.getInetAddress())) {
- IOStreamPair encryptedStreams =
- DataTransferEncryptor.getEncryptedStreams(unbufOut,
- unbufIn, dfsClient.getDataEncryptionKey());
- unbufOut = encryptedStreams.out;
- unbufIn = encryptedStreams.in;
- }
+ IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s,
+ unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
+ unbufOut = saslStreams.out;
+ unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
HdfsConstants.SMALL_BUFFER_SIZE));
blockReplyStream = new DataInputStream(unbufIn);
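Both DFSOutputStream hunks above replace the inline encryption branch with the same call shape. A self-contained sketch of that pattern (not part of this commit; the helper name and passing the SaslDataTransferClient explicitly are illustrative, since DFSClient.saslClient is package-private):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.net.Socket;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
    import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.security.token.Token;

    public class SaslStreamWrapSketch {
      // Hypothetical helper: wrap a freshly connected socket's streams via SASL.
      static IOStreamPair wrapWithSasl(SaslDataTransferClient saslClient,
          Socket sock, DataEncryptionKeyFactory keyFactory,
          Token<BlockTokenIdentifier> accessToken, DatanodeID target,
          long writeTimeout) throws IOException {
        OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
        InputStream unbufIn = NetUtils.getInputStream(sock);
        // socketSend() negotiates SASL only when required and otherwise hands
        // back the original streams, so callers treat both cases uniformly.
        return saslClient.socketSend(sock, unbufOut, unbufIn, keyFactory,
            accessToken, target);
      }
    }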
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java?rev=1610479&r1=1610478&r2=1610479&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java Mon Jul 14 18:28:02 2014
@@ -21,15 +21,21 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
public interface RemotePeerFactory {
/**
* @param addr The address to connect to.
- *
+ * @param blockToken Token used during optional SASL negotiation
+ * @param datanodeId ID of destination DataNode
* @return A new Peer connected to the address.
*
* @throws IOException If there was an error connecting or creating
* the remote socket, encrypted stream, etc.
*/
- Peer newConnectedPeer(InetSocketAddress addr) throws IOException;
+ Peer newConnectedPeer(InetSocketAddress addr,
+ Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+ throws IOException;
}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java?rev=1610479&r1=1610478&r2=1610479&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java Mon Jul 14 18:28:02 2014
@@ -19,9 +19,7 @@ package org.apache.hadoop.hdfs.net;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.net.unix.DomainSocket;
import java.io.InputStream;
@@ -51,11 +49,8 @@ public class EncryptedPeer implements Pe
*/
private final ReadableByteChannel channel;
- public EncryptedPeer(Peer enclosedPeer, DataEncryptionKey key)
- throws IOException {
+ public EncryptedPeer(Peer enclosedPeer, IOStreamPair ios) {
this.enclosedPeer = enclosedPeer;
- IOStreamPair ios = DataTransferEncryptor.getEncryptedStreams(
- enclosedPeer.getOutputStream(), enclosedPeer.getInputStream(), key);
this.in = ios.in;
this.out = ios.out;
this.channel = ios.in instanceof ReadableByteChannel ?
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java?rev=1610479&r1=1610478&r2=1610479&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java Mon Jul 14 18:28:02 2014
@@ -28,10 +28,14 @@ import java.nio.channels.SocketChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.token.Token;
@InterfaceAudience.Private
public class TcpPeerServer implements PeerServer {
@@ -74,15 +78,16 @@ public class TcpPeerServer implements Pe
}
}
- public static Peer peerFromSocketAndKey(Socket s,
- DataEncryptionKey key) throws IOException {
+ public static Peer peerFromSocketAndKey(
+ SaslDataTransferClient saslClient, Socket s,
+ DataEncryptionKeyFactory keyFactory,
+ Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+ throws IOException {
Peer peer = null;
boolean success = false;
try {
- peer = peerFromSocket(s);
- if (key != null) {
- peer = new EncryptedPeer(peer, key);
- }
+ peer = peerFromSocket(s);
+ peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
success = true;
return peer;
} finally {
Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java?rev=1610479&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java Mon Jul 14 18:28:02 2014
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+
+/**
+ * Creates a new {@link DataEncryptionKey} on demand.
+ */
+@InterfaceAudience.Private
+public interface DataEncryptionKeyFactory {
+
+ /**
+ * Creates a new DataEncryptionKey.
+ *
+ * @return DataEncryptionKey newly created
+ * @throws IOException for any error
+ */
+ DataEncryptionKey newDataEncryptionKey() throws IOException;
+}
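Because SaslDataTransferClient (added later in this commit) treats a null key as "encryption not in play", the smallest useful illustration of this contract is a factory that never supplies one. A hedged sketch, not part of this commit:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
    import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;

    /** Hypothetical factory for a cluster without dfs.encrypt.data.transfer. */
    public class NoEncryptionKeyFactory implements DataEncryptionKeyFactory {
      @Override
      public DataEncryptionKey newDataEncryptionKey() throws IOException {
        // Returning null means no specialized encrypted handshake; the client
        // falls through to the general-purpose SASL negotiation (or none).
        return null;
      }
    }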
Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java?rev=1610479&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java Mon Jul 14 18:28:02 2014
@@ -0,0 +1,267 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
+import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Set;
+import javax.security.sasl.Sasl;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;
+import org.apache.hadoop.security.SaslPropertiesResolver;
+import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import com.google.common.net.InetAddresses;
+import com.google.protobuf.ByteString;
+
+/**
+ * Utility methods implementing SASL negotiation for DataTransferProtocol.
+ */
+@InterfaceAudience.Private
+public final class DataTransferSaslUtil {
+
+ private static final Logger LOG = LoggerFactory.getLogger(
+ DataTransferSaslUtil.class);
+
+ /**
+ * Delimiter for the three-part SASL username string.
+ */
+ public static final String NAME_DELIMITER = " ";
+
+ /**
+ * Sent by clients and validated by servers. We use a number that's unlikely
+ * to ever be sent as the value of the DATA_TRANSFER_VERSION.
+ */
+ public static final int SASL_TRANSFER_MAGIC_NUMBER = 0xDEADBEEF;
+
+ /**
+ * Checks that SASL negotiation has completed for the given participant, and
+ * the negotiated quality of protection is included in the given SASL
+ * properties and therefore acceptable.
+ *
+ * @param sasl participant to check
+ * @param saslProps properties of SASL negotiation
+ * @throws IOException for any error
+ */
+ public static void checkSaslComplete(SaslParticipant sasl,
+ Map<String, String> saslProps) throws IOException {
+ if (!sasl.isComplete()) {
+ throw new IOException("Failed to complete SASL handshake");
+ }
+ Set<String> requestedQop = ImmutableSet.copyOf(Arrays.asList(
+ saslProps.get(Sasl.QOP).split(",")));
+ String negotiatedQop = sasl.getNegotiatedQop();
+ LOG.debug("Verifying QOP, requested QOP = {}, negotiated QOP = {}",
+ requestedQop, negotiatedQop);
+ if (!requestedQop.contains(negotiatedQop)) {
+ throw new IOException(String.format("SASL handshake completed, but " +
+ "channel does not have acceptable quality of protection, " +
+ "requested = %s, negotiated = %s", requestedQop, negotiatedQop));
+ }
+ }
+
+ /**
+ * Creates SASL properties required for an encrypted SASL negotiation.
+ *
+ * @param encryptionAlgorithm to use for SASL negotiation
+ * @return properties of encrypted SASL negotiation
+ */
+ public static Map<String, String> createSaslPropertiesForEncryption(
+ String encryptionAlgorithm) {
+ Map<String, String> saslProps = Maps.newHashMapWithExpectedSize(3);
+ saslProps.put(Sasl.QOP, QualityOfProtection.PRIVACY.getSaslQop());
+ saslProps.put(Sasl.SERVER_AUTH, "true");
+ saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);
+ return saslProps;
+ }
+
+ /**
+ * For an encrypted SASL negotiation, encodes an encryption key to a SASL
+ * password.
+ *
+ * @param encryptionKey to encode
+ * @return key encoded as SASL password
+ */
+ public static char[] encryptionKeyToPassword(byte[] encryptionKey) {
+ return new String(Base64.encodeBase64(encryptionKey, false), Charsets.UTF_8)
+ .toCharArray();
+ }
+
+ /**
+ * Returns InetAddress from peer. The getRemoteAddressString has the form
+ * [host][/ip-address]:port. The host may be missing. The IP address (and
+ * preceding '/') may be missing. The port preceded by ':' is always present.
+ *
+ * @param peer peer whose remote address is parsed
+ * @return InetAddress from peer
+ */
+ public static InetAddress getPeerAddress(Peer peer) {
+ String remoteAddr = peer.getRemoteAddressString().split(":")[0];
+ int slashIdx = remoteAddr.indexOf('/');
+ return InetAddresses.forString(slashIdx != -1 ?
+ remoteAddr.substring(slashIdx + 1, remoteAddr.length()) :
+ remoteAddr);
+ }
+
+ /**
+ * Creates a SaslPropertiesResolver from the given configuration. This method
+ * works by cloning the configuration, translating configuration properties
+ * specific to DataTransferProtocol to what SaslPropertiesResolver expects,
+ * and then delegating to SaslPropertiesResolver for initialization. This
+ * method returns null if SASL protection has not been configured for
+ * DataTransferProtocol.
+ *
+ * @param conf configuration to read
+ * @return SaslPropertiesResolver for DataTransferProtocol, or null if not
+ * configured
+ */
+ public static SaslPropertiesResolver getSaslPropertiesResolver(
+ Configuration conf) {
+ String qops = conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY);
+ if (qops == null || qops.isEmpty()) {
+ LOG.debug("DataTransferProtocol not using SaslPropertiesResolver, no " +
+ "QOP found in configuration for {}", DFS_DATA_TRANSFER_PROTECTION_KEY);
+ return null;
+ }
+ Configuration saslPropsResolverConf = new Configuration(conf);
+ saslPropsResolverConf.set(HADOOP_RPC_PROTECTION, qops);
+ Class<? extends SaslPropertiesResolver> resolverClass = conf.getClass(
+ DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY,
+ SaslPropertiesResolver.class, SaslPropertiesResolver.class);
+ saslPropsResolverConf.setClass(HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
+ resolverClass, SaslPropertiesResolver.class);
+ SaslPropertiesResolver resolver = SaslPropertiesResolver.getInstance(
+ saslPropsResolverConf);
+ LOG.debug("DataTransferProtocol using SaslPropertiesResolver, configured " +
+ "QOP {} = {}, configured class {} = {}", DFS_DATA_TRANSFER_PROTECTION_KEY, qops,
+ DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY, resolverClass);
+ return resolver;
+ }
+
+ /**
+ * Performs the first step of SASL negotiation.
+ *
+ * @param out connection output stream
+ * @param in connection input stream
+ * @param sasl participant
+ */
+ public static void performSaslStep1(OutputStream out, InputStream in,
+ SaslParticipant sasl) throws IOException {
+ byte[] remoteResponse = readSaslMessage(in);
+ byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
+ sendSaslMessage(out, localResponse);
+ }
+
+ /**
+ * Reads a SASL negotiation message.
+ *
+ * @param in stream to read
+ * @return bytes of SASL negotiation message
+ * @throws IOException for any error
+ */
+ public static byte[] readSaslMessage(InputStream in) throws IOException {
+ DataTransferEncryptorMessageProto proto =
+ DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in));
+ if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
+ throw new InvalidEncryptionKeyException(proto.getMessage());
+ } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
+ throw new IOException(proto.getMessage());
+ } else {
+ return proto.getPayload().toByteArray();
+ }
+ }
+
+ /**
+ * Sends a SASL negotiation message indicating an error.
+ *
+ * @param out stream to receive message
+ * @param message to send
+ * @throws IOException for any error
+ */
+ public static void sendGenericSaslErrorMessage(OutputStream out,
+ String message) throws IOException {
+ sendSaslMessage(out, DataTransferEncryptorStatus.ERROR, null, message);
+ }
+
+ /**
+ * Sends a SASL negotiation message.
+ *
+ * @param out stream to receive message
+ * @param payload to send
+ * @throws IOException for any error
+ */
+ public static void sendSaslMessage(OutputStream out, byte[] payload)
+ throws IOException {
+ sendSaslMessage(out, DataTransferEncryptorStatus.SUCCESS, payload, null);
+ }
+
+ /**
+ * Sends a SASL negotiation message.
+ *
+ * @param out stream to receive message
+ * @param status negotiation status
+ * @param payload to send
+ * @param message to send
+ * @throws IOException for any error
+ */
+ public static void sendSaslMessage(OutputStream out,
+ DataTransferEncryptorStatus status, byte[] payload, String message)
+ throws IOException {
+ DataTransferEncryptorMessageProto.Builder builder =
+ DataTransferEncryptorMessageProto.newBuilder();
+
+ builder.setStatus(status);
+ if (payload != null) {
+ builder.setPayload(ByteString.copyFrom(payload));
+ }
+ if (message != null) {
+ builder.setMessage(message);
+ }
+
+ DataTransferEncryptorMessageProto proto = builder.build();
+ proto.writeDelimitedTo(out);
+ out.flush();
+ }
+
+ /**
+ * There is no reason to instantiate this class.
+ */
+ private DataTransferSaslUtil() {
+ }
+}
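One subtlety worth pinning down is the address parsing described in getPeerAddress() above: getRemoteAddressString() has the form [host][/ip-address]:port, with the host and the slash-prefixed IP each optional. A standalone sketch over hypothetical inputs, reusing the same split/indexOf logic:

    public class PeerAddressParseSketch {
      public static void main(String[] args) {
        // Hypothetical getRemoteAddressString() values; ":port" is always there.
        String[] samples = { "dn1.example.com/10.0.0.5:50010", "/10.0.0.5:50010" };
        for (String s : samples) {
          String remoteAddr = s.split(":")[0];    // strip the ":port" suffix
          int slashIdx = remoteAddr.indexOf('/'); // host prefix is optional
          String ip = slashIdx != -1
              ? remoteAddr.substring(slashIdx + 1) : remoteAddr;
          System.out.println(s + " -> " + ip);    // both print 10.0.0.5
        }
      }
    }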
Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/InvalidMagicNumberException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/InvalidMagicNumberException.java?rev=1610479&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/InvalidMagicNumberException.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/InvalidMagicNumberException.java Mon Jul 14 18:28:02 2014
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.SASL_TRANSFER_MAGIC_NUMBER;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Indicates that SASL protocol negotiation expected to read a pre-defined magic
+ * number, but the expected value was not seen.
+ */
+@InterfaceAudience.Private
+public class InvalidMagicNumberException extends IOException {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Creates a new InvalidMagicNumberException.
+ *
+ * @param magicNumber expected value
+ */
+ public InvalidMagicNumberException(int magicNumber) {
+ super(String.format("Received %x instead of %x from client.",
+ magicNumber, SASL_TRANSFER_MAGIC_NUMBER));
+ }
+}
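The matching read-side check lives in SaslDataTransferServer (added by this commit but outside this hunk). A hedged sketch of the shape of that check, assuming the magic number arrives as a leading 4-byte int:

    import java.io.DataInputStream;
    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException;

    public class MagicNumberCheckSketch {
      static void expectSaslMagicNumber(DataInputStream in) throws IOException {
        int magic = in.readInt();
        if (magic != DataTransferSaslUtil.SASL_TRANSFER_MAGIC_NUMBER) {
          // Reject peers that did not open with the SASL magic number.
          throw new InvalidMagicNumberException(magic);
        }
      }
    }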
Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java?rev=1610479&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java Mon Jul 14 18:28:02 2014
@@ -0,0 +1,439 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.net.Socket;
+import java.util.Map;
+
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.NameCallback;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.sasl.RealmCallback;
+import javax.security.sasl.RealmChoiceCallback;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.net.EncryptedPeer;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.security.SaslPropertiesResolver;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+
+/**
+ * Negotiates SASL for DataTransferProtocol on behalf of a client. There are
+ * two supported variants of SASL negotiation: either a general-purpose
+ * negotiation supporting any quality of protection, or a specialized
+ * negotiation that enforces privacy as the quality of protection using a
+ * cryptographically strong encryption key.
+ *
+ * This class is used in both the HDFS client and the DataNode. The DataNode
+ * needs it, because it acts as a client to other DataNodes during write
+ * pipelines and block transfers.
+ */
+@InterfaceAudience.Private
+public class SaslDataTransferClient {
+
+ private static final Logger LOG = LoggerFactory.getLogger(
+ SaslDataTransferClient.class);
+
+ private final boolean fallbackToSimpleAuthAllowed;
+ private final SaslPropertiesResolver saslPropsResolver;
+ private final TrustedChannelResolver trustedChannelResolver;
+
+ /**
+ * Creates a new SaslDataTransferClient.
+ *
+ * @param saslPropsResolver for determining properties of SASL negotiation
+ * @param trustedChannelResolver for identifying trusted connections that do
+ * not require SASL negotiation
+ */
+ public SaslDataTransferClient(SaslPropertiesResolver saslPropsResolver,
+ TrustedChannelResolver trustedChannelResolver,
+ boolean fallbackToSimpleAuthAllowed) {
+ this.fallbackToSimpleAuthAllowed = fallbackToSimpleAuthAllowed;
+ this.saslPropsResolver = saslPropsResolver;
+ this.trustedChannelResolver = trustedChannelResolver;
+ }
+
+ /**
+ * Sends client SASL negotiation for a newly allocated socket if required.
+ *
+ * @param socket connection socket
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param encryptionKeyFactory for creation of an encryption key
+ * @param accessToken connection block access token
+ * @param datanodeId ID of destination DataNode
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ public IOStreamPair newSocketSend(Socket socket, OutputStream underlyingOut,
+ InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
+ Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+ throws IOException {
+ // The encryption key factory only returns a key if encryption is enabled.
+ DataEncryptionKey encryptionKey = !trustedChannelResolver.isTrusted() ?
+ encryptionKeyFactory.newDataEncryptionKey() : null;
+ IOStreamPair ios = send(socket.getInetAddress(), underlyingOut,
+ underlyingIn, encryptionKey, accessToken, datanodeId);
+ return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
+ }
+
+ /**
+ * Sends client SASL negotiation for a peer if required.
+ *
+ * @param peer connection peer
+ * @param encryptionKeyFactory for creation of an encryption key
+ * @param accessToken connection block access token
+ * @param datanodeId ID of destination DataNode
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ public Peer peerSend(Peer peer, DataEncryptionKeyFactory encryptionKeyFactory,
+ Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+ throws IOException {
+ IOStreamPair ios = checkTrustAndSend(getPeerAddress(peer),
+ peer.getOutputStream(), peer.getInputStream(), encryptionKeyFactory,
+ accessToken, datanodeId);
+ // TODO: Consider renaming EncryptedPeer to SaslPeer.
+ return ios != null ? new EncryptedPeer(peer, ios) : peer;
+ }
+
+ /**
+ * Sends client SASL negotiation for a socket if required.
+ *
+ * @param socket connection socket
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param encryptionKeyFactory for creation of an encryption key
+ * @param accessToken connection block access token
+ * @param datanodeId ID of destination DataNode
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ public IOStreamPair socketSend(Socket socket, OutputStream underlyingOut,
+ InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
+ Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+ throws IOException {
+ IOStreamPair ios = checkTrustAndSend(socket.getInetAddress(), underlyingOut,
+ underlyingIn, encryptionKeyFactory, accessToken, datanodeId);
+ return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
+ }
+
+ /**
+ * Checks if an address is already trusted and then sends client SASL
+ * negotiation if required.
+ *
+ * @param addr connection address
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param encryptionKeyFactory for creation of an encryption key
+ * @param accessToken connection block access token
+ * @param datanodeId ID of destination DataNode
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ private IOStreamPair checkTrustAndSend(InetAddress addr,
+ OutputStream underlyingOut, InputStream underlyingIn,
+ DataEncryptionKeyFactory encryptionKeyFactory,
+ Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+ throws IOException {
+ if (!trustedChannelResolver.isTrusted() &&
+ !trustedChannelResolver.isTrusted(addr)) {
+ // The encryption key factory only returns a key if encryption is enabled.
+ DataEncryptionKey encryptionKey =
+ encryptionKeyFactory.newDataEncryptionKey();
+ return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken,
+ datanodeId);
+ } else {
+ LOG.debug(
+ "SASL client skipping handshake on trusted connection for addr = {}, "
+ + "datanodeId = {}", addr, datanodeId);
+ return null;
+ }
+ }
+
+ /**
+ * Sends client SASL negotiation if required. Determines the correct type of
+ * SASL handshake based on configuration.
+ *
+ * @param addr connection address
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param encryptionKey for an encrypted SASL handshake
+ * @param accessToken connection block access token
+ * @param datanodeId ID of destination DataNode
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ private IOStreamPair send(InetAddress addr, OutputStream underlyingOut,
+ InputStream underlyingIn, DataEncryptionKey encryptionKey,
+ Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+ throws IOException {
+ if (encryptionKey != null) {
+ LOG.debug(
+ "SASL client doing encrypted handshake for addr = {}, datanodeId = {}",
+ addr, datanodeId);
+ return getEncryptedStreams(underlyingOut, underlyingIn,
+ encryptionKey);
+ } else if (!UserGroupInformation.isSecurityEnabled()) {
+ LOG.debug(
+ "SASL client skipping handshake in unsecured configuration for "
+ + "addr = {}, datanodeId = {}", addr, datanodeId);
+ return null;
+ } else if (datanodeId.getXferPort() < 1024) {
+ LOG.debug(
+ "SASL client skipping handshake in secured configuration with "
+ + "privileged port for addr = {}, datanodeId = {}", addr, datanodeId);
+ return null;
+ } else if (accessToken.getIdentifier().length == 0) {
+ if (!fallbackToSimpleAuthAllowed) {
+ throw new IOException(
+ "No block access token was provided (insecure cluster), but this " +
+ "client is configured to allow only secure connections.");
+ }
+ LOG.debug(
+ "SASL client skipping handshake in secured configuration with "
+ + "unsecured cluster for addr = {}, datanodeId = {}", addr, datanodeId);
+ return null;
+ } else {
+ LOG.debug(
+ "SASL client doing general handshake for addr = {}, datanodeId = {}",
+ addr, datanodeId);
+ return getSaslStreams(addr, underlyingOut, underlyingIn, accessToken,
+ datanodeId);
+ }
+ }
+
+ /**
+ * Sends client SASL negotiation for specialized encrypted handshake.
+ *
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param encryptionKey for an encrypted SASL handshake
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ private IOStreamPair getEncryptedStreams(OutputStream underlyingOut,
+ InputStream underlyingIn, DataEncryptionKey encryptionKey)
+ throws IOException {
+ Map<String, String> saslProps = createSaslPropertiesForEncryption(
+ encryptionKey.encryptionAlgorithm);
+
+ LOG.debug("Client using encryption algorithm {}",
+ encryptionKey.encryptionAlgorithm);
+
+ String userName = getUserNameFromEncryptionKey(encryptionKey);
+ char[] password = encryptionKeyToPassword(encryptionKey.encryptionKey);
+ CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
+ password);
+ return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
+ callbackHandler);
+ }
+
+ /**
+ * The SASL username for an encrypted handshake consists of the keyId,
+ * blockPoolId, and nonce with the first two encoded as Strings, and the third
+ * encoded using Base64. The fields are each separated by a single space.
+ *
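+ * For example (hypothetical values): keyId 42, blockPoolId "BP-1", and a
+ * nonce whose Base64 encoding is "bm9uY2U=" produce the user name
+ * "42 BP-1 bm9uY2U=".
+ *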
+ * @param encryptionKey the encryption key to encode as a SASL username.
+ * @return encoded username containing keyId, blockPoolId, and nonce
+ */
+ private static String getUserNameFromEncryptionKey(
+ DataEncryptionKey encryptionKey) {
+ return encryptionKey.keyId + NAME_DELIMITER +
+ encryptionKey.blockPoolId + NAME_DELIMITER +
+ new String(Base64.encodeBase64(encryptionKey.nonce, false), Charsets.UTF_8);
+ }
+
+ /**
+ * Sets user name and password when asked by the client-side SASL object.
+ */
+ private static final class SaslClientCallbackHandler
+ implements CallbackHandler {
+
+ private final char[] password;
+ private final String userName;
+
+ /**
+ * Creates a new SaslClientCallbackHandler.
+ *
+ * @param userName SASL user name
+ * @param password SASL password
+ */
+ public SaslClientCallbackHandler(String userName, char[] password) {
+ this.password = password;
+ this.userName = userName;
+ }
+
+ @Override
+ public void handle(Callback[] callbacks) throws IOException,
+ UnsupportedCallbackException {
+ NameCallback nc = null;
+ PasswordCallback pc = null;
+ RealmCallback rc = null;
+ for (Callback callback : callbacks) {
+ if (callback instanceof RealmChoiceCallback) {
+ continue;
+ } else if (callback instanceof NameCallback) {
+ nc = (NameCallback) callback;
+ } else if (callback instanceof PasswordCallback) {
+ pc = (PasswordCallback) callback;
+ } else if (callback instanceof RealmCallback) {
+ rc = (RealmCallback) callback;
+ } else {
+ throw new UnsupportedCallbackException(callback,
+ "Unrecognized SASL client callback");
+ }
+ }
+ if (nc != null) {
+ nc.setName(userName);
+ }
+ if (pc != null) {
+ pc.setPassword(password);
+ }
+ if (rc != null) {
+ rc.setText(rc.getDefaultText());
+ }
+ }
+ }
+
+ /**
+ * Sends client SASL negotiation for general-purpose handshake.
+ *
+ * @param addr connection address
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param accessToken connection block access token
+ * @param datanodeId ID of destination DataNode
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ private IOStreamPair getSaslStreams(InetAddress addr,
+ OutputStream underlyingOut, InputStream underlyingIn,
+ Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
+ throws IOException {
+ if (saslPropsResolver == null) {
+ throw new IOException(String.format("Cannot create a secured " +
+ "connection if DataNode listens on unprivileged port (%d) and no " +
+ "protection is defined in configuration property %s.",
+ datanodeId.getXferPort(), DFS_DATA_TRANSFER_PROTECTION_KEY));
+ }
+ Map<String, String> saslProps = saslPropsResolver.getClientProperties(addr);
+
+ String userName = buildUserName(accessToken);
+ char[] password = buildClientPassword(accessToken);
+ CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
+ password);
+ return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
+ callbackHandler);
+ }
+
+ /**
+ * Builds the client's user name for the general-purpose handshake, consisting
+ * of the base64-encoded serialized block access token identifier. Note that
+ * this includes only the token identifier, not the token itself, which would
+ * include the password. The password is a shared secret, and we must not
+ * write it on the network during the SASL authentication exchange.
+ *
+ * @param blockToken for block access
+ * @return SASL user name
+ */
+ private static String buildUserName(Token<BlockTokenIdentifier> blockToken) {
+ return new String(Base64.encodeBase64(blockToken.getIdentifier(), false),
+ Charsets.UTF_8);
+ }
+
+ /**
+ * Calculates the password on the client side for the general-purpose
+ * handshake. The password consists of the block access token's password.
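+ * This must stay in sync with the server-side calculation in
+ * SaslDataTransferServer#buildServerPassword, which applies the same Base64
+ * encoding to the password retrieved from its secret manager.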
+ *
+ * @param blockToken for block access
+ * @return SASL password
+ */
+ private char[] buildClientPassword(Token<BlockTokenIdentifier> blockToken) {
+ return new String(Base64.encodeBase64(blockToken.getPassword(), false),
+ Charsets.UTF_8).toCharArray();
+ }
+
+ /**
+ * This method actually executes the client-side SASL handshake.
+ *
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param userName SASL user name
+ * @param saslProps properties of SASL negotiation
+ * @param callbackHandler for responding to SASL callbacks
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ private IOStreamPair doSaslHandshake(OutputStream underlyingOut,
+ InputStream underlyingIn, String userName, Map<String, String> saslProps,
+ CallbackHandler callbackHandler) throws IOException {
+
+ DataOutputStream out = new DataOutputStream(underlyingOut);
+ DataInputStream in = new DataInputStream(underlyingIn);
+
+ SaslParticipant sasl = SaslParticipant.createClientSaslParticipant(userName,
+ saslProps, callbackHandler);
+
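+ // Handshake sequence from the client's perspective: send the magic number
+ // and an empty initial response, then in step 1 read the server's challenge
+ // and reply, and in step 2 read and verify the server's final message.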
+ out.writeInt(SASL_TRANSFER_MAGIC_NUMBER);
+ out.flush();
+
+ try {
+ // Start of handshake - "initial response" in SASL terminology.
+ sendSaslMessage(out, new byte[0]);
+
+ // step 1
+ performSaslStep1(out, in, sasl);
+
+ // step 2 (client-side only)
+ byte[] remoteResponse = readSaslMessage(in);
+ byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
+ assert localResponse == null;
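+ // With DIGEST-MD5, the server's final message is a response authentication
+ // token rather than a new challenge, so evaluating it is expected to yield
+ // no further client response.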
+
+ // SASL handshake is complete
+ checkSaslComplete(sasl, saslProps);
+
+ return sasl.createStreamPair(out, in);
+ } catch (IOException ioe) {
+ sendGenericSaslErrorMessage(out, ioe.getMessage());
+ throw ioe;
+ }
+ }
+}
Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java?rev=1610479&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java Mon Jul 14 18:28:02 2014
@@ -0,0 +1,381 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Map;
+
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.NameCallback;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.sasl.AuthorizeCallback;
+import javax.security.sasl.RealmCallback;
+import javax.security.sasl.SaslException;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;
+import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.datanode.DNConf;
+import org.apache.hadoop.security.SaslPropertiesResolver;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+
+/**
+ * Negotiates SASL for DataTransferProtocol on behalf of a server. There are
+ * two supported variants of SASL negotiation: either a general-purpose
+ * negotiation supporting any quality of protection, or a specialized
+ * negotiation that enforces privacy as the quality of protection using a
+ * cryptographically strong encryption key.
+ *
+ * This class is used in the DataNode for handling inbound connections.
+ */
+@InterfaceAudience.Private
+public class SaslDataTransferServer {
+
+ private static final Logger LOG = LoggerFactory.getLogger(
+ SaslDataTransferServer.class);
+
+ private final BlockPoolTokenSecretManager blockPoolTokenSecretManager;
+ private final DNConf dnConf;
+
+ /**
+ * Creates a new SaslDataTransferServer.
+ *
+ * @param dnConf configuration of DataNode
+ * @param blockPoolTokenSecretManager used for checking block access tokens
+ * and encryption keys
+ */
+ public SaslDataTransferServer(DNConf dnConf,
+ BlockPoolTokenSecretManager blockPoolTokenSecretManager) {
+ this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
+ this.dnConf = dnConf;
+ }
+
+ /**
+ * Receives SASL negotiation from a peer on behalf of a server.
+ *
+ * @param peer connection peer
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param datanodeId ID of DataNode accepting connection
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ public IOStreamPair receive(Peer peer, OutputStream underlyingOut,
+ InputStream underlyingIn, DatanodeID datanodeId) throws IOException {
+ if (dnConf.getEncryptDataTransfer()) {
+ LOG.debug(
+ "SASL server doing encrypted handshake for peer = {}, datanodeId = {}",
+ peer, datanodeId);
+ return getEncryptedStreams(peer, underlyingOut, underlyingIn);
+ } else if (!UserGroupInformation.isSecurityEnabled()) {
+ LOG.debug(
+ "SASL server skipping handshake in unsecured configuration for "
+ + "peer = {}, datanodeId = {}", peer, datanodeId);
+ return new IOStreamPair(underlyingIn, underlyingOut);
+ } else if (datanodeId.getXferPort() < 1024) {
+ LOG.debug(
+ "SASL server skipping handshake in unsecured configuration for "
+ + "peer = {}, datanodeId = {}", peer, datanodeId);
+ return new IOStreamPair(underlyingIn, underlyingOut);
+ } else {
+ LOG.debug(
+ "SASL server doing general handshake for peer = {}, datanodeId = {}",
+ peer, datanodeId);
+ return getSaslStreams(peer, underlyingOut, underlyingIn, datanodeId);
+ }
+ }
+
+ /**
+ * Receives SASL negotiation for specialized encrypted handshake.
+ *
+ * @param peer connection peer
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ private IOStreamPair getEncryptedStreams(Peer peer,
+ OutputStream underlyingOut, InputStream underlyingIn) throws IOException {
+ if (peer.hasSecureChannel() ||
+ dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) {
+ return new IOStreamPair(underlyingIn, underlyingOut);
+ }
+
+ Map<String, String> saslProps = createSaslPropertiesForEncryption(
+ dnConf.getEncryptionAlgorithm());
+
+ LOG.debug("Server using encryption algorithm {}",
+ dnConf.getEncryptionAlgorithm());
+
+ CallbackHandler callbackHandler = new SaslServerCallbackHandler(
+ new PasswordFunction() {
+ @Override
+ public char[] apply(String userName) throws IOException {
+ return encryptionKeyToPassword(getEncryptionKeyFromUserName(userName));
+ }
+ });
+ return doSaslHandshake(underlyingOut, underlyingIn, saslProps,
+ callbackHandler);
+ }
+
+ /**
+ * The SASL handshake for encrypted vs. general-purpose uses different logic
+ * for determining the password. This interface is used to parameterize that
+ * logic. It's similar to a Guava Function, but we need to let it throw
+ * exceptions.
+ */
+ private interface PasswordFunction {
+
+ /**
+ * Returns the SASL password for the given user name.
+ *
+ * @param userName SASL user name
+ * @return SASL password
+ * @throws IOException for any error
+ */
+ char[] apply(String userName) throws IOException;
+ }
+
+ /**
+ * Sets user name and password when asked by the server-side SASL object.
+ */
+ private static final class SaslServerCallbackHandler
+ implements CallbackHandler {
+
+ private final PasswordFunction passwordFunction;
+
+ /**
+ * Creates a new SaslServerCallbackHandler.
+ *
+ * @param passwordFunction for determining the user's password
+ */
+ public SaslServerCallbackHandler(PasswordFunction passwordFunction) {
+ this.passwordFunction = passwordFunction;
+ }
+
+ @Override
+ public void handle(Callback[] callbacks) throws IOException,
+ UnsupportedCallbackException {
+ NameCallback nc = null;
+ PasswordCallback pc = null;
+ AuthorizeCallback ac = null;
+ for (Callback callback : callbacks) {
+ if (callback instanceof AuthorizeCallback) {
+ ac = (AuthorizeCallback) callback;
+ } else if (callback instanceof PasswordCallback) {
+ pc = (PasswordCallback) callback;
+ } else if (callback instanceof NameCallback) {
+ nc = (NameCallback) callback;
+ } else if (callback instanceof RealmCallback) {
+ continue; // realm is ignored
+ } else {
+ throw new UnsupportedCallbackException(callback,
+ "Unrecognized SASL DIGEST-MD5 Callback: " + callback);
+ }
+ }
+
+ if (pc != null) {
+ pc.setPassword(passwordFunction.apply(nc.getDefaultName()));
+ }
+
+ if (ac != null) {
+ ac.setAuthorized(true);
+ ac.setAuthorizedID(ac.getAuthorizationID());
+ }
+ }
+ }
+
+ /**
+ * Given a secret manager and a username encoded for the encrypted handshake,
+ * determine the encryption key.
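+ * This reverses the user name encoding performed on the client side by
+ * SaslDataTransferClient#getUserNameFromEncryptionKey.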
+ *
+ * @param userName containing the keyId, blockPoolId, and nonce.
+ * @return secret encryption key.
+ * @throws IOException for any error
+ */
+ private byte[] getEncryptionKeyFromUserName(String userName)
+ throws IOException {
+ String[] nameComponents = userName.split(NAME_DELIMITER);
+ if (nameComponents.length != 3) {
+ throw new IOException("Provided name '" + userName + "' has " +
+ nameComponents.length + " components instead of the expected 3.");
+ }
+ int keyId = Integer.parseInt(nameComponents[0]);
+ String blockPoolId = nameComponents[1];
+ byte[] nonce = Base64.decodeBase64(nameComponents[2]);
+ return blockPoolTokenSecretManager.retrieveDataEncryptionKey(keyId,
+ blockPoolId, nonce);
+ }
+
+ /**
+ * Receives SASL negotiation for general-purpose handshake.
+ *
+ * @param peer connection peer
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param datanodeId ID of DataNode accepting connection
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ private IOStreamPair getSaslStreams(Peer peer, OutputStream underlyingOut,
+ InputStream underlyingIn, final DatanodeID datanodeId) throws IOException {
+ SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
+ if (saslPropsResolver == null) {
+ throw new IOException(String.format("Cannot create a secured " +
+ "connection if DataNode listens on unprivileged port (%d) and no " +
+ "protection is defined in configuration property %s.",
+ datanodeId.getXferPort(), DFS_DATA_TRANSFER_PROTECTION_KEY));
+ }
+ Map<String, String> saslProps = saslPropsResolver.getServerProperties(
+ getPeerAddress(peer));
+
+ CallbackHandler callbackHandler = new SaslServerCallbackHandler(
+ new PasswordFunction() {
+ @Override
+ public char[] apply(String userName) throws IOException {
+ return buildServerPassword(userName);
+ }
+ });
+ return doSaslHandshake(underlyingOut, underlyingIn, saslProps,
+ callbackHandler);
+ }
+
+ /**
+ * Calculates the expected correct password on the server side for the
+ * general-purpose handshake. The password consists of the block access
+ * token's password (known to the DataNode via its secret manager). This
+ * expects that the client has supplied a user name consisting of its
+ * serialized block access token identifier.
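+ * The result must match the client-side calculation in
+ * SaslDataTransferClient#buildClientPassword.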
+ *
+ * @param userName SASL user name containing serialized block access token
+ * identifier
+ * @return expected correct SASL password
+ * @throws IOException for any error
+ */
+ private char[] buildServerPassword(String userName) throws IOException {
+ BlockTokenIdentifier identifier = deserializeIdentifier(userName);
+ byte[] tokenPassword = blockPoolTokenSecretManager.retrievePassword(
+ identifier);
+ return (new String(Base64.encodeBase64(tokenPassword, false),
+ Charsets.UTF_8)).toCharArray();
+ }
+
+ /**
+ * Deserializes a base64-encoded binary representation of a block access
+ * token.
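+ * This is the inverse of SaslDataTransferClient#buildUserName, which encodes
+ * the identifier that the client presents as its SASL user name.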
+ *
+ * @param str String to deserialize
+ * @return BlockTokenIdentifier deserialized from str
+ * @throws IOException if there is any I/O error
+ */
+ private BlockTokenIdentifier deserializeIdentifier(String str)
+ throws IOException {
+ BlockTokenIdentifier identifier = new BlockTokenIdentifier();
+ identifier.readFields(new DataInputStream(new ByteArrayInputStream(
+ Base64.decodeBase64(str))));
+ return identifier;
+ }
+
+ /**
+ * This method actually executes the server-side SASL handshake.
+ *
+ * @param underlyingOut connection output stream
+ * @param underlyingIn connection input stream
+ * @param saslProps properties of SASL negotiation
+ * @param callbackHandler for responding to SASL callbacks
+ * @return new pair of streams, wrapped after SASL negotiation
+ * @throws IOException for any error
+ */
+ private IOStreamPair doSaslHandshake(OutputStream underlyingOut,
+ InputStream underlyingIn, Map<String, String> saslProps,
+ CallbackHandler callbackHandler) throws IOException {
+
+ DataInputStream in = new DataInputStream(underlyingIn);
+ DataOutputStream out = new DataOutputStream(underlyingOut);
+
+ SaslParticipant sasl = SaslParticipant.createServerSaslParticipant(saslProps,
+ callbackHandler);
+
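+ // Handshake sequence from the server's perspective: read and validate the
+ // magic number, then in step 1 read the client's initial response and send
+ // a challenge, and in step 2 read the client's response and send the final
+ // message that completes the negotiation.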
+ int magicNumber = in.readInt();
+ if (magicNumber != SASL_TRANSFER_MAGIC_NUMBER) {
+ throw new InvalidMagicNumberException(magicNumber);
+ }
+ try {
+ // step 1
+ performSaslStep1(out, in, sasl);
+
+ // step 2 (server-side only)
+ byte[] remoteResponse = readSaslMessage(in);
+ byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
+ sendSaslMessage(out, localResponse);
+
+ // SASL handshake is complete
+ checkSaslComplete(sasl, saslProps);
+
+ return sasl.createStreamPair(out, in);
+ } catch (IOException ioe) {
+ if (ioe instanceof SaslException &&
+ ioe.getCause() != null &&
+ ioe.getCause() instanceof InvalidEncryptionKeyException) {
+ // This could just be because the client is long-lived and hasn't gotten
+ // a new encryption key from the NN in a while. Upon receiving this
+ // error, the client will get a new encryption key from the NN and retry
+ // connecting to this DN.
+ sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage());
+ } else {
+ sendGenericSaslErrorMessage(out, ioe.getMessage());
+ }
+ throw ioe;
+ }
+ }
+
+ /**
+ * Sends a SASL negotiation message indicating an invalid key error.
+ *
+ * @param out stream to receive message
+ * @param message to send
+ * @throws IOException for any error
+ */
+ private static void sendInvalidKeySaslErrorMessage(DataOutputStream out,
+ String message) throws IOException {
+ sendSaslMessage(out, DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY, null,
+ message);
+ }
+}
Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java?rev=1610479&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java Mon Jul 14 18:28:02 2014
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.util.Map;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslClient;
+import javax.security.sasl.SaslException;
+import javax.security.sasl.SaslServer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.security.SaslInputStream;
+import org.apache.hadoop.security.SaslOutputStream;
+
+/**
+ * Strongly inspired by Thrift's TSaslTransport class.
+ *
+ * Used to abstract over the <code>SaslServer</code> and
+ * <code>SaslClient</code> classes, which share a lot of their interface, but
+ * unfortunately don't share a common superclass.
+ */
+@InterfaceAudience.Private
+class SaslParticipant {
+
+ // This has to be set as part of the SASL spec, but it doesn't matter for
+ // our purposes; it just may not be empty. It's sent over the wire, so use
+ // a short string.
+ private static final String SERVER_NAME = "0";
+ private static final String PROTOCOL = "hdfs";
+ private static final String MECHANISM = "DIGEST-MD5";
+
+ // One of these will always be null.
+ private final SaslServer saslServer;
+ private final SaslClient saslClient;
+
+ /**
+ * Creates a SaslParticipant wrapping a SaslServer.
+ *
+ * @param saslProps properties of SASL negotiation
+ * @param callbackHandler for handling all SASL callbacks
+ * @return SaslParticipant wrapping SaslServer
+ * @throws SaslException for any error
+ */
+ public static SaslParticipant createServerSaslParticipant(
+ Map<String, String> saslProps, CallbackHandler callbackHandler)
+ throws SaslException {
+ return new SaslParticipant(Sasl.createSaslServer(MECHANISM,
+ PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
+ }
+
+ /**
+ * Creates a SaslParticipant wrapping a SaslClient.
+ *
+ * @param userName SASL user name
+ * @param saslProps properties of SASL negotiation
+ * @param callbackHandler for handling all SASL callbacks
+ * @return SaslParticipant wrapping SaslClient
+ * @throws SaslException for any error
+ */
+ public static SaslParticipant createClientSaslParticipant(String userName,
+ Map<String, String> saslProps, CallbackHandler callbackHandler)
+ throws SaslException {
+ return new SaslParticipant(Sasl.createSaslClient(new String[] { MECHANISM },
+ userName, PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
+ }
+
+ /**
+ * Private constructor wrapping a SaslServer.
+ *
+ * @param saslServer to wrap
+ */
+ private SaslParticipant(SaslServer saslServer) {
+ this.saslServer = saslServer;
+ this.saslClient = null;
+ }
+
+ /**
+ * Private constructor wrapping a SaslClient.
+ *
+ * @param saslClient to wrap
+ */
+ private SaslParticipant(SaslClient saslClient) {
+ this.saslServer = null;
+ this.saslClient = saslClient;
+ }
+
+ /**
+ * @see SaslServer#evaluateResponse(byte[])
+ * @see SaslClient#evaluateChallenge(byte[])
+ */
+ public byte[] evaluateChallengeOrResponse(byte[] challengeOrResponse)
+ throws SaslException {
+ if (saslClient != null) {
+ return saslClient.evaluateChallenge(challengeOrResponse);
+ } else {
+ return saslServer.evaluateResponse(challengeOrResponse);
+ }
+ }
+
+ /**
+ * After successful SASL negotiation, returns the negotiated quality of
+ * protection.
+ *
+ * @return negotiated quality of protection
+ */
+ public String getNegotiatedQop() {
+ if (saslClient != null) {
+ return (String) saslClient.getNegotiatedProperty(Sasl.QOP);
+ } else {
+ return (String) saslServer.getNegotiatedProperty(Sasl.QOP);
+ }
+ }
+
+ /**
+ * Returns true if SASL negotiation is complete.
+ *
+ * @return true if SASL negotiation is complete
+ */
+ public boolean isComplete() {
+ if (saslClient != null) {
+ return saslClient.isComplete();
+ } else {
+ return saslServer.isComplete();
+ }
+ }
+
+ /**
+ * Returns a pair of input and output streams whose communication may
+ * henceforth be encrypted, depending on the negotiated quality of protection.
+ *
+ * @param out output stream to wrap
+ * @param in input stream to wrap
+ * @return IOStreamPair wrapping the streams
+ */
+ public IOStreamPair createStreamPair(DataOutputStream out,
+ DataInputStream in) {
+ if (saslClient != null) {
+ return new IOStreamPair(
+ new SaslInputStream(in, saslClient),
+ new SaslOutputStream(out, saslClient));
+ } else {
+ return new IOStreamPair(
+ new SaslInputStream(in, saslServer),
+ new SaslOutputStream(out, saslServer));
+ }
+ }
+}