Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2014/10/31 06:52:11 UTC
git commit: HDFS-7313. Support optional configuration of AES cipher
suite on DataTransferProtocol. Contributed by Chris Nauroth.
Repository: hadoop
Updated Branches:
refs/heads/trunk c637d6d9d -> 5573b3476
HDFS-7313. Support optional configuration of AES cipher suite on DataTransferProtocol. Contributed by Chris Nauroth.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5573b347
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5573b347
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5573b347
Branch: refs/heads/trunk
Commit: 5573b3476a5a6fce0ac99c654a9a9ec90f744a20
Parents: c637d6d
Author: cnauroth <cn...@apache.org>
Authored: Thu Oct 30 22:48:25 2014 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Thu Oct 30 22:48:25 2014 -0700
----------------------------------------------------------------------
.../src/site/apt/SecureMode.apt.vm | 27 ++++++
.../apache/hadoop/test/GenericTestUtils.java | 6 ++
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 1 +
.../datatransfer/sasl/DataTransferSaslUtil.java | 16 +++-
.../sasl/SaslDataTransferClient.java | 19 +++-
.../hdfs/server/datanode/DataXceiver.java | 19 ++--
.../src/main/resources/hdfs-default.xml | 13 +++
.../hadoop/hdfs/TestEncryptedTransfer.java | 93 ++++++++++++++++++--
9 files changed, 177 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5573b347/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
index 2085d5b..8e17e89 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
@@ -314,6 +314,21 @@ KVNO Timestamp Principal
You need to set <<<dfs.encrypt.data.transfer>>> to <<<"true">>> in the hdfs-site.xml
in order to activate data encryption for data transfer protocol of DataNode.
+ Optionally, you may set <<<dfs.encrypt.data.transfer.algorithm>>> to either
+ "3des" or "rc4" to choose the specific encryption algorithm. If unspecified,
+ then the configured JCE default on the system is used, which is usually 3DES.
+
+ Setting <<<dfs.encrypt.data.transfer.cipher.suites>>> to
+ <<<AES/CTR/NoPadding>>> activates AES encryption. By default, this is
+ unspecified, so AES is not used. When AES is used, the algorithm specified in
+ <<<dfs.encrypt.data.transfer.algorithm>>> is still used during an initial key
+ exchange. The AES key bit length can be configured by setting
+ <<<dfs.encrypt.data.transfer.cipher.key.bitlength>>> to 128, 192 or 256. The
+ default is 128.
+
+ AES offers the greatest cryptographic strength and the best performance. At
+ this time, 3DES and RC4 have been used more often in Hadoop clusters.
+
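For illustration, the same properties can also be set programmatically on a Hadoop Configuration object. This is a minimal sketch; the class name is hypothetical and a secured cluster with the rest of hdfs-site.xml in place is assumed:

import org.apache.hadoop.conf.Configuration;

// Hypothetical sketch: enable wire encryption and opt in to AES for
// DataTransferProtocol, using the property names documented above.
public class TransferEncryptionConfigSketch {
  public static Configuration create() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.encrypt.data.transfer", true);
    // "3des" or "rc4"; once AES is enabled this only protects the key exchange.
    conf.set("dfs.encrypt.data.transfer.algorithm", "3des");
    // Opt in to AES for the bulk data transfer.
    conf.set("dfs.encrypt.data.transfer.cipher.suites", "AES/CTR/NoPadding");
    // Optional AES key length: 128 (default), 192 or 256.
    conf.setInt("dfs.encrypt.data.transfer.cipher.key.bitlength", 256);
    return conf;
  }
}
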
** Data Encryption on HTTP
Data transfer between the Web-console and clients is protected by using SSL (HTTPS).
@@ -491,6 +506,18 @@ Configuration for <<<conf/hdfs-site.xml>>>
| <<<dfs.encrypt.data.transfer>>> | <false> | |
| | | set to <<<true>>> when using data encryption |
*-------------------------+-------------------------+------------------------+
+| <<<dfs.encrypt.data.transfer.algorithm>>> | | |
+| | | optionally set to <<<3des>>> or <<<rc4>>> when using data encryption to |
+| | | control encryption algorithm |
+*-------------------------+-------------------------+------------------------+
+| <<<dfs.encrypt.data.transfer.cipher.suites>>> | | |
+| | | optionally set to <<<AES/CTR/NoPadding>>> to activate AES encryption |
+| | | when using data encryption |
+*-------------------------+-------------------------+------------------------+
+| <<<dfs.encrypt.data.transfer.cipher.key.bitlength>>> | | |
+| | | optionally set to <<<128>>>, <<<192>>> or <<<256>>> to control key bit |
+| | | length when using AES with data encryption |
+*-------------------------+-------------------------+------------------------+
| <<<dfs.data.transfer.protection>>> | | |
| | | <authentication> : authentication only \
| | | <integrity> : integrity check in addition to authentication \
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5573b347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 06376e5..7be71e9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -328,6 +328,12 @@ public abstract class GenericTestUtils {
}
}
+ public static void assertDoesNotMatch(String output, String pattern) {
+ Assert.assertFalse("Expected output to match /" + pattern + "/" +
+ " but got:\n" + output,
+ Pattern.compile(pattern).matcher(output).find());
+ }
+
public static void assertMatches(String output, String pattern) {
Assert.assertTrue("Expected output to match /" + pattern + "/" +
" but got:\n" + output,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5573b347/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 47eea75..7010c4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -674,6 +674,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6385. Show when block deletion will start after NameNode startup in
WebUI. (cnauroth)
+ HDFS-7313. Support optional configuration of AES cipher suite on
+ DataTransferProtocol. (cnauroth)
+
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5573b347/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 59eaa20..488bf0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -602,6 +602,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY = "dfs.encrypt.data.transfer.cipher.key.bitlength";
public static final int DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
+ public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY = "dfs.encrypt.data.transfer.cipher.suites";
public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5573b347/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
index 2d5e13c..398d44c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
import java.io.IOException;
@@ -249,14 +250,25 @@ public final class DataTransferSaslUtil {
/**
* Negotiate a cipher option which server supports.
*
+ * @param conf the configuration
* @param options the cipher options which client supports
* @return CipherOption negotiated cipher option
*/
public static CipherOption negotiateCipherOption(Configuration conf,
- List<CipherOption> options) {
+ List<CipherOption> options) throws IOException {
+ // Negotiate cipher suites if configured. Currently, the only supported
+ // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
+ // values for future expansion.
+ String cipherSuites = conf.get(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
+ if (cipherSuites == null || cipherSuites.isEmpty()) {
+ return null;
+ }
+ if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
+ throw new IOException(String.format("Invalid cipher suite, %s=%s",
+ DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
+ }
if (options != null) {
for (CipherOption option : options) {
- // Currently we support AES/CTR/NoPadding
CipherSuite suite = option.getCipherSuite();
if (suite == CipherSuite.AES_CTR_NOPADDING) {
int keyLen = conf.getInt(
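To make the negotiation flow easier to follow, here is a stripped-down, hypothetical rendering of the same decision outside of the Hadoop classes; the method name and the simplified String return type are illustrative only:

import java.io.IOException;
import java.util.List;

// Hypothetical sketch of the server-side suite negotiation: return the suite
// both sides agree on, null when AES is not configured or not offered by the
// client, and throw when the configured value is not recognized.
public class SuiteNegotiationSketch {
  static String negotiate(String configuredSuites, List<String> clientOffered)
      throws IOException {
    if (configuredSuites == null || configuredSuites.isEmpty()) {
      return null; // AES not configured; fall back to dfs.encrypt.data.transfer.algorithm
    }
    if (!configuredSuites.equals("AES/CTR/NoPadding")) {
      throw new IOException("Invalid cipher suite, "
          + "dfs.encrypt.data.transfer.cipher.suites=" + configuredSuites);
    }
    if (clientOffered != null) {
      for (String suite : clientOffered) {
        if ("AES/CTR/NoPadding".equals(suite)) {
          return suite; // both client and server support AES/CTR/NoPadding
        }
      }
    }
    return null; // client did not offer AES
  }
}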
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5573b347/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
index cfcc91f..98cdcfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
import java.io.DataInputStream;
@@ -450,10 +451,20 @@ public class SaslDataTransferClient {
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
List<CipherOption> cipherOptions = null;
if (requestedQopContainsPrivacy(saslProps)) {
- // Negotiation cipher options
- CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING);
- cipherOptions = Lists.newArrayListWithCapacity(1);
- cipherOptions.add(option);
+ // Negotiate cipher suites if configured. Currently, the only supported
+ // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple
+ // values for future expansion.
+ String cipherSuites = conf.get(
+ DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
+ if (cipherSuites != null && !cipherSuites.isEmpty()) {
+ if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) {
+ throw new IOException(String.format("Invalid cipher suite, %s=%s",
+ DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites));
+ }
+ CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING);
+ cipherOptions = Lists.newArrayListWithCapacity(1);
+ cipherOptions.add(option);
+ }
}
sendSaslMessageAndNegotiationCipherOptions(out, localResponse,
cipherOptions);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5573b347/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 9dfd33b..2a45a42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
@@ -179,11 +180,19 @@ class DataXceiver extends Receiver implements Runnable {
dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
InputStream input = socketIn;
- IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
- socketIn, datanode.getDatanodeId());
- input = new BufferedInputStream(saslStreams.in,
- HdfsConstants.SMALL_BUFFER_SIZE);
- socketOut = saslStreams.out;
+ try {
+ IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
+ socketIn, datanode.getDatanodeId());
+ input = new BufferedInputStream(saslStreams.in,
+ HdfsConstants.SMALL_BUFFER_SIZE);
+ socketOut = saslStreams.out;
+ } catch (InvalidMagicNumberException imne) {
+ LOG.info("Failed to read expected encryption handshake from client " +
+ "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
+ "is running an older version of Hadoop which does not support " +
+ "encryption");
+ return;
+ }
super.initialize(new DataInputStream(input));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5573b347/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 38d3c50..31145ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1517,6 +1517,19 @@
Note that if AES is supported by both the client and server then this
encryption algorithm will only be used to initially transfer keys for AES.
+ (See dfs.encrypt.data.transfer.cipher.suites.)
+ </description>
+</property>
+
+<property>
+ <name>dfs.encrypt.data.transfer.cipher.suites</name>
+ <value></value>
+ <description>
+ This value may be either undefined or AES/CTR/NoPadding. If defined, then
+ dfs.encrypt.data.transfer uses the specified cipher suite for data
+ encryption. If not defined, then only the algorithm specified in
+ dfs.encrypt.data.transfer.algorithm is used. By default, the property is
+ not defined.
</description>
</property>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5573b347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index 7f6ad1a..30484d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -133,12 +134,12 @@ public class TestEncryptedTransfer {
fs.close();
- if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
+ if (resolverClazz == null) {
// Test client and server negotiate cipher option
- GenericTestUtils.assertMatches(logs.getOutput(),
+ GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
- GenericTestUtils.assertMatches(logs1.getOutput(),
+ GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
@@ -174,9 +175,28 @@ public class TestEncryptedTransfer {
.build();
fs = getFileSystem(conf);
- assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
- assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+ LogFactory.getLog(SaslDataTransferServer.class));
+ LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
+ LogFactory.getLog(DataTransferSaslUtil.class));
+ try {
+ assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
+ assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
+ } finally {
+ logs.stopCapturing();
+ logs1.stopCapturing();
+ }
+
fs.close();
+
+ if (resolverClazz == null) {
+ // Verify client and server did not negotiate a cipher option
+ GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
+ "Server using cipher suite");
+ // Check the IOStreamPair
+ GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
+ "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
+ }
} finally {
if (cluster != null) {
cluster.shutdown();
@@ -185,6 +205,61 @@ public class TestEncryptedTransfer {
}
@Test
+ public void testEncryptedReadWithAES() throws IOException {
+ MiniDFSCluster cluster = null;
+ try {
+ Configuration conf = new Configuration();
+ conf.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
+ "AES/CTR/NoPadding");
+ cluster = new MiniDFSCluster.Builder(conf).build();
+
+ FileSystem fs = getFileSystem(conf);
+ writeTestDataToFile(fs);
+ assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
+ FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
+ fs.close();
+ cluster.shutdown();
+
+ setEncryptionConfigKeys(conf);
+
+ cluster = new MiniDFSCluster.Builder(conf)
+ .manageDataDfsDirs(false)
+ .manageNameDfsDirs(false)
+ .format(false)
+ .startupOption(StartupOption.REGULAR)
+ .build();
+
+ fs = getFileSystem(conf);
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+ LogFactory.getLog(SaslDataTransferServer.class));
+ LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
+ LogFactory.getLog(DataTransferSaslUtil.class));
+ try {
+ assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
+ assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
+ } finally {
+ logs.stopCapturing();
+ logs1.stopCapturing();
+ }
+
+ fs.close();
+
+ if (resolverClazz == null) {
+ // Test client and server negotiate cipher option
+ GenericTestUtils.assertMatches(logs.getOutput(),
+ "Server using cipher suite");
+ // Check the IOStreamPair
+ GenericTestUtils.assertMatches(logs1.getOutput(),
+ "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
+ }
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @Test
public void testEncryptedReadAfterNameNodeRestart() throws IOException {
MiniDFSCluster cluster = null;
try {
@@ -270,7 +345,7 @@ public class TestEncryptedTransfer {
}
fs.close();
- if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
+ if (resolverClazz == null) {
GenericTestUtils.assertMatches(logs.getOutput(),
"Failed to read expected encryption handshake from client at");
}
@@ -444,12 +519,12 @@ public class TestEncryptedTransfer {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
fs.close();
- if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
+ if (resolverClazz == null) {
// Test client and server negotiate cipher option
- GenericTestUtils.assertMatches(logs.getOutput(),
+ GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
- GenericTestUtils.assertMatches(logs1.getOutput(),
+ GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {