You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by cl...@apache.org on 2019/10/08 19:20:09 UTC
[hadoop] branch branch-3.1 updated: HDFS-14509. DN throws
InvalidToken due to inequality of password when upgrade NN 2.x to 3.x.
Contributed by Yuxuan Wang and Konstantin Shvachko.
This is an automated email from the ASF dual-hosted git repository.
cliang pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.1 by this push:
new 01d9952 HDFS-14509. DN throws InvalidToken due to inequality of password when upgrade NN 2.x to 3.x. Contributed by Yuxuan Wang and Konstantin Shvachko.
01d9952 is described below
commit 01d9952f63bdb2216f23ebc295d65f93121d6ca0
Author: Chen Liang <cl...@apache.org>
AuthorDate: Tue Oct 8 11:56:52 2019 -0700
HDFS-14509. DN throws InvalidToken due to inequality of password when upgrade NN 2.x to 3.x. Contributed by Yuxuan Wang and Konstantin Shvachko.
(cherry picked from commit 72ae371e7a6695f45f0d9cea5ae9aae83941d360)
---
.../security/token/block/BlockTokenIdentifier.java | 11 +++++
.../hdfs/security/token/block/TestBlockToken.java | 49 ++++++++++++++++++++++
2 files changed, 60 insertions(+)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
index a508b90..ea27f1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.security.UserGroupInformation;
@@ -142,6 +143,7 @@ public class BlockTokenIdentifier extends TokenIdentifier {
}
public void setHandshakeMsg(byte[] bytes) {
+ cache = null; // invalidate the cache
handshakeMsg = bytes;
}
@@ -214,6 +216,15 @@ public class BlockTokenIdentifier extends TokenIdentifier {
if (!dis.markSupported()) {
throw new IOException("Could not peek first byte.");
}
+
// this.cache should be assigned the raw bytes from the input data for
// upgrade compatibility. If we don't mutate any fields and then call
// getBytes() for something (e.g. retrieving the password), we should return
// the raw bytes instead of serializing this instance's own fields to bytes,
// because we may lose newly added fields that we can't recognize
+ this.cache = IOUtils.readFullyToByteArray(dis);
+ dis.reset();
+
dis.mark(1);
final byte firstByte = dis.readByte();
dis.reset();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 6f62042..ec96e7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -32,6 +32,7 @@ import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
+import java.io.DataOutput;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Calendar;
@@ -41,6 +42,7 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.mockito.Mockito;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -837,4 +839,51 @@ public class TestBlockToken {
}
}
}
+
+ @Test
+ public void testRetrievePasswordWithUnknownFields() throws IOException {
+ BlockTokenIdentifier id = new BlockTokenIdentifier();
+ BlockTokenIdentifier spyId = Mockito.spy(id);
+ Mockito.doAnswer(new Answer<Void>() {
+ @Override
+ public Void answer(InvocationOnMock invocation) throws Throwable {
+ DataOutput out = (DataOutput) invocation.getArguments()[0];
+ invocation.callRealMethod();
+ // write something at the end that BlockTokenIdentifier#readFields()
+ // will ignore, but which is still a part of the password
+ out.write(7);
+ return null;
+ }
+ }).when(spyId).write(Mockito.any());
+
+ BlockTokenSecretManager sm =
+ new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime,
+ 0, 1, "fake-pool", null, false);
+ // master create password
+ byte[] password = sm.createPassword(spyId);
+
+ BlockTokenIdentifier slaveId = new BlockTokenIdentifier();
+ slaveId.readFields(
+ new DataInputStream(new ByteArrayInputStream(spyId.getBytes())));
+
+ // slave retrieve password
+ assertArrayEquals(password, sm.retrievePassword(slaveId));
+ }
+
+ @Test
+ public void testRetrievePasswordWithRecognizableFieldsOnly()
+ throws IOException {
+ BlockTokenSecretManager sm =
+ new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime,
+ 0, 1, "fake-pool", null, false);
+ // master create password
+ BlockTokenIdentifier masterId = new BlockTokenIdentifier();
+ byte[] password = sm.createPassword(masterId);
+ // set cache to null, so that masterId.getBytes() returns only the recognizable bytes
+ masterId.setExpiryDate(masterId.getExpiryDate());
+ BlockTokenIdentifier slaveId = new BlockTokenIdentifier();
+ slaveId.readFields(
+ new DataInputStream(new ByteArrayInputStream(masterId.getBytes())));
+ assertArrayEquals(password, sm.retrievePassword(slaveId));
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org