You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2011/08/17 16:34:29 UTC
svn commit: r1158743 - in /hadoop/common/trunk/hdfs: ./
src/java/org/apache/hadoop/hdfs/security/token/block/
src/java/org/apache/hadoop/hdfs/server/blockmanagement/
src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Author: szetszwo
Date: Wed Aug 17 14:34:29 2011
New Revision: 1158743
URL: http://svn.apache.org/viewvc?rev=1158743&view=rev
Log:
HDFS-2265. Remove unnecessary BlockTokenSecretManager fields/methods from BlockManager.
Added:
hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
- copied, changed from r1158734, hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
Removed:
hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
Modified:
hadoop/common/trunk/hdfs/CHANGES.txt
hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
Modified: hadoop/common/trunk/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/CHANGES.txt?rev=1158743&r1=1158742&r2=1158743&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hdfs/CHANGES.txt Wed Aug 17 14:34:29 2011
@@ -665,6 +665,9 @@ Trunk (unreleased changes)
HDFS-2233. Add WebUI tests with URI reserved chars. (eli)
+ HDFS-2265. Remove unnecessary BlockTokenSecretManager fields/methods from
+ BlockManager. (szetszwo)
+
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java?rev=1158743&r1=1158742&r2=1158743&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java Wed Aug 17 14:34:29 2011
@@ -52,7 +52,7 @@ public class BlockTokenSecretManager ext
public static final Token<BlockTokenIdentifier> DUMMY_TOKEN = new Token<BlockTokenIdentifier>();
private final boolean isMaster;
- /*
+ /**
* keyUpdateInterval is the interval that NN updates its block keys. It should
* be set long enough so that all live DN's and Balancer should have sync'ed
* their block keys with NN at least once during each interval.
@@ -151,11 +151,23 @@ public class BlockTokenSecretManager ext
}
/**
+ * Update block keys if update time > update interval.
+ * @return true if the keys are updated.
+ */
+ public boolean updateKeys(final long updateTime) throws IOException {
+ if (updateTime > keyUpdateInterval) {
+ return updateKeys();
+ }
+ return false;
+ }
+
+ /**
* Update block keys, only to be used in master mode
*/
- public synchronized void updateKeys() throws IOException {
+ synchronized boolean updateKeys() throws IOException {
if (!isMaster)
- return;
+ return false;
+
LOG.info("Updating block keys");
removeExpiredKeys();
// set final expiry date of retiring currentKey
@@ -171,6 +183,7 @@ public class BlockTokenSecretManager ext
nextKey = new BlockKey(serialNo, System.currentTimeMillis() + 3
* keyUpdateInterval + tokenLifetime, generateSecret());
allKeys.put(nextKey.getKeyId(), nextKey);
+ return true;
}
/** Generate an block token for current user */
Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1158743&r1=1158742&r2=1158743&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Wed Aug 17 14:34:29 2011
@@ -80,21 +80,16 @@ public class BlockManager {
private final FSNamesystem namesystem;
+ private final DatanodeManager datanodeManager;
+ private final HeartbeatManager heartbeatManager;
+ private final BlockTokenSecretManager blockTokenSecretManager;
+
private volatile long pendingReplicationBlocksCount = 0L;
private volatile long corruptReplicaBlocksCount = 0L;
private volatile long underReplicatedBlocksCount = 0L;
private volatile long scheduledReplicationBlocksCount = 0L;
private volatile long excessBlocksCount = 0L;
private volatile long pendingDeletionBlocksCount = 0L;
- private boolean isBlockTokenEnabled;
- private long blockKeyUpdateInterval;
- private long blockTokenLifetime;
- private BlockTokenSecretManager blockTokenSecretManager;
-
- /** get the BlockTokenSecretManager */
- public BlockTokenSecretManager getBlockTokenSecretManager() {
- return blockTokenSecretManager;
- }
/** Used by metrics */
public long getPendingReplicationBlocksCount() {
@@ -130,9 +125,6 @@ public class BlockManager {
*/
final BlocksMap blocksMap;
- private final DatanodeManager datanodeManager;
- private final HeartbeatManager heartbeatManager;
-
/** Replication thread. */
final Daemon replicationThread = new Daemon(new ReplicationMonitor());
@@ -197,26 +189,9 @@ public class BlockManager {
pendingReplications = new PendingReplicationBlocks(conf.getInt(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);
- this.isBlockTokenEnabled = conf.getBoolean(
- DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
- DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
- if (isBlockTokenEnabled) {
- if (isBlockTokenEnabled) {
- this.blockKeyUpdateInterval = conf.getLong(
- DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY,
- DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT) * 60 * 1000L; // 10 hrs
- this.blockTokenLifetime = conf.getLong(
- DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY,
- DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT) * 60 * 1000L; // 10 hrs
- }
-
- blockTokenSecretManager = new BlockTokenSecretManager(true,
- blockKeyUpdateInterval, blockTokenLifetime);
- }
- LOG.info("isBlockTokenEnabled=" + isBlockTokenEnabled
- + " blockKeyUpdateInterval=" + blockKeyUpdateInterval / (60 * 1000)
- + " min(s), blockTokenLifetime=" + blockTokenLifetime / (60 * 1000)
- + " min(s)");
+
+ blockTokenSecretManager = createBlockTokenSecretManager(conf);
+
this.maxCorruptFilesReturned = conf.getInt(
DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED);
@@ -260,6 +235,46 @@ public class BlockManager {
LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
}
+ private static BlockTokenSecretManager createBlockTokenSecretManager(
+ final Configuration conf) throws IOException {
+ final boolean isEnabled = conf.getBoolean(
+ DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
+ DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
+ LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);
+
+ if (!isEnabled) {
+ return null;
+ }
+
+ final long updateMin = conf.getLong(
+ DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY,
+ DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
+ final long lifetimeMin = conf.getLong(
+ DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY,
+ DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
+ LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
+ + "=" + updateMin + " min(s), "
+ + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
+ + "=" + lifetimeMin + " min(s)");
+ return new BlockTokenSecretManager(true,
+ updateMin*60*1000L, lifetimeMin*60*1000L);
+ }
+
+ /** get the BlockTokenSecretManager */
+ BlockTokenSecretManager getBlockTokenSecretManager() {
+ return blockTokenSecretManager;
+ }
+
+ private boolean isBlockTokenEnabled() {
+ return blockTokenSecretManager != null;
+ }
+
+ /** Should the access keys be updated? */
+ boolean shouldUpdateBlockKey(final long updateTime) throws IOException {
+ return isBlockTokenEnabled()? blockTokenSecretManager.updateKeys(updateTime)
+ : false;
+ }
+
public void activate(Configuration conf) {
pendingReplications.start();
datanodeManager.activate(conf);
@@ -599,7 +614,7 @@ public class BlockManager {
: fileSizeExcludeBlocksUnderConstruction;
final LocatedBlock lastlb = createLocatedBlock(last, lastPos);
- if (isBlockTokenEnabled && needBlockToken) {
+ if (isBlockTokenEnabled() && needBlockToken) {
for(LocatedBlock lb : locatedblocks) {
setBlockToken(lb, AccessMode.READ);
}
@@ -613,14 +628,14 @@ public class BlockManager {
/** @return current access keys. */
public ExportedBlockKeys getBlockKeys() {
- return isBlockTokenEnabled? blockTokenSecretManager.exportKeys()
+ return isBlockTokenEnabled()? blockTokenSecretManager.exportKeys()
: ExportedBlockKeys.DUMMY_KEYS;
}
/** Generate a block token for the located block. */
public void setBlockToken(final LocatedBlock b,
final BlockTokenSecretManager.AccessMode mode) throws IOException {
- if (isBlockTokenEnabled) {
+ if (isBlockTokenEnabled()) {
b.setBlockToken(blockTokenSecretManager.generateToken(b.getBlock(),
EnumSet.of(mode)));
}
@@ -629,7 +644,7 @@ public class BlockManager {
void addKeyUpdateCommand(final List<DatanodeCommand> cmds,
final DatanodeDescriptor nodeinfo) {
// check access key update
- if (isBlockTokenEnabled && nodeinfo.needKeyUpdate) {
+ if (isBlockTokenEnabled() && nodeinfo.needKeyUpdate) {
cmds.add(new KeyUpdateCommand(blockTokenSecretManager.exportKeys()));
nodeinfo.needKeyUpdate = false;
}
@@ -2368,16 +2383,6 @@ public class BlockManager {
return blocksMap.getStoredBlock(block);
}
-
- /** Should the access keys be updated? */
- boolean shouldUpdateBlockKey(final long updateTime) throws IOException {
- final boolean b = isBlockTokenEnabled && blockKeyUpdateInterval < updateTime;
- if (b) {
- blockTokenSecretManager.updateKeys();
- }
- return b;
- }
-
/** updates a block in under replication queue */
private void updateNeededReplications(final Block block,
final int curReplicasDelta, int expectedReplicasDelta) {
Copied: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java (from r1158734, hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java)
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java?p2=hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java&p1=hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java&r1=1158734&r2=1158743&rev=1158743&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java Wed Aug 17 14:34:29 2011
@@ -15,7 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -26,6 +31,10 @@ import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -34,21 +43,20 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.security.token.block.*;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
+import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.token.*;
+import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;
+import org.junit.Test;
-import junit.framework.TestCase;
-
-public class TestBlockTokenWithDFS extends TestCase {
+public class TestBlockTokenWithDFS {
private static final int BLOCK_SIZE = 1024;
private static final int FILE_SIZE = 2 * BLOCK_SIZE;
@@ -175,10 +183,11 @@ public class TestBlockTokenWithDFS exten
return conf;
}
- /*
+ /**
* testing that APPEND operation can handle token expiration when
* re-establishing pipeline is needed
*/
+ @Test
public void testAppend() throws Exception {
MiniDFSCluster cluster = null;
int numDataNodes = 2;
@@ -188,9 +197,13 @@ public class TestBlockTokenWithDFS exten
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
+
+ final NameNode nn = cluster.getNameNode();
+ final BlockManager bm = nn.getNamesystem().getBlockManager();
+ final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
+
// set a short token lifetime (1 second)
- SecurityTestUtil.setBlockTokenLifetime(
- cluster.getNameNode().getNamesystem().getBlockManager().getBlockTokenSecretManager(), 1000L);
+ SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
Path fileToAppend = new Path(FILE_TO_APPEND);
FileSystem fs = cluster.getFileSystem();
@@ -231,10 +244,11 @@ public class TestBlockTokenWithDFS exten
}
}
- /*
+ /**
* testing that WRITE operation can handle token expiration when
* re-establishing pipeline is needed
*/
+ @Test
public void testWrite() throws Exception {
MiniDFSCluster cluster = null;
int numDataNodes = 2;
@@ -244,9 +258,13 @@ public class TestBlockTokenWithDFS exten
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
+
+ final NameNode nn = cluster.getNameNode();
+ final BlockManager bm = nn.getNamesystem().getBlockManager();
+ final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
+
// set a short token lifetime (1 second)
- SecurityTestUtil.setBlockTokenLifetime(
- cluster.getNameNode().getNamesystem().getBlockManager().getBlockTokenSecretManager(), 1000L);
+ SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
Path fileToWrite = new Path(FILE_TO_WRITE);
FileSystem fs = cluster.getFileSystem();
@@ -283,6 +301,7 @@ public class TestBlockTokenWithDFS exten
}
}
+ @Test
public void testRead() throws Exception {
MiniDFSCluster cluster = null;
int numDataNodes = 2;
@@ -292,11 +311,14 @@ public class TestBlockTokenWithDFS exten
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
+
+ final NameNode nn = cluster.getNameNode();
+ final BlockManager bm = nn.getNamesystem().getBlockManager();
+ final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
+
// set a short token lifetime (1 second) initially
- SecurityTestUtil.setBlockTokenLifetime(
- cluster.getNameNode()
- .getNamesystem().getBlockManager().getBlockTokenSecretManager(),
- 1000L);
+ SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
+
Path fileToRead = new Path(FILE_TO_READ);
FileSystem fs = cluster.getFileSystem();
createFile(fs, fileToRead);
@@ -321,7 +343,7 @@ public class TestBlockTokenWithDFS exten
new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), conf);
- List<LocatedBlock> locatedBlocks = cluster.getNameNode().getBlockLocations(
+ List<LocatedBlock> locatedBlocks = nn.getBlockLocations(
FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
LocatedBlock lblock = locatedBlocks.get(0); // first block
Token<BlockTokenIdentifier> myToken = lblock.getBlockToken();
@@ -350,36 +372,27 @@ public class TestBlockTokenWithDFS exten
// read should fail
tryRead(conf, lblock, false);
// use a valid new token
- lblock.setBlockToken(cluster.getNameNode().getNamesystem()
- .getBlockManager().getBlockTokenSecretManager().generateToken(
- lblock.getBlock(),
+ lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
// read should succeed
tryRead(conf, lblock, true);
// use a token with wrong blockID
ExtendedBlock wrongBlock = new ExtendedBlock(lblock.getBlock()
.getBlockPoolId(), lblock.getBlock().getBlockId() + 1);
- lblock.setBlockToken(cluster.getNameNode().getNamesystem()
- .getBlockManager().getBlockTokenSecretManager().generateToken(wrongBlock,
- EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
+ lblock.setBlockToken(sm.generateToken(wrongBlock,
+ EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
// read should fail
tryRead(conf, lblock, false);
// use a token with wrong access modes
- lblock.setBlockToken(cluster.getNameNode().getNamesystem()
- .getBlockManager().getBlockTokenSecretManager().generateToken(
- lblock.getBlock(),
- EnumSet.of(
- BlockTokenSecretManager.AccessMode.WRITE,
- BlockTokenSecretManager.AccessMode.COPY,
- BlockTokenSecretManager.AccessMode.REPLACE)));
+ lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
+ EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE,
+ BlockTokenSecretManager.AccessMode.COPY,
+ BlockTokenSecretManager.AccessMode.REPLACE)));
// read should fail
tryRead(conf, lblock, false);
// set a long token lifetime for future tokens
- SecurityTestUtil.setBlockTokenLifetime(
- cluster.getNameNode()
- .getNamesystem().getBlockManager().getBlockTokenSecretManager(),
- 600 * 1000L);
+ SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);
/*
* testing that when cached tokens are expired, DFSClient will re-fetch
@@ -531,9 +544,10 @@ public class TestBlockTokenWithDFS exten
}
}
- /*
+ /**
* Integration testing of access token, involving NN, DN, and Balancer
*/
+ @Test
public void testEnd2End() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);