Posted to hdfs-commits@hadoop.apache.org by gk...@apache.org on 2012/08/03 21:00:59 UTC
svn commit: r1369164 [6/16] - in
/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project: ./
hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/or...
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Fri Aug 3 19:00:15 2012
@@ -72,8 +72,10 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import static com.google.common.base.Preconditions.checkArgument;
/** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
* when some datanodes become full or when new empty nodes join the cluster.
@@ -376,6 +378,7 @@ public class Balancer {
/* start a thread to dispatch the block move */
private void scheduleBlockMove() {
moverExecutor.execute(new Runnable() {
+ @Override
public void run() {
if (LOG.isDebugEnabled()) {
LOG.debug("Starting moving "+ block.getBlockId() +
@@ -568,6 +571,7 @@ public class Balancer {
/* A thread that initiates a block move
* and waits for block move to complete */
private class BlockMoveDispatcher implements Runnable {
+ @Override
public void run() {
dispatchBlocks();
}
@@ -709,7 +713,7 @@ public class Balancer {
*/
private static final long MAX_ITERATION_TIME = 20*60*1000L; //20 mins
private void dispatchBlocks() {
- long startTime = Util.now();
+ long startTime = Time.now();
this.blocksToReceive = 2*scheduledSize;
boolean isTimeUp = false;
while(!isTimeUp && scheduledSize>0 &&
@@ -738,7 +742,7 @@ public class Balancer {
}
// check if time is up or not
- if (Util.now()-startTime > MAX_ITERATION_TIME) {
+ if (Time.now()-startTime > MAX_ITERATION_TIME) {
isTimeUp = true;
continue;
}
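The Util.now() to Time.now() switch is mechanical (both return System.currentTimeMillis()), so the 20-minute cap keeps its wall-clock semantics. A minimal sketch of the same time-capped loop pattern, where hasWork() and doWork() are hypothetical stand-ins for the Balancer's block-move scheduling:

    import org.apache.hadoop.util.Time;

    private static final long MAX_ITERATION_TIME = 20*60*1000L; // 20 mins

    private void dispatchUntilTimeUp() {
      final long startTime = Time.now();       // wall-clock millis
      while (hasWork()) {                      // hypothetical helper
        if (Time.now() - startTime > MAX_ITERATION_TIME) {
          break;                               // time is up for this iteration
        }
        doWork();                              // hypothetical helper
      }
    }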
@@ -1143,7 +1147,7 @@ public class Balancer {
* move blocks in current window to old window.
*/
private static class MovedBlocks {
- private long lastCleanupTime = System.currentTimeMillis();
+ private long lastCleanupTime = Time.now();
final private static int CUR_WIN = 0;
final private static int OLD_WIN = 1;
final private static int NUM_WINS = 2;
@@ -1174,7 +1178,7 @@ public class Balancer {
/* remove old blocks */
synchronized private void cleanup() {
- long curTime = System.currentTimeMillis();
+ long curTime = Time.now();
// check if old win is older than winWidth
if (lastCleanupTime + WIN_WIDTH <= curTime) {
// purge the old window
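MovedBlocks tracks recently moved blocks in two rotating time windows: lookups consult both windows, so an entry stays visible for at least one window width and at most two before it ages out. A generic sketch of the rotation, assuming the Balancer's 1.5-hour default window:

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.util.Time;

    class TwoWindowSet<T> {
      private long winWidth = 5400*1000L;         // assumed default, 1.5h
      private long lastCleanupTime = Time.now();
      private Set<T> cur = new HashSet<T>();      // current window
      private Set<T> old = new HashSet<T>();      // previous window

      synchronized void add(T t) { cleanup(); cur.add(t); }
      synchronized boolean contains(T t) {
        return cur.contains(t) || old.contains(t);
      }

      private void cleanup() {
        long curTime = Time.now();
        if (lastCleanupTime + winWidth <= curTime) {
          old = cur;                              // discard old window,
          cur = new HashSet<T>();                 // current becomes old
          lastCleanupTime = curTime;
        }
      }
    }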
@@ -1471,7 +1475,7 @@ public class Balancer {
/** Parse arguments and then run Balancer */
@Override
public int run(String[] args) {
- final long startTime = Util.now();
+ final long startTime = Time.now();
final Configuration conf = getConf();
WIN_WIDTH = conf.getLong(
DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
@@ -1489,7 +1493,7 @@ public class Balancer {
System.out.println(e + ". Exiting ...");
return ReturnStatus.INTERRUPTED.code;
} finally {
- System.out.println("Balancing took " + time2Str(Util.now()-startTime));
+ System.out.println("Balancing took " + time2Str(Time.now()-startTime));
}
}
@@ -1501,6 +1505,7 @@ public class Balancer {
if (args != null) {
try {
for(int i = 0; i < args.length; i++) {
+ checkArgument(args.length >= 2, "args = " + Arrays.toString(args));
if ("-threshold".equalsIgnoreCase(args[i])) {
i++;
try {
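Guava's checkArgument (statically imported above) throws IllegalArgumentException carrying the supplied message when its condition is false, so a malformed argument list fails fast with the offending args echoed back. For instance:

    import static com.google.common.base.Preconditions.checkArgument;
    import java.util.Arrays;

    static void requireValueFor(String[] args) {
      // e.g. new String[]{"-threshold"} fails with
      // IllegalArgumentException: args = [-threshold]
      checkArgument(args.length >= 2, "args = " + Arrays.toString(args));
    }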
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java Fri Aug 3 19:00:15 2012
@@ -189,6 +189,7 @@ class NameNodeConnector {
* Periodically updates access keys.
*/
class BlockKeyUpdater implements Runnable {
+ @Override
public void run() {
try {
while (shouldRun) {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Fri Aug 3 19:00:15 2012
@@ -53,6 +53,9 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Util;
@@ -68,6 +71,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -202,6 +206,14 @@ public class BlockManager {
/** variable to enable check for enough racks */
final boolean shouldCheckForEnoughRacks;
+ /**
+ * When running inside a Standby node, the node may receive block reports
+ * from datanodes before receiving the corresponding namespace edits from
+ * the active NameNode. Thus, it will postpone them for later processing,
+ * instead of marking the blocks as corrupt.
+ */
+ private boolean shouldPostponeBlocksFromFuture = false;
+
/** for block replicas placement */
private BlockPlacementPolicy blockplacement;
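With the flag in place, each reported replica reduces to one decision: if postponement is enabled and the replica's generation stamp is ahead of the namespace, queue it for replay; otherwise process it immediately. A simplified sketch, where processNow() is a hypothetical stand-in for the normal report-processing path:

    void onReportedBlock(DatanodeDescriptor dn, Block blk,
        ReplicaState reportedState) throws IOException {
      if (shouldPostponeBlocksFromFuture
          && namesystem.isGenStampInFuture(blk.getGenerationStamp())) {
        // replayed later by processAllPendingDNMessages()
        queueReportedBlock(dn, blk, reportedState, QUEUE_REASON_FUTURE_GENSTAMP);
        return;
      }
      processNow(dn, blk, reportedState);  // hypothetical normal path
    }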
@@ -1010,6 +1022,12 @@ public class BlockManager {
}
}
+
+ public void setPostponeBlocksFromFuture(boolean postpone) {
+ this.shouldPostponeBlocksFromFuture = postpone;
+ }
+
+
private void postponeBlock(Block blk) {
if (postponedMisreplicatedBlocks.add(blk)) {
postponedMisreplicatedBlocksCount++;
@@ -1453,7 +1471,7 @@ public class BlockManager {
public void processReport(final DatanodeID nodeID, final String poolId,
final BlockListAsLongs newReport) throws IOException {
namesystem.writeLock();
- final long startTime = Util.now(); //after acquiring write lock
+ final long startTime = Time.now(); //after acquiring write lock
final long endTime;
try {
final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
@@ -1492,7 +1510,7 @@ public class BlockManager {
}
} finally {
- endTime = Util.now();
+ endTime = Time.now();
namesystem.writeUnlock();
}
@@ -1586,13 +1604,11 @@ public class BlockManager {
assert (node.numBlocks() == 0);
BlockReportIterator itBR = report.getBlockReportIterator();
- boolean isStandby = namesystem.isInStandbyState();
-
while(itBR.hasNext()) {
Block iblk = itBR.next();
ReplicaState reportedState = itBR.getCurrentReplicaState();
- if (isStandby &&
+ if (shouldPostponeBlocksFromFuture &&
namesystem.isGenStampInFuture(iblk.getGenerationStamp())) {
queueReportedBlock(node, iblk, reportedState,
QUEUE_REASON_FUTURE_GENSTAMP);
@@ -1608,7 +1624,7 @@ public class BlockManager {
BlockToMarkCorrupt c = checkReplicaCorrupt(
iblk, reportedState, storedBlock, ucState, node);
if (c != null) {
- if (namesystem.isInStandbyState()) {
+ if (shouldPostponeBlocksFromFuture) {
// In the Standby, we may receive a block report for a file that we
// just have an out-of-date gen-stamp or state for, for example.
queueReportedBlock(node, iblk, reportedState,
@@ -1714,7 +1730,7 @@ public class BlockManager {
+ " replicaState = " + reportedState);
}
- if (namesystem.isInStandbyState() &&
+ if (shouldPostponeBlocksFromFuture &&
namesystem.isGenStampInFuture(block.getGenerationStamp())) {
queueReportedBlock(dn, block, reportedState,
QUEUE_REASON_FUTURE_GENSTAMP);
@@ -1747,7 +1763,7 @@ assert storedBlock.findDatanode(dn) < 0
BlockToMarkCorrupt c = checkReplicaCorrupt(
block, reportedState, storedBlock, ucState, dn);
if (c != null) {
- if (namesystem.isInStandbyState()) {
+ if (shouldPostponeBlocksFromFuture) {
// If the block is an out-of-date generation stamp or state,
// but we're the standby, we shouldn't treat it as corrupt,
// but instead just queue it for later processing.
@@ -1780,7 +1796,7 @@ assert storedBlock.findDatanode(dn) < 0
*/
private void queueReportedBlock(DatanodeDescriptor dn, Block block,
ReplicaState reportedState, String reason) {
- assert namesystem.isInStandbyState();
+ assert shouldPostponeBlocksFromFuture;
if (LOG.isDebugEnabled()) {
LOG.debug("Queueing reported block " + block +
@@ -1823,9 +1839,9 @@ assert storedBlock.findDatanode(dn) < 0
* with the namespace information.
*/
public void processAllPendingDNMessages() throws IOException {
- assert !namesystem.isInStandbyState() :
- "processAllPendingDNMessages() should be called after exiting " +
- "standby state!";
+ assert !shouldPostponeBlocksFromFuture :
+ "processAllPendingDNMessages() should be called after disabling " +
+ "block postponement.";
int count = pendingDNMessages.count();
if (count > 0) {
LOG.info("Processing " + count + " messages from DataNodes " +
@@ -2962,8 +2978,8 @@ assert storedBlock.findDatanode(dn) < 0
LOG.warn("ReplicationMonitor thread received InterruptedException.", ie);
break;
} catch (Throwable t) {
- LOG.warn("ReplicationMonitor thread received Runtime exception. ", t);
- Runtime.getRuntime().exit(-1);
+ LOG.fatal("ReplicationMonitor thread received Runtime exception. ", t);
+ terminate(1, t);
}
}
}
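ExitUtil.terminate replaces the bare Runtime exit so the fatal Throwable is logged alongside the exit (and tests can intercept the termination instead of losing the JVM). The daemon-loop shape, sketched with a hypothetical computeDatanodeWork():

    import static org.apache.hadoop.util.ExitUtil.terminate;

    public void run() {
      while (true) {
        try {
          computeDatanodeWork();               // hypothetical periodic work
          Thread.sleep(recheckIntervalMs);     // assumed interval field
        } catch (InterruptedException ie) {
          LOG.warn("Monitor thread interrupted.", ie);
          break;                               // recoverable: just stop
        } catch (Throwable t) {
          LOG.fatal("Monitor thread received Runtime exception.", t);
          terminate(1, t);                     // logs cause, then exits 1
        }
      }
    }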
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Fri Aug 3 19:00:15 2012
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.util.ArrayList;
import java.util.Collection;
@@ -27,7 +27,8 @@ import java.util.List;
import java.util.Set;
import java.util.TreeSet;
-import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -42,7 +43,8 @@ import org.apache.hadoop.net.NodeBase;
import com.google.common.annotations.VisibleForTesting;
-/** The class is responsible for choosing the desired number of targets
+/**
+ * The class is responsible for choosing the desired number of targets
* for placing block replicas.
* The replica placement strategy is that if the writer is on a datanode,
* the 1st replica is placed on the local machine,
@@ -52,9 +54,13 @@ import com.google.common.annotations.Vis
*/
@InterfaceAudience.Private
public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
+
+ private static final Log LOG =
+ LogFactory.getLog(BlockPlacementPolicyDefault.class.getName());
+
private static final String enableDebugLogging =
"For more information, please enable DEBUG log level on "
- + ((Log4JLogger)LOG).getLogger().getName();
+ + LOG.getClass().getName();
protected boolean considerLoad;
private boolean preferLocalNode = true;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java Fri Aug 3 19:00:15 2012
@@ -52,6 +52,7 @@ public class BlockPlacementPolicyWithNod
BlockPlacementPolicyWithNodeGroup() {
}
+ @Override
public void initialize(Configuration conf, FSClusterStats stats,
NetworkTopology clusterMap) {
super.initialize(conf, stats, clusterMap);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java Fri Aug 3 19:00:15 2012
@@ -37,15 +37,18 @@ class BlocksMap {
this.blockInfo = blkInfo;
}
+ @Override
public boolean hasNext() {
return blockInfo != null && nextIdx < blockInfo.getCapacity()
&& blockInfo.getDatanode(nextIdx) != null;
}
+ @Override
public DatanodeDescriptor next() {
return blockInfo.getDatanode(nextIdx++);
}
+ @Override
public void remove() {
throw new UnsupportedOperationException("Sorry. can't remove.");
}
@@ -81,10 +84,12 @@ class BlocksMap {
final int exponent = e2 < 0? 0: e2 > 30? 30: e2;
final int c = 1 << exponent;
- LightWeightGSet.LOG.info("VM type = " + vmBit + "-bit");
- LightWeightGSet.LOG.info("2% max memory = " + twoPC/(1 << 20) + " MB");
- LightWeightGSet.LOG.info("capacity = 2^" + exponent
- + " = " + c + " entries");
+ if (LightWeightGSet.LOG.isDebugEnabled()) {
+ LightWeightGSet.LOG.debug("VM type = " + vmBit + "-bit");
+ LightWeightGSet.LOG.debug("2% max memory = " + twoPC/(1 << 20) + " MB");
+ LightWeightGSet.LOG.debug("capacity = 2^" + exponent
+ + " = " + c + " entries");
+ }
return c;
}
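The capacity computation above sizes the map at roughly 2% of the JVM's max heap, rounded down to a power of two so the GSet can index with a bit mask. A hedged reconstruction of the surrounding arithmetic (the per-entry reference size is an assumption):

    // Sketch: choose 2^exponent entries fitting in ~2% of -Xmx,
    // clamping the exponent to [0, 30].
    static int computeCapacity(double bytesPerEntryRef) {
      final long twoPC = Runtime.getRuntime().maxMemory() / 50;  // 2%
      final int e2 = (int) (Math.log(twoPC / bytesPerEntryRef) / Math.log(2.0));
      final int exponent = e2 < 0 ? 0 : e2 > 30 ? 30 : e2;
      return 1 << exponent;                    // power-of-two capacity
    }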
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Fri Aug 3 19:00:15 2012
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;
+import org.apache.hadoop.util.Time;
/**
* This class extends the DatanodeInfo class with ephemeral information (eg
@@ -306,7 +307,7 @@ public class DatanodeDescriptor extends
this.dfsUsed = dfsUsed;
this.remaining = remaining;
this.blockPoolUsed = blockPoolUsed;
- this.lastUpdate = System.currentTimeMillis();
+ this.lastUpdate = Time.now();
this.xceiverCount = xceiverCount;
this.volumeFailures = volFailures;
this.heartbeatedSinceFailover = true;
@@ -325,16 +326,19 @@ public class DatanodeDescriptor extends
this.node = dn;
}
+ @Override
public boolean hasNext() {
return current != null;
}
+ @Override
public BlockInfo next() {
BlockInfo res = current;
current = current.getNext(current.findDatanode(node));
return res;
}
+ @Override
public void remove() {
throw new UnsupportedOperationException("Sorry. can't remove.");
}
@@ -541,6 +545,7 @@ public class DatanodeDescriptor extends
/**
* @param nodeReg DatanodeID to update registration for.
*/
+ @Override
public void updateRegInfo(DatanodeID nodeReg) {
super.updateRegInfo(nodeReg);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Fri Aug 3 19:00:15 2012
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.io.IOException;
import java.io.PrintWriter;
@@ -73,6 +73,7 @@ import org.apache.hadoop.net.ScriptBased
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.net.InetAddresses;
@@ -338,7 +339,7 @@ public class DatanodeManager {
/** Is the datanode dead? */
boolean isDatanodeDead(DatanodeDescriptor node) {
return (node.getLastUpdate() <
- (Util.now() - heartbeatExpireInterval));
+ (Time.now() - heartbeatExpireInterval));
}
/** Add a datanode. */
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java Fri Aug 3 19:00:15 2012
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
/**
* Manage the heartbeats received from datanodes.
@@ -247,7 +248,7 @@ class HeartbeatManager implements Datano
public void run() {
while(namesystem.isRunning()) {
try {
- final long now = Util.now();
+ final long now = Time.now();
if (lastHeartbeatCheck + heartbeatRecheckInterval < now) {
heartbeatCheck();
lastHeartbeatCheck = now;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java Fri Aug 3 19:00:15 2012
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.io.PrintWriter;
import java.sql.Time;
@@ -192,6 +192,7 @@ class PendingReplicationBlocks {
* their replication request.
*/
class PendingReplicationMonitor implements Runnable {
+ @Override
public void run() {
while (fsRunning) {
long period = Math.min(defaultRecheckInterval, timeout);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java Fri Aug 3 19:00:15 2012
@@ -141,6 +141,7 @@ public final class HdfsServerConstants {
private String description = null;
private NamenodeRole(String arg) {this.description = arg;}
+ @Override
public String toString() {
return description;
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Fri Aug 3 19:00:15 2012
@@ -111,6 +111,7 @@ public class JspHelper {
// compare two records based on their frequency
private static class NodeRecordComparator implements Comparator<NodeRecord> {
+ @Override
public int compare(NodeRecord o1, NodeRecord o2) {
if (o1.frequency < o2.frequency) {
return -1;
@@ -314,6 +315,7 @@ public class JspHelper {
}
}
+ @Override
public int compare(DatanodeDescriptor d1,
DatanodeDescriptor d2) {
int ret = 0;
@@ -485,12 +487,17 @@ public class JspHelper {
*/
public static UserGroupInformation getDefaultWebUser(Configuration conf
) throws IOException {
+ return UserGroupInformation.createRemoteUser(getDefaultWebUserName(conf));
+ }
+
+ private static String getDefaultWebUserName(Configuration conf
+ ) throws IOException {
String user = conf.get(
HADOOP_HTTP_STATIC_USER, DEFAULT_HADOOP_HTTP_STATIC_USER);
if (user == null || user.length() == 0) {
throw new IOException("Cannot determine UGI from request or conf");
}
- return UserGroupInformation.createRemoteUser(user);
+ return user;
}
private static InetSocketAddress getNNServiceAddress(ServletContext context,
@@ -536,65 +543,45 @@ public class JspHelper {
HttpServletRequest request, Configuration conf,
final AuthenticationMethod secureAuthMethod,
final boolean tryUgiParameter) throws IOException {
- final UserGroupInformation ugi;
+ UserGroupInformation ugi = null;
final String usernameFromQuery = getUsernameFromQuery(request, tryUgiParameter);
final String doAsUserFromQuery = request.getParameter(DoAsParam.NAME);
-
- if(UserGroupInformation.isSecurityEnabled()) {
- final String remoteUser = request.getRemoteUser();
- String tokenString = request.getParameter(DELEGATION_PARAMETER_NAME);
+ final String remoteUser;
+
+ if (UserGroupInformation.isSecurityEnabled()) {
+ remoteUser = request.getRemoteUser();
+ final String tokenString = request.getParameter(DELEGATION_PARAMETER_NAME);
if (tokenString != null) {
- Token<DelegationTokenIdentifier> token =
- new Token<DelegationTokenIdentifier>();
- token.decodeFromUrlString(tokenString);
- InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
- if (serviceAddress != null) {
- SecurityUtil.setTokenService(token, serviceAddress);
- token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
- }
- ByteArrayInputStream buf = new ByteArrayInputStream(token
- .getIdentifier());
- DataInputStream in = new DataInputStream(buf);
- DelegationTokenIdentifier id = new DelegationTokenIdentifier();
- id.readFields(in);
- if (context != null) {
- final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
- if (nn != null) {
- // Verify the token.
- nn.getNamesystem().verifyToken(id, token.getPassword());
- }
- }
- ugi = id.getUser();
- if (ugi.getRealUser() == null) {
- //non-proxy case
- checkUsername(ugi.getShortUserName(), usernameFromQuery);
- checkUsername(null, doAsUserFromQuery);
- } else {
- //proxy case
- checkUsername(ugi.getRealUser().getShortUserName(), usernameFromQuery);
- checkUsername(ugi.getShortUserName(), doAsUserFromQuery);
- ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
- }
- ugi.addToken(token);
- ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
- } else {
- if(remoteUser == null) {
- throw new IOException("Security enabled but user not " +
- "authenticated by filter");
- }
- final UserGroupInformation realUgi = UserGroupInformation.createRemoteUser(remoteUser);
- checkUsername(realUgi.getShortUserName(), usernameFromQuery);
+ // Token-based connections need only verify the effective user, and
+      // disallow proxying to a different user.  Proxy authorization checks
+ // are not required since the checks apply to issuing a token.
+ ugi = getTokenUGI(context, request, tokenString, conf);
+ checkUsername(ugi.getShortUserName(), usernameFromQuery);
+ checkUsername(ugi.getShortUserName(), doAsUserFromQuery);
+ } else if (remoteUser == null) {
+ throw new IOException(
+ "Security enabled but user not authenticated by filter");
+ }
+ } else {
+ // Security's not on, pull from url or use default web user
+ remoteUser = (usernameFromQuery == null)
+ ? getDefaultWebUserName(conf) // not specified in request
+ : usernameFromQuery;
+ }
+
+ if (ugi == null) { // security is off, or there's no token
+ ugi = UserGroupInformation.createRemoteUser(remoteUser);
+ checkUsername(ugi.getShortUserName(), usernameFromQuery);
+ if (UserGroupInformation.isSecurityEnabled()) {
// This is not necessarily true, could have been auth'ed by user-facing
// filter
- realUgi.setAuthenticationMethod(secureAuthMethod);
- ugi = initUGI(realUgi, doAsUserFromQuery, request, true, conf);
+ ugi.setAuthenticationMethod(secureAuthMethod);
+ }
+ if (doAsUserFromQuery != null) {
+ // create and attempt to authorize a proxy user
+ ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
+ ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
}
- } else { // Security's not on, pull from url
- final UserGroupInformation realUgi = usernameFromQuery == null?
- getDefaultWebUser(conf) // not specified in request
- : UserGroupInformation.createRemoteUser(usernameFromQuery);
- realUgi.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
- ugi = initUGI(realUgi, doAsUserFromQuery, request, false, conf);
}
if(LOG.isDebugEnabled())
@@ -602,21 +589,34 @@ public class JspHelper {
return ugi;
}
- private static UserGroupInformation initUGI(final UserGroupInformation realUgi,
- final String doAsUserFromQuery, final HttpServletRequest request,
- final boolean isSecurityEnabled, final Configuration conf
- ) throws AuthorizationException {
- final UserGroupInformation ugi;
- if (doAsUserFromQuery == null) {
- //non-proxy case
- ugi = realUgi;
- } else {
- //proxy case
- ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, realUgi);
- ugi.setAuthenticationMethod(
- isSecurityEnabled? AuthenticationMethod.PROXY: AuthenticationMethod.SIMPLE);
- ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
+ private static UserGroupInformation getTokenUGI(ServletContext context,
+ HttpServletRequest request,
+ String tokenString,
+ Configuration conf)
+ throws IOException {
+ final Token<DelegationTokenIdentifier> token =
+ new Token<DelegationTokenIdentifier>();
+ token.decodeFromUrlString(tokenString);
+ InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
+ if (serviceAddress != null) {
+ SecurityUtil.setTokenService(token, serviceAddress);
+ token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+ }
+
+ ByteArrayInputStream buf =
+ new ByteArrayInputStream(token.getIdentifier());
+ DataInputStream in = new DataInputStream(buf);
+ DelegationTokenIdentifier id = new DelegationTokenIdentifier();
+ id.readFields(in);
+ if (context != null) {
+ final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
+ if (nn != null) {
+ // Verify the token.
+ nn.getNamesystem().verifyToken(id, token.getPassword());
+ }
}
+ UserGroupInformation ugi = id.getUser();
+ ugi.addToken(token);
return ugi;
}
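Taken together, the refactored flow is: a delegation token, when present, fully determines the effective user and forbids proxying; otherwise the remote user comes from the authentication filter (secure) or from the query string / static default (insecure), with proxy-user authorization applied only on that non-token path. In outline (simplified; username checks, logging and null handling omitted):

    UserGroupInformation resolveUgi(ServletContext ctx, HttpServletRequest req,
        Configuration conf) throws IOException {
      final String doAs = req.getParameter(DoAsParam.NAME);
      final String tokenString = req.getParameter(DELEGATION_PARAMETER_NAME);
      if (UserGroupInformation.isSecurityEnabled() && tokenString != null) {
        return getTokenUGI(ctx, req, tokenString, conf); // token decides user
      }
      final String remote = UserGroupInformation.isSecurityEnabled()
          ? req.getRemoteUser()                // set by the auth filter
          : getDefaultWebUserName(conf);       // or ?user.name=..., if given
      UserGroupInformation ugi = UserGroupInformation.createRemoteUser(remote);
      if (doAs != null) {                      // non-token proxy case
        ugi = UserGroupInformation.createProxyUser(doAs, ugi);
        ProxyUsers.authorize(ugi, req.getRemoteAddr(), conf);
      }
      return ugi;
    }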
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java Fri Aug 3 19:00:15 2012
@@ -22,6 +22,7 @@ import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
+import java.lang.management.ManagementFactory;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.util.ArrayList;
@@ -122,6 +123,7 @@ public abstract class Storage extends St
this.prevIndex = 0;
}
+ @Override
public boolean hasNext() {
if (storageDirs.isEmpty() || nextIndex >= storageDirs.size())
return false;
@@ -137,6 +139,7 @@ public abstract class Storage extends St
return true;
}
+ @Override
public StorageDirectory next() {
StorageDirectory sd = getStorageDir(nextIndex);
prevIndex = nextIndex;
@@ -151,6 +154,7 @@ public abstract class Storage extends St
return sd;
}
+ @Override
public void remove() {
nextIndex = prevIndex; // restore previous state
storageDirs.remove(prevIndex); // remove last returned element
@@ -615,14 +619,20 @@ public abstract class Storage extends St
deletionHookAdded = true;
}
RandomAccessFile file = new RandomAccessFile(lockF, "rws");
+ String jvmName = ManagementFactory.getRuntimeMXBean().getName();
FileLock res = null;
try {
res = file.getChannel().tryLock();
+ file.write(jvmName.getBytes());
+ LOG.info("Lock on " + lockF + " acquired by nodename " + jvmName);
} catch(OverlappingFileLockException oe) {
+ LOG.error("It appears that another namenode " + file.readLine()
+ + " has already locked the storage directory");
file.close();
return null;
} catch(IOException e) {
- LOG.error("Cannot create lock on " + lockF, e);
+ LOG.error("Failed to acquire lock on " + lockF + ". If this storage directory is mounted via NFS, "
+ + "ensure that the appropriate nfs lock services are running.", e);
file.close();
throw e;
}
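Writing the JVM name (pid@hostname) into the lock file gives conflicts an owner: an OverlappingFileLockException means this same JVM already holds the lock, while a null from tryLock() means another process does. A reduced sketch of the protocol:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.lang.management.ManagementFactory;
    import java.nio.channels.FileLock;
    import java.nio.channels.OverlappingFileLockException;

    static FileLock tryLockDir(File lockF) throws IOException {
      String jvmName = ManagementFactory.getRuntimeMXBean().getName();
      RandomAccessFile file = new RandomAccessFile(lockF, "rws");
      try {
        FileLock res = file.getChannel().tryLock(); // null: held elsewhere
        file.write(jvmName.getBytes());             // record the owner
        return res;
      } catch (OverlappingFileLockException oe) {
        file.close();                               // held by this JVM
        return null;
      }
    }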
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java Fri Aug 3 19:00:15 2012
@@ -78,6 +78,7 @@ public class StorageInfo {
cTime = from.cTime;
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("lv=").append(layoutVersion).append(";cid=").append(clusterID)
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java Fri Aug 3 19:00:15 2012
@@ -32,19 +32,23 @@ import org.apache.hadoop.hdfs.server.com
public abstract class UpgradeObject implements Upgradeable {
protected short status;
+ @Override
public short getUpgradeStatus() {
return status;
}
+ @Override
public String getDescription() {
return "Upgrade object for " + getType() + " layout version " + getVersion();
}
+ @Override
public UpgradeStatusReport getUpgradeStatusReport(boolean details)
throws IOException {
return new UpgradeStatusReport(getVersion(), getUpgradeStatus(), false);
}
+ @Override
public int compareTo(Upgradeable o) {
if(this.getVersion() != o.getVersion())
return (getVersion() > o.getVersion() ? -1 : 1);
@@ -55,6 +59,7 @@ public abstract class UpgradeObject impl
o.getClass().getCanonicalName());
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof UpgradeObject)) {
return false;
@@ -62,6 +67,7 @@ public abstract class UpgradeObject impl
return this.compareTo((UpgradeObject)o) == 0;
}
+ @Override
public int hashCode() {
return new UOSignature(this).hashCode();
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java Fri Aug 3 19:00:15 2012
@@ -73,6 +73,7 @@ public class UpgradeObjectCollection {
}
}
+ @Override
public int compareTo(UOSignature o) {
if(this.version != o.version)
return (version < o.version ? -1 : 1);
@@ -82,6 +83,7 @@ public class UpgradeObjectCollection {
return className.compareTo(o.className);
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof UOSignature)) {
return false;
@@ -89,6 +91,7 @@ public class UpgradeObjectCollection {
return this.compareTo((UOSignature)o) == 0;
}
+ @Override
public int hashCode() {
return version ^ ((type==null)?0:type.hashCode())
^ ((className==null)?0:className.hashCode());
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java Fri Aug 3 19:00:15 2012
@@ -82,6 +82,7 @@ public class UpgradeStatusReport {
/**
* Print basic upgradeStatus details.
*/
+ @Override
public String toString() {
return getStatusText(false);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java Fri Aug 3 19:00:15 2012
@@ -34,29 +34,6 @@ public final class Util {
private final static Log LOG = LogFactory.getLog(Util.class.getName());
/**
- * Current system time. Do not use this to calculate a duration or interval
- * to sleep, because it will be broken by settimeofday. Instead, use
- * monotonicNow.
- * @return current time in msec.
- */
- public static long now() {
- return System.currentTimeMillis();
- }
-
- /**
- * Current time from some arbitrary time base in the past, counting in
- * milliseconds, and not affected by settimeofday or similar system clock
- * changes. This is appropriate to use when computing how much longer to
- * wait for an interval to expire.
- * @return a monotonic clock that counts in milliseconds.
- */
- public static long monotonicNow() {
- final long NANOSECONDS_PER_MILLISECOND = 1000000;
-
- return System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
- }
-
- /**
* Interprets the passed string as a URI. In case of error it
* assumes the specified string is a file.
*
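Both helpers removed here were consolidated into org.apache.hadoop.util.Time, which this commit imports throughout: Time.now() for timestamps and Time.monotonicNow() for durations. For reference, the two clocks as deleted above:

    /** Wall clock: can jump under settimeofday; use for timestamps only. */
    public static long now() {
      return System.currentTimeMillis();
    }

    /** Monotonic clock: arbitrary epoch, never steps; use for intervals. */
    public static long monotonicNow() {
      final long NANOSECONDS_PER_MILLISECOND = 1000000;
      return System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
    }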
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Fri Aug 3 19:00:15 2012
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;
@@ -226,7 +227,7 @@ class BPServiceActor implements Runnable
*/
void scheduleBlockReport(long delay) {
if (delay > 0) { // send BR after random delay
- lastBlockReport = System.currentTimeMillis()
+ lastBlockReport = Time.now()
- ( dnConf.blockReportInterval - DFSUtil.getRandom().nextInt((int)(delay)));
} else { // send at next heartbeat
lastBlockReport = lastHeartbeat - dnConf.blockReportInterval;
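Backdating lastBlockReport by (interval - random(delay)) makes the next report come due at a uniformly random point within [now, now + delay), which spreads the initial reports of simultaneously restarted datanodes instead of stampeding the NameNode. Isolated as a pure function (assumed Random source):

    import java.util.Random;

    // Returns the backdated "last report" time such that the next report,
    // due at lastReport + intervalMs, lands within [now, now + delayMs).
    static long jitteredLastReport(long now, long intervalMs, long delayMs,
        Random rand) {
      return delayMs > 0
          ? now - (intervalMs - rand.nextInt((int) delayMs))
          : now - intervalMs;                  // due immediately
    }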
@@ -561,7 +562,7 @@ class BPServiceActor implements Runnable
// or work arrives, and then iterate again.
//
long waitTime = dnConf.heartBeatInterval -
- (System.currentTimeMillis() - lastHeartbeat);
+ (Time.now() - lastHeartbeat);
synchronized(pendingIncrementalBR) {
if (waitTime > 0 && pendingReceivedRequests == 0) {
try {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java Fri Aug 3 19:00:15 2012
@@ -122,6 +122,7 @@ class BlockPoolManager {
try {
UserGroupInformation.getLoginUser().doAs(
new PrivilegedExceptionAction<Object>() {
+ @Override
public Object run() throws Exception {
for (BPOfferService bpos : offerServices) {
bpos.start();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Fri Aug 3 19:00:15 2012
@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.server.dat
import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Time;
/**
* Scans the block files under a block pool and verifies that the
@@ -90,7 +91,7 @@ class BlockPoolSliceScanner {
private long totalTransientErrors = 0;
private final AtomicInteger totalBlocksScannedInLastRun = new AtomicInteger(); // Used for test only
- private long currentPeriodStart = System.currentTimeMillis();
+ private long currentPeriodStart = Time.now();
private long bytesLeft = 0; // Bytes to scan in this period
private long totalBytesToScan = 0;
@@ -114,10 +115,12 @@ class BlockPoolSliceScanner {
this.block = block;
}
+ @Override
public int hashCode() {
return block.hashCode();
}
+ @Override
public boolean equals(Object other) {
return other instanceof BlockScanInfo &&
compareTo((BlockScanInfo)other) == 0;
@@ -127,6 +130,7 @@ class BlockPoolSliceScanner {
return (lastScanType == ScanType.NONE) ? 0 : lastScanTime;
}
+ @Override
public int compareTo(BlockScanInfo other) {
long t1 = lastScanTime;
long t2 = other.lastScanTime;
@@ -224,7 +228,7 @@ class BlockPoolSliceScanner {
long period = Math.min(scanPeriod,
Math.max(blockMap.size(),1) * 600 * 1000L);
int periodInt = Math.abs((int)period);
- return System.currentTimeMillis() - scanPeriod +
+ return Time.now() - scanPeriod +
DFSUtil.getRandom().nextInt(periodInt);
}
@@ -281,7 +285,7 @@ class BlockPoolSliceScanner {
info = new BlockScanInfo(block);
}
- long now = System.currentTimeMillis();
+ long now = Time.now();
info.lastScanType = type;
info.lastScanTime = now;
info.lastScanOk = scanOk;
@@ -358,7 +362,7 @@ class BlockPoolSliceScanner {
}
private synchronized void adjustThrottler() {
- long timeLeft = currentPeriodStart+scanPeriod - System.currentTimeMillis();
+ long timeLeft = currentPeriodStart+scanPeriod - Time.now();
long bw = Math.max(bytesLeft*1000/timeLeft, MIN_SCAN_RATE);
throttler.setBandwidth(Math.min(bw, MAX_SCAN_RATE));
}
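adjustThrottler spreads the remaining bytes over the remaining period: bytesLeft*1000/timeLeft converts bytes per millisecond into bytes per second, and the result is clamped to [MIN_SCAN_RATE, MAX_SCAN_RATE]. Isolated (and assuming msLeft > 0):

    static long scanBandwidth(long bytesLeft, long msLeft,
        long minRate, long maxRate) {
      long bw = Math.max(bytesLeft * 1000 / msLeft, minRate); // bytes/sec
      return Math.min(bw, maxRate);
    }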
@@ -481,7 +485,7 @@ class BlockPoolSliceScanner {
private boolean assignInitialVerificationTimes() {
//First updates the last verification times from the log file.
if (verificationLog != null) {
- long now = System.currentTimeMillis();
+ long now = Time.now();
RollingLogs.LineIterator logIterator = null;
try {
logIterator = verificationLog.logs.iterator(false);
@@ -529,7 +533,7 @@ class BlockPoolSliceScanner {
// Initially spread the block reads over half of scan period
// so that we don't keep scanning the blocks too quickly when restarted.
long verifyInterval = Math.min(scanPeriod/(2L * numBlocks), 10*60*1000L);
- long lastScanTime = System.currentTimeMillis() - scanPeriod;
+ long lastScanTime = Time.now() - scanPeriod;
if (!blockInfoSet.isEmpty()) {
BlockScanInfo info;
@@ -556,7 +560,7 @@ class BlockPoolSliceScanner {
// reset the byte counts :
bytesLeft = totalBytesToScan;
- currentPeriodStart = System.currentTimeMillis();
+ currentPeriodStart = Time.now();
}
void scanBlockPoolSlice() {
@@ -571,7 +575,7 @@ class BlockPoolSliceScanner {
scan();
} finally {
totalBlocksScannedInLastRun.set(processedBlocks.size());
- lastScanTime.set(System.currentTimeMillis());
+ lastScanTime.set(Time.now());
}
}
@@ -584,7 +588,7 @@ class BlockPoolSliceScanner {
while (datanode.shouldRun && !Thread.interrupted()
&& datanode.isBPServiceAlive(blockPoolId)) {
- long now = System.currentTimeMillis();
+ long now = Time.now();
synchronized (this) {
if ( now >= (currentPeriodStart + scanPeriod)) {
startNewPeriod();
@@ -642,7 +646,7 @@ class BlockPoolSliceScanner {
int total = blockInfoSet.size();
- long now = System.currentTimeMillis();
+ long now = Time.now();
Date date = new Date();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Fri Aug 3 19:00:15 2012
@@ -440,6 +440,7 @@ public class BlockPoolSliceStorage exten
// delete finalized.tmp dir in a separate thread
new Daemon(new Runnable() {
+ @Override
public void run() {
try {
deleteDir(tmpDir);
@@ -449,6 +450,7 @@ public class BlockPoolSliceStorage exten
LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
}
+ @Override
public String toString() {
return "Finalize " + dataDirPath;
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Fri Aug 3 19:00:15 2012
@@ -246,6 +246,7 @@ class BlockReceiver implements Closeable
/**
* close files.
*/
+ @Override
public void close() throws IOException {
IOException ioe = null;
if (syncOnClose && (out != null || checksumOut != null)) {
@@ -844,6 +845,7 @@ class BlockReceiver implements Closeable
try {
responder.join();
} catch (InterruptedException e) {
+ responder.interrupt();
throw new IOException("Interrupted receiveBlock");
}
responder = null;
@@ -1018,6 +1020,7 @@ class BlockReceiver implements Closeable
wait();
} catch (InterruptedException e) {
running = false;
+ Thread.currentThread().interrupt();
}
}
if(LOG.isDebugEnabled()) {
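Catching InterruptedException clears the thread's interrupt flag; the added Thread.currentThread().interrupt() re-asserts it so enclosing loops and callers can still observe the request to stop. The general idiom, with a hypothetical blocking queue:

    import java.util.concurrent.BlockingQueue;

    void drain(BlockingQueue<Runnable> queue) {
      boolean running = true;
      while (running && !Thread.currentThread().isInterrupted()) {
        try {
          queue.take().run();                  // blocking call
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();  // restore the flag
          running = false;                     // and exit cleanly
        }
      }
    }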
@@ -1031,6 +1034,7 @@ class BlockReceiver implements Closeable
* Thread to process incoming acks.
* @see java.lang.Runnable#run()
*/
+ @Override
public void run() {
boolean lastPacketInBlock = false;
final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Fri Aug 3 19:00:15 2012
@@ -28,6 +28,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.SocketException;
+import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Arrays;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.d
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ReadaheadPool;
import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
import org.apache.hadoop.io.nativeio.NativeIO;
@@ -329,6 +331,7 @@ class BlockSender implements java.io.Clo
/**
* close opened files.
*/
+ @Override
public void close() throws IOException {
if (blockInFd != null && shouldDropCacheBehindRead && isLongRead()) {
// drop the last few MB of the file from cache
@@ -484,27 +487,39 @@ class BlockSender implements java.io.Clo
// no need to flush since we know out is not a buffered stream
FileChannel fileCh = ((FileInputStream)blockIn).getChannel();
+ LongWritable waitTime = new LongWritable();
+ LongWritable transferTime = new LongWritable();
sockOut.transferToFully(fileCh, blockInPosition, dataLen,
- datanode.metrics.getSendDataPacketBlockedOnNetworkNanos(),
- datanode.metrics.getSendDataPacketTransferNanos());
+ waitTime, transferTime);
+ datanode.metrics.addSendDataPacketBlockedOnNetworkNanos(waitTime.get());
+ datanode.metrics.addSendDataPacketTransferNanos(transferTime.get());
blockInPosition += dataLen;
- } else {
+ } else {
// normal transfer
out.write(buf, 0, dataOff + dataLen);
}
} catch (IOException e) {
- /* Exception while writing to the client. Connection closure from
- * the other end is mostly the case and we do not care much about
- * it. But other things can go wrong, especially in transferTo(),
- * which we do not want to ignore.
- *
- * The message parsing below should not be considered as a good
- * coding example. NEVER do it to drive a program logic. NEVER.
- * It was done here because the NIO throws an IOException for EPIPE.
- */
- String ioem = e.getMessage();
- if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
- LOG.error("BlockSender.sendChunks() exception: ", e);
+ if (e instanceof SocketTimeoutException) {
+ /*
+ * writing to client timed out. This happens if the client reads
+ * part of a block and then decides not to read the rest (but leaves
+ * the socket open).
+ */
+ LOG.info("BlockSender.sendChunks() exception: ", e);
+ } else {
+ /* Exception while writing to the client. Connection closure from
+ * the other end is mostly the case and we do not care much about
+ * it. But other things can go wrong, especially in transferTo(),
+ * which we do not want to ignore.
+ *
+ * The message parsing below should not be considered as a good
+ * coding example. NEVER do it to drive a program logic. NEVER.
+ * It was done here because the NIO throws an IOException for EPIPE.
+ */
+ String ioem = e.getMessage();
+ if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
+ LOG.error("BlockSender.sendChunks() exception: ", e);
+ }
}
throw ioeToSocketException(e);
}
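Because SocketTimeoutException is an IOException subclass, the instanceof test lets the sender downgrade a stalled reader (client fetched part of a block and kept the socket open) to INFO, while the string matching on "Broken pipe"/"Connection reset" keeps filtering ordinary disconnects. Isolated as a classifier (this sketch also guards a null message, which the code above does not):

    import java.io.IOException;
    import java.net.SocketTimeoutException;
    import org.apache.commons.logging.Log;

    static void logSendFailure(Log log, IOException e) {
      if (e instanceof SocketTimeoutException) {
        log.info("client read timed out", e);        // benign stall
      } else {
        String m = e.getMessage();
        if (m == null || (!m.startsWith("Broken pipe")
            && !m.startsWith("Connection reset"))) {
          log.error("unexpected send failure", e);   // real problem
        }
      }
    }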
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Fri Aug 3 19:00:15 2012
@@ -63,6 +63,7 @@ public class DataBlockScanner implements
this.conf = conf;
}
+ @Override
public void run() {
String currentBpId = "";
boolean firstRun = true;
@@ -273,6 +274,7 @@ public class DataBlockScanner implements
public static class Servlet extends HttpServlet {
private static final long serialVersionUID = 1L;
+ @Override
public void doGet(HttpServletRequest request,
HttpServletResponse response) throws IOException {
response.setContentType("text/plain");
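[Note: this hunk and several below only add @Override to methods implemented from interfaces (Runnable.run, HttpServlet.doGet, and so on). Since Java 6 the annotation is legal on interface implementations, and it turns a mis-typed signature into a compile error rather than a silently unused method. A two-line illustration:

    Runnable task = new Runnable() {
        @Override
        public void run() { }  // compiles: run() overrides Runnable.run()
        // Renaming it to runn() while keeping @Override would fail to
        // compile, instead of quietly never being called.
    };
]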
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Aug 3 19:00:15 2012
@@ -121,6 +121,9 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -160,6 +163,7 @@ import org.apache.hadoop.util.DiskChecke
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON;
@@ -924,6 +928,7 @@ public class DataNode extends Configured
try {
return loginUgi
.doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
+ @Override
public InterDatanodeProtocol run() throws IOException {
return new InterDatanodeProtocolTranslatorPB(addr, loginUgi,
conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
@@ -965,7 +970,7 @@ public class DataNode extends Configured
int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
return "DS-" + rand + "-" + ip + "-" + port + "-"
- + System.currentTimeMillis();
+ + Time.now();
}
/** Ensure the authentication method is kerberos */
@@ -1363,6 +1368,7 @@ public class DataNode extends Configured
/**
* Do the deed, write the bytes
*/
+ @Override
public void run() {
xmitsInProgress.getAndIncrement();
Socket sock = null;
@@ -1642,7 +1648,7 @@ public class DataNode extends Configured
if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
LOG.error("-r, --rack arguments are not supported anymore. RackID " +
"resolution is handled by the NameNode.");
- System.exit(-1);
+ terminate(1);
} else if ("-rollback".equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLBACK;
} else if ("-regular".equalsIgnoreCase(cmd)) {
@@ -1697,15 +1703,15 @@ public class DataNode extends Configured
if (datanode != null)
datanode.join();
} catch (Throwable e) {
- LOG.error("Exception in secureMain", e);
- System.exit(-1);
+ LOG.fatal("Exception in secureMain", e);
+ terminate(1, e);
} finally {
- // We need to add System.exit here because either shutdown was called or
- // some disk related conditions like volumes tolerated or volumes required
+ // We need to terminate the process here because either shutdown was called
+ // or some disk related conditions like volumes tolerated or volumes required
// condition was not met. Also, In secure mode, control will go to Jsvc
- // and Datanode process hangs without System.exit.
+ // and Datanode process hangs if it does not exit.
LOG.warn("Exiting Datanode");
- System.exit(0);
+ terminate(0);
}
}
@@ -1719,6 +1725,7 @@ public class DataNode extends Configured
Daemon d = new Daemon(threadGroup, new Runnable() {
/** Recover a list of blocks. It is run by the primary datanode. */
+ @Override
public void run() {
for(RecoveringBlock b : blocks) {
try {
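[Note on the DataNode.java hunks above: the System.exit calls become ExitUtil.terminate so that tests can intercept process exits, and the fatal path terminate(1, e) records the throwable as well. The status also changes from -1 to 1; negative statuses wrap modulo 256 on exit anyway. Roughly, the ExitUtil idea reduces to the sketch below — an illustrative stand-in, not the real org.apache.hadoop.util.ExitUtil:

    final class ExitSketch {
        static class ExitException extends RuntimeException {
            final int status;
            ExitException(int status, String msg) {
                super(msg);
                this.status = status;
            }
        }
        private static volatile boolean systemExitDisabled = false;
        /** Tests call this, then assert on the thrown ExitException. */
        static void disableSystemExit() { systemExitDisabled = true; }
        static void terminate(int status, String msg) {
            if (systemExitDisabled) {
                throw new ExitException(status, msg);
            }
            System.exit(status);
        }
    }
]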
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Fri Aug 3 19:00:15 2012
@@ -587,6 +587,7 @@ public class DataStorage extends Storage
// 2. delete finalized.tmp dir in a separate thread
new Daemon(new Runnable() {
+ @Override
public void run() {
try {
deleteDir(tmpDir);
@@ -595,6 +596,7 @@ public class DataStorage extends Storage
}
LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
}
+ @Override
public String toString() { return "Finalize " + dataDirPath; }
}).start();
}
@@ -677,6 +679,7 @@ public class DataStorage extends Storage
throw new IOException("Cannot create directory " + to);
String[] blockNames = from.list(new java.io.FilenameFilter() {
+ @Override
public boolean accept(File dir, String name) {
return name.startsWith(BLOCK_FILE_PREFIX);
}
@@ -694,6 +697,7 @@ public class DataStorage extends Storage
// Now take care of the rest of the files and subdirectories
String[] otherNames = from.list(new java.io.FilenameFilter() {
+ @Override
public boolean accept(File dir, String name) {
return name.startsWith(BLOCK_SUBDIR_PREFIX)
|| name.startsWith(COPY_FILE_PREFIX);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Fri Aug 3 19:00:15 2012
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.da
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_ACCESS_TOKEN;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
import java.io.BufferedInputStream;
@@ -145,6 +145,7 @@ class DataXceiver extends Receiver imple
/**
* Read/write data from/to the DataXceiverServer.
*/
+ @Override
public void run() {
int opsProcessed = 0;
Op op = null;
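[Note: the import swap above is the pattern repeated throughout this commit — call sites move from System.currentTimeMillis() and the HDFS-local Util.now to the common org.apache.hadoop.util.Time.now(). Centralizing clock reads behind one class gives a single seam for timing; a stand-in sketch of the shape, not the real Time class:

    final class ClockSketch {
        private ClockSketch() { }
        /** Current wall-clock time in milliseconds since the epoch. */
        static long now() {
            return System.currentTimeMillis();
        }
    }

    // Typical elapsed-time use, mirroring dispatchBlocks() and others:
    //   long start = ClockSketch.now();
    //   ... work ...
    //   long elapsedMs = ClockSketch.now() - start;
]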
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Fri Aug 3 19:00:15 2012
@@ -60,6 +60,7 @@ public class DatanodeJspHelper {
InterruptedException {
return
user.doAs(new PrivilegedExceptionAction<DFSClient>() {
+ @Override
public DFSClient run() throws IOException {
return new DFSClient(NetUtils.createSocketAddr(addr), conf);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java Fri Aug 3 19:00:15 2012
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
/**
* Periodically scans the data directories for block and block metadata files.
@@ -87,6 +88,7 @@ public class DirectoryScanner implements
this.bpid = bpid;
}
+ @Override
public String toString() {
return "BlockPool " + bpid
+ " Total blocks: " + totalBlocks + ", missing metadata files:"
@@ -239,7 +241,7 @@ public class DirectoryScanner implements
void start() {
shouldRun = true;
long offset = DFSUtil.getRandom().nextInt((int) (scanPeriodMsecs/1000L)) * 1000L; //msec
- long firstScanTime = System.currentTimeMillis() + offset;
+ long firstScanTime = Time.now() + offset;
LOG.info("Periodic Directory Tree Verification scan starting at "
+ firstScanTime + " with interval " + scanPeriodMsecs);
masterThread.scheduleAtFixedRate(this, offset, scanPeriodMsecs,
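[Note: start() above schedules the first directory scan at a random whole-second offset within one scan period (the log line now stamped via Time.now()), so a fleet of datanodes does not hit its disks in lockstep. A self-contained sketch of the staggering, assuming an illustrative six-hour period:

    import java.util.Random;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    class StaggeredScanSketch {
        public static void main(String[] args) {
            final long periodMs = TimeUnit.HOURS.toMillis(6);
            // Random whole-second offset in [0, period), as in start().
            long offsetMs =
                new Random().nextInt((int) (periodMs / 1000L)) * 1000L;
            ScheduledExecutorService exec =
                Executors.newSingleThreadScheduledExecutor();
            exec.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    System.out.println("scan pass at "
                        + System.currentTimeMillis());
                }
            }, offsetMs, periodMs, TimeUnit.MILLISECONDS);
        }
    }
]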
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java Fri Aug 3 19:00:15 2012
@@ -47,6 +47,7 @@ class UpgradeManagerDatanode extends Upg
this.bpid = bpid;
}
+ @Override
public HdfsServerConstants.NodeType getType() {
return HdfsServerConstants.NodeType.DATA_NODE;
}
@@ -71,6 +72,7 @@ class UpgradeManagerDatanode extends Upg
* @return true if distributed upgrade is required or false otherwise
* @throws IOException
*/
+ @Override
public synchronized boolean startUpgrade() throws IOException {
if(upgradeState) { // upgrade is already in progress
assert currentUpgrades != null :
@@ -134,6 +136,7 @@ class UpgradeManagerDatanode extends Upg
+ "The upgrade object is not defined.");
}
+ @Override
public synchronized void completeUpgrade() throws IOException {
assert currentUpgrades != null :
"UpgradeManagerDatanode.currentUpgrades is null.";
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java Fri Aug 3 19:00:15 2012
@@ -36,6 +36,7 @@ public abstract class UpgradeObjectDatan
private DataNode dataNode = null;
private String bpid = null;
+ @Override
public HdfsServerConstants.NodeType getType() {
return HdfsServerConstants.NodeType.DATA_NODE;
}
@@ -96,6 +97,7 @@ public abstract class UpgradeObjectDatan
throw new IOException(errorMsg);
}
+ @Override
public void run() {
assert dataNode != null : "UpgradeObjectDatanode.dataNode is null";
while(dataNode.shouldRun) {
@@ -132,6 +134,7 @@ public abstract class UpgradeObjectDatan
* The data-node needs to re-confirm with the name-node that the upgrade
* is complete while other nodes are still upgrading.
*/
+ @Override
public UpgradeCommand completeUpgrade() throws IOException {
return new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS,
getVersion(), (short)100);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Fri Aug 3 19:00:15 2012
@@ -78,6 +78,7 @@ import org.apache.hadoop.util.DataChecks
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Time;
/**************************************************
* FSDataset manages a set of data blocks. Each block
@@ -263,6 +264,7 @@ class FsDatasetImpl implements FsDataset
/**
* Return the number of failed volumes in the FSDataset.
*/
+ @Override
public int getNumFailedVolumes() {
return volumes.numberOfFailedVolumes();
}
@@ -838,6 +840,10 @@ class FsDatasetImpl implements FsDataset
*/
@Override // FsDatasetSpi
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
+ if (Thread.interrupted()) {
+ // Don't allow data modifications from interrupted threads
+ throw new IOException("Cannot finalize block from Interrupted Thread");
+ }
ReplicaInfo replicaInfo = getReplicaInfo(b);
if (replicaInfo.getState() == ReplicaState.FINALIZED) {
// this is legal, when recovery happens on a file that has
@@ -1138,7 +1144,7 @@ class FsDatasetImpl implements FsDataset
}
// Otherwise remove blocks for the failed volumes
- long mlsec = System.currentTimeMillis();
+ long mlsec = Time.now();
synchronized (this) {
for (FsVolumeImpl fv: failedVols) {
for (String bpid : fv.getBlockPoolList()) {
@@ -1157,7 +1163,7 @@ class FsDatasetImpl implements FsDataset
}
}
} // end of sync
- mlsec = System.currentTimeMillis() - mlsec;
+ mlsec = Time.now() - mlsec;
LOG.warn("Removed " + removedBlocks + " out of " + totalBlocks +
"(took " + mlsec + " millisecs)");
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java Fri Aug 3 19:00:15 2012
@@ -29,6 +29,7 @@ import org.apache.hadoop.metrics2.annota
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.metrics2.source.JvmMetrics;
@@ -74,19 +75,54 @@ public class DataNodeMetrics {
@Metric MutableRate heartbeats;
@Metric MutableRate blockReports;
@Metric MutableRate packetAckRoundTripTimeNanos;
-
+ MutableQuantiles[] packetAckRoundTripTimeNanosQuantiles;
+
@Metric MutableRate flushNanos;
+ MutableQuantiles[] flushNanosQuantiles;
+
@Metric MutableRate fsyncNanos;
+ MutableQuantiles[] fsyncNanosQuantiles;
@Metric MutableRate sendDataPacketBlockedOnNetworkNanos;
+ MutableQuantiles[] sendDataPacketBlockedOnNetworkNanosQuantiles;
@Metric MutableRate sendDataPacketTransferNanos;
+ MutableQuantiles[] sendDataPacketTransferNanosQuantiles;
+
final MetricsRegistry registry = new MetricsRegistry("datanode");
final String name;
- public DataNodeMetrics(String name, String sessionId) {
+ public DataNodeMetrics(String name, String sessionId, int[] intervals) {
this.name = name;
registry.tag(SessionId, sessionId);
+
+ final int len = intervals.length;
+ packetAckRoundTripTimeNanosQuantiles = new MutableQuantiles[len];
+ flushNanosQuantiles = new MutableQuantiles[len];
+ fsyncNanosQuantiles = new MutableQuantiles[len];
+ sendDataPacketBlockedOnNetworkNanosQuantiles = new MutableQuantiles[len];
+ sendDataPacketTransferNanosQuantiles = new MutableQuantiles[len];
+
+ for (int i = 0; i < len; i++) {
+ int interval = intervals[i];
+ packetAckRoundTripTimeNanosQuantiles[i] = registry.newQuantiles(
+ "packetAckRoundTripTimeNanos" + interval + "s",
+ "Packet Ack RTT in ns", "ops", "latency", interval);
+ flushNanosQuantiles[i] = registry.newQuantiles(
+ "flushNanos" + interval + "s",
+ "Disk flush latency in ns", "ops", "latency", interval);
+ fsyncNanosQuantiles[i] = registry.newQuantiles(
+ "fsyncNanos" + interval + "s", "Disk fsync latency in ns",
+ "ops", "latency", interval);
+ sendDataPacketBlockedOnNetworkNanosQuantiles[i] = registry.newQuantiles(
+ "sendDataPacketBlockedOnNetworkNanos" + interval + "s",
+ "Time blocked on network while sending a packet in ns",
+ "ops", "latency", interval);
+ sendDataPacketTransferNanosQuantiles[i] = registry.newQuantiles(
+ "sendDataPacketTransferNanos" + interval + "s",
+ "Time reading from disk and writing to network while sending " +
+ "a packet in ns", "ops", "latency", interval);
+ }
}
public static DataNodeMetrics create(Configuration conf, String dnName) {
@@ -94,8 +130,15 @@ public class DataNodeMetrics {
MetricsSystem ms = DefaultMetricsSystem.instance();
JvmMetrics.create("DataNode", sessionId, ms);
String name = "DataNodeActivity-"+ (dnName.isEmpty()
- ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt() : dnName.replace(':', '-'));
- return ms.register(name, null, new DataNodeMetrics(name, sessionId));
+ ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt()
+ : dnName.replace(':', '-'));
+
+ // Percentile measurement is off by default, by watching no intervals
+ int[] intervals =
+ conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
+
+ return ms.register(name, null, new DataNodeMetrics(name, sessionId,
+ intervals));
}
public String name() { return name; }
@@ -166,14 +209,23 @@ public class DataNodeMetrics {
public void addPacketAckRoundTripTimeNanos(long latencyNanos) {
packetAckRoundTripTimeNanos.add(latencyNanos);
+ for (MutableQuantiles q : packetAckRoundTripTimeNanosQuantiles) {
+ q.add(latencyNanos);
+ }
}
public void addFlushNanos(long latencyNanos) {
flushNanos.add(latencyNanos);
+ for (MutableQuantiles q : flushNanosQuantiles) {
+ q.add(latencyNanos);
+ }
}
public void addFsyncNanos(long latencyNanos) {
fsyncNanos.add(latencyNanos);
+ for (MutableQuantiles q : fsyncNanosQuantiles) {
+ q.add(latencyNanos);
+ }
}
public void shutdown() {
@@ -196,12 +248,18 @@ public class DataNodeMetrics {
public void incrBlocksGetLocalPathInfo() {
blocksGetLocalPathInfo.incr();
}
-
- public MutableRate getSendDataPacketBlockedOnNetworkNanos() {
- return sendDataPacketBlockedOnNetworkNanos;
+
+ public void addSendDataPacketBlockedOnNetworkNanos(long latencyNanos) {
+ sendDataPacketBlockedOnNetworkNanos.add(latencyNanos);
+ for (MutableQuantiles q : sendDataPacketBlockedOnNetworkNanosQuantiles) {
+ q.add(latencyNanos);
+ }
}
-
- public MutableRate getSendDataPacketTransferNanos() {
- return sendDataPacketTransferNanos;
+
+ public void addSendDataPacketTransferNanos(long latencyNanos) {
+ sendDataPacketTransferNanos.add(latencyNanos);
+ for (MutableQuantiles q : sendDataPacketTransferNanosQuantiles) {
+ q.add(latencyNanos);
+ }
}
}
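[Note on the DataNodeMetrics.java diff above: the constructor now takes rollover intervals from DFS_METRICS_PERCENTILES_INTERVALS_KEY (via conf.getInts, so an unset key yields an empty array and percentile tracking stays off by default), builds one MutableQuantiles per interval per latency metric, and every add* method feeds all of them. This is also why BlockSender earlier in this diff passes LongWritable holders and calls the new addSendDataPacket*Nanos methods instead of pulling MutableRate objects out. The shape, with illustrative stand-in types rather than the real metrics2 classes:

    class QuantileWindowSketch {
        final int rolloverSecs;
        QuantileWindowSketch(int rolloverSecs) {
            this.rolloverSecs = rolloverSecs;
        }
        void add(long sample) { /* feed a streaming quantile estimator */ }
    }

    class LatencyMetricSketch {
        private final QuantileWindowSketch[] windows;
        LatencyMetricSketch(int[] intervalsSecs) {
            windows = new QuantileWindowSketch[intervalsSecs.length];
            for (int i = 0; i < intervalsSecs.length; i++) {
                windows[i] = new QuantileWindowSketch(intervalsSecs[i]);
            }
        }
        void add(long latencyNanos) {
            // Empty config => empty array => this loop is a no-op,
            // which is how percentile measurement stays off by default.
            for (QuantileWindowSketch w : windows) {
                w.add(latencyNanos);
            }
        }
    }
]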
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Fri Aug 3 19:00:15 2012
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.da
import java.io.IOException;
import java.io.InputStream;
-import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
@@ -40,7 +39,6 @@ import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
-import javax.ws.rs.core.StreamingOutput;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -411,31 +409,10 @@ public class DatanodeWebHdfsMethods {
IOUtils.cleanup(LOG, dfsclient);
throw ioe;
}
- final HdfsDataInputStream dis = in;
- final StreamingOutput streaming = new StreamingOutput() {
- @Override
- public void write(final OutputStream out) throws IOException {
- final Long n = length.getValue();
- HdfsDataInputStream dfsin = dis;
- DFSClient client = dfsclient;
- try {
- if (n == null) {
- IOUtils.copyBytes(dfsin, out, b);
- } else {
- IOUtils.copyBytes(dfsin, out, n, false);
- }
- dfsin.close();
- dfsin = null;
- dfsclient.close();
- client = null;
- } finally {
- IOUtils.cleanup(LOG, dfsin);
- IOUtils.cleanup(LOG, client);
- }
- }
- };
-
- return Response.ok(streaming).type(
+
+ final long n = length.getValue() != null? length.getValue()
+ : in.getVisibleLength();
+ return Response.ok(new OpenEntity(in, n, dfsclient)).type(
MediaType.APPLICATION_OCTET_STREAM).build();
}
case GETFILECHECKSUM:
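[Note on the OPEN case above: the anonymous StreamingOutput, whose cleanup lived inside the write callback, gives way to an OpenEntity carrying the stream, the byte count (the requested length, or the replica's visible length when none was given), and the DFSClient whose lifetime is tied to it. OpenEntity is introduced elsewhere in this commit; the sketch below reconstructs the ownership shape only, not its actual code:

    import java.io.Closeable;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    class OpenEntitySketch {
        private final InputStream in;
        private final long length;
        private final Closeable client;  // the DFSClient backing the stream

        OpenEntitySketch(InputStream in, long length, Closeable client) {
            this.in = in;
            this.length = length;
            this.client = client;
        }

        /** Copy exactly 'length' bytes, then release stream and client. */
        void writeTo(OutputStream out) throws IOException {
            try {
                byte[] buf = new byte[4096];
                long remaining = length;
                int n;
                while (remaining > 0 && (n = in.read(buf, 0,
                        (int) Math.min(buf.length, remaining))) != -1) {
                    out.write(buf, 0, n);
                    remaining -= n;
                }
            } finally {
                // Close both even if the first close throws.
                try { in.close(); } finally { client.close(); }
            }
        }
    }
]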
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Fri Aug 3 19:00:15 2012
@@ -364,6 +364,7 @@ public class BackupNode extends NameNode
} else {
nsInfo.validateStorage(storage);
}
+ bnImage.initEditLog();
setRegistration();
NamenodeRegistration nnReg = null;
while(!isStopRequested()) {