You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/10/29 15:09:53 UTC
svn commit: r1403301 [3/3] - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./
src/contrib/bkjournal/src/main/proto/ src/contrib/libwebhdfs/src/
src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop...
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Mon Oct 29 14:09:47 2012
@@ -950,8 +950,8 @@ public class BlockManager {
datanodes.append(node).append(" ");
}
if (datanodes.length() != 0) {
- NameNode.stateChangeLog.info("BLOCK* addToInvalidates: "
- + b + " to " + datanodes.toString());
+ NameNode.stateChangeLog.info("BLOCK* addToInvalidates: " + b + " "
+ + datanodes);
}
}
@@ -972,7 +972,7 @@ public class BlockManager {
// thread of Datanode reports bad block before Block reports are sent
// by the Datanode on startup
NameNode.stateChangeLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
- + blk + " not found.");
+ + blk + " not found");
return;
}
markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason), dn);
@@ -1026,7 +1026,7 @@ public class BlockManager {
NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: postponing " +
"invalidation of " + b + " on " + dn + " because " +
nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
- "with potentially out-of-date block reports.");
+ "with potentially out-of-date block reports");
postponeBlock(b.corrupted);
} else if (nr.liveReplicas() >= 1) {
@@ -1039,7 +1039,7 @@ public class BlockManager {
}
} else {
NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + b
- + " on " + dn + " is the only copy and was not deleted.");
+ + " on " + dn + " is the only copy and was not deleted");
}
}
@@ -1160,9 +1160,8 @@ public class BlockManager {
(blockHasEnoughRacks(block)) ) {
neededReplications.remove(block, priority); // remove from neededReplications
neededReplications.decrementReplicationIndex(priority);
- NameNode.stateChangeLog.info("BLOCK* "
- + "Removing block " + block
- + " from neededReplications as it has enough replicas.");
+ NameNode.stateChangeLog.info("BLOCK* Removing " + block
+ + " from neededReplications as it has enough replicas");
continue;
}
}
@@ -1236,9 +1235,8 @@ public class BlockManager {
neededReplications.remove(block, priority); // remove from neededReplications
neededReplications.decrementReplicationIndex(priority);
rw.targets = null;
- NameNode.stateChangeLog.info("BLOCK* "
- + "Removing block " + block
- + " from neededReplications as it has enough replicas.");
+ NameNode.stateChangeLog.info("BLOCK* Removing " + block
+ + " from neededReplications as it has enough replicas");
continue;
}
}
@@ -1290,10 +1288,8 @@ public class BlockManager {
targetList.append(' ');
targetList.append(targets[k]);
}
- NameNode.stateChangeLog.info(
- "BLOCK* ask "
- + rw.srcNode + " to replicate "
- + rw.block + " to " + targetList);
+ NameNode.stateChangeLog.info("BLOCK* ask " + rw.srcNode
+ + " to replicate " + rw.block + " to " + targetList);
}
}
}
@@ -1527,10 +1523,9 @@ public class BlockManager {
boolean staleBefore = node.areBlockContentsStale();
node.receivedBlockReport();
if (staleBefore && !node.areBlockContentsStale()) {
- LOG.info("BLOCK* processReport: " +
- "Received first block report from " + node +
- " after becoming active. Its block contents are no longer" +
- " considered stale.");
+ LOG.info("BLOCK* processReport: Received first block report from "
+ + node + " after becoming active. Its block contents are no longer"
+ + " considered stale");
rescanPostponedMisreplicatedBlocks();
}
@@ -1601,9 +1596,9 @@ public class BlockManager {
addStoredBlock(b, node, null, true);
}
for (Block b : toInvalidate) {
- NameNode.stateChangeLog.info("BLOCK* processReport: block "
+ NameNode.stateChangeLog.info("BLOCK* processReport: "
+ b + " on " + node + " size " + b.getNumBytes()
- + " does not belong to any file.");
+ + " does not belong to any file");
addToInvalidates(b, node);
}
for (BlockToMarkCorrupt b : toCorrupt) {
@@ -1870,7 +1865,7 @@ assert storedBlock.findDatanode(dn) < 0
int count = pendingDNMessages.count();
if (count > 0) {
LOG.info("Processing " + count + " messages from DataNodes " +
- "that were previously queued during standby state.");
+ "that were previously queued during standby state");
}
processQueuedMessages(pendingDNMessages.takeAll());
assert pendingDNMessages.count() == 0;
@@ -1927,9 +1922,9 @@ assert storedBlock.findDatanode(dn) < 0
// the block report got a little bit delayed after the pipeline
// closed. So, ignore this report, assuming we will get a
// FINALIZED replica later. See HDFS-2791
- LOG.info("Received an RBW replica for block " + storedBlock +
- " on " + dn + ": ignoring it, since the block is " +
- "complete with the same generation stamp.");
+ LOG.info("Received an RBW replica for " + storedBlock +
+ " on " + dn + ": ignoring it, since it is " +
+ "complete with the same genstamp");
return null;
} else {
return new BlockToMarkCorrupt(storedBlock,
@@ -2041,7 +2036,7 @@ assert storedBlock.findDatanode(dn) < 0
// If this block does not belong to any file, then we are done.
NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
+ node + " size " + block.getNumBytes()
- + " but it does not belong to any file.");
+ + " but it does not belong to any file");
// we could add this block to invalidate set of this datanode.
// it will happen in next block report otherwise.
return block;
@@ -2158,9 +2153,8 @@ assert storedBlock.findDatanode(dn) < 0
try {
invalidateBlock(new BlockToMarkCorrupt(blk, null), node);
} catch (IOException e) {
- NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " +
- "error in deleting bad block " + blk +
- " on " + node, e);
+ NameNode.stateChangeLog.info("invalidateCorruptReplicas "
+ + "error in deleting bad block " + blk + " on " + node, e);
gotException = true;
}
}
@@ -2308,7 +2302,7 @@ assert storedBlock.findDatanode(dn) < 0
DatanodeDescriptor cur = it.next();
if (cur.areBlockContentsStale()) {
LOG.info("BLOCK* processOverReplicatedBlock: " +
- "Postponing processing of over-replicated block " +
+ "Postponing processing of over-replicated " +
block + " since datanode " + cur + " does not yet have up-to-date " +
"block information.");
postponeBlock(block);
@@ -2398,7 +2392,7 @@ assert storedBlock.findDatanode(dn) < 0
//
addToInvalidates(b, cur);
NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
- +"("+cur+", "+b+") is added to invalidated blocks set.");
+ +"("+cur+", "+b+") is added to invalidated blocks set");
}
}
@@ -2540,7 +2534,7 @@ assert storedBlock.findDatanode(dn) < 0
for (Block b : toInvalidate) {
NameNode.stateChangeLog.info("BLOCK* addBlock: block "
+ b + " on " + node + " size " + b.getNumBytes()
- + " does not belong to any file.");
+ + " does not belong to any file");
addToInvalidates(b, node);
}
for (BlockToMarkCorrupt b : toCorrupt) {
@@ -2651,7 +2645,7 @@ assert storedBlock.findDatanode(dn) < 0
* of live nodes. If in startup safemode (or its 30-sec extension period),
* then it gains speed by ignoring issues of excess replicas or nodes
* that are decommissioned or in process of becoming decommissioned.
- * If not in startup, then it calls {@link countNodes()} instead.
+ * If not in startup, then it calls {@link #countNodes(Block)} instead.
*
* @param b - the block being tested
* @return count of live nodes for this block
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Mon Oct 29 14:09:47 2012
@@ -362,8 +362,7 @@ public class DatanodeDescriptor extends
void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
if(recoverBlocks.contains(block)) {
// this prevents adding the same block twice to the recovery queue
- BlockManager.LOG.info("Block " + block +
- " is already in the recovery queue.");
+ BlockManager.LOG.info(block + " is already in the recovery queue");
return;
}
recoverBlocks.offer(block);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Mon Oct 29 14:09:47 2012
@@ -584,7 +584,7 @@ public class DatanodeManager {
if (node.isDecommissionInProgress()) {
if (!blockManager.isReplicationInProgress(node)) {
node.setDecommissioned();
- LOG.info("Decommission complete for node " + node);
+ LOG.info("Decommission complete for " + node);
}
}
return node.isDecommissioned();
@@ -593,8 +593,8 @@ public class DatanodeManager {
/** Start decommissioning the specified datanode. */
private void startDecommission(DatanodeDescriptor node) {
if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
- LOG.info("Start Decommissioning node " + node + " with " +
- node.numBlocks() + " blocks.");
+ LOG.info("Start Decommissioning " + node + " with " +
+ node.numBlocks() + " blocks");
heartbeatManager.startDecommission(node);
node.decommissioningStatus.setStartTime(now());
@@ -606,7 +606,7 @@ public class DatanodeManager {
/** Stop decommissioning the specified datanodes. */
void stopDecommission(DatanodeDescriptor node) {
if (node.isDecommissionInProgress() || node.isDecommissioned()) {
- LOG.info("Stop Decommissioning node " + node);
+ LOG.info("Stop Decommissioning " + node);
heartbeatManager.stopDecommission(node);
blockManager.processOverReplicatedBlocksOnReCommission(node);
}
@@ -658,17 +658,15 @@ public class DatanodeManager {
throw new DisallowedDatanodeException(nodeReg);
}
- NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
- + "node registration from " + nodeReg
- + " storage " + nodeReg.getStorageID());
+ NameNode.stateChangeLog.info("BLOCK* registerDatanode: from "
+ + nodeReg + " storage " + nodeReg.getStorageID());
DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr(
nodeReg.getIpAddr(), nodeReg.getXferPort());
if (nodeN != null && nodeN != nodeS) {
- NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
- + "node from name: " + nodeN);
+ NameNode.LOG.info("BLOCK* registerDatanode: " + nodeN);
// nodeN previously served a different data storage,
// which is not served by anybody anymore.
removeDatanode(nodeN);
@@ -683,8 +681,8 @@ public class DatanodeManager {
// storage. We do not need to remove old data blocks, the delta will
// be calculated on the next block report from the datanode
if(NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("BLOCK* NameSystem.registerDatanode: "
- + "node restarted.");
+ NameNode.stateChangeLog.debug("BLOCK* registerDatanode: "
+ + "node restarted.");
}
} else {
// nodeS is found
@@ -696,11 +694,9 @@ public class DatanodeManager {
value in "VERSION" file under the data directory of the datanode,
but this might not work if VERSION file format has changed
*/
- NameNode.stateChangeLog.info( "BLOCK* NameSystem.registerDatanode: "
- + "node " + nodeS
- + " is replaced by " + nodeReg +
- " with the same storageID " +
- nodeReg.getStorageID());
+ NameNode.stateChangeLog.info("BLOCK* registerDatanode: " + nodeS
+ + " is replaced by " + nodeReg + " with the same storageID "
+ + nodeReg.getStorageID());
}
// update cluster map
getNetworkTopology().remove(nodeS);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java Mon Oct 29 14:09:47 2012
@@ -433,7 +433,7 @@ public abstract class Storage extends St
if (!root.exists()) {
// storage directory does not exist
if (startOpt != StartupOption.FORMAT) {
- LOG.info("Storage directory " + rootPath + " does not exist.");
+ LOG.info("Storage directory " + rootPath + " does not exist");
return StorageState.NON_EXISTENT;
}
LOG.info(rootPath + " does not exist. Creating ...");
@@ -442,7 +442,7 @@ public abstract class Storage extends St
}
// or is inaccessible
if (!root.isDirectory()) {
- LOG.info(rootPath + "is not a directory.");
+ LOG.info(rootPath + "is not a directory");
return StorageState.NON_EXISTENT;
}
if (!root.canWrite()) {
@@ -539,34 +539,34 @@ public abstract class Storage extends St
switch(curState) {
case COMPLETE_UPGRADE: // mv previous.tmp -> previous
LOG.info("Completing previous upgrade for storage directory "
- + rootPath + ".");
+ + rootPath);
rename(getPreviousTmp(), getPreviousDir());
return;
case RECOVER_UPGRADE: // mv previous.tmp -> current
LOG.info("Recovering storage directory " + rootPath
- + " from previous upgrade.");
+ + " from previous upgrade");
if (curDir.exists())
deleteDir(curDir);
rename(getPreviousTmp(), curDir);
return;
case COMPLETE_ROLLBACK: // rm removed.tmp
LOG.info("Completing previous rollback for storage directory "
- + rootPath + ".");
+ + rootPath);
deleteDir(getRemovedTmp());
return;
case RECOVER_ROLLBACK: // mv removed.tmp -> current
LOG.info("Recovering storage directory " + rootPath
- + " from previous rollback.");
+ + " from previous rollback");
rename(getRemovedTmp(), curDir);
return;
case COMPLETE_FINALIZE: // rm finalized.tmp
LOG.info("Completing previous finalize for storage directory "
- + rootPath + ".");
+ + rootPath);
deleteDir(getFinalizedTmp());
return;
case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint
LOG.info("Completing previous checkpoint for storage directory "
- + rootPath + ".");
+ + rootPath);
File prevCkptDir = getPreviousCheckpoint();
if (prevCkptDir.exists())
deleteDir(prevCkptDir);
@@ -574,7 +574,7 @@ public abstract class Storage extends St
return;
case RECOVER_CHECKPOINT: // mv lastcheckpoint.tmp -> current
LOG.info("Recovering storage directory " + rootPath
- + " from failed checkpoint.");
+ + " from failed checkpoint");
if (curDir.exists())
deleteDir(curDir);
rename(getLastCheckpointTmp(), curDir);
@@ -629,7 +629,7 @@ public abstract class Storage extends St
FileLock newLock = tryLock();
if (newLock == null) {
String msg = "Cannot lock storage " + this.root
- + ". The directory is already locked.";
+ + ". The directory is already locked";
LOG.info(msg);
throw new IOException(msg);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Mon Oct 29 14:09:47 2012
@@ -75,14 +75,18 @@ class BPServiceActor implements Runnable
BPOfferService bpos;
- long lastBlockReport = 0;
- long lastDeletedReport = 0;
+ // lastBlockReport, lastDeletedReport and lastHeartbeat may be assigned/read
+ // by testing threads (through BPServiceActor#triggerXXX), while also
+ // assigned/read by the actor thread. Thus they should be declared as volatile
+ // to make sure the "happens-before" consistency.
+ volatile long lastBlockReport = 0;
+ volatile long lastDeletedReport = 0;
boolean resetBlockReportTime = true;
Thread bpThread;
DatanodeProtocolClientSideTranslatorPB bpNamenode;
- private long lastHeartbeat = 0;
+ private volatile long lastHeartbeat = 0;
private volatile boolean initialized = false;
/**
@@ -637,8 +641,7 @@ class BPServiceActor implements Runnable
try {
Thread.sleep(millis);
} catch (InterruptedException ie) {
- LOG.info("BPOfferService " + this +
- " interrupted while " + stateString);
+ LOG.info("BPOfferService " + this + " interrupted while " + stateString);
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java Mon Oct 29 14:09:47 2012
@@ -106,15 +106,15 @@ class BlockPoolManager {
}
}
- void shutDownAll() throws InterruptedException {
- BPOfferService[] bposArray = this.getAllNamenodeThreads();
-
- for (BPOfferService bpos : bposArray) {
- bpos.stop(); //interrupts the threads
- }
- //now join
- for (BPOfferService bpos : bposArray) {
- bpos.join();
+ void shutDownAll(BPOfferService[] bposArray) throws InterruptedException {
+ if (bposArray != null) {
+ for (BPOfferService bpos : bposArray) {
+ bpos.stop(); //interrupts the threads
+ }
+ //now join
+ for (BPOfferService bpos : bposArray) {
+ bpos.join();
+ }
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Mon Oct 29 14:09:47 2012
@@ -154,7 +154,7 @@ class BlockPoolSliceScanner {
}
this.scanPeriod = hours * 3600 * 1000;
LOG.info("Periodic Block Verification Scanner initialized with interval "
- + hours + " hours for block pool " + bpid + ".");
+ + hours + " hours for block pool " + bpid);
// get the list of blocks and arrange them in random order
List<Block> arr = dataset.getFinalizedBlocks(blockPoolId);
@@ -310,12 +310,12 @@ class BlockPoolSliceScanner {
}
private void handleScanFailure(ExtendedBlock block) {
- LOG.info("Reporting bad block " + block);
+ LOG.info("Reporting bad " + block);
try {
datanode.reportBadBlocks(block);
} catch (IOException ie) {
// it is bad, but not bad enough to shutdown the scanner
- LOG.warn("Cannot report bad block=" + block.getBlockId());
+ LOG.warn("Cannot report bad " + block.getBlockId());
}
}
@@ -411,7 +411,7 @@ class BlockPoolSliceScanner {
// If the block does not exist anymore, then it's not an error
if (!dataset.contains(block)) {
- LOG.info(block + " is no longer in the dataset.");
+ LOG.info(block + " is no longer in the dataset");
deleteBlock(block.getLocalBlock());
return;
}
@@ -424,7 +424,7 @@ class BlockPoolSliceScanner {
// is a block really deleted by mistake, DirectoryScan should catch it.
if (e instanceof FileNotFoundException ) {
LOG.info("Verification failed for " + block +
- ". It may be due to race with write.");
+ " - may be due to race with write");
deleteBlock(block.getLocalBlock());
return;
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Mon Oct 29 14:09:47 2012
@@ -332,7 +332,7 @@ public class BlockPoolSliceStorage exten
// 4. Rename <SD>/current/<bpid>/previous.tmp to <SD>/current/<bpid>/previous
rename(bpTmpDir, bpPrevDir);
LOG.info("Upgrade of block pool " + blockpoolID + " at " + bpSd.getRoot()
- + " is complete.");
+ + " is complete");
}
/**
@@ -409,7 +409,7 @@ public class BlockPoolSliceStorage exten
// 3. delete removed.tmp dir
deleteDir(tmpDir);
- LOG.info("Rollback of " + bpSd.getRoot() + " is complete.");
+ LOG.info("Rollback of " + bpSd.getRoot() + " is complete");
}
/*
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Mon Oct 29 14:09:47 2012
@@ -357,7 +357,7 @@ class BlockReceiver implements Closeable
private void handleMirrorOutError(IOException ioe) throws IOException {
String bpid = block.getBlockPoolId();
LOG.info(datanode.getDNRegistrationForBP(bpid)
- + ":Exception writing block " + block + " to mirror " + mirrorAddr, ioe);
+ + ":Exception writing " + block + " to mirror " + mirrorAddr, ioe);
if (Thread.interrupted()) { // shut down if the thread is interrupted
throw ioe;
} else { // encounter an error while writing to mirror
@@ -379,16 +379,16 @@ class BlockReceiver implements Closeable
LOG.warn("Checksum error in block " + block + " from " + inAddr, ce);
if (srcDataNode != null) {
try {
- LOG.info("report corrupt block " + block + " from datanode " +
+ LOG.info("report corrupt " + block + " from datanode " +
srcDataNode + " to namenode");
datanode.reportRemoteBadBlock(srcDataNode, block);
} catch (IOException e) {
- LOG.warn("Failed to report bad block " + block +
+ LOG.warn("Failed to report bad " + block +
" from datanode " + srcDataNode + " to namenode");
}
}
- throw new IOException("Unexpected checksum mismatch " +
- "while writing " + block + " from " + inAddr);
+ throw new IOException("Unexpected checksum mismatch while writing "
+ + block + " from " + inAddr);
}
}
@@ -518,7 +518,7 @@ class BlockReceiver implements Closeable
// If this is a partial chunk, then read in pre-existing checksum
if (firstByteInBlock % bytesPerChecksum != 0) {
LOG.info("Packet starts at " + firstByteInBlock +
- " for block " + block +
+ " for " + block +
" which is not a multiple of bytesPerChecksum " +
bytesPerChecksum);
long offsetInChecksum = BlockMetadataHeader.getHeaderSize() +
@@ -662,7 +662,7 @@ class BlockReceiver implements Closeable
}
} catch (IOException ioe) {
- LOG.info("Exception in receiveBlock for " + block, ioe);
+ LOG.info("Exception for " + block, ioe);
throw ioe;
} finally {
if (!responderClosed) { // Abnormal termination of the flow above
@@ -733,10 +733,9 @@ class BlockReceiver implements Closeable
int checksumSize = diskChecksum.getChecksumSize();
blkoff = blkoff - sizePartialChunk;
LOG.info("computePartialChunkCrc sizePartialChunk " +
- sizePartialChunk +
- " block " + block +
- " offset in block " + blkoff +
- " offset in metafile " + ckoff);
+ sizePartialChunk + " " + block +
+ " block offset " + blkoff +
+ " metafile offset " + ckoff);
// create an input stream from the block file
// and read in partial crc chunk into temporary buffer
@@ -758,7 +757,7 @@ class BlockReceiver implements Closeable
partialCrc = DataChecksum.newDataChecksum(
diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
partialCrc.update(buf, 0, sizePartialChunk);
- LOG.info("Read in partial CRC chunk from disk for block " + block);
+ LOG.info("Read in partial CRC chunk from disk for " + block);
// paranoia! verify that the pre-computed crc matches what we
// recalculated just now
@@ -973,7 +972,7 @@ class BlockReceiver implements Closeable
"HDFS_WRITE", clientname, offset,
dnR.getStorageID(), block, endTime-startTime));
} else {
- LOG.info("Received block " + block + " of size "
+ LOG.info("Received " + block + " size "
+ block.getNumBytes() + " from " + inAddr);
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Mon Oct 29 14:09:47 2012
@@ -503,7 +503,7 @@ class BlockSender implements java.io.Clo
* part of a block and then decides not to read the rest (but leaves
* the socket open).
*/
- LOG.info("BlockSender.sendChunks() exception: ", e);
+ LOG.info("exception: ", e);
} else {
/* Exception while writing to the client. Connection closure from
* the other end is mostly the case and we do not care much about
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Oct 29 14:09:47 2012
@@ -481,8 +481,7 @@ public class DataNode extends Configured
blockScanner = new DataBlockScanner(this, data, conf);
blockScanner.start();
} else {
- LOG.info("Periodic Block Verification scan is disabled because " +
- reason + ".");
+ LOG.info("Periodic Block Verification scan disabled because " + reason);
}
}
@@ -511,7 +510,7 @@ public class DataNode extends Configured
directoryScanner.start();
} else {
LOG.info("Periodic Directory Tree Verification scan is disabled because " +
- reason + ".");
+ reason);
}
}
@@ -1095,6 +1094,12 @@ public class DataNode extends Configured
}
}
+ // We need to make a copy of the original blockPoolManager#offerServices to
+ // make sure blockPoolManager#shutDownAll() can still access all the
+ // BPOfferServices, since after setting DataNode#shouldRun to false the
+ // offerServices may be modified.
+ BPOfferService[] bposArray = this.blockPoolManager == null ? null
+ : this.blockPoolManager.getAllNamenodeThreads();
this.shouldRun = false;
shutdownPeriodicScanners();
@@ -1141,7 +1146,7 @@ public class DataNode extends Configured
if(blockPoolManager != null) {
try {
- this.blockPoolManager.shutDownAll();
+ this.blockPoolManager.shutDownAll(bposArray);
} catch (InterruptedException ie) {
LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
}
@@ -1256,7 +1261,7 @@ public class DataNode extends Configured
xfersBuilder.append(xferTargets[i]);
xfersBuilder.append(" ");
}
- LOG.info(bpReg + " Starting thread to transfer block " +
+ LOG.info(bpReg + " Starting thread to transfer " +
block + " to " + xfersBuilder);
}
@@ -2043,7 +2048,7 @@ public class DataNode extends Configured
ExtendedBlock block = rb.getBlock();
DatanodeInfo[] targets = rb.getLocations();
- LOG.info(who + " calls recoverBlock(block=" + block
+ LOG.info(who + " calls recoverBlock(" + block
+ ", targets=[" + Joiner.on(", ").join(targets) + "]"
+ ", newGenerationStamp=" + rb.getNewGenerationStamp() + ")");
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Mon Oct 29 14:09:47 2012
@@ -155,11 +155,11 @@ public class DataStorage extends Storage
break;
case NON_EXISTENT:
// ignore this storage
- LOG.info("Storage directory " + dataDir + " does not exist.");
+ LOG.info("Storage directory " + dataDir + " does not exist");
it.remove();
continue;
case NOT_FORMATTED: // format
- LOG.info("Storage directory " + dataDir + " is not formatted.");
+ LOG.info("Storage directory " + dataDir + " is not formatted");
LOG.info("Formatting ...");
format(sd, nsInfo);
break;
@@ -482,7 +482,7 @@ public class DataStorage extends Storage
// 5. Rename <SD>/previous.tmp to <SD>/previous
rename(tmpDir, prevDir);
- LOG.info("Upgrade of " + sd.getRoot()+ " is complete.");
+ LOG.info("Upgrade of " + sd.getRoot()+ " is complete");
addBlockPoolStorage(nsInfo.getBlockPoolID(), bpStorage);
}
@@ -556,7 +556,7 @@ public class DataStorage extends Storage
rename(prevDir, curDir);
// delete tmp dir
deleteDir(tmpDir);
- LOG.info("Rollback of " + sd.getRoot() + " is complete.");
+ LOG.info("Rollback of " + sd.getRoot() + " is complete");
}
/**
@@ -596,9 +596,9 @@ public class DataStorage extends Storage
deleteDir(bbwDir);
}
} catch(IOException ex) {
- LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
+ LOG.error("Finalize upgrade for " + dataDirPath + " failed", ex);
}
- LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
+ LOG.info("Finalize upgrade for " + dataDirPath + " is complete");
}
@Override
public String toString() { return "Finalize " + dataDirPath; }
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Mon Oct 29 14:09:47 2012
@@ -170,7 +170,7 @@ class DataXceiver extends Receiver imple
} catch (InvalidMagicNumberException imne) {
LOG.info("Failed to read expected encryption handshake from client " +
"at " + s.getInetAddress() + ". Perhaps the client is running an " +
- "older version of Hadoop which does not support encryption.");
+ "older version of Hadoop which does not support encryption");
return;
}
input = encryptedStreams.in;
@@ -367,9 +367,8 @@ class DataXceiver extends Receiver imple
// make a copy here.
final ExtendedBlock originalBlock = new ExtendedBlock(block);
block.setNumBytes(dataXceiverServer.estimateBlockSize);
- LOG.info("Receiving block " + block +
- " src: " + remoteAddress +
- " dest: " + localAddress);
+ LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: "
+ + localAddress);
// reply to upstream datanode or client
final DataOutputStream replyOut = new DataOutputStream(
@@ -478,9 +477,9 @@ class DataXceiver extends Receiver imple
block + " to mirror " + mirrorNode + ": " + e);
throw e;
} else {
- LOG.info(datanode + ":Exception transfering block " +
+ LOG.info(datanode + ":Exception transfering " +
block + " to mirror " + mirrorNode +
- ". continuing without the mirror.", e);
+ "- continuing without the mirror", e);
}
}
}
@@ -528,10 +527,8 @@ class DataXceiver extends Receiver imple
if (isDatanode ||
stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);
- LOG.info("Received block " + block +
- " src: " + remoteAddress +
- " dest: " + localAddress +
- " of size " + block.getNumBytes());
+ LOG.info("Received " + block + " src: " + remoteAddress + " dest: "
+ + localAddress + " of size " + block.getNumBytes());
}
@@ -674,7 +671,7 @@ class DataXceiver extends Receiver imple
datanode.metrics.incrBytesRead((int) read);
datanode.metrics.incrBlocksRead();
- LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
+ LOG.info("Copied " + block + " to " + s.getRemoteSocketAddress());
} catch (IOException ioe) {
isOpSuccess = false;
LOG.info("opCopyBlock " + block + " received exception " + ioe);
@@ -797,8 +794,7 @@ class DataXceiver extends Receiver imple
// notify name node
datanode.notifyNamenodeReceivedBlock(block, delHint);
- LOG.info("Moved block " + block +
- " from " + s.getRemoteSocketAddress());
+ LOG.info("Moved " + block + " from " + s.getRemoteSocketAddress());
} catch (IOException ioe) {
opStatus = ERROR;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java Mon Oct 29 14:09:47 2012
@@ -136,7 +136,7 @@ class FsDatasetAsyncDiskService {
if (executors == null) {
LOG.warn("AsyncDiskService has already shut down.");
} else {
- LOG.info("Shutting down all async disk service threads...");
+ LOG.info("Shutting down all async disk service threads");
for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) {
e.getValue().shutdown();
@@ -144,7 +144,7 @@ class FsDatasetAsyncDiskService {
// clear the executor map so that calling execute again will fail.
executors = null;
- LOG.info("All async disk service threads have been shut down.");
+ LOG.info("All async disk service threads have been shut down");
}
}
@@ -154,7 +154,7 @@ class FsDatasetAsyncDiskService {
*/
void deleteAsync(FsVolumeImpl volume, File blockFile, File metaFile,
ExtendedBlock block) {
- LOG.info("Scheduling block " + block.getLocalBlock()
+ LOG.info("Scheduling " + block.getLocalBlock()
+ " file " + blockFile + " for deletion");
ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(
volume, blockFile, metaFile, block);
@@ -198,8 +198,8 @@ class FsDatasetAsyncDiskService {
datanode.notifyNamenodeDeletedBlock(block);
}
volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
- LOG.info("Deleted block " + block.getBlockPoolId() + " "
- + block.getLocalBlock() + " at file " + blockFile);
+ LOG.info("Deleted " + block.getBlockPoolId() + " "
+ + block.getLocalBlock() + " file " + blockFile);
}
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Mon Oct 29 14:09:47 2012
@@ -425,7 +425,7 @@ class FsDatasetImpl implements FsDataset
return;
}
if (newlen > oldlen) {
- throw new IOException("Cannout truncate block to from oldlen (=" + oldlen
+ throw new IOException("Cannot truncate block to from oldlen (=" + oldlen
+ ") to newlen (=" + newlen + ")");
}
@@ -481,7 +481,7 @@ class FsDatasetImpl implements FsDataset
" should be greater than the replica " + b + "'s generation stamp");
}
ReplicaInfo replicaInfo = getReplicaInfo(b);
- LOG.info("Appending to replica " + replicaInfo);
+ LOG.info("Appending to " + replicaInfo);
if (replicaInfo.getState() != ReplicaState.FINALIZED) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
@@ -689,7 +689,7 @@ class FsDatasetImpl implements FsDataset
public synchronized ReplicaInPipeline recoverRbw(ExtendedBlock b,
long newGS, long minBytesRcvd, long maxBytesRcvd)
throws IOException {
- LOG.info("Recover the RBW replica " + b);
+ LOG.info("Recover RBW replica " + b);
ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
@@ -700,7 +700,7 @@ class FsDatasetImpl implements FsDataset
}
ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
- LOG.info("Recovering replica " + rbw);
+ LOG.info("Recovering " + rbw);
// Stop the previous writer
rbw.stopWriter();
@@ -736,8 +736,8 @@ class FsDatasetImpl implements FsDataset
final long blockId = b.getBlockId();
final long expectedGs = b.getGenerationStamp();
final long visible = b.getNumBytes();
- LOG.info("Convert replica " + b
- + " from Temporary to RBW, visible length=" + visible);
+ LOG.info("Convert " + b + " from Temporary to RBW, visible length="
+ + visible);
final ReplicaInPipeline temp;
{
@@ -1415,8 +1415,7 @@ class FsDatasetImpl implements FsDataset
static ReplicaRecoveryInfo initReplicaRecovery(String bpid,
ReplicaMap map, Block block, long recoveryId) throws IOException {
final ReplicaInfo replica = map.get(bpid, block.getBlockId());
- LOG.info("initReplicaRecovery: block=" + block
- + ", recoveryId=" + recoveryId
+ LOG.info("initReplicaRecovery: " + block + ", recoveryId=" + recoveryId
+ ", replica=" + replica);
//check replica
@@ -1485,7 +1484,7 @@ class FsDatasetImpl implements FsDataset
//get replica
final String bpid = oldBlock.getBlockPoolId();
final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
- LOG.info("updateReplica: block=" + oldBlock
+ LOG.info("updateReplica: " + oldBlock
+ ", recoveryId=" + recoveryId
+ ", length=" + newlength
+ ", replica=" + replica);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Oct 29 14:09:47 2012
@@ -142,7 +142,7 @@ public class FSDirectory implements Clos
DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
NameNode.LOG.info("Caching file names occuring more than " + threshold
- + " times ");
+ + " times");
nameCache = new NameCache<ByteArray>(threshold);
namesystem = ns;
}
@@ -255,15 +255,12 @@ public class FSDirectory implements Clos
writeUnlock();
}
if (newNode == null) {
- NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
- +"failed to add "+path
- +" to the file system");
+ NameNode.stateChangeLog.info("DIR* addFile: failed to add " + path);
return null;
}
if(NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
- +path+" is added to the file system");
+ NameNode.stateChangeLog.debug("DIR* addFile: " + path + " is added");
}
return newNode;
}
@@ -2189,16 +2186,13 @@ public class FSDirectory implements Clos
writeUnlock();
}
if (newNode == null) {
- NameNode.stateChangeLog.info("DIR* FSDirectory.addSymlink: "
- +"failed to add "+path
- +" to the file system");
+ NameNode.stateChangeLog.info("DIR* addSymlink: failed to add " + path);
return null;
}
fsImage.getEditLog().logSymlink(path, target, modTime, modTime, newNode);
if(NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("DIR* FSDirectory.addSymlink: "
- +path+" is added to the file system");
+ NameNode.stateChangeLog.debug("DIR* addSymlink: " + path + " is added");
}
return newNode;
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Oct 29 14:09:47 2012
@@ -928,7 +928,7 @@ public class FSEditLog implements LogsPu
* in the new log.
*/
synchronized long rollEditLog() throws IOException {
- LOG.info("Rolling edit logs.");
+ LOG.info("Rolling edit logs");
endCurrentLogSegment(true);
long nextTxId = getLastWrittenTxId() + 1;
@@ -943,7 +943,7 @@ public class FSEditLog implements LogsPu
*/
public synchronized void startLogSegment(long txid,
boolean abortCurrentLogSegment) throws IOException {
- LOG.info("Namenode started a new log segment at txid " + txid);
+ LOG.info("Started a new log segment at txid " + txid);
if (isSegmentOpen()) {
if (getLastWrittenTxId() == txid - 1) {
//In sync with the NN, so end and finalize the current segment`
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Mon Oct 29 14:09:47 2012
@@ -90,7 +90,7 @@ public class FSEditLogLoader {
expectedStartingTxId, recovery);
FSImage.LOG.info("Edits file " + edits.getName()
+ " of size " + edits.length() + " edits # " + numEdits
- + " loaded in " + (now()-startTime)/1000 + " seconds.");
+ + " loaded in " + (now()-startTime)/1000 + " seconds");
return numEdits;
} finally {
edits.close();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Oct 29 14:09:47 2012
@@ -660,11 +660,11 @@ public class FSNamesystem implements Nam
editLog.recoverUnclosedStreams();
LOG.info("Catching up to latest edits from old active before " +
- "taking over writer role in edits logs.");
+ "taking over writer role in edits logs");
editLogTailer.catchupDuringFailover();
blockManager.setPostponeBlocksFromFuture(false);
- LOG.info("Reprocessing replication and invalidation queues...");
+ LOG.info("Reprocessing replication and invalidation queues");
blockManager.getDatanodeManager().markAllDatanodesStale();
blockManager.clearQueues();
blockManager.processAllPendingDNMessages();
@@ -1978,7 +1978,7 @@ public class FSNamesystem implements Nam
if (force) {
// close now: no need to wait for soft lease expiration and
// close only the file src
- LOG.info("recoverLease: recover lease " + lease + ", src=" + src +
+ LOG.info("recoverLease: " + lease + ", src=" + src +
" from client " + pendingFile.getClientName());
internalReleaseLease(lease, src, holder);
} else {
@@ -1990,8 +1990,8 @@ public class FSNamesystem implements Nam
// period, then start lease recovery.
//
if (lease.expiredSoftLimit()) {
- LOG.info("startFile: recover lease " + lease + ", src=" + src +
- " from client " + pendingFile.getClientName());
+ LOG.info("startFile: recover " + lease + ", src=" + src + " client "
+ + pendingFile.getClientName());
boolean isClosed = internalReleaseLease(lease, src, null);
if(!isClosed)
throw new RecoveryInProgressException(
@@ -2167,7 +2167,7 @@ public class FSNamesystem implements Nam
}
// The retry case ("b" above) -- abandon the old block.
- NameNode.stateChangeLog.info("BLOCK* NameSystem.allocateBlock: " +
+ NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
"caught retry for allocation of a new block in " +
src + ". Abandoning old block " + lastBlockInFile);
dir.removeBlock(src, pendingFile, lastBlockInFile);
@@ -2403,10 +2403,10 @@ public class FSNamesystem implements Nam
// See HDFS-3031.
final Block realLastBlock = ((INodeFile)inode).getLastBlock();
if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
- NameNode.stateChangeLog.info("DIR* NameSystem.completeFile: " +
- "received request from " + holder + " to complete file " + src +
+ NameNode.stateChangeLog.info("DIR* completeFile: " +
+ "request from " + holder + " to complete " + src +
" which is already closed. But, it appears to be an RPC " +
- "retry. Returning success.");
+ "retry. Returning success");
return true;
}
}
@@ -2421,8 +2421,8 @@ public class FSNamesystem implements Nam
finalizeINodeFileUnderConstruction(src, pendingFile);
- NameNode.stateChangeLog.info("DIR* NameSystem.completeFile: file " + src
- + " is closed by " + holder);
+ NameNode.stateChangeLog.info("DIR* completeFile: " + src + " is closed by "
+ + holder);
return true;
}
@@ -2447,8 +2447,8 @@ public class FSNamesystem implements Nam
nextGenerationStamp();
b.setGenerationStamp(getGenerationStamp());
b = dir.addBlock(src, inodes, b, targets);
- NameNode.stateChangeLog.info("BLOCK* NameSystem.allocateBlock: "
- +src+ ". " + blockPoolId + " "+ b);
+ NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
+ + blockPoolId + " " + b);
return b;
}
@@ -2466,8 +2466,8 @@ public class FSNamesystem implements Nam
//
for (BlockInfo block: v.getBlocks()) {
if (!block.isComplete()) {
- LOG.info("BLOCK* NameSystem.checkFileProgress: "
- + "block " + block + " has not reached minimal replication "
+ LOG.info("BLOCK* checkFileProgress: " + block
+ + " has not reached minimal replication "
+ blockManager.minReplication);
return false;
}
@@ -2478,8 +2478,8 @@ public class FSNamesystem implements Nam
//
BlockInfo b = v.getPenultimateBlock();
if (b != null && !b.isComplete()) {
- LOG.info("BLOCK* NameSystem.checkFileProgress: "
- + "block " + b + " has not reached minimal replication "
+ LOG.info("BLOCK* checkFileProgress: " + b
+ + " has not reached minimal replication "
+ blockManager.minReplication);
return false;
}
@@ -2952,8 +2952,7 @@ public class FSNamesystem implements Nam
*/
void fsync(String src, String clientName)
throws IOException, UnresolvedLinkException {
- NameNode.stateChangeLog.info("BLOCK* NameSystem.fsync: file "
- + src + " for " + clientName);
+ NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -2984,7 +2983,7 @@ public class FSNamesystem implements Nam
boolean internalReleaseLease(Lease lease, String src,
String recoveryLeaseHolder) throws AlreadyBeingCreatedException,
IOException, UnresolvedLinkException {
- LOG.info("Recovering lease=" + lease + ", src=" + src);
+ LOG.info("Recovering " + lease + ", src=" + src);
assert !isInSafeMode();
assert hasWriteLock();
@@ -3625,7 +3624,7 @@ public class FSNamesystem implements Nam
"in order to create namespace image.");
}
getFSImage().saveNamespace(this);
- LOG.info("New namespace image has been created.");
+ LOG.info("New namespace image has been created");
} finally {
readUnlock();
}
@@ -3843,11 +3842,11 @@ public class FSNamesystem implements Nam
}
long timeInSafemode = now() - startTime;
NameNode.stateChangeLog.info("STATE* Leaving safe mode after "
- + timeInSafemode/1000 + " secs.");
+ + timeInSafemode/1000 + " secs");
NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);
if (reached >= 0) {
- NameNode.stateChangeLog.info("STATE* Safe mode is OFF.");
+ NameNode.stateChangeLog.info("STATE* Safe mode is OFF");
}
reached = -1;
safeMode = null;
@@ -4167,7 +4166,7 @@ public class FSNamesystem implements Nam
}
}
if (!fsRunning) {
- LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread. ");
+ LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread");
} else {
// leave safe mode and stop the monitor
leaveSafeMode();
@@ -4352,7 +4351,7 @@ public class FSNamesystem implements Nam
if (isEditlogOpenForWrite) {
getEditLog().logSyncAll();
}
- NameNode.stateChangeLog.info("STATE* Safe mode is ON. "
+ NameNode.stateChangeLog.info("STATE* Safe mode is ON"
+ safeMode.getTurnOffTip());
} finally {
writeUnlock();
@@ -4367,7 +4366,7 @@ public class FSNamesystem implements Nam
writeLock();
try {
if (!isInSafeMode()) {
- NameNode.stateChangeLog.info("STATE* Safe mode is already OFF.");
+ NameNode.stateChangeLog.info("STATE* Safe mode is already OFF");
return;
}
safeMode.leave();
@@ -4721,7 +4720,7 @@ public class FSNamesystem implements Nam
try {
checkOperation(OperationCategory.WRITE);
- NameNode.stateChangeLog.info("*DIR* NameNode.reportBadBlocks");
+ NameNode.stateChangeLog.info("*DIR* reportBadBlocks");
for (int i = 0; i < blocks.length; i++) {
ExtendedBlock blk = blocks[i].getBlock();
DatanodeInfo[] nodes = blocks[i].getLocations();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java Mon Oct 29 14:09:47 2012
@@ -77,7 +77,7 @@ public class GetDelegationTokenServlet e
});
} catch(Exception e) {
- LOG.info("Exception while sending token. Re-throwing. ", e);
+ LOG.info("Exception while sending token. Re-throwing ", e);
resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
} finally {
if(dos != null) dos.close();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Mon Oct 29 14:09:47 2012
@@ -429,7 +429,7 @@ public class LeaseManager {
return;
}
- LOG.info("Lease " + oldest + " has expired hard limit");
+ LOG.info(oldest + " has expired hard limit");
final List<String> removing = new ArrayList<String>();
// need to create a copy of the oldest lease paths, becuase
@@ -441,15 +441,14 @@ public class LeaseManager {
for(String p : leasePaths) {
try {
if(fsnamesystem.internalReleaseLease(oldest, p, HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
- LOG.info("Lease recovery for file " + p +
- " is complete. File closed.");
+ LOG.info("Lease recovery for " + p + " is complete. File closed.");
removing.add(p);
} else {
- LOG.info("Started block recovery for file " + p +
- " lease " + oldest);
+ LOG.info("Started block recovery " + p + " lease " + oldest);
}
} catch (IOException e) {
- LOG.error("Cannot release the path "+p+" in the lease "+oldest, e);
+ LOG.error("Cannot release the path " + p + " in the lease "
+ + oldest, e);
removing.add(p);
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java Mon Oct 29 14:09:47 2012
@@ -102,7 +102,7 @@ public final class MetaRecoveryContext
"without prompting. " +
"(c/s/q/a)\n", "c", "s", "q", "a");
if (answer.equals("c")) {
- LOG.info("Continuing.");
+ LOG.info("Continuing");
return;
} else if (answer.equals("s")) {
throw new RequestStopException("user requested stop");
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1402604-1403174
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c Mon Oct 29 14:09:47 2012
@@ -146,6 +146,7 @@ static int hashTableInit(void)
if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
fprintf(stderr, "error creating hashtable, <%d>: %s\n",
errno, strerror(errno));
+ UNLOCK_HASH_TABLE();
return 0;
}
hashTableInited = 1;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto Mon Oct 29 14:09:47 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "ClientDatanodeProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
import "hdfs.proto";
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Mon Oct 29 14:09:47 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "ClientNamenodeProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
import "hdfs.proto";
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Mon Oct 29 14:09:47 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "DatanodeProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
import "hdfs.proto";
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto Mon Oct 29 14:09:47 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "GetUserMappingsProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
/**
* Get groups for user request.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto Mon Oct 29 14:09:47 2012
@@ -17,6 +17,7 @@
*/
option java_package = "org.apache.hadoop.hdfs.server.namenode.ha.proto";
option java_outer_classname = "HAZKInfoProtos";
+package hadoop.hdfs;
message ActiveNodeInfo {
required string nameserviceId = 1;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto Mon Oct 29 14:09:47 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "InterDatanodeProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
import "hdfs.proto";
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto Mon Oct 29 14:09:47 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "JournalProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
import "hdfs.proto";
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto Mon Oct 29 14:09:47 2012
@@ -23,6 +23,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "NamenodeProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
import "hdfs.proto";
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto Mon Oct 29 14:09:47 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "QJournalProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
import "hdfs.proto";
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshAuthorizationPolicyProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshAuthorizationPolicyProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshAuthorizationPolicyProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshAuthorizationPolicyProtocol.proto Mon Oct 29 14:09:47 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "RefreshAuthorizationPolicyProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
/**
* Refresh service acl request.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshUserMappingsProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshUserMappingsProtocol.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshUserMappingsProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshUserMappingsProtocol.proto Mon Oct 29 14:09:47 2012
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
option java_outer_classname = "RefreshUserMappingsProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
/**
* Refresh user to group mappings request.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Mon Oct 29 14:09:47 2012
@@ -22,6 +22,7 @@
option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "DataTransferProtos";
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
import "hdfs.proto";
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Mon Oct 29 14:09:47 2012
@@ -22,6 +22,7 @@
option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "HdfsProtos";
option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
/**
* Extended block idenfies a block
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1402604-1403174
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1402604-1403174
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1402604-1403174
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1402604-1403174
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1403301&r1=1403300&r2=1403301&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Mon Oct 29 14:09:47 2012
@@ -15797,7 +15797,7 @@
<comparators>
<comparator>
<type>RegexpComparator</type>
- <expected-output>Configured Capacity: [0-9]+ \([0-9]+\.[0-9]+ [BKMGT]+\)</expected-output>
+ <expected-output>Configured Capacity: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
@@ -15915,7 +15915,7 @@
<comparators>
<comparator>
<type>RegexpComparator</type>
- <expected-output>Configured Capacity: [0-9]+ \([0-9]+\.[0-9]+ [BKMGT]+\)</expected-output>
+ <expected-output>Configured Capacity: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>