Posted to common-commits@hadoop.apache.org by su...@apache.org on 2012/10/29 03:21:10 UTC

svn commit: r1403148 [1/2] - in /hadoop/common/branches/branch-1: ./ src/hdfs/org/apache/hadoop/hdfs/ src/hdfs/org/apache/hadoop/hdfs/server/balancer/ src/hdfs/org/apache/hadoop/hdfs/server/common/ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/h...

Author: suresh
Date: Mon Oct 29 02:21:09 2012
New Revision: 1403148

URL: http://svn.apache.org/viewvc?rev=1403148&view=rev
Log:
HDFS-4122. Cleanup HDFS logs and reduce the size of logged messages. Contributed by Suresh Srinivas.

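The common thread in the hunks below: literal words such as "block" are dropped
immediately before a Block argument, because Block.toString() on branch-1 already
renders as "blk_<id>_<genstamp>", so the old messages effectively read
"block blk_...". A minimal before/after sketch of the pattern (values are made
up; LOG is the commons-logging Log these classes already use):

    Block b = new Block(4711L, 1024L, 1001L);  // blockId, numBytes, generation stamp
    LOG.info("Failed to transfer block " + b); // before: "Failed to transfer block blk_4711_1001"
    LOG.info("Failed to transfer " + b);       // after:  "Failed to transfer blk_4711_1001"
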
Modified:
    hadoop/common/branches/branch-1/CHANGES.txt
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Mon Oct 29 02:21:09 2012
@@ -110,6 +110,9 @@ Release 1.2.0 - unreleased
     HADOOP-8968. Add a flag to completely disable the worker version check.
     (tucu via eli)
 
+    HDFS-4122. Cleanup HDFS logs and reduce the size of logged messages.
+    (suresh)
+
   OPTIMIZATIONS
 
     HDFS-2533. Backport: Remove needless synchronization on some FSDataSet

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Mon Oct 29 02:21:09 2012
@@ -2036,7 +2036,7 @@ public class DFSClient implements FSCons
             serverSupportsHdfs200 = false;
           } else {
             LOG.info("Failed to get block info from "
-                + datanode.getHostName() + " probably does not have block "
+                + datanode.getHostName() + " probably does not have "
                 + last.getBlock(), e);
           }
         } finally {
@@ -2247,7 +2247,7 @@ public class DFSClient implements FSCons
               fetchBlockAt(target);
               continue;
             } else {
-              LOG.info("Failed to read block " + targetBlock.getBlock()
+              LOG.info("Failed to read " + targetBlock.getBlock()
                   + " on local machine" + StringUtils.stringifyException(ex));
               LOG.info("Try reading via the datanode on " + targetAddr);
             }
@@ -2435,9 +2435,9 @@ public class DFSClient implements FSCons
           }
           
           if (nodes == null || nodes.length == 0) {
-            LOG.info("No node available for block: " + blockInfo);
+            LOG.info("No node available for: " + blockInfo);
           }
-          LOG.info("Could not obtain block " + block.getBlock()
+          LOG.info("Could not obtain " + block.getBlock()
               + " from any node: " + ie
               + ". Will get new block locations from namenode and retry...");
           try {
@@ -3211,12 +3211,12 @@ public class DFSClient implements FSCons
         return false;
       }
       if (response != null) {
-        LOG.info("Error Recovery for block " + block +
+        LOG.info("Error Recovery for " + block +
                  " waiting for responder to exit. ");
         return true;
       }
       if (errorIndex >= 0) {
-        LOG.warn("Error Recovery for block " + block
+        LOG.warn("Error Recovery for " + block
             + " bad datanode[" + errorIndex + "] "
             + (nodes == null? "nodes == null": nodes[errorIndex].getName()));
       }
@@ -3562,7 +3562,7 @@ public class DFSClient implements FSCons
         success = createBlockOutputStream(nodes, clientName, false);
 
         if (!success) {
-          LOG.info("Abandoning block " + block);
+          LOG.info("Abandoning " + block);
           namenode.abandonBlock(block, src, clientName);
 
           if (errorIndex < nodes.length) {
@@ -4083,7 +4083,7 @@ public class DFSClient implements FSCons
             try {
               Thread.sleep(400);
               if (System.currentTimeMillis() - localstart > 5000) {
-                LOG.info("Could not complete file " + src + " retrying...");
+                LOG.info("Could not complete " + src + " retrying...");
               }
             } catch (InterruptedException ie) {
             }

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java Mon Oct 29 02:21:09 2012
@@ -338,8 +338,7 @@ public class Balancer implements Tool {
         LOG.info( "Moving block " + block.getBlock().getBlockId() +
               " from "+ source.getName() + " to " +
               target.getName() + " through " +
-              proxySource.getName() +
-              " is succeeded." );
+              proxySource.getName() + " is succeeded." );
       } catch (IOException e) {
         LOG.warn("Error moving block "+block.getBlockId()+
             " from " + source.getName() + " to " +
@@ -1515,8 +1514,8 @@ public class Balancer implements Tool {
           System.out.println("The cluster is balanced. Exiting...");
           return SUCCESS;
         } else {
-          LOG.info( "Need to move "+ StringUtils.byteDesc(bytesLeftToMove)
-              +" bytes to make the cluster balanced." );
+          LOG.info("Need to move "+ StringUtils.byteDesc(bytesLeftToMove)
+              +" bytes to make the cluster balanced" );
         }
         
         /* Decide all the nodes that will participate in the block move and

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java Mon Oct 29 02:21:09 2012
@@ -428,16 +428,16 @@ public abstract class Storage extends St
         if (!root.exists()) {
           // storage directory does not exist
           if (startOpt != StartupOption.FORMAT) {
-            LOG.info("Storage directory " + rootPath + " does not exist.");
+            LOG.info("Storage directory " + rootPath + " does not exist");
             return StorageState.NON_EXISTENT;
           }
-          LOG.info(rootPath + " does not exist. Creating ...");
+          LOG.info(rootPath + " does not exist. Creating...");
           if (!root.mkdirs())
             throw new IOException("Cannot create directory " + rootPath);
         }
         // or is inaccessible
         if (!root.isDirectory()) {
-          LOG.info(rootPath + "is not a directory.");
+          LOG.info(rootPath + "is not a directory");
           return StorageState.NON_EXISTENT;
         }
         if (!root.canWrite()) {
@@ -534,34 +534,34 @@ public abstract class Storage extends St
       switch(curState) {
       case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
         LOG.info("Completing previous upgrade for storage directory " 
-                 + rootPath + ".");
+                 + rootPath);
         rename(getPreviousTmp(), getPreviousDir());
         return;
       case RECOVER_UPGRADE:   // mv previous.tmp -> current
         LOG.info("Recovering storage directory " + rootPath
-                 + " from previous upgrade.");
+                 + " from previous upgrade");
         if (curDir.exists())
           deleteDir(curDir);
         rename(getPreviousTmp(), curDir);
         return;
       case COMPLETE_ROLLBACK: // rm removed.tmp
         LOG.info("Completing previous rollback for storage directory "
-                 + rootPath + ".");
+                 + rootPath);
         deleteDir(getRemovedTmp());
         return;
       case RECOVER_ROLLBACK:  // mv removed.tmp -> current
         LOG.info("Recovering storage directory " + rootPath
-                 + " from previous rollback.");
+                 + " from previous rollback");
         rename(getRemovedTmp(), curDir);
         return;
       case COMPLETE_FINALIZE: // rm finalized.tmp
         LOG.info("Completing previous finalize for storage directory "
-                 + rootPath + ".");
+                 + rootPath);
         deleteDir(getFinalizedTmp());
         return;
       case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint
         LOG.info("Completing previous checkpoint for storage directory " 
-                 + rootPath + ".");
+                 + rootPath);
         File prevCkptDir = getPreviousCheckpoint();
         if (prevCkptDir.exists())
           deleteDir(prevCkptDir);
@@ -569,7 +569,7 @@ public abstract class Storage extends St
         return;
       case RECOVER_CHECKPOINT:  // mv lastcheckpoint.tmp -> current
         LOG.info("Recovering storage directory " + rootPath
-                 + " from failed checkpoint.");
+                 + " from failed checkpoint");
         if (curDir.exists())
           deleteDir(curDir);
         rename(getLastCheckpointTmp(), curDir);

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Mon Oct 29 02:21:09 2012
@@ -201,7 +201,7 @@ class BlockReceiver implements java.io.C
    * affect this datanode.
    */
   private void handleMirrorOutError(IOException ioe) throws IOException {
-    LOG.info(datanode.dnRegistration + ": Exception writing block " +
+    LOG.info(datanode.dnRegistration + ": Exception writing " +
              block + " to mirror " + mirrorAddr + "\n" +
              StringUtils.stringifyException(ioe));
     if (Thread.interrupted()) { // shut down if the thread is interrupted
@@ -228,13 +228,13 @@ class BlockReceiver implements java.io.C
       if (!checksum.compare(checksumBuf, checksumOff)) {
         if (srcDataNode != null) {
           try {
-            LOG.info("report corrupt block " + block + " from datanode " +
+            LOG.info("report corrupt " + block + " from datanode " +
                       srcDataNode + " to namenode");
             LocatedBlock lb = new LocatedBlock(block, 
                                             new DatanodeInfo[] {srcDataNode});
             datanode.namenode.reportBadBlocks(new LocatedBlock[] {lb});
           } catch (IOException e) {
-            LOG.warn("Failed to report bad block " + block + 
+            LOG.warn("Failed to report bad " + block + 
                       " from datanode " + srcDataNode + " to namenode");
           }
         }
@@ -410,7 +410,7 @@ class BlockReceiver implements java.io.C
     buf.reset();
     
     if (LOG.isDebugEnabled()){
-      LOG.debug("Receiving one packet for block " + block +
+      LOG.debug("Receiving one packet for " + block +
                 " of length " + payloadLen +
                 " seqno " + seqno +
                 " offsetInBlock " + offsetInBlock +
@@ -439,7 +439,7 @@ class BlockReceiver implements java.io.C
     } 
 
     if (len == 0) {
-      LOG.debug("Receiving empty packet for block " + block);
+      LOG.debug("Receiving empty packet for " + block);
     } else {
       offsetInBlock += len;
 
@@ -603,8 +603,7 @@ class BlockReceiver implements java.io.C
       }
 
     } catch (IOException ioe) {
-      LOG.info("Exception in receiveBlock for block " + block + 
-               " " + ioe);
+      LOG.info("Exception in receiveBlock for " + block + " " + ioe);
       IOUtils.closeStream(this);
       if (responder != null) {
         responder.interrupt();
@@ -666,8 +665,7 @@ class BlockReceiver implements java.io.C
     // If this is a partial chunk, then read in pre-existing checksum
     if (offsetInBlock % bytesPerChecksum != 0) {
       LOG.info("setBlockPosition trying to set position to " +
-               offsetInBlock +
-               " for block " + block +
+               offsetInBlock + " for " + block +
                " which is not a multiple of bytesPerChecksum " +
                bytesPerChecksum);
       computePartialChunkCrc(offsetInBlock, offsetInChecksum, bytesPerChecksum);
@@ -697,8 +695,7 @@ class BlockReceiver implements java.io.C
     int checksumSize = checksum.getChecksumSize();
     blkoff = blkoff - sizePartialChunk;
     LOG.info("computePartialChunkCrc sizePartialChunk " + 
-              sizePartialChunk +
-              " block " + block +
+              sizePartialChunk + " " + block +
               " offset in block " + blkoff +
               " offset in metafile " + ckoff);
 
@@ -721,7 +718,7 @@ class BlockReceiver implements java.io.C
     // compute crc of partial chunk from data read in the block file.
     partialCrc = new PureJavaCrc32();
     partialCrc.update(buf, 0, sizePartialChunk);
-    LOG.info("Read in partial CRC chunk from disk for block " + block);
+    LOG.info("Read in partial CRC chunk from disk for " + block);
 
     // paranoia! verify that the pre-computed crc matches what we
     // recalculated just now
@@ -754,7 +751,7 @@ class BlockReceiver implements java.io.C
     private Thread receiverThread; // the thread that spawns this responder
 
     public String toString() {
-      return "PacketResponder " + numTargets + " for Block " + this.block;
+      return "PacketResponder " + numTargets + " for " + this.block;
     }
 
     PacketResponder(BlockReceiver receiver, Block b, DataInputStream in, 
@@ -906,8 +903,7 @@ class BlockReceiver implements java.io.C
                       "HDFS_WRITE", receiver.clientName, offset, 
                       datanode.dnRegistration.getStorageID(), block, endTime-startTime));
               } else {
-                LOG.info("Received block " + block + 
-                         " of size " + block.getNumBytes() + 
+                LOG.info("Received " + block + " of size " + block.getNumBytes() +
                          " from " + receiver.inAddr);
               }
             }
@@ -948,8 +944,8 @@ class BlockReceiver implements java.io.C
           }
         }
       }
-      LOG.info("PacketResponder " + numTargets + 
-               " for block " + block + " terminating");
+      LOG.info("PacketResponder " + numTargets + " for " + block +
+          " terminating");
     }
   }
   

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Mon Oct 29 02:21:09 2012
@@ -160,7 +160,7 @@ class BlockSender implements java.io.Clo
       if (startOffset < 0 || startOffset > endOffset
           || (length + startOffset) > endOffset) {
         String msg = " Offset " + startOffset + " and length " + length
-        + " don't match block " + block + " ( blockLen " + endOffset + " )";
+        + " don't match " + block + " ( blockLen " + endOffset + " )";
         LOG.warn(datanode.dnRegistration + ":sendBlock() : " + msg);
         throw new IOException(msg);
       }

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Mon Oct 29 02:21:09 2012
@@ -262,7 +262,7 @@ class DataBlockScanner implements Runnab
     
     BlockScanInfo info = blockMap.get(block);
     if ( info != null ) {
-      LOG.warn("Adding an already existing block " + block);
+      LOG.warn("Adding an already existing " + block);
       delBlockInfo(info);
     }
     
@@ -369,8 +369,7 @@ class DataBlockScanner implements Runnab
   }
   
   private void handleScanFailure(Block block) {
-    
-    LOG.info("Reporting bad block " + block + " to namenode.");
+    LOG.info("Reporting bad " + block + " to namenode.");
     
     try {
       DatanodeInfo[] dnArr = { new DatanodeInfo(datanode.dnRegistration) };
@@ -380,7 +379,7 @@ class DataBlockScanner implements Runnab
       /* One common reason is that NameNode could be in safe mode.
        * Should we keep on retrying in that case?
        */
-      LOG.warn("Failed to report bad block " + block + " to namenode : " +
+      LOG.warn("Failed to report bad " + block + " to namenode : " +
                " Exception : " + StringUtils.stringifyException(e));
     }
   }
@@ -460,8 +459,8 @@ class DataBlockScanner implements Runnab
         
         blockSender.sendBlock(out, null, throttler);
 
-        LOG.info((second ? "Second " : "") +
-                 "Verification succeeded for " + block);
+        LOG.info((second ? "Second verification" : "Verification") +
+                 " succeeded " + block);
         
         if ( second ) {
           totalTransientErrors++;
@@ -477,8 +476,8 @@ class DataBlockScanner implements Runnab
 
         // If the block does not exists anymore, then its not an error
         if ( dataset.getFile(block) == null ) {
-          LOG.info("Verification failed for " + block + ". Its ok since " +
-          "it not in datanode dataset anymore.");
+          LOG.info("Verification failed for " + block + ". Its ok since "
+              + "it is not in datanode dataset anymore.");
           deleteBlock(block);
           return;
         }
@@ -629,7 +628,7 @@ class DataBlockScanner implements Runnab
       throw e;
     } finally {
       shutdown();
-      LOG.info("Exiting DataBlockScanner thread.");
+      LOG.info("Exiting DataBlockScanner thread");
     }
   }
   

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Oct 29 02:21:09 2012
@@ -479,7 +479,7 @@ public class DataNode extends Configured
       blockScanner = new DataBlockScanner(this, (FSDataset)data, conf);
     } else {
       LOG.info("Periodic Block Verification is disabled because " +
-               reason + ".");
+               reason);
     }
 
     readaheadPool = ReadaheadPool.getInstance();
@@ -986,7 +986,6 @@ public class DataNode extends Configured
    * forever calling remote NameNode functions.
    */
   public void offerService() throws Exception {
-     
     LOG.info("using BLOCKREPORT_INTERVAL of " + blockReportInterval + "msec" + 
        " Initial delay: " + initialBlockReportDelay + "msec");
 
@@ -1018,7 +1017,6 @@ public class DataNode extends Configured
                                                        xmitsInProgress.get(),
                                                        getXceiverCount());
           myMetrics.addHeartBeat(now() - startTime);
-          //LOG.info("Just sent heartbeat, with name " + localName);
           if (!processCommand(cmds))
             continue;
         }
@@ -1117,7 +1115,7 @@ public class DataNode extends Configured
         // start block scanner
         if (blockScanner != null && blockScannerThread == null &&
             upgradeManager.isUpgradeCompleted()) {
-          LOG.info("Starting Periodic block scanner.");
+          LOG.info("Starting Periodic block scanner");
           blockScannerThread = new Daemon(blockScanner);
           blockScannerThread.start();
         }
@@ -1301,7 +1299,7 @@ public class DataNode extends Configured
                               ) throws IOException {
     if (!data.isValidBlock(block)) {
       // block does not exist or is under-construction
-      String errStr = "Can't send invalid block " + block;
+      String errStr = "Can't send invalid " + block;
       LOG.info(errStr);
       notifyNamenode(DatanodeProtocol.INVALID_BLOCK, errStr);
       return;
@@ -1314,7 +1312,7 @@ public class DataNode extends Configured
       namenode.reportBadBlocks(new LocatedBlock[]{
           new LocatedBlock(block, new DatanodeInfo[] {
               new DatanodeInfo(dnRegistration)})});
-      LOG.info("Can't replicate block " + block
+      LOG.info("Can't replicate " + block
           + " because on-disk length " + onDiskLength 
           + " is shorter than NameNode recorded length " + block.getNumBytes());
       return;
@@ -1328,7 +1326,7 @@ public class DataNode extends Configured
           xfersBuilder.append(xferTargets[i].getName());
           xfersBuilder.append(" ");
         }
-        LOG.info(dnRegistration + " Starting thread to transfer block " + 
+        LOG.info(dnRegistration + " Starting thread to transfer " + 
                  block + " to " + xfersBuilder);                       
       }
 
@@ -1343,7 +1341,7 @@ public class DataNode extends Configured
       try {
         transferBlock(blocks[i], xferTargets[i]);
       } catch (IOException ie) {
-        LOG.warn("Failed to transfer block " + blocks[i], ie);
+        LOG.warn("Failed to transfer " + blocks[i], ie);
       }
     }
   }
@@ -1532,7 +1530,7 @@ public class DataNode extends Configured
         blockSender.sendBlock(out, baseStream, null);
 
         // no response necessary
-        LOG.info(dnRegistration + ":Transmitted block " + b + " to " + curTarget);
+        LOG.info(dnRegistration + ":Transmitted " + b + " to " + curTarget);
 
       } catch (IOException ie) {
         LOG.warn(dnRegistration + ":Failed to transfer " + b + " to " + targets[0].getName()
@@ -1853,9 +1851,9 @@ public class DataNode extends Configured
       data.finalizeBlockIfNeeded(newblock);
       myMetrics.incrBlocksWritten();
       notifyNamenodeReceivedBlock(newblock, EMPTY_DEL_HINT);
-      LOG.info("Received block " + newblock +
+      LOG.info("Received " + newblock +
                 " of size " + newblock.getNumBytes() +
-                " as part of lease recovery.");
+                " as part of lease recovery");
     }
   }
 
@@ -1986,7 +1984,7 @@ public class DataNode extends Configured
     // file at the same time.
     synchronized (ongoingRecovery) {
       if (ongoingRecovery.get(block.getWithWildcardGS()) != null) {
-        String msg = "Block " + block + " is already being recovered, " +
+        String msg = block + " is already being recovered, " +
                      " ignoring this request to recover it.";
         LOG.info(msg);
         throw new IOException(msg);
@@ -2013,7 +2011,7 @@ public class DataNode extends Configured
                 id, getConf(), socketTimeout, connectToDnViaHostname);
           BlockRecoveryInfo info = datanode.startBlockRecovery(block);
           if (info == null) {
-            LOG.info("No block metadata found for block " + block + " on datanode "
+            LOG.info("No block metadata found for " + block + " on datanode "
                 + id);
             continue;
           }

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Mon Oct 29 02:21:09 2012
@@ -115,11 +115,11 @@ public class DataStorage extends Storage
           break;
         case NON_EXISTENT:
           // ignore this storage
-          LOG.info("Storage directory " + dataDir + " does not exist.");
+          LOG.info("Storage directory " + dataDir + " does not exist");
           it.remove();
           continue;
         case NOT_FORMATTED: // format
-          LOG.info("Storage directory " + dataDir + " is not formatted.");
+          LOG.info("Storage directory " + dataDir + " is not formatted");
           LOG.info("Formatting ...");
           format(sd, nsInfo);
           break;
@@ -291,7 +291,7 @@ public class DataStorage extends Storage
     // rename tmp to previous
     rename(tmpDir, prevDir);
     LOG.info( hardLink.linkStats.report());
-    LOG.info("Upgrade of " + sd.getRoot()+ " is complete.");
+    LOG.info("Upgrade of " + sd.getRoot()+ " is complete");
   }
 
   void doRollback( StorageDirectory sd,
@@ -327,7 +327,7 @@ public class DataStorage extends Storage
     rename(prevDir, curDir);
     // delete tmp dir
     deleteDir(tmpDir);
-    LOG.info("Rollback of " + sd.getRoot() + " is complete.");
+    LOG.info("Rollback of " + sd.getRoot() + " is complete");
   }
 
   void doFinalize(StorageDirectory sd) throws IOException {
@@ -350,9 +350,9 @@ public class DataStorage extends Storage
           try {
             deleteDir(tmpDir);
           } catch(IOException ex) {
-            LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
+            LOG.error("Finalize upgrade for " + dataDirPath + " failed", ex);
           }
-          LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
+          LOG.info("Finalize upgrade for " + dataDirPath + " is complete");
         }
         public String toString() { return "Finalize " + dataDirPath; }
       }).start();

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Mon Oct 29 02:21:09 2012
@@ -174,7 +174,7 @@ class DataXceiver implements Runnable, F
           out.writeShort(DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
           out.flush();
           throw new IOException("Access token verification failed, for client "
-              + remoteAddress + " for OP_READ_BLOCK for block " + block);
+              + remoteAddress + " for OP_READ_BLOCK for " + block);
         } finally {
           IOUtils.closeStream(out);
         }
@@ -187,7 +187,7 @@ class DataXceiver implements Runnable, F
         ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress,
             "%d", "HDFS_READ", clientName, "%d", 
             datanode.dnRegistration.getStorageID(), block, "%d")
-        : datanode.dnRegistration + " Served block " + block + " to " +
+        : datanode.dnRegistration + " Served " + block + " to " +
             s.getInetAddress();
     try {
       try {
@@ -222,9 +222,8 @@ class DataXceiver implements Runnable, F
        * Earlier version shutdown() datanode if there is disk error.
        */
       LOG.warn(datanode.dnRegistration +  ":Got exception while serving " + 
-          block + " to " +
-                s.getInetAddress() + ":\n" + 
-                StringUtils.stringifyException(ioe) );
+          block + " to " + s.getInetAddress() + ":\n" + 
+          StringUtils.stringifyException(ioe) );
       throw ioe;
     } finally {
       IOUtils.closeStream(out);
@@ -247,9 +246,8 @@ class DataXceiver implements Runnable, F
     //
     Block block = new Block(in.readLong(), 
         dataXceiverServer.estimateBlockSize, in.readLong());
-    LOG.info("Receiving block " + block + 
-             " src: " + remoteAddress +
-             " dest: " + localAddress);
+    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: "
+        + localAddress);
     int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
     boolean isRecovery = in.readBoolean(); // is this part of recovery?
     String client = Text.readString(in); // working on behalf of this client
@@ -285,7 +283,7 @@ class DataXceiver implements Runnable, F
             replyOut.flush();
           }
           throw new IOException("Access token verification failed, for client "
-              + remoteAddress + " for OP_WRITE_BLOCK for block " + block);
+              + remoteAddress + " for OP_WRITE_BLOCK for " + block);
         } finally {
           IOUtils.closeStream(replyOut);
         }
@@ -381,9 +379,9 @@ class DataXceiver implements Runnable, F
           if (client.length() > 0) {
             throw e;
           } else {
-            LOG.info(datanode.dnRegistration + ":Exception transfering block " +
+            LOG.info(datanode.dnRegistration + ":Exception transfering " +
                      block + " to mirror " + mirrorNode +
-                     ". continuing without the mirror.\n" +
+                     "- continuing without the mirror\n" +
                      StringUtils.stringifyException(e));
           }
         }
@@ -411,10 +409,8 @@ class DataXceiver implements Runnable, F
       // the block is finalized in the PacketResponder.
       if (client.length() == 0) {
         datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
-        LOG.info("Received block " + block + 
-                 " src: " + remoteAddress +
-                 " dest: " + localAddress +
-                 " of size " + block.getNumBytes());
+        LOG.info("Received " + block + " src: " + remoteAddress + " dest: "
+            + localAddress + " size " + block.getNumBytes());
       }
 
       if (datanode.blockScanner != null) {
@@ -454,7 +450,7 @@ class DataXceiver implements Runnable, F
           out.flush();
           throw new IOException(
               "Access token verification failed, for client " + remoteAddress
-                  + " for OP_BLOCK_CHECKSUM for block " + block);
+                  + " for OP_BLOCK_CHECKSUM for " + block);
         } finally {
           IOUtils.closeStream(out);
         }
@@ -512,7 +508,7 @@ class DataXceiver implements Runnable, F
             BlockTokenSecretManager.AccessMode.COPY);
       } catch (InvalidToken e) {
         LOG.warn("Invalid access token in request from "
-            + remoteAddress + " for OP_COPY_BLOCK for block " + block);
+            + remoteAddress + " for OP_COPY_BLOCK for " + block);
         sendResponse(s,
             (short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN,
             datanode.socketWriteTimeout);
@@ -522,7 +518,7 @@ class DataXceiver implements Runnable, F
 
     if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
       LOG.info("Not able to copy block " + blockId + " to " 
-          + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
+          + s.getRemoteSocketAddress() + " because threads quota is exceeded");
       sendResponse(s, (short)DataTransferProtocol.OP_STATUS_ERROR, 
           datanode.socketWriteTimeout);
       return;
@@ -552,7 +548,7 @@ class DataXceiver implements Runnable, F
       datanode.myMetrics.incrBytesRead((int) read);
       datanode.myMetrics.incrBlocksRead();
       
-      LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
+      LOG.info("Copied " + block + " to " + s.getRemoteSocketAddress());
     } catch (IOException ioe) {
       isOpSuccess = false;
       throw ioe;
@@ -643,11 +639,11 @@ class DataXceiver implements Runnable, F
       short status = proxyReply.readShort();
       if (status != DataTransferProtocol.OP_STATUS_SUCCESS) {
         if (status == DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN) {
-          throw new IOException("Copy block " + block + " from "
+          throw new IOException("Copy " + block + " from "
               + proxySock.getRemoteSocketAddress()
               + " failed due to access token error");
         }
-        throw new IOException("Copy block " + block + " from "
+        throw new IOException("Copy " + block + " from "
             + proxySock.getRemoteSocketAddress() + " failed");
       }
       // open a block receiver and check if the block does not exist
@@ -663,7 +659,7 @@ class DataXceiver implements Runnable, F
       // notify name node
       datanode.notifyNamenodeReceivedBlock(block, sourceID);
 
-      LOG.info("Moved block " + block + 
+      LOG.info("Moved " + block + 
           " from " + s.getRemoteSocketAddress());
       
     } catch (IOException ioe) {

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java Mon Oct 29 02:21:09 2012
@@ -109,15 +109,15 @@ class DatanodeBlockInfo {
       return false;
     }
     if (file == null || volume == null) {
-      throw new IOException("detachBlock:Block not found. " + block);
+      throw new IOException("detachBlock: not found " + block);
     }
     File meta = FSDataset.getMetaFile(file, block);
     if (meta == null) {
-      throw new IOException("Meta file not found for block " + block);
+      throw new IOException("Meta file not found for " + block);
     }
 
     if (HardLink.getLinkCount(file) > numLinks) {
-      DataNode.LOG.info("CopyOnWrite for block " + block);
+      DataNode.LOG.info("CopyOnWrite for " + block);
       detachFile(file, block);
     }
     if (HardLink.getLinkCount(meta) > numLinks) {

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Mon Oct 29 02:21:09 2012
@@ -565,7 +565,7 @@ public class FSDataset implements FSCons
             // add this block to block set
             blockSet.add(block);
             if (DataNode.LOG.isDebugEnabled()) {
-              DataNode.LOG.debug("recoverBlocksBeingWritten for block " + block);
+              DataNode.LOG.debug("recoverBlocksBeingWritten for " + block);
             }
           }
         }
@@ -766,7 +766,7 @@ public class FSDataset implements FSCons
         volumes = fsvs; // replace array of volumes
       }
       Log.info("Completed FSVolumeSet.checkDirs. Removed=" + removed_size + 
-          "volumes. List of current volumes: " +   toString());
+          " volumes. Current volumes: " +   toString());
       
       return removed_vols;
     }
@@ -1472,7 +1472,7 @@ public class FSDataset implements FSCons
         volumeMap.put(b, new DatanodeBlockInfo(v, f));
       } else {
         // reopening block for appending to it.
-        DataNode.LOG.info("Reopen Block for append " + b);
+        DataNode.LOG.info("Reopen for append " + b);
         v = volumeMap.get(b).getVolume();
         f = createTmpFile(v, b, replicationRequest);
         File blkfile = getBlockFile(b);
@@ -1491,19 +1491,18 @@ public class FSDataset implements FSCons
         DataNode.LOG.debug("Renaming " + blkfile + " to " + f);
         if (!blkfile.renameTo(f)) {
           if (!f.delete()) {
-            throw new IOException("Block " + b + " reopen failed. " +
+            throw new IOException(b + " reopen failed. " +
                                   " Unable to remove file " + f);
           }
           if (!blkfile.renameTo(f)) {
-            throw new IOException("Block " + b + " reopen failed. " +
+            throw new IOException(b + " reopen failed. " +
                                   " Unable to move block file " + blkfile +
                                   " to tmp dir " + f);
           }
         }
       }
       if (f == null) {
-        DataNode.LOG.warn("Block " + b + " reopen failed " +
-                          " Unable to locate tmp file.");
+        DataNode.LOG.warn(b + " reopen failed. Unable to locate tmp file");
         throw new IOException("Block " + b + " reopen failed " +
                               " Unable to locate tmp file.");
       }
@@ -1744,9 +1743,10 @@ public class FSDataset implements FSCons
       long st = System.currentTimeMillis();
       // broken out to a static method to simplify testing
       reconcileRoughBlockScan(seenOnDisk, volumeMap, ongoingCreates);
-      DataNode.LOG.info(
-          "Reconciled asynchronous block report against current state in " +
-          (System.currentTimeMillis() - st) + " ms");
+      if (DataNode.LOG.isDebugEnabled()) {
+        DataNode.LOG.debug("Reconciled block report with current state in "
+                + (System.currentTimeMillis() - st) + "ms");
+      }
       
       blockReport = seenOnDisk.keySet();
     }
@@ -2250,7 +2250,6 @@ public class FSDataset implements FSCons
           waitForReportRequest();
           assert requested && scan == null;
           
-          DataNode.LOG.info("Starting asynchronous block report scan");
           long st = System.currentTimeMillis();
           HashMap<Block, File> result = fsd.roughBlockScan();
           DataNode.LOG.info("Finished asynchronous block report scan in "

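Besides trimming message text, the FSDataset hunk above demotes the block-report
reconciliation message from info to debug and wraps it in an isDebugEnabled()
guard, so the string concatenation is skipped entirely when debug output is off.
A minimal sketch of that guard idiom (elapsedMs is a hypothetical local holding
the measured duration):

    if (DataNode.LOG.isDebugEnabled()) {
      // The message string is only built when debug logging is actually enabled.
      DataNode.LOG.debug("Reconciled block report with current state in "
          + elapsedMs + "ms");
    }
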
Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java Mon Oct 29 02:21:09 2012
@@ -130,7 +130,7 @@ class FSDatasetAsyncDiskService {
       // clear the executor map so that calling execute again will fail.
       executors = null;
       
-      LOG.info("All async disk service threads have been shut down.");
+      LOG.info("All async disk service threads have been shut down");
     }
   }
 
@@ -140,7 +140,7 @@ class FSDatasetAsyncDiskService {
    */
   void deleteAsync(FSDataset.FSVolume volume, File blockFile,
       File metaFile, long dfsBytes, String blockName) {
-    DataNode.LOG.info("Scheduling block " + blockName + " file " + blockFile
+    DataNode.LOG.info("Scheduling " + blockName + " file " + blockFile
         + " for deletion");
     ReplicaFileDeleteTask deletionTask = 
         new ReplicaFileDeleteTask(volume, blockFile, metaFile, dfsBytes,
@@ -175,18 +175,18 @@ class FSDatasetAsyncDiskService {
     @Override
     public String toString() {
       // Called in AsyncDiskService.execute for displaying error messages.
-      return "deletion of block " + blockName + " with block file " + blockFile
+      return "deletion of " + blockName + " with file " + blockFile
           + " and meta file " + metaFile + " from volume " + volume;
     }
 
     @Override
     public void run() {
       if ( !blockFile.delete() || ( !metaFile.delete() && metaFile.exists() ) ) {
-        DataNode.LOG.warn("Unexpected error trying to delete block "
+        DataNode.LOG.warn("Unexpected error trying to delete "
             + blockName + " at file " + blockFile + ". Ignored.");
       } else {
         volume.decDfsUsed(dfsBytes);
-        DataNode.LOG.info("Deleted block " + blockName + " at file " + blockFile);
+        DataNode.LOG.info("Deleted " + blockName + " at file " + blockFile);
       }
     }
   };

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java Mon Oct 29 02:21:09 2012
@@ -54,7 +54,7 @@ class UpgradeManagerDatanode extends Upg
     DataNode.LOG.info("\n   Distributed upgrade for DataNode " 
         + dataNode.dnRegistration.getName() 
         + " version " + getUpgradeVersion() + " to current LV " 
-        + FSConstants.LAYOUT_VERSION + " is initialized.");
+        + FSConstants.LAYOUT_VERSION + " is initialized");
     UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
     curUO.setDatanode(dataNode);
     upgradeState = curUO.preUpgradeAction(nsInfo);
@@ -99,7 +99,7 @@ class UpgradeManagerDatanode extends Upg
       DataNode.LOG.info("\n   Distributed upgrade for DataNode version " 
           + getUpgradeVersion() + " to current LV " 
           + FSConstants.LAYOUT_VERSION + " cannot be started. "
-          + "The upgrade object is not defined.");
+          + "The upgrade object is not defined");
       return false;
     }
     upgradeState = true;
@@ -111,7 +111,7 @@ class UpgradeManagerDatanode extends Upg
     DataNode.LOG.info("\n   Distributed upgrade for DataNode " 
         + dataNode.dnRegistration.getName() 
         + " version " + getUpgradeVersion() + " to current LV " 
-        + FSConstants.LAYOUT_VERSION + " is started.");
+        + FSConstants.LAYOUT_VERSION + " is started");
     return true;
   }
 
@@ -141,7 +141,7 @@ class UpgradeManagerDatanode extends Upg
     DataNode.LOG.info("\n   Distributed upgrade for DataNode " 
         + dataNode.dnRegistration.getName() 
         + " version " + getUpgradeVersion() + " to current LV " 
-        + FSConstants.LAYOUT_VERSION + " is complete.");
+        + FSConstants.LAYOUT_VERSION + " is complete");
   }
 
   synchronized void shutdownUpgrade() {

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java Mon Oct 29 02:21:09 2012
@@ -109,7 +109,7 @@ public abstract class UpgradeObjectDatan
     if(getUpgradeStatus() < 100) {
       DataNode.LOG.info("\n   Distributed upgrade for DataNode version " 
           + getVersion() + " to current LV " 
-          + FSConstants.LAYOUT_VERSION + " cannot be completed.");
+          + FSConstants.LAYOUT_VERSION + " cannot be completed");
     }
 
     // Complete the upgrade by calling the manager method

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Oct 29 02:21:09 2012
@@ -176,16 +176,13 @@ class FSDirectory implements FSConstants
       newNode = addNode(path, newNode, -1, false);
     }
     if (newNode == null) {
-      NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
-                                   +"failed to add "+path
-                                   +" to the file system");
+      NameNode.stateChangeLog.info("DIR* addFile: " + "failed to add " + path);
       return null;
     }
     // add create file record to log, record new generation stamp
     fsImage.getEditLog().logOpenFile(path, newNode);
 
-    NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
-                                  +path+" is added to the file system");
+    NameNode.stateChangeLog.debug("DIR* addFile: " + path + " is added");
     return newNode;
   }
 
@@ -291,7 +288,7 @@ class FSDirectory implements FSConstants
 
       NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
                                     + path + " with " + block
-                                    + " block is added to the in-memory "
+                                    + " is added to the in-memory "
                                     + "file system");
     }
     return block;
@@ -308,7 +305,7 @@ class FSDirectory implements FSConstants
       fsImage.getEditLog().logOpenFile(path, file);
       NameNode.stateChangeLog.debug("DIR* FSDirectory.persistBlocks: "
                                     +path+" with "+ file.getBlocks().length 
-                                    +" blocks is persisted to the file system");
+                                    +" blocks is persisted");
     }
   }
 
@@ -323,7 +320,7 @@ class FSDirectory implements FSConstants
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: "
                                     +path+" with "+ file.getBlocks().length 
-                                    +" blocks is persisted to the file system");
+                                    +" blocks is persisted");
       }
     }
   }
@@ -345,8 +342,7 @@ class FSDirectory implements FSConstants
       // write modified block locations to log
       fsImage.getEditLog().logOpenFile(path, fileNode);
       NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
-                                    +path+" with "+block
-                                    +" block is added to the file system");
+          + path + " with "+ block +" is added to the");
       // update space consumed
       INode[] pathINodes = getExistingPathINodes(path);
       updateCount(pathINodes, pathINodes.length-1, 0,

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1403148&r1=1403147&r2=1403148&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Oct 29 02:21:09 2012
@@ -205,8 +205,7 @@ public class FSEditLog {
       int bufSize = bufCurrent.size();
       if (bufSize != 0) {
         throw new IOException("FSEditStream has " + bufSize +
-                              " bytes still to be flushed and cannot " +
-                              "be closed.");
+           " bytes still to be flushed and cannot be closed.");
       } 
       bufCurrent.close();
       bufReady.close();