You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by jg...@apache.org on 2010/06/04 20:50:26 UTC

svn commit: r951528 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/

Author: jghoman
Date: Fri Jun  4 18:50:26 2010
New Revision: 951528

URL: http://svn.apache.org/viewvc?rev=951528&view=rev
Log:
HDFS-1185. Remove duplicate now() functions in DataNode, FSNamesystem. Contributed by Jeff Ames.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jun  4 18:50:26 2010
@@ -34,7 +34,10 @@ Trunk (unreleased changes)
 
     HDFS-1119. Introduce a GSet interface to BlocksMap.  (szetszwo)
 
-    HDFS-1184. Replace tabs in code with spaces. (Jeff via jghoman)
+    HDFS-1184. Replace tabs in code with spaces. (Jeff Ames via jghoman)
+
+    HDFS-1185. Remove duplicate now() functions in DataNode, FSNamesystem.
+    (Jeff Ames via jghoman)
 
   BUG FIXES
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Jun  4 18:50:26 2010
@@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Util;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
@@ -225,14 +226,6 @@ public class DataNode extends Configured
   public Server ipcServer;
 
   /**
-   * Current system time.
-   * @return current time in msec.
-   */
-  static long now() {
-    return System.currentTimeMillis();
-  }
-
-  /**
    * Create the DataNode given a configuration and an array of dataDirs.
    * 'dataDirs' is where the blocks are stored.
    */

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Fri Jun  4 18:50:26 2010
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
@@ -109,7 +110,7 @@ class DataXceiver extends DataTransferPr
                               + dataXceiverServer.maxXceiverCount);
       }
 
-      opStartTime = DataNode.now();
+      opStartTime = now();
       processOp(op, in);
     } catch (Throwable t) {
       LOG.error(datanode.dnRegistration + ":DataXceiver",t);
@@ -657,7 +658,7 @@ class DataXceiver extends DataTransferPr
   }
 
   private void updateDuration(MetricsTimeVaryingRate mtvr) {
-    mtvr.inc(DataNode.now() - opStartTime);
+    mtvr.inc(now() - opStartTime);
   }
 
   private void updateCounter(MetricsTimeVaryingInt localCounter,

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java Fri Jun  4 18:50:26 2010
@@ -26,6 +26,7 @@ import java.util.Iterator;
 
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -323,7 +324,7 @@ public class BackupStorage extends FSIma
     StorageDirectory sdEdits = itEdits.next();
     int numEdits = 0;
     File jSpoolFile = getJSpoolFile(sdEdits);
-    long startTime = FSNamesystem.now();
+    long startTime = now();
     if(jSpoolFile.exists()) {
       // load edits.new
       EditLogFileInputStream edits = new EditLogFileInputStream(jSpoolFile);
@@ -339,7 +340,7 @@ public class BackupStorage extends FSIma
 
     FSImage.LOG.info("Edits file " + jSpoolFile.getCanonicalPath() 
         + " of size " + jSpoolFile.length() + " edits # " + numEdits 
-        + " loaded in " + (FSNamesystem.now()-startTime)/1000 + " seconds.");
+        + " loaded in " + (now()-startTime)/1000 + " seconds.");
 
     // rename spool edits.new to edits making it in sync with the active node
     // subsequent journal records will go directly to edits

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java Fri Jun  4 18:50:26 2010
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFac
 
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
@@ -125,10 +126,10 @@ class Checkpointer extends Daemon {
 
     long lastCheckpointTime = 0;
     if(!backupNode.shouldCheckpointAtStartup())
-      lastCheckpointTime = FSNamesystem.now();
+      lastCheckpointTime = now();
     while(shouldRun) {
       try {
-        long now = FSNamesystem.now();
+        long now = now();
         boolean shouldCheckpoint = false;
         if(now >= lastCheckpointTime + periodMSec) {
           shouldCheckpoint = true;
@@ -210,7 +211,7 @@ class Checkpointer extends Daemon {
    * Create a new checkpoint
    */
   void doCheckpoint() throws IOException {
-    long startTime = FSNamesystem.now();
+    long startTime = now();
     NamenodeCommand cmd = 
       getNamenode().startCheckpoint(backupNode.getRegistration());
     CheckpointCommand cpCmd = null;
@@ -254,7 +255,7 @@ class Checkpointer extends Daemon {
     if(backupNode.isRole(NamenodeRole.CHECKPOINT))
         getFSImage().getEditLog().close();
     LOG.info("Checkpoint completed in "
-        + (FSNamesystem.now() - startTime)/1000 + " seconds."
+        + (now() - startTime)/1000 + " seconds."
         + " New Image Size: " + bnImage.getFsImageName().length());
   }
 }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java Fri Jun  4 18:50:26 2010
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.IOException;
 import java.io.OutputStream;
 
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.io.Writable;
 
 /**
@@ -79,9 +80,9 @@ implements JournalStream {
    */
   public void flush() throws IOException {
     numSync++;
-    long start = FSNamesystem.now();
+    long start = now();
     flushAndSync();
-    long end = FSNamesystem.now();
+    long end = now();
     totalTimeSync += (end - start);
   }
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Jun  4 18:50:26 2010
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -168,7 +169,7 @@ class FSDirectory implements Closeable {
     waitForReady();
 
     // Always do an implicit mkdirs for parent directory tree.
-    long modTime = FSNamesystem.now();
+    long modTime = now();
     if (!mkdirs(new Path(path).getParent().toString(), permissions, true,
         modTime)) {
       return null;
@@ -345,7 +346,7 @@ class FSDirectory implements Closeable {
    */
   void closeFile(String path, INodeFile file) {
     waitForReady();
-    long now = FSNamesystem.now();
+    long now = now();
     synchronized (rootDir) {
       // file is closed
       file.setModificationTimeForce(now);
@@ -394,7 +395,7 @@ class FSDirectory implements Closeable {
                                   +src+" to "+dst);
     }
     waitForReady();
-    long now = FSNamesystem.now();
+    long now = now();
     if (!unprotectedRenameTo(src, dst, now))
       return false;
     fsImage.getEditLog().logRename(src, dst, now);
@@ -413,7 +414,7 @@ class FSDirectory implements Closeable {
           + " to " + dst);
     }
     waitForReady();
-    long now = FSNamesystem.now();
+    long now = now();
     if (unprotectedRenameTo(src, dst, now, options)) {
       incrDeletedFileCount(1);
     }
@@ -842,7 +843,7 @@ class FSDirectory implements Closeable {
 
       unprotectedConcat(target, srcs);
       // do the commit
-      fsImage.getEditLog().logConcat(target, srcs, FSNamesystem.now());
+      fsImage.getEditLog().logConcat(target, srcs, now());
     }
   }
   
@@ -886,7 +887,7 @@ class FSDirectory implements Closeable {
       count++;
     }
     
-    long now = FSNamesystem.now();
+    long now = now();
     trgInode.setModificationTime(now);
     trgParent.setModificationTime(now);
     // update quota on the parent directory ('count' files removed, 0 space)
@@ -906,7 +907,7 @@ class FSDirectory implements Closeable {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
     }
     waitForReady();
-    long now = FSNamesystem.now();
+    long now = now();
     int filesRemoved = unprotectedDelete(src, collectedBlocks, now);
     if (filesRemoved <= 0) {
       return false;
@@ -1806,7 +1807,7 @@ class FSDirectory implements Closeable {
       QuotaExceededException, IOException {
     waitForReady();
 
-    final long modTime = FSNamesystem.now();
+    final long modTime = now();
     if (createParent) {
       final String parent = new Path(path).getParent().toString();
       if (!mkdirs(parent, dirPerms, true, modTime)) {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Fri Jun  4 18:50:26 2010
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType;
@@ -143,7 +144,7 @@ public class FSEditLog {
     fsimage = image;
     isSyncRunning = false;
     metrics = NameNode.getNameNodeMetrics();
-    lastPrintTime = FSNamesystem.now();
+    lastPrintTime = now();
   }
   
   private File getEditFile(StorageDirectory sd) {
@@ -372,11 +373,11 @@ public class FSEditLog {
    */
   int loadFSEdits(EditLogInputStream edits) throws IOException {
     DataInputStream in = edits.getDataInputStream();
-    long startTime = FSNamesystem.now();
+    long startTime = now();
     int numEdits = loadFSEdits(in, true);
     FSImage.LOG.info("Edits file " + edits.getName() 
         + " of size " + edits.length() + " edits # " + numEdits 
-        + " loaded in " + (FSNamesystem.now()-startTime)/1000 + " seconds.");
+        + " loaded in " + (now()-startTime)/1000 + " seconds.");
     return numEdits;
   }
 
@@ -860,7 +861,7 @@ public class FSEditLog {
       if(getNumEditStreams() == 0)
         throw new java.lang.IllegalStateException(NO_JOURNAL_STREAMS_WARNING);
       ArrayList<EditLogOutputStream> errorStreams = null;
-      long start = FSNamesystem.now();
+      long start = now();
       for(EditLogOutputStream eStream : editStreams) {
         FSImage.LOG.debug("loggin edits into " + eStream.getName()  + " stream");
         if(!eStream.isOperationSupported(op))
@@ -937,7 +938,7 @@ public class FSEditLog {
     id.txid = txid;
 
     // update statistics
-    long end = FSNamesystem.now();
+    long end = now();
     numTransactions++;
     totalTimeTransactions += (end-start);
     if (metrics != null) // Metrics is non-null only when used inside name node
@@ -1050,7 +1051,7 @@ public class FSEditLog {
       }
   
       // do the sync
-      long start = FSNamesystem.now();
+      long start = now();
       for (EditLogOutputStream eStream : streams) {
         try {
           eStream.flush();
@@ -1065,7 +1066,7 @@ public class FSEditLog {
           errorStreams.add(eStream);
         }
       }
-      long elapsed = FSNamesystem.now() - start;
+      long elapsed = now() - start;
       processIOError(errorStreams, true);
   
       if (metrics != null) // Metrics non-null only when used inside name node
@@ -1086,7 +1087,7 @@ public class FSEditLog {
   // print statistics every 1 minute.
   //
   private void printStatistics(boolean force) {
-    long now = FSNamesystem.now();
+    long now = now();
     if (lastPrintTime + 60000 > now && !force) {
       return;
     }
@@ -1635,7 +1636,7 @@ public class FSEditLog {
     if(getNumEditStreams() == 0)
       throw new java.lang.IllegalStateException(NO_JOURNAL_STREAMS_WARNING);
     ArrayList<EditLogOutputStream> errorStreams = null;
-    long start = FSNamesystem.now();
+    long start = now();
     for(EditLogOutputStream eStream : editStreams) {
       try {
         eStream.write(data, 0, length);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Fri Jun  4 18:50:26 2010
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.UpgradeManager;
 import org.apache.hadoop.hdfs.server.common.Util;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
@@ -509,10 +510,10 @@ public class FSImage extends Storage {
 
     // Do upgrade for each directory
     long oldCTime = this.getCTime();
-    this.cTime = FSNamesystem.now();  // generate new cTime for the state
+    this.cTime = now();  // generate new cTime for the state
     int oldLV = this.getLayoutVersion();
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
-    this.checkpointTime = FSNamesystem.now();
+    this.checkpointTime = now();
     for (Iterator<StorageDirectory> it = 
                            dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
@@ -998,7 +999,7 @@ public class FSImage extends Storage {
     // Recover from previous interrupted checkpoint, if any
     needToSave |= recoverInterruptedCheckpoint(latestNameSD, latestEditsSD);
 
-    long startTime = FSNamesystem.now();
+    long startTime = now();
     long imageSize = getImageFile(latestNameSD, NameNodeFile.IMAGE).length();
 
     //
@@ -1007,7 +1008,7 @@ public class FSImage extends Storage {
     latestNameSD.read();
     needToSave |= loadFSImage(getImageFile(latestNameSD, NameNodeFile.IMAGE));
     LOG.info("Image file of size " + imageSize + " loaded in " 
-        + (FSNamesystem.now() - startTime)/1000 + " seconds.");
+        + (now() - startTime)/1000 + " seconds.");
     
     // Load latest edits
     if (latestNameCheckpointTime > latestEditsCheckpointTime)
@@ -1227,7 +1228,7 @@ public class FSImage extends Storage {
   void saveFSImage(File newFile) throws IOException {
     FSNamesystem fsNamesys = getFSNamesystem();
     FSDirectory fsDir = fsNamesys.dir;
-    long startTime = FSNamesystem.now();
+    long startTime = now();
     //
     // Write out data
     //
@@ -1256,7 +1257,7 @@ public class FSImage extends Storage {
     }
 
     LOG.info("Image file of size " + newFile.length() + " saved in " 
-        + (FSNamesystem.now() - startTime)/1000 + " seconds.");
+        + (now() - startTime)/1000 + " seconds.");
   }
 
   /**
@@ -1278,7 +1279,7 @@ public class FSImage extends Storage {
     assert editLog != null : "editLog must be initialized";
     editLog.close();
     if(renewCheckpointTime)
-      this.checkpointTime = FSNamesystem.now();
+      this.checkpointTime = now();
     ArrayList<StorageDirectory> errorSDs = new ArrayList<StorageDirectory>();
 
     // mv current -> lastcheckpoint.tmp
@@ -1414,7 +1415,7 @@ public class FSImage extends Storage {
    */
   private int newNamespaceID() {
     Random r = new Random();
-    r.setSeed(FSNamesystem.now());
+    r.setSeed(now());
     int newID = 0;
     while(newID == 0)
       newID = r.nextInt(0x7FFFFFFF);  // use 31 bits only
@@ -1439,7 +1440,7 @@ public class FSImage extends Storage {
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.namespaceID = newNamespaceID();
     this.cTime = 0L;
-    this.checkpointTime = FSNamesystem.now();
+    this.checkpointTime = now();
     for (Iterator<StorageDirectory> it = 
                            dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
@@ -1710,7 +1711,7 @@ public class FSImage extends Storage {
   void resetVersion(boolean renewCheckpointTime) throws IOException {
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     if(renewCheckpointTime)
-      this.checkpointTime = FSNamesystem.now();
+      this.checkpointTime = now();
     
     ArrayList<StorageDirectory> al = null;
     for (Iterator<StorageDirectory> it = 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Jun  4 18:50:26 2010
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.Util;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
 import org.apache.hadoop.security.AccessControlException;
@@ -3755,14 +3756,6 @@ public class FSNamesystem implements FSC
     }
   }
     
-  /**
-   * Current system time.
-   * @return current time in msec.
-   */
-  static long now() {
-    return System.currentTimeMillis();
-  }
-    
   boolean setSafeMode(SafeModeAction action) throws IOException {
     if (action != SafeModeAction.SAFEMODE_GET) {
       checkSuperuserPrivilege();

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Fri Jun  4 18:50:26 2010
@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 
 /**
  * LeaseManager does the lease housekeeping for writing on files.   
@@ -204,17 +205,17 @@ public class LeaseManager {
     }
     /** Only LeaseManager object can renew a lease */
     private void renew() {
-      this.lastUpdate = FSNamesystem.now();
+      this.lastUpdate = now();
     }
 
     /** @return true if the Hard Limit Timer has expired */
     public boolean expiredHardLimit() {
-      return FSNamesystem.now() - lastUpdate > hardLimit;
+      return now() - lastUpdate > hardLimit;
     }
 
     /** @return true if the Soft Limit Timer has expired */
     public boolean expiredSoftLimit() {
-      return FSNamesystem.now() - lastUpdate > softLimit;
+      return now() - lastUpdate > softLimit;
     }
 
     /**

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java?rev=951528&r1=951527&r2=951528&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java Fri Jun  4 18:50:26 2010
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.util.*;
 import java.io.*;
 import java.util.*;
@@ -142,7 +143,7 @@ class PendingReplicationBlocks {
     private int numReplicasInProgress;
 
     PendingBlockInfo(int numReplicas) {
-      this.timeStamp = FSNamesystem.now();
+      this.timeStamp = now();
       this.numReplicasInProgress = numReplicas;
     }
 
@@ -151,7 +152,7 @@ class PendingReplicationBlocks {
     }
 
     void setTimeStamp() {
-      timeStamp = FSNamesystem.now();
+      timeStamp = now();
     }
 
     void incrementReplicas(int increment) {
@@ -193,7 +194,7 @@ class PendingReplicationBlocks {
       synchronized (pendingReplications) {
         Iterator<Map.Entry<Block, PendingBlockInfo>> iter =
                                     pendingReplications.entrySet().iterator();
-        long now = FSNamesystem.now();
+        long now = now();
         FSNamesystem.LOG.debug("PendingReplicationMonitor checking Q");
         while (iter.hasNext()) {
           Map.Entry<Block, PendingBlockInfo> entry = iter.next();