Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/07/29 09:10:53 UTC

svn commit: r1152128 [3/3] - in /hadoop/common/branches/HDFS-1073/hdfs: ./ src/c++/libhdfs/ src/contrib/ src/contrib/hdfsproxy/ src/docs/src/documentation/content/xdocs/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/...

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Jul 29 07:10:48 2011
@@ -164,7 +164,7 @@ import org.mortbay.util.ajax.JSON;
 @Metrics(context="dfs")
 public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     FSClusterStats, NameNodeMXBean {
-  public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
+  static final Log LOG = LogFactory.getLog(FSNamesystem.class);
 
   private static final ThreadLocal<StringBuilder> auditBuffer =
     new ThreadLocal<StringBuilder>() {
@@ -239,35 +239,9 @@ public class FSNamesystem implements FSC
   
   // Block pool ID used by this namenode
   String blockPoolId;
-    
-  /**
-   * Stores the datanode -> block map.  
-   * <p>
-   * Done by storing a set of {@link DatanodeDescriptor} objects, sorted by 
-   * storage id. In order to keep the storage map consistent it tracks 
-   * all storages ever registered with the namenode.
-   * A descriptor corresponding to a specific storage id can be
-   * <ul> 
-   * <li>added to the map if it is a new storage id;</li>
-   * <li>updated with a new datanode started as a replacement for the old one 
-   * with the same storage id; and </li>
-   * <li>removed if and only if an existing datanode is restarted to serve a
-   * different storage id.</li>
-   * </ul> <br>
-   * The list of the {@link DatanodeDescriptor}s in the map is checkpointed
-   * in the namespace image file. Only the {@link DatanodeInfo} part is 
-   * persistent, the list of blocks is restored from the datanode block
-   * reports. 
-   * <p>
-   * Mapping: StorageID -> DatanodeDescriptor
-   */
-  public final NavigableMap<String, DatanodeDescriptor> datanodeMap = 
-    new TreeMap<String, DatanodeDescriptor>();
 
   /**
-   * Stores a set of DatanodeDescriptor objects.
-   * This is a subset of {@link #datanodeMap}, containing nodes that are 
-   * considered alive.
+   * Stores a subset of datanodeMap, containing nodes that are considered alive.
    * The HeartbeatMonitor periodically checks for out-dated entries,
    * and removes them from the list.
    */
@@ -291,9 +265,6 @@ public class FSNamesystem implements FSC
 
   // heartbeatRecheckInterval is how often namenode checks for expired datanodes
   private long heartbeatRecheckInterval;
-  // heartbeatExpireInterval is how long namenode waits for datanode to report
-  // heartbeat
-  private long heartbeatExpireInterval;
 
   //resourceRecheckInterval is how often namenode checks for the disk space availability
   private long resourceRecheckInterval;
@@ -316,9 +287,6 @@ public class FSNamesystem implements FSC
    */
   private final GenerationStamp generationStamp = new GenerationStamp();
 
-  // Ask Datanode only up to this many blocks to delete.
-  public int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
-
   // precision of access times.
   private long accessTimePrecision = 0;
 
@@ -515,14 +483,9 @@ public class FSNamesystem implements FSC
     this.defaultPermission = PermissionStatus.createImmutable(
         fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
 
-    long heartbeatInterval = conf.getLong(
-        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
-        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000;
     this.heartbeatRecheckInterval = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
-    this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
-      10 * heartbeatInterval;
     
     this.serverDefaults = new FsServerDefaults(
         conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE),
@@ -533,14 +496,6 @@ public class FSNamesystem implements FSC
     this.maxFsObjects = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY, 
                                      DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
 
-    //default limit
-    this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, 
-                                         20*(int)(heartbeatInterval/1000));
-    //use conf value if it is set.
-    this.blockInvalidateLimit = conf.getInt(
-        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, this.blockInvalidateLimit);
-    LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY + "=" + this.blockInvalidateLimit);
-
     this.accessTimePrecision = conf.getLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
     this.supportAppends = conf.getBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
                                       DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
@@ -644,12 +599,7 @@ public class FSNamesystem implements FSC
       out.println("Live Datanodes: "+live.size());
       out.println("Dead Datanodes: "+dead.size());
       blockManager.metaSave(out);
-  
-      //
-      // Dump all datanodes
-      //
-      datanodeDump(out);
-  
+
       out.flush();
       out.close();
     } finally {
@@ -690,45 +640,7 @@ public class FSNamesystem implements FSC
     readLock();
     try {
       checkSuperuserPrivilege();
-  
-      DatanodeDescriptor node = getDatanode(datanode);
-      if (node == null) {
-        NameNode.stateChangeLog.warn("BLOCK* NameSystem.getBlocks: "
-            + "Asking for blocks from an unrecorded node " + datanode.getName());
-        throw new IllegalArgumentException(
-            "Unexpected exception.  Got getBlocks message for datanode " +
-            datanode.getName() + ", but there is no info for it");
-      }
-  
-      int numBlocks = node.numBlocks();
-      if(numBlocks == 0) {
-        return new BlocksWithLocations(new BlockWithLocations[0]);
-      }
-      Iterator<BlockInfo> iter = node.getBlockIterator();
-      int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
-      // skip blocks
-      for(int i=0; i<startBlock; i++) {
-        iter.next();
-      }
-      List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
-      long totalSize = 0;
-      BlockInfo curBlock;
-      while(totalSize<size && iter.hasNext()) {
-        curBlock = iter.next();
-        if(!curBlock.isComplete())  continue;
-        totalSize += addBlock(curBlock, results);
-      }
-      if(totalSize<size) {
-        iter = node.getBlockIterator(); // start from the beginning
-        for(int i=0; i<startBlock&&totalSize<size; i++) {
-          curBlock = iter.next();
-          if(!curBlock.isComplete())  continue;
-          totalSize += addBlock(curBlock, results);
-        }
-      }
-  
-      return new BlocksWithLocations(
-          results.toArray(new BlockWithLocations[results.size()]));
+      return blockManager.getBlocksWithLocations(datanode, size);  
     } finally {
       readUnlock();
     }
@@ -744,22 +656,6 @@ public class FSNamesystem implements FSC
         : ExportedBlockKeys.DUMMY_KEYS;
   }
 
-  /**
-   * Get all valid locations of the block & add the block to results
-   * return the length of the added block; 0 if the block is not added
-   */
-  private long addBlock(Block block, List<BlockWithLocations> results) {
-    assert hasReadOrWriteLock();
-    ArrayList<String> machineSet = blockManager.getValidLocations(block);
-    if(machineSet.size() == 0) {
-      return 0;
-    } else {
-      results.add(new BlockWithLocations(block, 
-          machineSet.toArray(new String[machineSet.size()])));
-      return block.getNumBytes();
-    }
-  }
-
   /////////////////////////////////////////////////////////
   //
   // These methods are called by HadoopFS clients
@@ -1797,7 +1693,8 @@ public class FSNamesystem implements FSC
       //find datanode descriptors
       chosen = new ArrayList<DatanodeDescriptor>();
       for(DatanodeInfo d : existings) {
-        final DatanodeDescriptor descriptor = getDatanode(d);
+        final DatanodeDescriptor descriptor = blockManager.getDatanodeManager(
+            ).getDatanode(d);
         if (descriptor != null) {
           chosen.add(descriptor);
         }
@@ -2624,7 +2521,8 @@ public class FSNamesystem implements FSC
         if (newtargets.length > 0) {
           descriptors = new DatanodeDescriptor[newtargets.length];
           for(int i = 0; i < newtargets.length; i++) {
-            descriptors[i] = getDatanode(newtargets[i]);
+            descriptors[i] = blockManager.getDatanodeManager().getDatanode(
+                newtargets[i]);
           }
         }
         if (closeFile) {
@@ -2768,15 +2666,6 @@ public class FSNamesystem implements FSC
     return Storage.getRegistrationID(dir.fsImage.getStorage());
   }
 
-  public boolean isDatanodeDead(DatanodeDescriptor node) {
-    return (node.getLastUpdate() <
-            (now() - heartbeatExpireInterval));
-  }
-    
-  private void setDatanodeDead(DatanodeDescriptor node) throws IOException {
-    node.setLastUpdate(0);
-  }
-
   /**
    * The given node has reported in.  This method should:
    * 1) Record the heartbeat, so the datanode isn't timed out
@@ -2794,91 +2683,32 @@ public class FSNamesystem implements FSC
         throws IOException {
     readLock();
     try {
-      return handleHeartbeatInternal(nodeReg, capacity, dfsUsed, 
-          remaining, blockPoolUsed, xceiverCount, xmitsInProgress, 
-          failedVolumes);
+      final int maxTransfer = blockManager.maxReplicationStreams - xmitsInProgress;
+      DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
+          nodeReg, blockPoolId, capacity, dfsUsed, remaining, blockPoolUsed,
+          xceiverCount, maxTransfer, failedVolumes);
+      if (cmds != null) {
+        return cmds;
+      }
+
+      //check distributed upgrade
+      DatanodeCommand cmd = getDistributedUpgradeCommand();
+      if (cmd != null) {
+        return new DatanodeCommand[] {cmd};
+      }
+      return null;
     } finally {
       readUnlock();
     }
   }
 
-  /** @see #handleHeartbeat(DatanodeRegistration, long, long, long, long, int, int, int) */
-  DatanodeCommand[] handleHeartbeatInternal(DatanodeRegistration nodeReg,
-      long capacity, long dfsUsed, long remaining, long blockPoolUsed,
-      int xceiverCount, int xmitsInProgress, int failedVolumes) 
-        throws IOException {
-    assert hasReadLock();
-    DatanodeCommand cmd = null;
-    synchronized (heartbeats) {
-      synchronized (datanodeMap) {
-        DatanodeDescriptor nodeinfo = null;
-        try {
-          nodeinfo = getDatanode(nodeReg);
-        } catch(UnregisteredNodeException e) {
-          return new DatanodeCommand[]{DatanodeCommand.REGISTER};
-        }
-        
-        // Check if this datanode should actually be shutdown instead. 
-        if (nodeinfo != null && nodeinfo.isDisallowed()) {
-          setDatanodeDead(nodeinfo);
-          throw new DisallowedDatanodeException(nodeinfo);
-        }
-         
-        if (nodeinfo == null || !nodeinfo.isAlive) {
-          return new DatanodeCommand[]{DatanodeCommand.REGISTER};
-        }
-
-        updateStats(nodeinfo, false);
-        nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, blockPoolUsed,
-            xceiverCount, failedVolumes);
-        updateStats(nodeinfo, true);
-        
-        //check lease recovery
-        BlockInfoUnderConstruction[] blocks = nodeinfo
-            .getLeaseRecoveryCommand(Integer.MAX_VALUE);
-        if (blocks != null) {
-          BlockRecoveryCommand brCommand = new BlockRecoveryCommand(
-              blocks.length);
-          for (BlockInfoUnderConstruction b : blocks) {
-            brCommand.add(new RecoveringBlock(
-                new ExtendedBlock(blockPoolId, b), b.getExpectedLocations(), b
-                    .getBlockRecoveryId()));
-          }
-          return new DatanodeCommand[] { brCommand };
-        }
-      
-        ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>(3);
-        //check pending replication
-        List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
-              blockManager.maxReplicationStreams - xmitsInProgress);
-        if (pendingList != null) {
-          cmd = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
-              pendingList);
-          cmds.add(cmd);
-        }
-        //check block invalidation
-        Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
-        if (blks != null) {
-          cmd = new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId, blks);
-          cmds.add(cmd);
-        }
-        // check access key update
-        if (isBlockTokenEnabled && nodeinfo.needKeyUpdate) {
-          cmds.add(new KeyUpdateCommand(blockTokenSecretManager.exportKeys()));
-          nodeinfo.needKeyUpdate = false;
-        }
-        if (!cmds.isEmpty()) {
-          return cmds.toArray(new DatanodeCommand[cmds.size()]);
-        }
-      }
-    }
-
-    //check distributed upgrade
-    cmd = getDistributedUpgradeCommand();
-    if (cmd != null) {
-      return new DatanodeCommand[] {cmd};
+  public void addKeyUpdateCommand(final List<DatanodeCommand> cmds,
+      final DatanodeDescriptor nodeinfo) {
+    // check access key update
+    if (isBlockTokenEnabled && nodeinfo.needKeyUpdate) {
+      cmds.add(new KeyUpdateCommand(blockTokenSecretManager.exportKeys()));
+      nodeinfo.needKeyUpdate = false;
     }
-    return null;
   }
 
   public void updateStats(DatanodeDescriptor node, boolean isAdded) {
@@ -3019,7 +2849,8 @@ public class FSNamesystem implements FSC
       ) throws UnregisteredNodeException {
     writeLock();
     try {
-      DatanodeDescriptor nodeInfo = getDatanode(nodeID);
+      DatanodeDescriptor nodeInfo = getBlockManager().getDatanodeManager(
+          ).getDatanode(nodeID);
       if (nodeInfo != null) {
         removeDatanode(nodeInfo);
       } else {
@@ -3035,7 +2866,7 @@ public class FSNamesystem implements FSC
    * Remove a datanode descriptor.
    * @param nodeInfo datanode descriptor.
    */
-  private void removeDatanode(DatanodeDescriptor nodeInfo) {
+  public void removeDatanode(DatanodeDescriptor nodeInfo) {
     assert hasWriteLock();
     synchronized (heartbeats) {
       if (nodeInfo.isAlive) {
@@ -3066,6 +2897,7 @@ public class FSNamesystem implements FSC
    * effect causes more datanodes to be declared dead.
    */
   void heartbeatCheck() {
+    final DatanodeManager datanodeManager = getBlockManager().getDatanodeManager();
     // It's OK to check safe mode w/o taking the lock here, we re-check
     // for safe mode after taking the lock before removing a datanode.
     if (isInSafeMode()) {
@@ -3081,7 +2913,7 @@ public class FSNamesystem implements FSC
         for (Iterator<DatanodeDescriptor> it = heartbeats.iterator();
              it.hasNext();) {
           DatanodeDescriptor nodeInfo = it.next();
-          if (isDatanodeDead(nodeInfo)) {
+          if (datanodeManager.isDatanodeDead(nodeInfo)) {
             expiredHeartbeats.incr();
             foundDead = true;
             nodeID = nodeInfo;
@@ -3097,21 +2929,7 @@ public class FSNamesystem implements FSC
           return;
         }
         try {
-          synchronized(heartbeats) {
-            synchronized (datanodeMap) {
-              DatanodeDescriptor nodeInfo = null;
-              try {
-                nodeInfo = getDatanode(nodeID);
-              } catch (IOException e) {
-                nodeInfo = null;
-              }
-              if (nodeInfo != null && isDatanodeDead(nodeInfo)) {
-                NameNode.stateChangeLog.info("BLOCK* NameSystem.heartbeatCheck: "
-                                             + "lost heartbeat from " + nodeInfo.getName());
-                removeDatanode(nodeInfo);
-              }
-            }
-          }
+          datanodeManager.removeDeadDatanode(nodeID);
         } finally {
           writeUnlock();
         }
@@ -3131,7 +2949,8 @@ public class FSNamesystem implements FSC
     writeLock();
     startTime = now(); //after acquiring write lock
     try {
-      DatanodeDescriptor node = getDatanode(nodeID);
+      final DatanodeDescriptor node = blockManager.getDatanodeManager(
+          ).getDatanode(nodeID);
       if (node == null || !node.isAlive) {
         throw new IOException("ProcessReport from dead or unregistered node: "
                               + nodeID.getName());
@@ -3271,7 +3090,8 @@ public class FSNamesystem implements FSC
                                          ) throws IOException {
     writeLock();
     try {
-      DatanodeDescriptor node = getDatanode(nodeID);
+      final DatanodeDescriptor node = blockManager.getDatanodeManager(
+          ).getDatanode(nodeID);
       if (node == null || !node.isAlive) {
         NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: " + block
             + " is received from dead or unregistered node " + nodeID.getName());
@@ -3477,33 +3297,7 @@ public class FSNamesystem implements FSC
                                           ArrayList<DatanodeDescriptor> dead) {
     readLock();
     try {
-      final List<DatanodeDescriptor> results = getBlockManager(
-          ).getDatanodeManager().getDatanodeListForReport(DatanodeReportType.ALL);    
-      for(Iterator<DatanodeDescriptor> it = results.iterator(); it.hasNext();) {
-        DatanodeDescriptor node = it.next();
-        if (isDatanodeDead(node))
-          dead.add(node);
-        else
-          live.add(node);
-      }
-    } finally {
-      readUnlock();
-    }
-  }
-
-  /**
-   * Prints information about all datanodes.
-   */
-  private void datanodeDump(PrintWriter out) {
-    readLock();
-    try {
-      synchronized (datanodeMap) {
-        out.println("Metasave: Number of datanodes: " + datanodeMap.size());
-        for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) {
-          DatanodeDescriptor node = it.next();
-          out.println(node.dumpDatanode());
-        }
-      }
+      getBlockManager().getDatanodeManager().fetchDatanodess(live, dead);
     } finally {
       readUnlock();
     }
@@ -3558,30 +3352,6 @@ public class FSNamesystem implements FSC
     checkSuperuserPrivilege();
     getFSImage().finalizeUpgrade();
   }
-    
-    
-  /**
-   * Get data node by storage ID.
-   * 
-   * @param nodeID
-   * @return DatanodeDescriptor or null if the node is not found.
-   * @throws IOException
-   */
-  public DatanodeDescriptor getDatanode(DatanodeID nodeID
-      ) throws UnregisteredNodeException {
-    assert hasReadOrWriteLock();
-    UnregisteredNodeException e = null;
-    DatanodeDescriptor node = datanodeMap.get(nodeID.getStorageID());
-    if (node == null) 
-      return null;
-    if (!node.getName().equals(nodeID.getName())) {
-      e = new UnregisteredNodeException(nodeID, node);
-      NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
-                                    + e.getLocalizedMessage());
-      throw e;
-    }
-    return node;
-  }
 
   /**
    * SafeModeInfo contains information related to the safe mode.
@@ -4490,43 +4260,14 @@ public class FSNamesystem implements FSC
   }
   
 
-  /**
-   * Number of live data nodes
-   * @return Number of live data nodes
-   */
   @Override // FSNamesystemMBean
   public int getNumLiveDataNodes() {
-    int numLive = 0;
-    synchronized (datanodeMap) {   
-      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); 
-                                                               it.hasNext();) {
-        DatanodeDescriptor dn = it.next();
-        if (!isDatanodeDead(dn) ) {
-          numLive++;
-        }
-      }
-    }
-    return numLive;
+    return getBlockManager().getDatanodeManager().getNumLiveDataNodes();
   }
-  
 
-  /**
-   * Number of dead data nodes
-   * @return Number of dead data nodes
-   */
   @Override // FSNamesystemMBean
   public int getNumDeadDataNodes() {
-    int numDead = 0;
-    synchronized (datanodeMap) {   
-      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); 
-                                                               it.hasNext();) {
-        DatanodeDescriptor dn = it.next();
-        if (isDatanodeDead(dn) ) {
-          numDead++;
-        }
-      }
-    }
-    return numDead;
+    return getBlockManager().getDatanodeManager().getNumDeadDataNodes();
   }
 
   /**
@@ -4686,11 +4427,12 @@ public class FSNamesystem implements FSC
     blockinfo.setNumBytes(newBlock.getNumBytes());
 
     // find the DatanodeDescriptor objects
+    final DatanodeManager dm = getBlockManager().getDatanodeManager();
     DatanodeDescriptor[] descriptors = null;
     if (newNodes.length > 0) {
       descriptors = new DatanodeDescriptor[newNodes.length];
       for(int i = 0; i < newNodes.length; i++) {
-        descriptors[i] = getDatanode(newNodes[i]);
+        descriptors[i] = dm.getDatanode(newNodes[i]);
       }
     }
     blockinfo.setExpectedLocations(descriptors);
@@ -4817,12 +4559,6 @@ public class FSNamesystem implements FSC
     return blockManager.numCorruptReplicas(blk);
   }
 
-  /** Get a datanode descriptor given corresponding storageID */
-  public DatanodeDescriptor getDatanode(String nodeID) {
-    assert hasReadOrWriteLock();
-    return datanodeMap.get(nodeID);
-  }
-
   /**
    * Return a range of corrupt replica block ids. Up to numExpectedBlocks 
    * blocks starting at the next block after startingBlockId are returned
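
The FSNamesystem hunks above are one side of a larger refactoring: the datanodeMap, the heartbeat-expiry logic, and the live/dead accounting all move out of FSNamesystem and behind blockManager.getDatanodeManager(). Below is a minimal self-contained sketch of the resulting call pattern; the *Stub types are hypothetical stand-ins, and only the method names are taken from the hunks above.

    // Hypothetical stand-ins mirroring only the calls visible in this diff.
    interface DatanodeManagerStub {
      Object getDatanode(String nodeID);    // replaces FSNamesystem.getDatanode
      boolean isDatanodeDead(Object node);  // replaces FSNamesystem.isDatanodeDead
      int getNumLiveDataNodes();            // replaces a loop over datanodeMap
      int getNumDeadDataNodes();
    }

    interface BlockManagerStub {
      DatanodeManagerStub getDatanodeManager();
    }

    class DelegationSketch {
      // After this commit FSNamesystem answers datanode queries by
      // delegating through the manager chain instead of consulting its
      // own (now deleted) datanodeMap.
      static int numLive(BlockManagerStub blockManager) {
        return blockManager.getDatanodeManager().getNumLiveDataNodes();
      }
    }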

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Fri Jul 29 07:10:48 2011
@@ -490,7 +490,7 @@ public class NNStorage extends Storage i
    * in this filesystem. */
   private void format(StorageDirectory sd) throws IOException {
    sd.clearDirectory(); // create current dir
-    sd.write();
+    writeProperties(sd);
     writeTransactionIdFile(sd, 0);
 
     LOG.info("Storage directory " + sd.getRoot()
@@ -533,10 +533,9 @@ public class NNStorage extends Storage i
   }
 
   @Override // Storage
-  protected void getFields(Properties props,
-                           StorageDirectory sd
-                           ) throws IOException {
-    super.getFields(props, sd);
+  protected void setFieldsFromProperties(
+      Properties props, StorageDirectory sd) throws IOException {
+    super.setFieldsFromProperties(props, sd);
     if (layoutVersion == 0) {
       throw new IOException("NameNode directory "
                             + sd.getRoot() + " is not formatted.");
@@ -592,10 +591,10 @@ public class NNStorage extends Storage i
    * @throws IOException
    */
   @Override // Storage
-  protected void setFields(Properties props,
+  protected void setPropertiesFromFields(Properties props,
                            StorageDirectory sd
                            ) throws IOException {
-    super.setFields(props, sd);
+    super.setPropertiesFromFields(props, sd);
     // Set blockpoolID in version with federation support
     if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       props.setProperty("blockpoolID", blockpoolID);
@@ -927,7 +926,7 @@ public class NNStorage extends Storage i
     while(sdit.hasNext()) {
       StorageDirectory sd = sdit.next();
       try {
-        Properties props = sd.readFrom(sd.getVersionFile());
+        Properties props = readPropertiesFile(sd.getVersionFile());
         cid = props.getProperty("clusterID");
         LOG.info("current cluster id for sd="+sd.getCurrentDir() + 
             ";lv=" + layoutVersion + ";cid=" + cid);
@@ -1027,7 +1026,7 @@ public class NNStorage extends Storage i
         FSImage.LOG.warn("Storage directory " + sd + " contains no VERSION file. Skipping...");
         continue;
       }
-      sd.read(); // sets layoutVersion
+      readProperties(sd); // sets layoutVersion
       minLayoutVersion = Math.min(minLayoutVersion, getLayoutVersion());
       maxLayoutVersion = Math.max(maxLayoutVersion, getLayoutVersion());
     }
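
The NNStorage changes track a rename of the Storage base-class property hooks, applied consistently here and in SecondaryNameNode below: sd.read()/sd.write() become readProperties(sd)/writeProperties(sd), getFields/setFields become setFieldsFromProperties/setPropertiesFromFields, and sd.readFrom(file) becomes readPropertiesFile(file). A self-contained sketch of the override pattern under the new names follows; the *Stub classes are hypothetical, and only the hook name and the layout-version guard come from the hunks above.

    import java.io.File;
    import java.io.IOException;
    import java.util.Properties;

    class StorageDirStub {
      File getRoot() { return new File("/tmp/name"); }  // placeholder path
    }

    class StorageStub {
      protected int layoutVersion;
      // Renamed hook (formerly getFields): copy persisted properties
      // into fields.
      protected void setFieldsFromProperties(Properties props,
          StorageDirStub sd) throws IOException {
        layoutVersion = Integer.parseInt(
            props.getProperty("layoutVersion", "0"));
      }
    }

    class NNStorageLikeStub extends StorageStub {
      @Override
      protected void setFieldsFromProperties(Properties props,
          StorageDirStub sd) throws IOException {
        super.setFieldsFromProperties(props, sd);
        if (layoutVersion == 0) {  // same guard as the hunk above
          throw new IOException("NameNode directory " + sd.getRoot()
              + " is not formatted.");
        }
      }
    }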

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Jul 29 07:10:48 2011
@@ -43,7 +43,7 @@ import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -157,20 +157,20 @@ public class NameNode implements Namenod
    * Following are nameservice specific keys.
    */
   public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
-    DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
-    DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
-    DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY
+    DFS_NAMENODE_RPC_ADDRESS_KEY,
+    DFS_NAMENODE_NAME_DIR_KEY,
+    DFS_NAMENODE_EDITS_DIR_KEY,
+    DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+    DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+    DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+    DFS_NAMENODE_HTTP_ADDRESS_KEY,
+    DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+    DFS_NAMENODE_KEYTAB_FILE_KEY,
+    DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+    DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
+    DFS_NAMENODE_BACKUP_ADDRESS_KEY,
+    DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+    DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY
   };
   
   public long getProtocolVersion(String protocol, 
@@ -265,7 +265,7 @@ public class NameNode implements Namenod
   public static void setServiceAddress(Configuration conf,
                                            String address) {
     LOG.info("Setting ADDRESS " + address);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
+    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
   }
   
   /**
@@ -277,7 +277,7 @@ public class NameNode implements Namenod
    */
   public static InetSocketAddress getServiceAddress(Configuration conf,
                                                         boolean fallback) {
-    String addr = conf.get(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    String addr = conf.get(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
       return fallback ? getAddress(conf) : null;
     }
@@ -363,11 +363,11 @@ public class NameNode implements Namenod
 
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
     return  NetUtils.createSocketAddr(
-        conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:50070"));
+        conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
   }
   
   protected void setHttpServerAddress(Configuration conf) {
-    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
         getHostPortString(getHttpAddress()));
   }
 
@@ -392,8 +392,8 @@ public class NameNode implements Namenod
    */
   void loginAsNameNodeUser(Configuration conf) throws IOException {
     InetSocketAddress socAddr = getRpcServerAddress(conf);
-    SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
-        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
+    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
+        DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
   }
   
   /**
@@ -406,8 +406,8 @@ public class NameNode implements Namenod
     UserGroupInformation.setConfiguration(conf);
     loginAsNameNodeUser(conf);
     int handlerCount = 
-      conf.getInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 
-                  DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT);
+      conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, 
+                  DFS_DATANODE_HANDLER_COUNT_DEFAULT);
 
     NameNode.initMetrics(conf, this.getRole());
     loadNamesystem(conf);
@@ -415,8 +415,8 @@ public class NameNode implements Namenod
     InetSocketAddress dnSocketAddr = getServiceRpcServerAddress(conf);
     if (dnSocketAddr != null) {
       int serviceHandlerCount =
-        conf.getInt(DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
-                    DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+        conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+                    DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
       this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this,
           dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount,
           false, conf, namesystem.getDelegationTokenSecretManager());
@@ -494,7 +494,8 @@ public class NameNode implements Namenod
     }
     startTrashEmptier(conf);
     
-    plugins = conf.getInstances("dfs.namenode.plugins", ServicePlugin.class);
+    plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
+        ServicePlugin.class);
     for (ServicePlugin p: plugins) {
       try {
         p.start(this);
@@ -1308,12 +1309,12 @@ public class NameNode implements Namenod
   private static boolean format(Configuration conf,
                                 boolean isConfirmationNeeded)
       throws IOException {
-    if (!conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
-                         DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
-      throw new IOException("The option " + DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
+    if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
+                         DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
+      throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
                              + " is set to false for this filesystem, so it "
                              + "cannot be formatted. You will need to set "
-                             + DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
+                             + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
                              + "to true in order to format this filesystem");
     }
     
@@ -1456,11 +1457,11 @@ public class NameNode implements Namenod
   }
 
   private static void setStartupOption(Configuration conf, StartupOption opt) {
-    conf.set("dfs.namenode.startup", opt.toString());
+    conf.set(DFS_NAMENODE_STARTUP_KEY, opt.toString());
   }
 
   static StartupOption getStartupOption(Configuration conf) {
-    return StartupOption.valueOf(conf.get("dfs.namenode.startup",
+    return StartupOption.valueOf(conf.get(DFS_NAMENODE_STARTUP_KEY,
                                           StartupOption.REGULAR.toString()));
   }
 
@@ -1552,10 +1553,10 @@ public class NameNode implements Namenod
     
     DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
     
-    if (conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
+    if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(FSConstants.HDFS_URI_SCHEME + "://"
-          + conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
-      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, defaultUri.toString());
+          + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
+      conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
     }
   }
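
The NameNode.java edits are mechanical but worth naming: a static import of DFSConfigKeys replaces every DFSConfigKeys.X qualifier with a bare X, and the remaining string literals ("dfs.namenode.startup", "dfs.namenode.plugins") are replaced by their named constants. A tiny sketch of the idiom; ConfigKeysStub is a hypothetical stand-in for DFSConfigKeys, though the constant name and its value are taken from the hunks above.

    // Stand-in for DFSConfigKeys; in the real file the import is
    // "import static org.apache.hadoop.hdfs.DFSConfigKeys.*;" as shown above.
    class ConfigKeysStub {
      static final String DFS_NAMENODE_STARTUP_KEY = "dfs.namenode.startup";
    }

    class StartupKeySketch {
      // With the static import in its own file this would read simply
      // DFS_NAMENODE_STARTUP_KEY; it is qualified here so the sketch
      // compiles as a single unit.
      static String startupKey() {
        return ConfigKeysStub.DFS_NAMENODE_STARTUP_KEY;
      }
    }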
     

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Jul 29 07:10:48 2011
@@ -41,7 +41,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -182,8 +182,8 @@ public class SecondaryNameNode implement
   
   public static InetSocketAddress getHttpAddress(Configuration conf) {
     return NetUtils.createSocketAddr(conf.get(
-        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
   }
   
   /**
@@ -196,15 +196,12 @@ public class SecondaryNameNode implement
     infoBindAddress = infoSocAddr.getHostName();
     UserGroupInformation.setConfiguration(conf);
     if (UserGroupInformation.isSecurityEnabled()) {
-      SecurityUtil.login(conf, 
-          DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
-          DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY,
-          infoBindAddress);
+      SecurityUtil.login(conf, DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
+          DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
     }
     // initiate Java VM metrics
     JvmMetrics.create("SecondaryNameNode",
-        conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
-        DefaultMetricsSystem.instance());
+        conf.get(DFS_METRICS_SESSION_ID_KEY), DefaultMetricsSystem.instance());
     
     // Create connection to the namenode.
     shouldRun = true;
@@ -226,13 +223,13 @@ public class SecondaryNameNode implement
 
     // Initialize other scheduling parameters from the configuration
     checkpointCheckPeriod = conf.getLong(
-        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
-        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT);
+        DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
+        DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT);
         
-    checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
-                                    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
-    checkpointTxnCount = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 
-                                  DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
+    checkpointPeriod = conf.getLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
+                                    DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
+    checkpointTxnCount = conf.getLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 
+                                  DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
     warnForDeprecatedConfigs(conf);
 
     // initialize the webserver for uploading files.
@@ -240,9 +237,9 @@ public class SecondaryNameNode implement
     UserGroupInformation httpUGI = 
       UserGroupInformation.loginUserFromKeytabAndReturnUGI(
           SecurityUtil.getServerPrincipal(conf
-              .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
+              .get(DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
               infoBindAddress),
-          conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
+          conf.get(DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
     try {
       infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {
         @Override
@@ -253,7 +250,7 @@ public class SecondaryNameNode implement
           int tmpInfoPort = infoSocAddr.getPort();
           infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
               tmpInfoPort == 0, conf, 
-              new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " ")));
+              new AccessControlList(conf.get(DFS_ADMIN, " ")));
           
           if(UserGroupInformation.isSecurityEnabled()) {
             System.setProperty("https.cipherSuites", 
@@ -286,7 +283,7 @@ public class SecondaryNameNode implement
       imagePort = infoPort;
     }
     
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); 
+    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); 
     LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
     LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
     LOG.info("Checkpoint Period   :" + checkpointPeriod + " secs " +
@@ -301,7 +298,7 @@ public class SecondaryNameNode implement
       if (conf.get(key) != null) {
         LOG.warn("Configuration key " + key + " is deprecated! Ignoring..." +
             " Instead please specify a value for " +
-            DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY);
+            DFS_NAMENODE_CHECKPOINT_TXNS_KEY);
       }
     }
   }
@@ -796,7 +793,7 @@ public class SecondaryNameNode implement
         
         StorageState curState;
         try {
-          curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
+          curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage);
           // sd is locked but not opened
           switch(curState) {
           case NON_EXISTENT:
@@ -810,7 +807,7 @@ public class SecondaryNameNode implement
             // (a) the VERSION file for each of the directories is the same,
             // and (b) when we connect to a NN, we can verify that the remote
             // node matches the same namespace that we ran on previously.
-            sd.read();
+            storage.readProperties(sd);
             break;
           default:  // recovery is possible
             sd.doRecover(curState);

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java Fri Jul 29 07:10:48 2011
@@ -236,7 +236,7 @@ public class TestFiPipelines {
 
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
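
This logger change recurs in the test files below: since the first hunk of this commit removes the public modifier from FSNamesystem.LOG, tests look the logger up by class instead. LogFactory.getLog(FSNamesystem.class) resolves to the same named logger, so raising its level still affects FSNamesystem's own output. A sketch of the pattern, assuming FSNamesystem, commons-logging, and log4j are on the classpath as in these tests:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.commons.logging.impl.Log4JLogger;
    import org.apache.log4j.Level;

    class LoggerLevelSketch {
      static void enableAllFSNamesystemLogging() {
        // Same underlying log4j logger that FSNamesystem.LOG wraps.
        Log log = LogFactory.getLog(
            org.apache.hadoop.hdfs.server.namenode.FSNamesystem.class);
        ((Log4JLogger) log).getLogger().setLevel(Level.ALL);
      }
    }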

Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jul 29 07:10:48 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/test/hdfs:1134994-1150966
+/hadoop/common/trunk/hdfs/src/test/hdfs:1134994-1151750
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Fri Jul 29 07:10:48 2011
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -221,7 +222,9 @@ public class TestDFSUpgradeFromImage ext
         .build();
       fail("Was able to start NN from 0.3.0 image");
     } catch (IOException ioe) {
-      assertTrue(ioe.toString().contains("Old layout version is 'too old'"));
+      if (!ioe.toString().contains("Old layout version is 'too old'")) {
+        throw ioe;
+      }
     }
   }
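
The TestDFSUpgradeFromImage change replaces an assertTrue on the exception text with a rethrow of anything that does not match, so an unexpected IOException surfaces with its full stack trace rather than as a bare assertion failure. The idiom in isolation, as a hedged sketch (the message string is the one from the test above):

    import java.io.IOException;

    class ExpectedExceptionSketch {
      static void expectTooOldLayout(IOException ioe) throws IOException {
        if (!ioe.toString().contains("Old layout version is 'too old'")) {
          throw ioe;  // not the expected failure: surface it verbatim
        }
        // Expected message matched: swallow so the test passes.
      }
    }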
   

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java Fri Jul 29 07:10:48 2011
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -44,7 +45,7 @@ public class TestDatanodeDeath extends T
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java Fri Jul 29 07:10:48 2011
@@ -226,7 +226,7 @@ public class TestDecommission {
     writeConfigFile(excludeFile, nodes);
     cluster.getNamesystem(nnIndex).refreshNodes(conf);
     DatanodeInfo ret = NameNodeAdapter.getDatanode(
-        cluster.getNameNode(nnIndex), info[index]);
+        cluster.getNamesystem(nnIndex), info[index]);
     waitNodeState(ret, waitForState);
     return ret;
   }
@@ -466,7 +466,7 @@ public class TestDecommission {
       // Stop decommissioning and verify stats
       writeConfigFile(excludeFile, null);
       fsn.refreshNodes(conf);
-      DatanodeInfo ret = NameNodeAdapter.getDatanode(namenode, downnode);
+      DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
       waitNodeState(ret, AdminStates.NORMAL);
       verifyStats(namenode, fsn, ret, false);
     }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java Fri Jul 29 07:10:48 2011
@@ -37,6 +37,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.log4j.Level;
 
@@ -49,7 +50,7 @@ public class TestFileAppend2 extends Tes
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java Fri Jul 29 07:10:48 2011
@@ -24,6 +24,7 @@ import junit.extensions.TestSetup;
 import junit.framework.Test;
 import junit.framework.TestSuite;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -46,7 +47,7 @@ public class TestFileAppend3 extends jun
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java Fri Jul 29 07:10:48 2011
@@ -68,7 +68,7 @@ public class TestFileAppend4 {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java Fri Jul 29 07:10:48 2011
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -54,7 +55,7 @@ public class TestFileConcurrentReader ex
 
   {
     ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java Fri Jul 29 07:10:48 2011
@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -48,7 +49,7 @@ import org.apache.log4j.Level;
 public class TestFileCorruption extends TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
   }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Fri Jul 29 07:10:48 2011
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.EnumSet;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
@@ -59,7 +60,7 @@ public class TestFileCreation extends ju
   {
     //((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java Fri Jul 29 07:10:48 2011
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -40,7 +41,7 @@ public class TestFileCreationClient exte
   {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
   }
 

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java Fri Jul 29 07:10:48 2011
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,7 +34,7 @@ public class TestFileCreationDelete exte
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
   public void testFileCreationDeleteParent() throws IOException {

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java Fri Jul 29 07:10:48 2011
@@ -21,7 +21,7 @@ import java.util.ConcurrentModificationE
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 
 /**
  * Test empty file creation.
@@ -40,7 +40,7 @@ public class TestFileCreationEmpty exten
     Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
       public void uncaughtException(Thread t, Throwable e) {
         if (e instanceof ConcurrentModificationException) {
-          FSNamesystem.LOG.error("t=" + t, e);
+          LeaseManager.LOG.error("t=" + t, e);
           isConcurrentModificationException = true;
         }
       }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Fri Jul 29 07:10:48 2011
@@ -26,6 +26,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Random;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -51,7 +52,7 @@ import org.junit.Test;
  */
 public class TestFileStatus {
   {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
   }
 

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Fri Jul 29 07:10:48 2011
@@ -57,7 +57,7 @@ public class TestLeaseRecovery2 {
   {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
   static final private long BLOCK_SIZE = 1024;

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java Fri Jul 29 07:10:48 2011
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -53,7 +54,7 @@ public class TestMultiThreadedHflush {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java Fri Jul 29 07:10:48 2011
@@ -160,7 +160,7 @@ public class TestPipelines {
 
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java Fri Jul 29 07:10:48 2011
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -38,7 +39,7 @@ import org.junit.Test;
 /** Test reading from hdfs while a file is being written. */
 public class TestReadWhileWriting {
   {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java Fri Jul 29 07:10:48 2011
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,7 +34,7 @@ public class TestRenameWhileOpen extends
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
   //TODO: un-comment checkFullFile once the lease recovery is done

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Fri Jul 29 07:10:48 2011
@@ -409,18 +409,15 @@ public class UpgradeUtilities {
    */
   public static File[] createNameNodeVersionFile(Configuration conf,
       File[] parent, StorageInfo version, String bpid) throws IOException {
-    Storage storage = null;
-    File[] versionFiles = new File[parent.length];
-    for (int i = 0; i < parent.length; i++) {
-      File versionFile = new File(parent[i], "VERSION");
-      FileUtil.fullyDelete(versionFile);
-      storage = new NNStorage(conf, 
+    Storage storage = new NNStorage(conf, 
                               Collections.<URI>emptyList(), 
                               Collections.<URI>emptyList());
-      storage.setStorageInfo(version);
-      StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
-      sd.write(versionFile);
-      versionFiles[i] = versionFile;
+    storage.setStorageInfo(version);
+    File[] versionFiles = new File[parent.length];
+    for (int i = 0; i < parent.length; i++) {
+      versionFiles[i] = new File(parent[i], "VERSION");
+      StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
+      storage.writeProperties(versionFiles[i], sd);
     }
     return versionFiles;
   }
@@ -453,14 +450,13 @@ public class UpgradeUtilities {
    */
   public static void createDataNodeVersionFile(File[] parent,
       StorageInfo version, String bpid, String bpidToWrite) throws IOException {
-    DataStorage storage = null;
+    DataStorage storage = new DataStorage(version, "doNotCare");
+
     File[] versionFiles = new File[parent.length];
     for (int i = 0; i < parent.length; i++) {
       File versionFile = new File(parent[i], "VERSION");
-      FileUtil.fullyDelete(versionFile);
-      storage = new DataStorage(version, "doNotCare");
-      StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
-      sd.write(versionFile);
+      StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
+      storage.writeProperties(versionFile, sd);
       versionFiles[i] = versionFile;
       File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
       createBlockPoolVersionFile(bpDir, version, bpidToWrite);
@@ -475,9 +471,8 @@ public class UpgradeUtilities {
       BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
           bpid);
       File versionFile = new File(bpCurDir, "VERSION");
-      FileUtil.fullyDelete(versionFile);
-      StorageDirectory sd = bpStorage.new StorageDirectory(bpDir);
-      sd.write(versionFile);
+      StorageDirectory sd = new StorageDirectory(bpDir);
+      bpStorage.writeProperties(versionFile, sd);
     }
   }
   

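The UpgradeUtilities rewrite above reflects two shifts in the Storage API: a
StorageDirectory is now constructed directly rather than as an inner class of a
Storage instance, and VERSION files are written through
storage.writeProperties(file, sd) instead of sd.write(file); the dropped
FileUtil.fullyDelete calls suggest writeProperties handles overwriting on its
own. A reduced sketch of the namenode-side shape, reusing the constructors from
the hunk above (the wrapper class and writeVersion helper are hypothetical):

    import java.io.File;
    import java.io.IOException;
    import java.net.URI;
    import java.util.Collections;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.common.Storage;
    import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
    import org.apache.hadoop.hdfs.server.common.StorageInfo;
    import org.apache.hadoop.hdfs.server.namenode.NNStorage;

    public class VersionFileExample {  // hypothetical
      static File writeVersion(Configuration conf, File parent, StorageInfo version)
          throws IOException {
        // One shared Storage instance now suffices across directories.
        Storage storage = new NNStorage(conf,
            Collections.<URI>emptyList(), Collections.<URI>emptyList());
        storage.setStorageInfo(version);
        File versionFile = new File(parent, "VERSION");
        // Direct construction replaces storage.new StorageDirectory(...).
        StorageDirectory sd = new StorageDirectory(parent.getParentFile());
        storage.writeProperties(versionFile, sd);  // was: sd.write(versionFile)
        return versionFile;
      }
    }
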
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java Fri Jul 29 07:10:48 2011
@@ -25,6 +25,7 @@ import java.util.List;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -56,7 +57,7 @@ public class TestBalancerWithMultipleNam
     ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
 //    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
   }
 

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Fri Jul 29 07:10:48 2011
@@ -25,9 +25,29 @@ import java.util.Set;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Daemon;
 
 public class BlockManagerTestUtil {
+
+  /** @return the datanode descriptor for the given storageID. */
+  public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
+      final String storageID) {
+    ns.readLock();
+    try {
+      return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
+    } finally {
+      ns.readUnlock();
+    }
+  }
+
+  /**
+   * Refresh block queue counts on the name-node.
+   */
+  public static void updateState(final BlockManager blockManager) {
+    blockManager.updateState();
+  }
+
   /**
    * @return a tuple of the replica state (number of racks, number of live
    * replicas, and number of needed replicas) for the given block.

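Both new helpers route through the BlockManager: getDatanode wraps the
DatanodeManager lookup in the namesystem read lock so callers no longer manage
the lock themselves, and updateState stands in for
NameNodeAdapter.refreshBlockCounts, which is removed further down. Usage as in
the updated callers in this commit (the wrapper class is hypothetical):

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

    public class BlockManagerTestUtilUsage {  // hypothetical
      static DatanodeDescriptor lookup(MiniDFSCluster cluster, String storageID) {
        FSNamesystem ns = cluster.getNamesystem();
        // Lock acquisition happens inside the helper.
        DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(ns, storageID);
        // Refresh block queue counts before reading stats off the namesystem.
        BlockManagerTestUtil.updateState(ns.getBlockManager());
        return dd;
      }
    }
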
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java Fri Jul 29 07:10:48 2011
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -54,14 +55,8 @@ public class TestHeartbeatHandling exten
       final String poolId = namesystem.getBlockPoolId();
       final DatanodeRegistration nodeReg = 
         DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
-        
-      namesystem.readLock();
-      DatanodeDescriptor dd;
-      try {
-        dd = namesystem.getDatanode(nodeReg);
-      } finally {
-        namesystem.readUnlock();
-      }
+
+      final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
       
       final int REMAINING_BLOCKS = 1;
       final int MAX_REPLICATE_LIMIT = 

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Fri Jul 29 07:10:48 2011
@@ -596,7 +596,7 @@ public class TestBlockReport {
   }
 
   private void printStats() {
-    NameNodeAdapter.refreshBlockCounts(cluster.getNameNode());
+    BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager());
     if(LOG.isDebugEnabled()) {
       LOG.debug("Missing " + cluster.getNamesystem().getMissingBlocksCount());
       LOG.debug("Corrupted " + cluster.getNamesystem().getCorruptReplicaBlocks());
@@ -667,7 +667,7 @@ public class TestBlockReport {
 
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) TestBlockReport.LOG).getLogger().setLevel(Level.ALL);
   }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Fri Jul 29 07:10:48 2011
@@ -46,14 +46,6 @@ public class NameNodeAdapter {
   }
 
   /**
-   * Refresh block queue counts on the name-node.
-   * @param namenode to proxy the invocation to
-   */
-  public static void refreshBlockCounts(NameNode namenode) {
-    namenode.getNamesystem().getBlockManager().updateState();
-  }
-
-  /**
    * Get the internal RPC server instance.
    * @return rpc server
    */
@@ -68,12 +60,11 @@ public class NameNodeAdapter {
   /**
    * Return the datanode descriptor for the given datanode.
    */
-  public static DatanodeDescriptor getDatanode(NameNode namenode,
+  public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       DatanodeID id) throws IOException {
-    FSNamesystem ns = namenode.getNamesystem();
     ns.readLock();
     try {
-      return ns.getDatanode(id);
+      return ns.getBlockManager().getDatanodeManager().getDatanode(id);
     } finally {
       ns.readUnlock();
     }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Fri Jul 29 07:10:48 2011
@@ -35,6 +35,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.junit.After;
 import org.junit.Before;
@@ -54,7 +55,7 @@ public class TestClusterId {
     Iterator<StorageDirectory> sdit = 
       fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);
     StorageDirectory sd = sdit.next();
-    Properties props = sd.readFrom(sd.getVersionFile());
+    Properties props = Storage.readPropertiesFile(sd.getVersionFile());
     String cid = props.getProperty("clusterID");
     LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
     return cid;

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Fri Jul 29 07:10:48 2011
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
@@ -61,13 +62,8 @@ public class TestDeadDatanode {
     FSNamesystem namesystem = cluster.getNamesystem();
     String state = alive ? "alive" : "dead";
     while (System.currentTimeMillis() < stopTime) {
-      namesystem.readLock();
-      DatanodeDescriptor dd;
-      try {
-        dd = namesystem.getDatanode(nodeID);
-      } finally {
-        namesystem.readUnlock();
-      }
+      final DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(
+          namesystem, nodeID);
       if (dd.isAlive == alive) {
         LOG.info("datanode " + nodeID + " is " + state);
         return;

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java Fri Jul 29 07:10:48 2011
@@ -87,18 +87,13 @@ public class TestEditLogJournalFailures 
     assertTrue(doAnEdit());
     // Invalidate both edits journals.
     invalidateEditsDirAtIndex(0, true);
-    EditLogOutputStream elos = invalidateEditsDirAtIndex(1, true);
+    invalidateEditsDirAtIndex(1, true);
     // Make sure runtime.exit(...) hasn't been called at all yet.
     assertExitInvocations(0);
     assertTrue(doAnEdit());
     // The previous edit could not be synced to any persistent storage, which
     // should have halted the NN.
     assertExitInvocations(1);
-    // Restore an edits journal to working order.
-    restoreEditsDirAtIndex(1, elos);
-    assertTrue(doAnEdit());
-    // Make sure we didn't make another call to runtime.exit(...).
-    assertExitInvocations(1);
   }
   
   @Test

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Fri Jul 29 07:10:48 2011
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.common.StorageAdapter;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.log4j.Level;
@@ -148,10 +147,8 @@ public class TestSaveNamespace {
       break;
     case WRITE_STORAGE_ONE:
       // The spy throws an exception on one particular storage directory
-      StorageDirectory dir = StorageAdapter.spyOnStorageDirectory(
-          storage, 1);
-      doThrow(new RuntimeException("Injected"))
-        .when(dir).write();
+      doAnswer(new FaultySaveImage(true))
+        .when(spyStorage).writeProperties((StorageDirectory)anyObject());
       // TODO: unfortunately this fails -- should be improved.
       // See HDFS-2173.
       shouldFail = true;

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Fri Jul 29 07:10:48 2011
@@ -131,8 +131,7 @@ public class TestStorageRestore {
           EditLogOutputStream mockStream = spy(j.getCurrentStream());
           j.setCurrentStreamForTests(mockStream);
           doThrow(new IOException("Injected fault: write")).
-            when(mockStream).write(Mockito.anyByte(),
-                Mockito.anyLong(), (Writable[]) Mockito.anyVararg());
+            when(mockStream).write(Mockito.<FSEditLogOp>anyObject());
         }
       }
     }

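The injected fault in TestStorageRestore now targets the op-based
write(FSEditLogOp) on EditLogOutputStream rather than the old raw
(opcode, txid, Writable...) form, so a single typed matcher suffices. The full
stubbing shape from the surrounding loop, as a reduced sketch (static imports
from org.mockito.Mockito assumed; "j" is the journal wrapper iterated by the
test):

    EditLogOutputStream mockStream = spy(j.getCurrentStream());
    j.setCurrentStreamForTests(mockStream);
    doThrow(new IOException("Injected fault: write"))
        .when(mockStream).write(Mockito.<FSEditLogOp>anyObject());
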
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1152128&r1=1152127&r2=1152128&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Fri Jul 29 07:10:48 2011
@@ -84,7 +84,7 @@ public class TestBlockRecovery {
     new NamespaceInfo(1,CLUSTER_ID, POOL_ID, 2, 3);
 
   static {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
   }
 

Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jul 29 07:10:48 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/webapps/datanode:1134994-1150966
+/hadoop/common/trunk/hdfs/src/webapps/datanode:1134994-1151750
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:987665-1095512

Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jul 29 07:10:48 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/webapps/hdfs:1134994-1150966
+/hadoop/common/trunk/hdfs/src/webapps/hdfs:1134994-1151750
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:987665-1095512

Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jul 29 07:10:48 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/webapps/secondary:1134994-1150966
+/hadoop/common/trunk/hdfs/src/webapps/secondary:1134994-1151750
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:987665-1095512