Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/06/21 22:09:57 UTC

svn commit: r1138160 [5/6] - in /hadoop/common/branches/HDFS-1073/hdfs: ./ bin/ src/c++/libhdfs/ src/c++/libhdfs/tests/ src/contrib/ src/contrib/fuse-dfs/ src/contrib/fuse-dfs/src/ src/contrib/hdfsproxy/ src/docs/src/documentation/content/xdocs/ src/ja...

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Tue Jun 21 20:09:54 2011
@@ -391,6 +391,7 @@ public class LeaseManager {
 
   /** Check the leases beginning from the oldest. */
   private synchronized void checkLeases() {
+    assert fsnamesystem.hasWriteLock();
     for(; sortedLeases.size() > 0; ) {
       final Lease oldest = sortedLeases.first();
       if (!oldest.expiredHardLimit()) {

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Tue Jun 21 20:09:54 2011
@@ -868,6 +868,40 @@ public class NNStorage extends Storage i
     LOG.debug("at the end current list of storage dirs:" + lsd);
   }
   
+  /** 
+   * Processes the startup options for the clusterid and blockpoolid 
+   * for the upgrade. 
+   * @param startOpt Startup options 
+   * @param layoutVersion Layout version for the upgrade 
+   * @throws IOException
+   */
+  void processStartupOptionsForUpgrade(StartupOption startOpt, int layoutVersion)
+      throws IOException {
+    if (startOpt == StartupOption.UPGRADE) {
+      // When upgrading from a release that does not support federation,
+      // use the clusterId provided in the startup options if present;
+      // otherwise generate a new cluster ID.
+      if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+        if (startOpt.getClusterId() == null) {
+          startOpt.setClusterId(newClusterID());
+        }
+        setClusterID(startOpt.getClusterId());
+        setBlockPoolID(newBlockPoolID());
+      } else {
+        // Upgrading from one federation-supporting version to another
+        // does not require a clusterID.
+        // Warn the user if the current clusterid does not match the
+        // clusterid given in the startup options.
+        if (startOpt.getClusterId() != null
+            && !startOpt.getClusterId().equals(getClusterID())) {
+          LOG.warn("Clusterid mismatch - current clusterid: " + getClusterID()
+              + ", Ignoring given clusterid: " + startOpt.getClusterId());
+        }
+      }
+      LOG.info("Using clusterid: " + getClusterID());
+    }
+  }
+  
   /**
    * Generate new clusterID.
    * 
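
A minimal caller sketch for the new helper above (illustrative only; the class, the doUpgrade method, and the surrounding flow are assumptions and not part of this commit). It shows the intended call order: resolve the cluster/blockpool IDs from the startup options before the rest of the upgrade proceeds.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;

/** Hypothetical caller of NNStorage#processStartupOptionsForUpgrade (sketch only). */
class NNStorageUpgradeSketch {
  static void doUpgrade(NNStorage storage, StartupOption startOpt,
      int oldLayoutVersion) throws IOException {
    // Resolves the clusterid (and, for pre-federation layouts, a new
    // blockpoolid) before the storage directories themselves are upgraded.
    storage.processStartupOptionsForUpgrade(startOpt, oldLayoutVersion);
    System.out.println("Upgrading with clusterid: " + storage.getClusterID());
    // ... upgrade the individual storage directories here ...
  }
}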

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Jun 21 20:09:54 2011
@@ -195,7 +195,7 @@ public class NameNode implements Namenod
   }
     
 
-  @Override
+  @Override // VersionedProtocol
   public ProtocolSignature getProtocolSignature(String protocol,
       long clientVersion, int clientMethodsHash) throws IOException {
     return ProtocolSignature.getProtocolSignature(
@@ -696,7 +696,7 @@ public class NameNode implements Namenod
     return namesystem.getBlocks(datanode, size); 
   }
 
-  /** {@inheritDoc} */
+  @Override // NamenodeProtocol
   public ExportedBlockKeys getBlockKeys() throws IOException {
     return namesystem.getBlockKeys();
   }
@@ -744,39 +744,34 @@ public class NameNode implements Namenod
     return namesystem.getEditLogSize();
   }
 
-  /*
-   * Active name-node cannot journal.
-   */
   @Override // NamenodeProtocol
   public void journal(NamenodeRegistration registration,
                       int jAction,
                       int length,
                       byte[] args) throws IOException {
+    // Active name-node cannot journal.
     throw new UnsupportedActionException("journal");
   }
 
-  /////////////////////////////////////////////////////
-  // ClientProtocol
-  /////////////////////////////////////////////////////
-  
+  @Override // ClientProtocol
   public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
       throws IOException {
     return namesystem.getDelegationToken(renewer);
   }
 
-  @Override
+  @Override // ClientProtocol
   public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
       throws InvalidToken, IOException {
     return namesystem.renewDelegationToken(token);
   }
 
-  @Override
+  @Override // ClientProtocol
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
       throws IOException {
     namesystem.cancelDelegationToken(token);
   }
   
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public LocatedBlocks getBlockLocations(String src, 
                                           long offset, 
                                           long length) 
@@ -786,20 +781,12 @@ public class NameNode implements Namenod
                                         src, offset, length);
   }
   
-  private static String getClientMachine() {
-    String clientMachine = Server.getRemoteAddress();
-    if (clientMachine == null) {
-      clientMachine = "";
-    }
-    return clientMachine;
-  }
-
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public FsServerDefaults getServerDefaults() throws IOException {
     return namesystem.getServerDefaults();
   }
 
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public void create(String src, 
                      FsPermission masked,
                      String clientName, 
@@ -824,7 +811,7 @@ public class NameNode implements Namenod
     metrics.incrCreateFileOps();
   }
 
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public LocatedBlock append(String src, String clientName) 
       throws IOException {
     String clientMachine = getClientMachine();
@@ -837,31 +824,31 @@ public class NameNode implements Namenod
     return info;
   }
 
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public boolean recoverLease(String src, String clientName) throws IOException {
     String clientMachine = getClientMachine();
     return namesystem.recoverLease(src, clientName, clientMachine);
   }
 
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public boolean setReplication(String src, short replication) 
     throws IOException {  
     return namesystem.setReplication(src, replication);
   }
     
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public void setPermission(String src, FsPermission permissions)
       throws IOException {
     namesystem.setPermission(src, permissions);
   }
 
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public void setOwner(String src, String username, String groupname)
       throws IOException {
     namesystem.setOwner(src, username, groupname);
   }
 
-  @Override
+  @Override // ClientProtocol
   public LocatedBlock addBlock(String src,
                                String clientName,
                                ExtendedBlock previous,
@@ -885,7 +872,7 @@ public class NameNode implements Namenod
     return locatedBlock;
   }
 
-  @Override
+  @Override // ClientProtocol
   public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
       final DatanodeInfo[] existings, final DatanodeInfo[] excludes,
       final int numAdditionalNodes, final String clientName
@@ -926,7 +913,7 @@ public class NameNode implements Namenod
     }
   }
 
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public boolean complete(String src, String clientName, ExtendedBlock last)
       throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
@@ -942,6 +929,7 @@ public class NameNode implements Namenod
    * mark the block as corrupt.  In the future we might 
    * check the blocks are actually corrupt. 
    */
+  @Override
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     stateChangeLog.info("*DIR* NameNode.reportBadBlocks");
     for (int i = 0; i < blocks.length; i++) {
@@ -954,22 +942,21 @@ public class NameNode implements Namenod
     }
   }
 
-  /** {@inheritDoc} */
-  @Override
+  @Override // ClientProtocol
   public LocatedBlock updateBlockForPipeline(ExtendedBlock block, String clientName)
       throws IOException {
     return namesystem.updateBlockForPipeline(block, clientName);
   }
 
 
-  @Override
+  @Override // ClientProtocol
   public void updatePipeline(String clientName, ExtendedBlock oldBlock,
       ExtendedBlock newBlock, DatanodeID[] newNodes)
       throws IOException {
     namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes);
   }
   
-  /** {@inheritDoc} */
+  @Override // DatanodeProtocol
   public void commitBlockSynchronization(ExtendedBlock block,
       long newgenerationstamp, long newlength,
       boolean closeFile, boolean deleteblock, DatanodeID[] newtargets)
@@ -978,14 +965,14 @@ public class NameNode implements Namenod
         newgenerationstamp, newlength, closeFile, deleteblock, newtargets);
   }
   
+  @Override // ClientProtocol
   public long getPreferredBlockSize(String filename) 
       throws IOException {
     return namesystem.getPreferredBlockSize(filename);
   }
     
-  /** {@inheritDoc} */
   @Deprecated
-  @Override
+  @Override // ClientProtocol
   public boolean rename(String src, String dst) throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
@@ -1001,15 +988,12 @@ public class NameNode implements Namenod
     return ret;
   }
   
-  /** 
-   * {@inheritDoc}
-   */
+  @Override // ClientProtocol
   public void concat(String trg, String[] src) throws IOException {
     namesystem.concat(trg, src);
   }
   
-  /** {@inheritDoc} */
-  @Override
+  @Override // ClientProtocol
   public void rename(String src, String dst, Options.Rename... options)
       throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
@@ -1023,14 +1007,13 @@ public class NameNode implements Namenod
     metrics.incrFilesRenamed();
   }
 
-  /**
-   */
   @Deprecated
+  @Override // ClientProtocol
   public boolean delete(String src) throws IOException {
     return delete(src, true);
   }
 
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public boolean delete(String src, boolean recursive) throws IOException {
     if (stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*DIR* Namenode.delete: src=" + src
@@ -1046,7 +1029,6 @@ public class NameNode implements Namenod
    * Check path length does not exceed maximum.  Returns true if
    * length and depth are okay.  Returns false if length is too long 
    * or depth is too great.
-   * 
    */
   private boolean checkPathLength(String src) {
     Path srcPath = new Path(src);
@@ -1054,7 +1036,7 @@ public class NameNode implements Namenod
             srcPath.depth() <= MAX_PATH_DEPTH);
   }
     
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public boolean mkdirs(String src, FsPermission masked, boolean createParent)
       throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
@@ -1069,15 +1051,12 @@ public class NameNode implements Namenod
             null, masked), createParent);
   }
 
-  /**
-   */
+  @Override // ClientProtocol
   public void renewLease(String clientName) throws IOException {
     namesystem.renewLease(clientName);        
   }
 
-  /**
-   */
-  @Override
+  @Override // ClientProtocol
   public DirectoryListing getListing(String src, byte[] startAfter,
       boolean needLocation)
   throws IOException {
@@ -1090,24 +1069,13 @@ public class NameNode implements Namenod
     return files;
   }
 
-  /**
-   * Get the file info for a specific file.
-   * @param src The string representation of the path to the file
-   * @return object containing information regarding the file
-   *         or null if file not found
-   */
+  @Override // ClientProtocol
   public HdfsFileStatus getFileInfo(String src)  throws IOException {
     metrics.incrFileInfoOps();
     return namesystem.getFileInfo(src, true);
   }
 
-  /**
-   * Get the file info for a specific file. If the path refers to a 
-   * symlink then the FileStatus of the symlink is returned.
-   * @param src The string representation of the path to the file
-   * @return object containing information regarding the file
-   *         or null if file not found
-   */
+  @Override // ClientProtocol
   public HdfsFileStatus getFileLinkInfo(String src) throws IOException { 
     metrics.incrFileInfoOps();
     return namesystem.getFileInfo(src, false);
@@ -1118,8 +1086,7 @@ public class NameNode implements Namenod
     return namesystem.getStats();
   }
 
-  /**
-   */
+  @Override // ClientProtocol
   public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
       throws IOException {
     DatanodeInfo results[] = namesystem.datanodeReport(type);
@@ -1129,7 +1096,7 @@ public class NameNode implements Namenod
     return results;
   }
     
-  @Override
+  @Override // ClientProtocol
   public boolean setSafeMode(SafeModeAction action) throws IOException {
     return namesystem.setSafeMode(action);
   }
@@ -1141,38 +1108,29 @@ public class NameNode implements Namenod
     return namesystem.isInSafeMode();
   }
 
-  @Override
+  @Override // ClientProtocol
   public boolean restoreFailedStorage(String arg) 
       throws AccessControlException {
     return namesystem.restoreFailedStorage(arg);
   }
 
-  @Override
+  @Override // ClientProtocol
   public void saveNamespace() throws IOException {
     namesystem.saveNamespace();
   }
 
-  /**
-   * Refresh the list of datanodes that the namenode should allow to  
-   * connect.  Re-reads conf by creating new HdfsConfiguration object and 
-   * uses the files list in the configuration to update the list. 
-   */
+  @Override // ClientProtocol
   public void refreshNodes() throws IOException {
     namesystem.refreshNodes(new HdfsConfiguration());
   }
 
-  /**
-   * Returns the size of the current edit log.
-   */
-  @Deprecated
+  @Deprecated // NamenodeProtocol
   public long getEditLogSize() throws IOException {
     return namesystem.getEditLogSize();
   }
 
-  /**
-   * Roll the edit log.
-   */
   @Deprecated
+  @Override // NamenodeProtocol
   public CheckpointSignature rollEditLog() throws IOException {
     return namesystem.rollEditLog();
   }
@@ -1183,29 +1141,25 @@ public class NameNode implements Namenod
     return namesystem.getEditLogManifest(sinceTxId);
   }
     
+  @Override // ClientProtocol
   public void finalizeUpgrade() throws IOException {
     namesystem.finalizeUpgrade();
   }
 
+  @Override // ClientProtocol
   public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
       throws IOException {
     return namesystem.distributedUpgradeProgress(action);
   }
 
-  /**
-   * Dumps namenode state into specified file
-   */
+  @Override // ClientProtocol
   public void metaSave(String filename) throws IOException {
     namesystem.metaSave(filename);
   }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public CorruptFileBlocks
-    listCorruptFileBlocks(String path, String cookie) 
-    throws IOException {
+  @Override // ClientProtocol
+  public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
+      throws IOException {
     Collection<FSNamesystem.CorruptFileBlockInfo> fbs =
       namesystem.listCorruptFileBlocks(path, cookie);
     
@@ -1219,32 +1173,31 @@ public class NameNode implements Namenod
     return new CorruptFileBlocks(files, lastCookie);
   }
   
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public ContentSummary getContentSummary(String path) throws IOException {
     return namesystem.getContentSummary(path);
   }
 
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public void setQuota(String path, long namespaceQuota, long diskspaceQuota) 
       throws IOException {
     namesystem.setQuota(path, namespaceQuota, diskspaceQuota);
   }
   
-  /** {@inheritDoc} */
+  @Override // ClientProtocol
   public void fsync(String src, String clientName) throws IOException {
     namesystem.fsync(src, clientName);
   }
 
-  @Override
+  @Override // ClientProtocol
   public void setTimes(String src, long mtime, long atime) 
       throws IOException {
     namesystem.setTimes(src, mtime, atime);
   }
 
-  @Override
-  public void createSymlink(String target, String link, FsPermission dirPerms, 
-                            boolean createParent) 
-      throws IOException {
+  @Override // ClientProtocol
+  public void createSymlink(String target, String link, FsPermission dirPerms,
+      boolean createParent) throws IOException {
     metrics.incrCreateSymlinkOps();
     /* We enforce the MAX_PATH_LENGTH limit even though a symlink target 
      * URI may refer to a non-HDFS file system. 
@@ -1262,7 +1215,7 @@ public class NameNode implements Namenod
       new PermissionStatus(ugi.getShortUserName(), null, dirPerms), createParent);
   }
 
-  @Override
+  @Override // ClientProtocol
   public String getLinkTarget(String path) throws IOException {
     metrics.incrGetLinkTargetOps();
     /* Resolves the first symlink in the given path, returning a
@@ -1285,11 +1238,7 @@ public class NameNode implements Namenod
   }
 
 
-  ////////////////////////////////////////////////////////////////
-  // DatanodeProtocol
-  ////////////////////////////////////////////////////////////////
-  /** 
-   */
+  @Override // DatanodeProtocol
   public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
       throws IOException {
     verifyVersion(nodeReg.getVersion());
@@ -1298,32 +1247,19 @@ public class NameNode implements Namenod
     return nodeReg;
   }
 
-  /**
-   * Data node notify the name node that it is alive 
-   * Return an array of block-oriented commands for the datanode to execute.
-   * This will be either a transfer or a delete operation.
-   */
+  @Override // DatanodeProtocol
   public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg,
-                                       long capacity,
-                                       long dfsUsed,
-                                       long remaining,
-                                       long blockPoolUsed,
-                                       int xmitsInProgress,
-                                       int xceiverCount,
-                                       int failedVolumes) throws IOException {
+      long capacity, long dfsUsed, long remaining, long blockPoolUsed,
+      int xmitsInProgress, int xceiverCount, int failedVolumes)
+      throws IOException {
     verifyRequest(nodeReg);
     return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining,
         blockPoolUsed, xceiverCount, xmitsInProgress, failedVolumes);
   }
 
-  /**
-   * sends block report to the corresponding namenode (for the poolId)
-   * @return DataNodeCommand from the namenode
-   * @throws IOException
-   */
+  @Override // DatanodeProtocol
   public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
-                                     String poolId,
-                                     long[] blocks) throws IOException {
+      String poolId, long[] blocks) throws IOException {
     verifyRequest(nodeReg);
     BlockListAsLongs blist = new BlockListAsLongs(blocks);
     if(stateChangeLog.isDebugEnabled()) {
@@ -1338,10 +1274,9 @@ public class NameNode implements Namenod
     return null;
   }
 
-  public void blockReceived(DatanodeRegistration nodeReg, 
-                            String poolId,
-                            Block blocks[],
-                            String delHints[]) throws IOException {
+  @Override // DatanodeProtocol
+  public void blockReceived(DatanodeRegistration nodeReg, String poolId,
+      Block blocks[], String delHints[]) throws IOException {
     verifyRequest(nodeReg);
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
@@ -1352,9 +1287,7 @@ public class NameNode implements Namenod
     }
   }
 
-  /**
-   * Handle an error report from a datanode.
-   */
+  @Override // DatanodeProtocol
   public void errorReport(DatanodeRegistration nodeReg,
                           int errorCode, String msg) throws IOException { 
     String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
@@ -1375,10 +1308,12 @@ public class NameNode implements Namenod
     }
   }
     
+  @Override // DatanodeProtocol, NamenodeProtocol
   public NamespaceInfo versionRequest() throws IOException {
     return namesystem.getNamespaceInfo();
   }
 
+  @Override // DatanodeProtocol
   public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException {
     return namesystem.processDistributedUpgradeCommand(comm);
   }
@@ -1520,7 +1455,7 @@ public class NameNode implements Namenod
     return false;
   }
 
-  @Override
+  @Override // RefreshAuthorizationPolicyProtocol
   public void refreshServiceAcl() throws IOException {
     if (!serviceAuthEnabled) {
       throw new AuthorizationException("Service Level Authorization not enabled!");
@@ -1532,21 +1467,21 @@ public class NameNode implements Namenod
     }
   }
 
-  @Override
+  @Override // RefreshAuthorizationPolicyProtocol
   public void refreshUserToGroupsMappings() throws IOException {
     LOG.info("Refreshing all user-to-groups mappings. Requested by user: " + 
              UserGroupInformation.getCurrentUser().getShortUserName());
     Groups.getUserToGroupsMappingService().refresh();
   }
 
-  @Override
+  @Override // RefreshAuthorizationPolicyProtocol
   public void refreshSuperUserGroupsConfiguration() {
     LOG.info("Refreshing SuperUser proxy group mapping list ");
 
     ProxyUsers.refreshSuperUserGroupsConfiguration();
   }
   
-  @Override
+  @Override // GetUserMappingsProtocol
   public String[] getGroupsForUser(String user) throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Getting groups for user " + user);
@@ -1725,4 +1660,12 @@ public class NameNode implements Namenod
       System.exit(-1);
     }
   }
+  
+  private static String getClientMachine() {
+    String clientMachine = Server.getRemoteAddress();
+    if (clientMachine == null) {
+      clientMachine = "";
+    }
+    return clientMachine;
+  }
 }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Tue Jun 21 20:09:54 2011
@@ -142,12 +142,25 @@ class NamenodeJspHelper {
         + "\n</table></div>";
   }
 
-  static String getWarningText(FSNamesystem fsn) {
-    // Ideally this should be displayed in RED
+  /**
+   * Generate warning text if there are corrupt files.
+   * @return a warning if files are corrupt, otherwise return an empty string.
+   */
+  static String getCorruptFilesWarning(FSNamesystem fsn) {
     long missingBlocks = fsn.getMissingBlocksCount();
     if (missingBlocks > 0) {
-      return "<br> WARNING :" + " There are " + missingBlocks
-          + " missing blocks. Please check the log or run fsck. <br><br>";
+      StringBuilder result = new StringBuilder();
+
+      // Warning class is typically displayed in RED
+      result.append("<br/><a class=\"warning\" href=\"/corrupt_files.jsp\" title=\"List corrupt files\">\n");
+      result.append("<b>WARNING : There are " + missingBlocks
+          + " missing blocks. Please check the logs or run fsck in order to identify the missing blocks.</b>");
+      result.append("</a>");
+
+      result.append("<br/><div class=\"small\">See the Hadoop FAQ for common causes and potential solutions.");
+      result.append("<br/><br/>\n");
+
+      return result.toString();
     }
     return "";
   }
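
A hypothetical snippet (the sketch class, method, and page context are assumptions, not part of this commit) showing where the renamed helper's HTML output would typically be emitted from namenode JSP/servlet code:

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import javax.servlet.jsp.JspWriter;

/** Hypothetical page helper (sketch only). */
class CorruptFilesWarningSketch {
  // 'fsn' and 'out' are assumed to come from the enclosing page context.
  static void printWarning(FSNamesystem fsn, JspWriter out) throws IOException {
    String warning = NamenodeJspHelper.getCorruptFilesWarning(fsn);
    if (warning.length() > 0) {
      out.print(warning);  // the "warning" CSS class typically renders this in red
    }
  }
}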

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Tue Jun 21 20:09:54 2011
@@ -80,7 +80,8 @@ public class DelegationTokenFetcher {
     err.println("  --webservice <url>  Url to contact NN on");
     err.println("  --renewer <name>    Name of the delegation token renewer");
     err.println("  --cancel            Cancel the delegation token");
-    err.println("  --renew             Renew the delegation token");
+    err.println("  --renew             Renew the delegation token.  Delegation " 
+    		+ "token must have been fetched using the --renewer <name> option.");
     err.println("  --print             Print the delegation token");
     err.println();
     GenericOptionsParser.printGenericCommandUsage(err);

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj Tue Jun 21 20:09:54 2011
@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.OutputStream;
 
 import org.apache.commons.logging.Log;
@@ -33,8 +32,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.PipelinesTestUtil.PipelinesTest;
 import org.apache.hadoop.hdfs.PipelinesTestUtil.NodeBytes;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 
@@ -83,7 +81,7 @@ privileged public aspect BlockReceiverAs
   }
 
   pointcut afterDownstreamStatusRead(BlockReceiver.PacketResponder responder):
-    call(void PipelineAck.readFields(DataInput)) && this(responder);
+    call(void PipelineAck.readFields(InputStream)) && this(responder);
 
   after(BlockReceiver.PacketResponder responder)
       throws IOException: afterDownstreamStatusRead(responder) {
@@ -150,7 +148,7 @@ privileged public aspect BlockReceiverAs
   }
   
   pointcut preventAckSending () :
-    call (void PipelineAck.write(DataOutput)) 
+    call (void PipelineAck.write(OutputStream)) 
     && within (PacketResponder);
 
   static int ackCounter = 0;
@@ -203,7 +201,7 @@ privileged public aspect BlockReceiverAs
   }
 
   pointcut pipelineAck(BlockReceiver.PacketResponder packetresponder) :
-    call (void PipelineAck.readFields(DataInput))
+    call (void PipelineAck.readFields(InputStream))
       && this(packetresponder);
 
   after(BlockReceiver.PacketResponder packetresponder) throws IOException

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataTransferProtocolAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataTransferProtocolAspects.aj?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataTransferProtocolAspects.aj (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataTransferProtocolAspects.aj Tue Jun 21 20:09:54 2011
@@ -19,15 +19,16 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.DataInput;
 import java.io.DataInputStream;
+import java.io.InputStream;
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fi.DataTransferTestUtil;
 import org.apache.hadoop.fi.DataTransferTestUtil.DataTransferTest;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Receiver;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
 
 /** Aspect for DataTransferProtocol */
 public aspect DataTransferProtocolAspects {
@@ -53,9 +54,9 @@ public aspect DataTransferProtocolAspect
   }
 
   pointcut statusRead(DataXceiver dataxceiver):
-    call(Status Status.read(DataInput)) && this(dataxceiver);
+    call(BlockOpResponseProto BlockOpResponseProto.parseFrom(InputStream)) && this(dataxceiver);
 
-  after(DataXceiver dataxceiver) returning(Status status
+  after(DataXceiver dataxceiver) returning(BlockOpResponseProto status
       ) throws IOException: statusRead(dataxceiver) {
     final DataNode d = dataxceiver.getDataNode();
     LOG.info("FI: statusRead " + status + ", datanode="

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java Tue Jun 21 20:09:54 2011
@@ -36,7 +36,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.log4j.Level;
 import org.junit.Assert;

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java Tue Jun 21 20:09:54 2011
@@ -37,7 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/findbugsExcludeFile.xml?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/findbugsExcludeFile.xml (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/findbugsExcludeFile.xml Tue Jun 21 20:09:54 2011
@@ -3,6 +3,9 @@
        <Package name="org.apache.hadoop.record.compiler.generated" />
      </Match>
      <Match>
+       <Package name="org.apache.hadoop.hdfs.protocol.proto" />
+     </Match>
+     <Match>
        <Bug pattern="EI_EXPOSE_REP" />
      </Match>
      <Match>

Propchange: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jun 21 20:09:54 2011
@@ -1,6 +1,7 @@
+/hadoop/common/trunk/hdfs/src/test/hdfs:1134994-1138149
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:1086482-1134136
+/hadoop/hdfs/trunk/src/test/hdfs:1086482-1134991

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java Tue Jun 21 20:09:54 2011
@@ -56,7 +56,7 @@ public class TestHDFSCLI extends CLITest
                                                  .racks(racks)
                                                  .hosts(hosts)
                                                  .build();
-    
+    dfsCluster.waitClusterUp();
     namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
     
     username = System.getProperty("user.name");

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Tue Jun 21 20:09:54 2011
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.BufferedOutputStream;
 import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
@@ -52,11 +54,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -66,14 +69,11 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
-import static org.junit.Assert.*;
-
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
   
@@ -555,6 +555,15 @@ public class DFSTestUtil {
     IOUtils.copyBytes(is, os, s.length(), true);
   }
 
+  /* Append the given string to the given file */
+  public static void appendFile(FileSystem fs, Path p, String s) 
+      throws IOException {
+    assert fs.exists(p);
+    InputStream is = new ByteArrayInputStream(s.getBytes());
+    FSDataOutputStream os = fs.append(p);
+    IOUtils.copyBytes(is, os, s.length(), true);
+  }
+  
   // Returns url content as string.
   public static String urlGet(URL url) throws IOException {
     URLConnection conn = url.openConnection();
@@ -662,7 +671,7 @@ public class DFSTestUtil {
   }
 
   /** For {@link TestTransferRbw} */
-  public static DataTransferProtocol.Status transferRbw(final ExtendedBlock b, 
+  public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
       final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
     assertEquals(2, datanodes.length);
     final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
@@ -674,10 +683,10 @@ public class DFSTestUtil {
     final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
 
     // send the request
-    DataTransferProtocol.Sender.opTransferBlock(out, b, dfsClient.clientName,
+    Sender.opTransferBlock(out, b, dfsClient.clientName,
         new DatanodeInfo[]{datanodes[1]}, new Token<BlockTokenIdentifier>());
     out.flush();
 
-    return DataTransferProtocol.Status.read(in);
+    return BlockOpResponseProto.parseDelimitedFrom(in);
   }
 }
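
For reference, a short sketch (assumed test code, not part of this commit) of how a caller such as TestTransferRbw could consume the protobuf-based reply now returned by DFSTestUtil.transferRbw:

package org.apache.hadoop.hdfs;

import static org.junit.Assert.assertEquals;

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

class TransferRbwSketch {
  // 'b', 'dfsClient', 'src' and 'dst' are assumed to be prepared by the test.
  static void checkTransfer(ExtendedBlock b, DFSClient dfsClient,
      DatanodeInfo src, DatanodeInfo dst) throws IOException {
    BlockOpResponseProto reply = DFSTestUtil.transferRbw(b, dfsClient, src, dst);
    // The transfer status now arrives inside the protobuf response message.
    assertEquals(Status.SUCCESS, reply.getStatus());
  }
}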

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java Tue Jun 21 20:09:54 2011
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs;
 
 import java.util.List;
 
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.fs.Path;
 
 import org.junit.Test;

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Tue Jun 21 20:09:54 2011
@@ -17,13 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.WRITE_BLOCK;
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.ERROR;
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.SUCCESS;
-
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -45,20 +38,26 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
 /**
@@ -93,6 +92,9 @@ public class TestDataTransferProtocol ex
       if ( testDescription != null ) {
         LOG.info("Testing : " + testDescription);
       }
+      LOG.info("Going to write:" +
+          StringUtils.byteToHexString(sendBuf.toByteArray()));
+      
       sock = new Socket();
       sock.connect(dnAddr, HdfsConstants.READ_TIMEOUT);
       sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
@@ -113,10 +115,11 @@ public class TestDataTransferProtocol ex
         }
         throw eof;
       }
-      for (int i=0; i<retBuf.length; i++) {
-        System.out.print(retBuf[i]);
-      }
-      System.out.println(":");
+
+      LOG.info("Received: " +
+          StringUtils.byteToHexString(retBuf));
+      LOG.info("Expected: " +
+          StringUtils.byteToHexString(recvBuf.toByteArray()));
       
       if (eofExpected) {
         throw new IOException("Did not recieve IOException when an exception " +
@@ -162,26 +165,35 @@ public class TestDataTransferProtocol ex
     sendOut.writeInt(0);           // zero checksum
 
     //ok finally write a block with 0 len
-    SUCCESS.write(recvOut);
-    Text.writeString(recvOut, "");
-    new PipelineAck(100, new Status[]{SUCCESS}).write(recvOut);
+    sendResponse(Status.SUCCESS, "", recvOut);
+    new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
     sendRecvData(description, false);
   }
   
+  private void sendResponse(Status status, String firstBadLink,
+      DataOutputStream out)
+  throws IOException {
+    Builder builder = BlockOpResponseProto.newBuilder().setStatus(status);
+    if (firstBadLink != null) {
+      builder.setFirstBadLink(firstBadLink);
+    }
+    builder.build()
+      .writeDelimitedTo(out);
+  }
+
   private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
       String description, Boolean eofExcepted) throws IOException {
     sendBuf.reset();
     recvBuf.reset();
-    DataTransferProtocol.Sender.opWriteBlock(sendOut, block, 0,
+    Sender.opWriteBlock(sendOut, block, 0,
         stage, newGS, block.getNumBytes(), block.getNumBytes(), "cl", null,
         new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
     if (eofExcepted) {
-      ERROR.write(recvOut);
+      sendResponse(Status.ERROR, null, recvOut);
       sendRecvData(description, true);
     } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
       //ok finally write a block with 0 len
-      SUCCESS.write(recvOut);
-      Text.writeString(recvOut, ""); // first bad node
+      sendResponse(Status.SUCCESS, "", recvOut);
       sendRecvData(description, false);
     } else {
       writeZeroLengthPacket(block, description);
@@ -355,12 +367,12 @@ public class TestDataTransferProtocol ex
     // bad ops
     sendBuf.reset();
     sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    sendOut.writeByte(WRITE_BLOCK.code - 1);
+    sendOut.writeByte(Op.WRITE_BLOCK.code - 1);
     sendRecvData("Wrong Op Code", true);
     
     /* Test OP_WRITE_BLOCK */
     sendBuf.reset();
-    DataTransferProtocol.Sender.opWriteBlock(sendOut, 
+    Sender.opWriteBlock(sendOut, 
         new ExtendedBlock(poolId, newBlockId), 0,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, 0L, 0L, "cl", null,
         new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
@@ -369,12 +381,12 @@ public class TestDataTransferProtocol ex
     // bad bytes per checksum
     sendOut.writeInt(-1-random.nextInt(oneMil));
     recvBuf.reset();
-    ERROR.write(recvOut);
+    sendResponse(Status.ERROR, null, recvOut);
     sendRecvData("wrong bytesPerChecksum while writing", true);
 
     sendBuf.reset();
     recvBuf.reset();
-    DataTransferProtocol.Sender.opWriteBlock(sendOut,
+    Sender.opWriteBlock(sendOut,
         new ExtendedBlock(poolId, ++newBlockId), 0,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, 0L, 0L, "cl", null,
         new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
@@ -389,16 +401,15 @@ public class TestDataTransferProtocol ex
       -1 - random.nextInt(oneMil)); // bad datalen
     hdr.write(sendOut);
 
-    SUCCESS.write(recvOut);
-    Text.writeString(recvOut, "");
-    new PipelineAck(100, new Status[]{ERROR}).write(recvOut);
+    sendResponse(Status.SUCCESS, "", recvOut);
+    new PipelineAck(100, new Status[]{Status.ERROR}).write(recvOut);
     sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId, 
                  true);
 
     // test for writing a valid zero size block
     sendBuf.reset();
     recvBuf.reset();
-    DataTransferProtocol.Sender.opWriteBlock(sendOut, 
+    Sender.opWriteBlock(sendOut, 
         new ExtendedBlock(poolId, ++newBlockId), 0,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, 0L, 0L, "cl", null,
         new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
@@ -415,9 +426,8 @@ public class TestDataTransferProtocol ex
     sendOut.writeInt(0);           // zero checksum
     sendOut.flush();
     //ok finally write a block with 0 len
-    SUCCESS.write(recvOut);
-    Text.writeString(recvOut, "");
-    new PipelineAck(100, new Status[]{SUCCESS}).write(recvOut);
+    sendResponse(Status.SUCCESS, "", recvOut);
+    new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
     sendRecvData("Writing a zero len block blockid " + newBlockId, false);
     
     /* Test OP_READ_BLOCK */
@@ -429,46 +439,46 @@ public class TestDataTransferProtocol ex
     sendBuf.reset();
     recvBuf.reset();
     blk.setBlockId(blkid-1);
-    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, 0L, fileLen, "cl",
+    Sender.opReadBlock(sendOut, blk, 0L, fileLen, "cl",
           BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Wrong block ID " + newBlockId + " for read", false); 
 
     // negative block start offset -1L
     sendBuf.reset();
     blk.setBlockId(blkid);
-    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, -1L, fileLen, "cl",
+    Sender.opReadBlock(sendOut, blk, -1L, fileLen, "cl",
           BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Negative start-offset for read for block " + 
                  firstBlock.getBlockId(), false);
 
     // bad block start offset
     sendBuf.reset();
-    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, fileLen, fileLen, "cl",
+    Sender.opReadBlock(sendOut, blk, fileLen, fileLen, "cl",
           BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Wrong start-offset for reading block " +
                  firstBlock.getBlockId(), false);
     
     // negative length is ok. Datanode assumes we want to read the whole block.
     recvBuf.reset();
-    SUCCESS.write(recvOut);    
+    sendResponse(Status.SUCCESS, null, recvOut);
     sendBuf.reset();
-    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, 0L, 
+    Sender.opReadBlock(sendOut, blk, 0L, 
         -1 - random.nextInt(oneMil), "cl", BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Negative length for reading block " +
                  firstBlock.getBlockId(), false);
     
     // length is more than size of block.
     recvBuf.reset();
-    ERROR.write(recvOut);    
+    sendResponse(Status.ERROR, null, recvOut);
     sendBuf.reset();
-    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, 0L, 
+    Sender.opReadBlock(sendOut, blk, 0L, 
         fileLen + 1, "cl", BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Wrong length for reading block " +
                  firstBlock.getBlockId(), false);
     
     //At the end of all this, read the file to make sure that succeeds finally.
     sendBuf.reset();
-    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, 0L, 
+    Sender.opReadBlock(sendOut, blk, 0L, 
         fileLen, "cl", BlockTokenSecretManager.DUMMY_TOKEN);
     readFile(fileSys, file, fileLen);
     } finally {
@@ -499,14 +509,6 @@ public class TestDataTransferProtocol ex
     readBack.readFields(ByteBuffer.wrap(baos.toByteArray()));
     assertEquals(hdr, readBack);
 
-    // Test sanity check for good header
-    PacketHeader goodHeader = new PacketHeader(
-      4,                   // size of packet
-      0,                   // OffsetInBlock
-      100,                 // sequencenumber
-      true,                // lastPacketInBlock
-      0);                  // chunk length
-
     assertTrue(hdr.sanityCheck(99));
     assertFalse(hdr.sanityCheck(100));
   }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java Tue Jun 21 20:09:54 2011
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import static org.junit.Assert.*;
 
 import org.junit.After;
@@ -201,7 +202,8 @@ public class TestDecommission {
     nodes.add(nodename);
     writeConfigFile(excludeFile, nodes);
     cluster.getNamesystem(nnIndex).refreshNodes(conf);
-    DatanodeInfo ret = cluster.getNamesystem(nnIndex).getDatanode(info[index]);
+    DatanodeInfo ret = NameNodeAdapter.getDatanode(
+        cluster.getNameNode(nnIndex), info[index]);
     waitNodeState(ret, waitForState);
     return ret;
   }
@@ -371,7 +373,7 @@ public class TestDecommission {
       // Stop decommissioning and verify stats
       writeConfigFile(excludeFile, null);
       fsn.refreshNodes(conf);
-      DatanodeInfo ret = fsn.getDatanode(downnode);
+      DatanodeInfo ret = NameNodeAdapter.getDatanode(namenode, downnode);
       waitNodeState(ret, AdminStates.NORMAL);
       verifyStats(namenode, fsn, ret, false);
     }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Tue Jun 21 20:09:54 2011
@@ -26,9 +26,10 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
@@ -53,8 +54,7 @@ public class TestReplaceDatanodeOnFailur
   /** Test DEFAULT ReplaceDatanodeOnFailure policy. */
   @Test
   public void testDefaultPolicy() throws Exception {
-    final DataTransferProtocol.ReplaceDatanodeOnFailure p
-        = DataTransferProtocol.ReplaceDatanodeOnFailure.DEFAULT;
+    final ReplaceDatanodeOnFailure p = ReplaceDatanodeOnFailure.DEFAULT;
 
     final DatanodeInfo[] infos = new DatanodeInfo[5];
     final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
@@ -113,7 +113,7 @@ public class TestReplaceDatanodeOnFailur
     final Configuration conf = new HdfsConfiguration();
     
     //always replace a datanode
-    DataTransferProtocol.ReplaceDatanodeOnFailure.ALWAYS.write(conf);
+    ReplaceDatanodeOnFailure.ALWAYS.write(conf);
 
     final String[] racks = new String[REPLICATION];
     Arrays.fill(racks, RACK0);

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java Tue Jun 21 20:09:54 2011
@@ -20,20 +20,44 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.Test;
 
 /**
  * Tests to verify safe mode correctness.
  */
-public class TestSafeMode extends TestCase {
-  
-  static Log LOG = LogFactory.getLog(TestSafeMode.class);
+public class TestSafeMode {
+  Configuration conf; 
+  MiniDFSCluster cluster;
+  FileSystem fs;
+  DistributedFileSystem dfs;
+
+  @Before
+  public void startUp() throws IOException {
+    conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();      
+    fs = cluster.getFileSystem();
+    dfs = (DistributedFileSystem)fs;
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    if (fs != null) {
+      fs.close();
+    }
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
 
   /**
    * This test verifies that if SafeMode is manually entered, name-node does not
@@ -51,61 +75,123 @@ public class TestSafeMode extends TestCa
    *  
    * @throws IOException
    */
-  public void testManualSafeMode() throws IOException {
-    MiniDFSCluster cluster = null;
-    DistributedFileSystem fs = null;
+  @Test
+  public void testManualSafeMode() throws IOException {      
+    fs = (DistributedFileSystem)cluster.getFileSystem();
+    Path file1 = new Path("/tmp/testManualSafeMode/file1");
+    Path file2 = new Path("/tmp/testManualSafeMode/file2");
+    
+    // create two files with one block each.
+    DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
+    DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
+    fs.close();
+    cluster.shutdown();
+    
+    // now bring up just the NameNode.
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
+    cluster.waitActive();
+    dfs = (DistributedFileSystem)cluster.getFileSystem();
+    
+    assertTrue("No datanode is started. Should be in SafeMode", 
+               dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+    
+    // manually set safemode.
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    
+    // now bring up the datanode and wait for it to be active.
+    cluster.startDataNodes(conf, 1, true, null, null);
+    cluster.waitActive();
+    
+    // wait longer than dfs.namenode.safemode.extension
     try {
-      Configuration conf = new HdfsConfiguration();
-      // disable safemode extension to make the test run faster.
-      conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, "1");
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      cluster.waitActive();
-      
-      fs = (DistributedFileSystem)cluster.getFileSystem();
-      Path file1 = new Path("/tmp/testManualSafeMode/file1");
-      Path file2 = new Path("/tmp/testManualSafeMode/file2");
-      
-      LOG.info("Created file1 and file2.");
-      
-      // create two files with one block each.
-      DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
-      DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
-      fs.close();
-      cluster.shutdown();
-      
-      // now bring up just the NameNode.
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
-      cluster.waitActive();
-      fs = (DistributedFileSystem)cluster.getFileSystem();
-      
-      LOG.info("Restarted cluster with just the NameNode");
-      
-      assertTrue("No datanode is started. Should be in SafeMode", 
-                 fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-      
-      // manually set safemode.
-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      
-      // now bring up the datanode and wait for it to be active.
-      cluster.startDataNodes(conf, 1, true, null, null);
-      cluster.waitActive();
-      
-      LOG.info("Datanode is started.");
-
-      // wait longer than dfs.namenode.safemode.extension
-      try {
-        Thread.sleep(2000);
-      } catch (InterruptedException ignored) {}
-      
-      assertTrue("should still be in SafeMode",
-          fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-      
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-      assertFalse("should not be in SafeMode",
-          fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-    } finally {
-      if(fs != null) fs.close();
-      if(cluster!= null) cluster.shutdown();
+      Thread.sleep(2000);
+    } catch (InterruptedException ignored) {}
+
+    assertTrue("should still be in SafeMode",
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+    assertFalse("should not be in SafeMode", 
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
+  }
+
+  public interface FSRun {
+    public abstract void run(FileSystem fs) throws IOException;
+  }
+
+  /**
+   * Assert that the given function fails to run due to a safe 
+   * mode exception.
+   */
+  public void runFsFun(String msg, FSRun f) {
+    try {
+      f.run(fs);
+      fail(msg);
+    } catch (IOException ioe) {
+      assertTrue(ioe.getMessage().contains("safe mode"));
+    }
+  }
+
+  /**
+   * Run various fs operations while the NN is in safe mode,
+   * assert that they are either allowed or fail as expected.
+   */
+  @Test
+  public void testOperationsWhileInSafeMode() throws IOException {
+    final Path file1 = new Path("/file1");
+
+    assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+    DFSTestUtil.createFile(fs, file1, 1024, (short)1, 0);
+    assertTrue("Could not enter SM", 
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
+
+    runFsFun("Set quota while in SM", new FSRun() { 
+      public void run(FileSystem fs) throws IOException {
+        ((DistributedFileSystem)fs).setQuota(file1, 1, 1); 
+      }});
+
+    runFsFun("Set perm while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.setPermission(file1, FsPermission.getDefault());
+      }});
+
+    runFsFun("Set owner while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.setOwner(file1, "user", "group");
+      }});
+
+    runFsFun("Set repl while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.setReplication(file1, (short)1);
+      }});
+
+    runFsFun("Append file while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        DFSTestUtil.appendFile(fs, file1, "new bytes");
+      }});
+
+    runFsFun("Delete file while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.delete(file1, false);
+      }});
+
+    runFsFun("Rename file while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.rename(file1, new Path("file2"));
+      }});
+
+    try {
+      fs.setTimes(file1, 0, 0);
+    } catch (IOException ioe) {
+      fail("Set times failed while in SM");
     }
+
+    try {
+      DFSTestUtil.readFile(fs, file1);
+    } catch (IOException ioe) {
+      fail("Read file failed while in SM");
+    }
+
+    assertFalse("Could not leave SM",
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }
-}
+  
+}
\ No newline at end of file
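
For context, the runFsFun helper added above wraps an arbitrary FileSystem operation in an anonymous FSRun and expects it to be rejected while the NameNode is in safe mode. A minimal sketch of how a further namespace-mutating check could be expressed with the same helper (the mkdirs case is hypothetical and not part of this commit):

    // Hypothetical extra safe-mode check following the runFsFun pattern above;
    // assumes the FSRun interface and runFsFun helper from TestSafeMode.
    runFsFun("Mkdirs while in SM", new FSRun() {
      public void run(FileSystem fs) throws IOException {
        // Directory creation mutates the namespace, so the NameNode should
        // reject it with a "safe mode" exception while safe mode is on.
        fs.mkdirs(new Path("/testSafeModeMkdirs"));
      }
    });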

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Tue Jun 21 20:09:54 2011
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.*;
-
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -38,19 +36,21 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.net.NetUtils;
 /**
  * This class tests if block replacement request to data nodes work correctly.
@@ -258,13 +258,15 @@ public class TestBlockReplacement extend
     sock.setKeepAlive(true);
     // sendRequest
     DataOutputStream out = new DataOutputStream(sock.getOutputStream());
-    DataTransferProtocol.Sender.opReplaceBlock(out, block, source
+    Sender.opReplaceBlock(out, block, source
         .getStorageID(), sourceProxy, BlockTokenSecretManager.DUMMY_TOKEN);
     out.flush();
     // receiveResponse
     DataInputStream reply = new DataInputStream(sock.getInputStream());
 
-    return DataTransferProtocol.Status.read(reply) == SUCCESS;
+    BlockOpResponseProto proto =
+      BlockOpResponseProto.parseDelimitedFrom(reply);
+    return proto.getStatus() == Status.SUCCESS;
   }
 
   /**
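
For reference, the enum-based DataTransferProtocol.Status.read(reply) call is replaced here by parsing a delimited protobuf message from the reply stream. A minimal sketch of the new check, assuming `reply` is the DataInputStream shown in the diff above:

    // Parse the datanode's delimited BlockOpResponseProto from the reply
    // stream and inspect its status field instead of reading a raw enum.
    BlockOpResponseProto resp = BlockOpResponseProto.parseDelimitedFrom(reply);
    boolean succeeded = (resp.getStatus() == Status.SUCCESS);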

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java Tue Jun 21 20:09:54 2011
@@ -19,24 +19,30 @@ package org.apache.hadoop.hdfs.server.da
 
 import static org.junit.Assert.assertTrue;
 
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URLEncoder;
 
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.jsp.JspWriter;
+
 import org.apache.commons.httpclient.util.URIUtil;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestDatanodeJsp {
   
   private static final String FILE_DATA = "foo bar baz biz buz";
+  private static final HdfsConfiguration CONF = new HdfsConfiguration();
   
   private static void testViewingFile(MiniDFSCluster cluster, String filePath,
       boolean doTail) throws IOException {
@@ -74,8 +80,7 @@ public class TestDatanodeJsp {
   public void testViewFileJsp() throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster = new MiniDFSCluster.Builder(CONF).build();
       cluster.waitActive();
       
       testViewingFile(cluster, "/test-file", false);
@@ -92,5 +97,49 @@ public class TestDatanodeJsp {
       }
     }
   }
+  
+  @Test
+  public void testGenStamp() throws Exception {
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1)
+        .build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path testFile = new Path("/test/mkdirs/TestchunkSizeToView");
+      writeFile(fs, testFile);
+      JspWriter writerMock = Mockito.mock(JspWriter.class);
+      HttpServletRequest reqMock = Mockito.mock(HttpServletRequest.class);
+      setTheMockExpectationsFromReq(testFile, reqMock);
+      DatanodeJspHelper.generateFileDetails(writerMock, reqMock, CONF);
+      Mockito.verify(writerMock, Mockito.atLeastOnce()).print(
+          "<input type=\"hidden\" name=\"genstamp\" value=\"987654321\">");
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  private void setTheMockExpectationsFromReq(Path testFile,
+      HttpServletRequest reqMock) {
+    Mockito.doReturn("987654321").when(reqMock).getParameter("genstamp");
+    Mockito.doReturn("1234").when(reqMock).getParameter("blockId");
+    Mockito.doReturn("8081").when(reqMock).getParameter("datanodePort");
+    Mockito.doReturn("8080").when(reqMock).getParameter("namenodeInfoPort");
+    Mockito.doReturn("100").when(reqMock).getParameter("chunkSizeToView");
+    Mockito.doReturn("1").when(reqMock).getParameter("startOffset");
+    Mockito.doReturn("1024").when(reqMock).getParameter("blockSize");
+    Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF)))
+        .when(reqMock).getParameter("nnaddr");
+    Mockito.doReturn(testFile.toString()).when(reqMock).getPathInfo();
+  }
+
+  static Path writeFile(FileSystem fs, Path f) throws IOException {
+    DataOutputStream out = fs.create(f);
+    try {
+      out.writeBytes("umamahesh: " + f);
+    } finally {
+      out.close();
+    }
+    assertTrue(fs.exists(f));
+    return f;
+  }
 
 }
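
The new testGenStamp case exercises DatanodeJspHelper directly through Mockito rather than over HTTP: request parameters are stubbed on a mocked HttpServletRequest and the generated HTML is verified against a mocked JspWriter. A minimal sketch of that stub-and-verify pattern, showing only one parameter (the remaining parameters are stubbed as in setTheMockExpectationsFromReq above):

    // Stub a request parameter, invoke the code under test, then verify
    // that something was written to the mocked JspWriter.
    HttpServletRequest req = Mockito.mock(HttpServletRequest.class);
    JspWriter out = Mockito.mock(JspWriter.class);
    Mockito.doReturn("987654321").when(req).getParameter("genstamp");
    // ... stub the other parameters as shown in setTheMockExpectationsFromReq ...
    DatanodeJspHelper.generateFileDetails(out, req, CONF);
    Mockito.verify(out, Mockito.atLeastOnce()).print(Mockito.anyString());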

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Tue Jun 21 20:09:54 2011
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.DataOutputStream;
 import java.io.File;
 import java.net.InetSocketAddress;
@@ -26,22 +29,20 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-
-import org.junit.Test;
-import org.junit.Before;
 import org.junit.After;
-import static org.junit.Assert.*;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * Test that datanodes can correctly handle errors during block read/write.

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java Tue Jun 21 20:09:54 2011
@@ -29,10 +29,12 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.log4j.Level;
@@ -121,9 +123,9 @@ public class TestTransferRbw {
         //transfer RBW
         final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
             oldrbw.getGenerationStamp());
-        final DataTransferProtocol.Status s = DFSTestUtil.transferRbw(
+        final BlockOpResponseProto s = DFSTestUtil.transferRbw(
             b, fs.getClient(), oldnodeinfo, newnodeinfo);
-        Assert.assertEquals(DataTransferProtocol.Status.SUCCESS, s);
+        Assert.assertEquals(Status.SUCCESS, s.getStatus());
       }
 
       //check new rbw

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Tue Jun 21 20:09:54 2011
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 /**
@@ -77,4 +78,18 @@ public class NameNodeAdapter {
   public static String getLeaseHolderForPath(NameNode namenode, String path) {
     return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
   }
+
+  /**
+   * Return the datanode descriptor for the given datanode.
+   */
+  public static DatanodeDescriptor getDatanode(NameNode namenode,
+      DatanodeID id) throws IOException {
+    FSNamesystem ns = namenode.getNamesystem();
+    ns.readLock();
+    try {
+      return ns.getDatanode(id);
+    } finally {
+      ns.readUnlock();
+    }
+  }
 }
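
The getDatanode adapter above wraps the FSNamesystem lookup in the namesystem read lock so tests no longer call FSNamesystem.getDatanode unlocked. Roughly the same inline pattern appears in TestDeadDatanode and TestHeartbeatHandling below; a minimal sketch, assuming an FSNamesystem reference `ns` and a DatanodeID `id` as in the adapter:

    // Read-side access to namesystem state: take the read lock for the
    // duration of the lookup and always release it in a finally block.
    ns.readLock();
    try {
      DatanodeDescriptor dd = ns.getDatanode(id);
      // ... inspect dd while the namesystem state is known to be consistent ...
    } finally {
      ns.readUnlock();
    }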

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java Tue Jun 21 20:09:54 2011
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.token.*;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 
 import junit.framework.TestCase;
@@ -142,7 +143,8 @@ public class TestBlockTokenWithDFS exten
             + "when it is expected to be valid", shouldSucceed);
         return;
       }
-      fail("OP_READ_BLOCK failed due to reasons other than access token");
+      fail("OP_READ_BLOCK failed due to reasons other than access token: "
+          + StringUtils.stringifyException(ex));
     } finally {
       if (s != null) {
         try {

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Tue Jun 21 20:09:54 2011
@@ -61,7 +61,14 @@ public class TestDeadDatanode {
     FSNamesystem namesystem = cluster.getNamesystem();
     String state = alive ? "alive" : "dead";
     while (System.currentTimeMillis() < stopTime) {
-      if (namesystem.getDatanode(nodeID).isAlive == alive) {
+      namesystem.readLock();
+      DatanodeDescriptor dd;
+      try {
+        dd = namesystem.getDatanode(nodeID);
+      } finally {
+        namesystem.readUnlock();
+      }
+      if (dd.isAlive == alive) {
         LOG.info("datanode " + nodeID + " is " + state);
         return;
       }

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Tue Jun 21 20:09:54 2011
@@ -137,6 +137,7 @@ public class TestEditLog extends TestCas
    */
   public void testPreTxIdEditLogNoEdits() throws Exception {
     FSNamesystem namesys = Mockito.mock(FSNamesystem.class);
+    namesys.dir = Mockito.mock(FSDirectory.class);
     int numEdits = testLoad(
         StringUtils.hexStringToByte("ffffffed"), // just version number
         namesys);

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java Tue Jun 21 20:09:54 2011
@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -86,16 +87,6 @@ public class TestHDFSConcat {
     }
   }
   
-  private void runCommand(DFSAdmin admin, String args[], boolean expectEror)
-  throws Exception {
-    int val = admin.run(args);
-    if (expectEror) {
-      assertEquals(val, -1);
-    } else {
-      assertTrue(val>=0);
-    }
-  }
-
   /**
    * Concatenates 10 files into one
    * Verifies the final size, deletion of the file, number of blocks
@@ -221,6 +212,46 @@ public class TestHDFSConcat {
     assertEquals(trgLen, totalLen+sFileLen);
     
   }
+  
+  /**
+   * Test that the concat operation is properly persisted in the
+   * edit log, and properly replayed on restart.
+   */
+  @Test
+  public void testConcatInEditLog() throws Exception {
+    final Path TEST_DIR = new Path("/testConcatInEditLog");
+    final long FILE_LEN = blockSize;
+    
+    // 1. Concat some files
+    Path[] srcFiles = new Path[3];
+    for (int i = 0; i < srcFiles.length; i++) {
+      Path path = new Path(TEST_DIR, "src-" + i);
+      DFSTestUtil.createFile(dfs, path, FILE_LEN, REPL_FACTOR, 1);
+      srcFiles[i] = path;
+    }    
+    Path targetFile = new Path(TEST_DIR, "target");
+    DFSTestUtil.createFile(dfs, targetFile, FILE_LEN, REPL_FACTOR, 1);
+    
+    dfs.concat(targetFile, srcFiles);
+    
+    // 2. Verify the concat operation basically worked, and record
+    // file status.
+    assertTrue(dfs.exists(targetFile));
+    FileStatus origStatus = dfs.getFileStatus(targetFile);
+
+    // 3. Restart NN to force replay from edit log
+    cluster.restartNameNode(true);
+    
+    // 4. Verify concat operation was replayed correctly and file status
+    // did not change.
+    assertTrue(dfs.exists(targetFile));
+    assertFalse(dfs.exists(srcFiles[0]));
+
+    FileStatus statusAfterRestart = dfs.getFileStatus(targetFile);
+
+    assertEquals(origStatus.getModificationTime(),
+        statusAfterRestart.getModificationTime());
+  }
 
   // compare content
   private void checkFileContent(byte[] concat, byte[][] bytes ) {

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java Tue Jun 21 20:09:54 2011
@@ -54,7 +54,13 @@ public class TestHeartbeatHandling exten
       final DatanodeRegistration nodeReg = 
         DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
         
-      DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
+      namesystem.readLock();
+      DatanodeDescriptor dd;
+      try {
+        dd = namesystem.getDatanode(nodeReg);
+      } finally {
+        namesystem.readUnlock();
+      }
       
       final int REMAINING_BLOCKS = 1;
       final int MAX_REPLICATE_LIMIT = 

Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java?rev=1138160&r1=1138159&r2=1138160&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java Tue Jun 21 20:09:54 2011
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 public class TestNNThroughputBenchmark {