Posted to hdfs-commits@hadoop.apache.org by gk...@apache.org on 2012/08/03 21:00:59 UTC

svn commit: r1369164 [13/16] - in /hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/o...

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java Fri Aug  3 19:00:15 2012
@@ -23,7 +23,6 @@ import static org.junit.Assert.fail;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReplicaMap;
 import org.junit.Before;
 import org.junit.Test;
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java Fri Aug  3 19:00:15 2012
@@ -17,11 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.journalservice;
 
+import static org.junit.Assert.assertNotNull;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
@@ -122,6 +122,6 @@ public class TestJournalService {
     
     // New epoch higher than the current epoch is successful
     FenceResponse resp = s.fence(info, currentEpoch+1, "fencer");
-    Assert.assertNotNull(resp);
+    assertNotNull(resp);
   }
 }
\ No newline at end of file
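
The hunk above is representative of a cleanup applied throughout this commit: JUnit 3's junit.framework.Assert is dropped in favor of static imports from org.junit.Assert, so assertions read as bare assertNotNull(...) calls. A minimal sketch of the resulting style (class and method names here are illustrative, not from the patch):

    import static org.junit.Assert.assertNotNull;

    import org.junit.Test;

    public class StaticAssertSketch {
      @Test
      public void returnsNonNullResponse() {
        Object resp = new Object();   // stands in for the FenceResponse above
        assertNotNull(resp);          // static import: no Assert. prefix needed
      }
    }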

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Fri Aug  3 19:00:15 2012
@@ -17,6 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
@@ -37,33 +43,29 @@ import java.util.Set;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.mockito.Mockito;
 import org.mockito.Matchers;
+import org.mockito.Mockito;
 
 import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.io.Files;
 
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
 /**
  * Utility functions for testing fsimage storage.
  */
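
The FSImageTestUtil hunks apply the same convention to a larger import block: the wildcard static org.junit.Assert.* import becomes explicit static imports, the Mockito and Guava imports are alphabetized, and static imports move to the head of the block. The file's Mockito usage keeps the doReturn/mock idiom these imports support; a self-contained sketch (the Inspector interface is a hypothetical stand-in, not from the patch):

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    public class MockitoSketch {
      interface Inspector {             // hypothetical stand-in for a storage inspector
        String latestImageName();
      }

      public static void main(String[] args) {
        Inspector inspector = mock(Inspector.class);
        doReturn("fsimage_0000001").when(inspector).latestImageName();
        System.out.println(inspector.latestImageName());   // prints fsimage_0000001
      }
    }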

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Fri Aug  3 19:00:15 2012
@@ -60,6 +60,7 @@ import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
@@ -253,7 +254,7 @@ public class NNThroughputBenchmark {
         setNameNodeLoggingLevel(logLevel);
         for(tIdx=0; tIdx < numThreads; tIdx++)
           daemons.add(new StatsDaemon(tIdx, opsPerThread[tIdx], this));
-        start = System.currentTimeMillis();
+        start = Time.now();
         LOG.info("Starting " + numOpsRequired + " " + getOpName() + "(s).");
         for(StatsDaemon d : daemons)
           d.start();
@@ -261,7 +262,7 @@ public class NNThroughputBenchmark {
         while(isInPorgress()) {
           // try {Thread.sleep(500);} catch (InterruptedException e) {}
         }
-        elapsedTime = System.currentTimeMillis() - start;
+        elapsedTime = Time.now() - start;
         for(StatsDaemon d : daemons) {
           incrementStats(d.localNumOpsExecuted, d.localCumulativeTime);
           // System.out.println(d.toString() + ": ops Exec = " + d.localNumOpsExecuted);
@@ -391,6 +392,7 @@ public class NNThroughputBenchmark {
       setName(toString());
     }
 
+    @Override
     public void run() {
       localNumOpsExecuted = 0;
       localCumulativeTime = 0;
@@ -403,6 +405,7 @@ public class NNThroughputBenchmark {
       }
     }
 
+    @Override
     public String toString() {
       return "StatsDaemon-" + daemonId;
     }
@@ -445,16 +448,19 @@ public class NNThroughputBenchmark {
       keepResults = true;
     }
 
+    @Override
     String getOpName() {
       return OP_CLEAN_NAME;
     }
 
+    @Override
     void parseArguments(List<String> args) {
       boolean ignoreUnrelatedOptions = verifyOpArgument(args);
       if(args.size() > 2 && !ignoreUnrelatedOptions)
         printUsage();
     }
 
+    @Override
     void generateInputs(int[] opsPerThread) throws IOException {
       // do nothing
     }
@@ -462,6 +468,7 @@ public class NNThroughputBenchmark {
     /**
      * Does not require the argument
      */
+    @Override
     String getExecutionArgument(int daemonId) {
       return null;
     }
@@ -469,15 +476,17 @@ public class NNThroughputBenchmark {
     /**
      * Remove entire benchmark directory.
      */
+    @Override
     long executeOp(int daemonId, int inputIdx, String ignore) 
     throws IOException {
       nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       nameNodeProto.delete(BASE_DIR_NAME, true);
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       return end-start;
     }
 
+    @Override
     void printResults() {
       LOG.info("--- " + getOpName() + " inputs ---");
       LOG.info("Remove directory " + BASE_DIR_NAME);
@@ -507,10 +516,12 @@ public class NNThroughputBenchmark {
       parseArguments(args);
     }
 
+    @Override
     String getOpName() {
       return OP_CREATE_NAME;
     }
 
+    @Override
     void parseArguments(List<String> args) {
       boolean ignoreUnrelatedOptions = verifyOpArgument(args);
       int nrFilesPerDir = 4;
@@ -533,6 +544,7 @@ public class NNThroughputBenchmark {
       nameGenerator = new FileNameGenerator(getBaseDir(), nrFilesPerDir);
     }
 
+    @Override
     void generateInputs(int[] opsPerThread) throws IOException {
       assert opsPerThread.length == numThreads : "Error opsPerThread.length"; 
       nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
@@ -556,6 +568,7 @@ public class NNThroughputBenchmark {
     /**
      * returns client name
      */
+    @Override
     String getExecutionArgument(int daemonId) {
       return getClientName(daemonId);
     }
@@ -563,20 +576,22 @@ public class NNThroughputBenchmark {
     /**
      * Do file create.
      */
+    @Override
     long executeOp(int daemonId, int inputIdx, String clientName) 
     throws IOException {
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       // dummyActionNoSynch(fileIdx);
       nameNodeProto.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
                       clientName, new EnumSetWritable<CreateFlag>(EnumSet
               .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE);
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       for(boolean written = !closeUponCreate; !written; 
         written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
                                     clientName, null));
       return end-start;
     }
 
+    @Override
     void printResults() {
       LOG.info("--- " + getOpName() + " inputs ---");
       LOG.info("nrFiles = " + numOpsRequired);
@@ -606,10 +621,12 @@ public class NNThroughputBenchmark {
       super(args);
     }
 
+    @Override
     String getOpName() {
       return OP_OPEN_NAME;
     }
 
+    @Override
     void parseArguments(List<String> args) {
       int ueIndex = args.indexOf("-useExisting");
       useExisting = (ueIndex >= 0);
@@ -619,6 +636,7 @@ public class NNThroughputBenchmark {
       super.parseArguments(args);
     }
 
+    @Override
     void generateInputs(int[] opsPerThread) throws IOException {
       // create files using opsPerThread
       String[] createArgs = new String[] {
@@ -651,11 +669,12 @@ public class NNThroughputBenchmark {
     /**
      * Do file open.
      */
+    @Override
     long executeOp(int daemonId, int inputIdx, String ignore) 
     throws IOException {
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       nameNodeProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, BLOCK_SIZE);
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       return end-start;
     }
   }
@@ -675,15 +694,17 @@ public class NNThroughputBenchmark {
       super(args);
     }
 
+    @Override
     String getOpName() {
       return OP_DELETE_NAME;
     }
 
+    @Override
     long executeOp(int daemonId, int inputIdx, String ignore) 
     throws IOException {
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       nameNodeProto.delete(fileNames[daemonId][inputIdx], false);
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       return end-start;
     }
   }
@@ -703,15 +724,17 @@ public class NNThroughputBenchmark {
       super(args);
     }
 
+    @Override
     String getOpName() {
       return OP_FILE_STATUS_NAME;
     }
 
+    @Override
     long executeOp(int daemonId, int inputIdx, String ignore) 
     throws IOException {
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       nameNodeProto.getFileInfo(fileNames[daemonId][inputIdx]);
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       return end-start;
     }
   }
@@ -733,10 +756,12 @@ public class NNThroughputBenchmark {
       super(args);
     }
 
+    @Override
     String getOpName() {
       return OP_RENAME_NAME;
     }
 
+    @Override
     void generateInputs(int[] opsPerThread) throws IOException {
       super.generateInputs(opsPerThread);
       destNames = new String[fileNames.length][];
@@ -748,12 +773,13 @@ public class NNThroughputBenchmark {
       }
     }
 
+    @Override
     long executeOp(int daemonId, int inputIdx, String ignore) 
     throws IOException {
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       nameNodeProto.rename(fileNames[daemonId][inputIdx],
                       destNames[daemonId][inputIdx]);
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       return end-start;
     }
   }
@@ -792,6 +818,7 @@ public class NNThroughputBenchmark {
       this.nrBlocks = 0;
     }
 
+    @Override
     public String toString() {
       return dnRegistration.toString();
     }
@@ -867,6 +894,7 @@ public class NNThroughputBenchmark {
       return blockReportList;
     }
 
+    @Override
     public int compareTo(String xferAddr) {
       return getXferAddr().compareTo(xferAddr);
     }
@@ -958,10 +986,12 @@ public class NNThroughputBenchmark {
       return numThreads;
     }
 
+    @Override
     String getOpName() {
       return OP_BLOCK_REPORT_NAME;
     }
 
+    @Override
     void parseArguments(List<String> args) {
       boolean ignoreUnrelatedOptions = verifyOpArgument(args);
       for (int i = 2; i < args.size(); i++) {       // parse command line
@@ -982,6 +1012,7 @@ public class NNThroughputBenchmark {
       }
     }
 
+    @Override
     void generateInputs(int[] ignore) throws IOException {
       int nrDatanodes = getNumDatanodes();
       int nrBlocks = (int)Math.ceil((double)blocksPerReport * nrDatanodes 
@@ -1043,22 +1074,25 @@ public class NNThroughputBenchmark {
     /**
      * Does not require the argument
      */
+    @Override
     String getExecutionArgument(int daemonId) {
       return null;
     }
 
+    @Override
     long executeOp(int daemonId, int inputIdx, String ignore) throws IOException {
       assert daemonId < numThreads : "Wrong daemonId.";
       TinyDatanode dn = datanodes[daemonId];
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       StorageBlockReport[] report = { new StorageBlockReport(
           dn.storage, dn.getBlockReportList()) };
       nameNodeProto.blockReport(dn.dnRegistration, nameNode.getNamesystem()
           .getBlockPoolId(), report);
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       return end-start;
     }
 
+    @Override
     void printResults() {
       String blockDistribution = "";
       String delim = "(";
@@ -1119,10 +1153,12 @@ public class NNThroughputBenchmark {
       numPendingBlocks = 0;
     }
 
+    @Override
     String getOpName() {
       return OP_REPLICATION_NAME;
     }
 
+    @Override
     void parseArguments(List<String> args) {
       boolean ignoreUnrelatedOptions = verifyOpArgument(args);
       for (int i = 2; i < args.size(); i++) {       // parse command line
@@ -1146,6 +1182,7 @@ public class NNThroughputBenchmark {
       }
     }
 
+    @Override
     void generateInputs(int[] ignore) throws IOException {
       final FSNamesystem namesystem = nameNode.getNamesystem();
 
@@ -1192,23 +1229,26 @@ public class NNThroughputBenchmark {
     /**
      * Does not require the argument
      */
+    @Override
     String getExecutionArgument(int daemonId) {
       return null;
     }
 
+    @Override
     long executeOp(int daemonId, int inputIdx, String ignore) throws IOException {
       assert daemonId < numThreads : "Wrong daemonId.";
-      long start = System.currentTimeMillis();
+      long start = Time.now();
       // compute data-node work
       int work = BlockManagerTestUtil.getComputedDatanodeWork(
           nameNode.getNamesystem().getBlockManager());
-      long end = System.currentTimeMillis();
+      long end = Time.now();
       numPendingBlocks += work;
       if(work == 0)
         daemons.get(daemonId).terminate();
       return end-start;
     }
 
+    @Override
     void printResults() {
       String blockDistribution = "";
       String delim = "(";
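
Two mechanical changes dominate the NNThroughputBenchmark hunks: @Override annotations on the overridden per-operation methods, and System.currentTimeMillis() replaced by org.apache.hadoop.util.Time.now(), routing clock reads through a single Hadoop utility. The timing pattern every executeOp() shares looks like this sketch (doRpc() is a placeholder for the NameNode call being measured):

    import org.apache.hadoop.util.Time;

    public class TimedOpSketch {
      static long timedOp() {
        long start = Time.now();    // wall-clock millis via Hadoop's Time utility
        doRpc();                    // placeholder for nameNodeProto.create/delete/...
        long end = Time.now();
        return end - start;         // elapsed millis, the value executeOp() returns
      }

      static void doRpc() { /* no-op stand-in */ }
    }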

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Fri Aug  3 19:00:15 2012
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Options.Rename;
@@ -46,7 +47,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 /**
  * OfflineEditsViewerHelper is a helper class for TestOfflineEditsViewer,
@@ -205,6 +205,7 @@ public class OfflineEditsViewerHelper {
       "JobTracker/foo.com@FOO.COM");
     try {
       longUgi.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
         public Object run() throws IOException, InterruptedException {
           token.renew(config);
           token.cancel(config);
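
The @Override added inside the PrivilegedExceptionAction above is another commit-wide pattern: annotating interface methods implemented by anonymous classes, which is legal since Java 6 and lets the compiler flag a mistyped signature. A JDK-only sketch of the same shape:

    import java.security.PrivilegedExceptionAction;

    public class AnonymousOverrideSketch {
      public static void main(String[] args) throws Exception {
        PrivilegedExceptionAction<String> action =
            new PrivilegedExceptionAction<String>() {
              @Override                 // compiler verifies this matches run()
              public String run() {
                return "ran";
              }
            };
        System.out.println(action.run());
      }
    }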

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java Fri Aug  3 19:00:15 2012
@@ -18,9 +18,10 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
-import org.junit.Before;
-import org.junit.After;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -44,6 +45,8 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 import org.apache.log4j.RollingFileAppender;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 /**

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Fri Aug  3 19:00:15 2012
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java Fri Aug  3 19:00:15 2012
@@ -17,9 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
-import junit.framework.Assert;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -120,7 +122,7 @@ public class TestCheckPointForSecurityTo
         renewToken(token1);
         renewToken(token2);
       } catch (IOException e) {
-        Assert.fail("Could not renew or cancel the token");
+        fail("Could not renew or cancel the token");
       }
 
       namesystem = cluster.getNamesystem();
@@ -148,7 +150,7 @@ public class TestCheckPointForSecurityTo
         renewToken(token5);
 
       } catch (IOException e) {
-        Assert.fail("Could not renew or cancel the token");
+        fail("Could not renew or cancel the token");
       }
 
       // restart cluster again
@@ -171,7 +173,7 @@ public class TestCheckPointForSecurityTo
         renewToken(token5);
         cancelToken(token5);
       } catch (IOException e) {
-        Assert.fail("Could not renew or cancel the token");
+        fail("Could not renew or cancel the token");
       }
 
     } finally {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Fri Aug  3 19:00:15 2012
@@ -18,10 +18,19 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import junit.framework.TestCase;
-import java.net.InetSocketAddress;
+import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints;
+import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -29,11 +38,10 @@ import java.util.List;
 import java.util.Random;
 
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -42,12 +50,14 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -60,8 +70,11 @@ import org.apache.hadoop.hdfs.tools.DFSA
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
+import org.junit.Before;
+import org.junit.Test;
 import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -74,13 +87,10 @@ import com.google.common.collect.Immutab
 import com.google.common.collect.Lists;
 import com.google.common.primitives.Ints;
 
-import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints;
-import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs;
-
 /**
  * This class tests the creation and validation of a checkpoint.
  */
-public class TestCheckpoint extends TestCase {
+public class TestCheckpoint {
 
   static {
     ((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
@@ -96,7 +106,7 @@ public class TestCheckpoint extends Test
 
   private CheckpointFaultInjector faultInjector;
     
-  @Override
+  @Before
   public void setUp() throws IOException {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
     
@@ -135,6 +145,7 @@ public class TestCheckpoint extends Test
   /*
    * Verify that namenode does not startup if one namedir is bad.
    */
+  @Test
   public void testNameDirError() throws IOException {
     LOG.info("Starting testNameDirError");
     Configuration conf = new HdfsConfiguration();
@@ -176,6 +187,7 @@ public class TestCheckpoint extends Test
    * correctly (by removing the storage directory)
    * See https://issues.apache.org/jira/browse/HDFS-2011
    */
+  @Test
   public void testWriteTransactionIdHandlesIOE() throws Exception {
     LOG.info("Check IOException handled correctly by writeTransactionIdFile");
     ArrayList<URI> fsImageDirs = new ArrayList<URI>();
@@ -210,6 +222,7 @@ public class TestCheckpoint extends Test
   /*
    * Simulate namenode crashing after rolling edit log.
    */
+  @Test
   public void testSecondaryNamenodeError1()
     throws IOException {
     LOG.info("Starting testSecondaryNamenodeError1");
@@ -275,6 +288,7 @@ public class TestCheckpoint extends Test
   /*
    * Simulate a namenode crash after uploading new image
    */
+  @Test
   public void testSecondaryNamenodeError2() throws IOException {
     LOG.info("Starting testSecondaryNamenodeError2");
     Configuration conf = new HdfsConfiguration();
@@ -336,6 +350,7 @@ public class TestCheckpoint extends Test
   /*
    * Simulate a secondary namenode crash after rolling the edit log.
    */
+  @Test
   public void testSecondaryNamenodeError3() throws IOException {
     LOG.info("Starting testSecondaryNamenodeError3");
     Configuration conf = new HdfsConfiguration();
@@ -408,6 +423,7 @@ public class TestCheckpoint extends Test
    * back to the name-node.
    * Used to truncate primary fsimage file.
    */
+  @Test
   public void testSecondaryFailsToReturnImage() throws IOException {
     Mockito.doThrow(new IOException("If this exception is not caught by the " +
         "name-node, fs image will be truncated."))
@@ -421,6 +437,7 @@ public class TestCheckpoint extends Test
    * before even setting the length header. This used to cause image
    * truncation. Regression test for HDFS-3330.
    */
+  @Test
   public void testSecondaryFailsWithErrorBeforeSettingHeaders()
       throws IOException {
     Mockito.doThrow(new Error("If this exception is not caught by the " +
@@ -493,6 +510,7 @@ public class TestCheckpoint extends Test
    * The length header in the HTTP transfer should prevent
    * this from corrupting the NN.
    */
+  @Test
   public void testNameNodeImageSendFailWrongSize()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongSize");
@@ -507,6 +525,7 @@ public class TestCheckpoint extends Test
    * The digest header in the HTTP transfer should prevent
    * this from corrupting the NN.
    */
+  @Test
   public void testNameNodeImageSendFailWrongDigest()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongDigest");
@@ -524,7 +543,7 @@ public class TestCheckpoint extends Test
   private void doSendFailTest(String exceptionSubstring)
       throws IOException {
     Configuration conf = new HdfsConfiguration();
-    Path file1 = new Path("checkpoint-doSendFailTest-" + getName() + ".dat");
+    Path file1 = new Path("checkpoint-doSendFailTest-doSendFailTest.dat");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(numDatanodes)
                                                .build();
@@ -570,6 +589,7 @@ public class TestCheckpoint extends Test
    * Test that the NN locks its storage and edits directories, and won't start up
    * if the directories are already locked
    **/
+  @Test
   public void testNameDirLocking() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -599,6 +619,7 @@ public class TestCheckpoint extends Test
    * Test that, if the edits dir is separate from the name dir, it is
    * properly locked.
    **/
+  @Test
   public void testSeparateEditsDirLocking() throws IOException {
     Configuration conf = new HdfsConfiguration();
     File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
@@ -634,6 +655,7 @@ public class TestCheckpoint extends Test
   /**
    * Test that the SecondaryNameNode properly locks its storage directories.
    */
+  @Test
   public void testSecondaryNameNodeLocking() throws Exception {
     // Start a primary NN so that the secondary will start successfully
     Configuration conf = new HdfsConfiguration();
@@ -679,6 +701,39 @@ public class TestCheckpoint extends Test
     }
   }
   
+  /**
+   * Test that, an attempt to lock a storage that is already locked by a nodename,
+   * logs error message that includes JVM name of the namenode that locked it.
+   */
+  @Test
+  public void testStorageAlreadyLockedErrorMessage() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+      .numDataNodes(0)
+      .build();
+    
+    StorageDirectory savedSd = null;
+    try {
+      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
+      for (StorageDirectory sd : storage.dirIterable(null)) {
+        assertLockFails(sd);
+        savedSd = sd;
+      }
+      
+      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(Storage.class));
+      try {
+        // try to lock the storage that's already locked
+        savedSd.lock();
+        fail("Namenode should not be able to lock a storage that is already locked");
+      } catch (IOException ioe) {
+        String jvmName = ManagementFactory.getRuntimeMXBean().getName();
+        assertTrue("Error message does not include JVM name '" + jvmName 
+            + "'", logs.getOutput().contains(jvmName));
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 
   /**
    * Assert that the given storage directory can't be locked, because
@@ -727,6 +782,7 @@ public class TestCheckpoint extends Test
    * 2. if the NN does not contain an image, importing a checkpoint
    *    succeeds and re-saves the image
    */
+  @Test
   public void testImportCheckpoint() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Path testPath = new Path("/testfile");
@@ -825,6 +881,7 @@ public class TestCheckpoint extends Test
   /**
    * Tests checkpoint in HDFS.
    */
+  @Test
   public void testCheckpoint() throws IOException {
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");
@@ -915,6 +972,7 @@ public class TestCheckpoint extends Test
   /**
    * Tests save namespace.
    */
+  @Test
   public void testSaveNamespace() throws IOException {
     MiniDFSCluster cluster = null;
     DistributedFileSystem fs = null;
@@ -1021,6 +1079,7 @@ public class TestCheckpoint extends Test
   
   /* Test case to test CheckpointSignature */
   @SuppressWarnings("deprecation")
+  @Test
   public void testCheckpointSignature() throws IOException {
 
     MiniDFSCluster cluster = null;
@@ -1055,6 +1114,7 @@ public class TestCheckpoint extends Test
    * - it then fails again for the same reason
    * - it then tries to checkpoint a third time
    */
+  @Test
   public void testCheckpointAfterTwoFailedUploads() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1111,6 +1171,7 @@ public class TestCheckpoint extends Test
    * 
    * @throws IOException
    */
+  @Test
   public void testMultipleSecondaryNamenodes() throws IOException {
     Configuration conf = new HdfsConfiguration();
     String nameserviceId1 = "ns1";
@@ -1161,6 +1222,7 @@ public class TestCheckpoint extends Test
    * Test that the secondary doesn't have to re-download image
    * if it hasn't changed.
    */
+  @Test
   public void testSecondaryImageDownload() throws IOException {
     LOG.info("Starting testSecondaryImageDownload");
     Configuration conf = new HdfsConfiguration();
@@ -1243,6 +1305,7 @@ public class TestCheckpoint extends Test
    * It verifies that this works even though the earlier-txid checkpoint gets
    * uploaded after the later-txid checkpoint.
    */
+  @Test
   public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
     Configuration conf = new HdfsConfiguration();
 
@@ -1328,6 +1391,7 @@ public class TestCheckpoint extends Test
    * It verifies that one of the two gets an error that it's uploading a
    * duplicate checkpoint, and the other one succeeds.
    */
+  @Test
   public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
     Configuration conf = new HdfsConfiguration();
 
@@ -1350,6 +1414,7 @@ public class TestCheckpoint extends Test
       final Answer<Object> delegator = new GenericTestUtils.DelegateAnswer(origNN);
       NamenodeProtocol spyNN = Mockito.mock(NamenodeProtocol.class, delegator);
       DelayAnswer delayer = new DelayAnswer(LOG) {
+        @Override
         protected Object passThrough(InvocationOnMock invocation) throws Throwable {
           return delegator.answer(invocation);
         }
@@ -1420,6 +1485,7 @@ public class TestCheckpoint extends Test
    * is running. The secondary should shut itself down if if talks to a NN
    * with the wrong namespace.
    */
+  @Test
   public void testReformatNNBetweenCheckpoints() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1477,6 +1543,7 @@ public class TestCheckpoint extends Test
    * Test that the primary NN will not serve any files to a 2NN who doesn't
    * share its namespace ID, and also will not accept any files from one.
    */
+  @Test
   public void testNamespaceVerifiedOnFileTransfer() throws IOException {
     MiniDFSCluster cluster = null;
     
@@ -1538,6 +1605,7 @@ public class TestCheckpoint extends Test
    * the non-failed storage directory receives the checkpoint.
    */
   @SuppressWarnings("deprecation")
+  @Test
   public void testCheckpointWithFailedStorageDir() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1602,6 +1670,7 @@ public class TestCheckpoint extends Test
    * @throws Exception
    */
   @SuppressWarnings("deprecation")
+  @Test
   public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1674,6 +1743,7 @@ public class TestCheckpoint extends Test
   /**
    * Test that the 2NN triggers a checkpoint after the configurable interval
    */
+  @Test
   public void testCheckpointTriggerOnTxnCount() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1727,6 +1797,7 @@ public class TestCheckpoint extends Test
    * logs that connect the 2NN's old checkpoint to the current txid
    * get archived. Then, the 2NN tries to checkpoint again.
    */
+  @Test
   public void testSecondaryHasVeryOutOfDateImage() throws IOException {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1764,6 +1835,7 @@ public class TestCheckpoint extends Test
     }
   }
   
+  @Test
   public void testCommandLineParsing() throws ParseException {
     SecondaryNameNode.CommandLineOpts opts =
       new SecondaryNameNode.CommandLineOpts();
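
TestCheckpoint is migrated from JUnit 3 to JUnit 4: the class no longer extends TestCase, setUp() is marked @Before instead of @Override, and every test method gains @Test because JUnit 4 discovers tests by annotation rather than by the "test" name prefix. Losing the base class also removes helpers such as getName(), which is why doSendFailTest() now hard-codes its file name. The skeleton of the migration, as a sketch:

    import org.junit.Before;
    import org.junit.Test;

    public class JUnit4MigrationSketch {   // note: no "extends TestCase"
      @Before
      public void setUp() {
        // per-test setup; previously an @Override of TestCase.setUp()
      }

      @Test
      public void testSomething() {
        // discovered via the annotation, not the method-name prefix
      }
    }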

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Fri Aug  3 19:00:15 2012
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
@@ -30,7 +30,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.PrintStream;
 import java.net.URI;
-import java.security.Permission;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
@@ -43,6 +42,8 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -69,7 +70,7 @@ public class TestClusterId {
 
   @Before
   public void setUp() throws IOException {
-    System.setSecurityManager(new NoExitSecurityManager());
+    ExitUtil.disableSystemExit();
 
     String baseDir = System.getProperty("test.build.data", "build/test/data");
 
@@ -90,8 +91,6 @@ public class TestClusterId {
 
   @After
   public void tearDown() throws IOException {
-    System.setSecurityManager(null);
-
     if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
       throw new IOException("Could not tearDown test directory '" + hdfsDir
           + "'");
@@ -446,32 +445,4 @@ public class TestClusterId {
     File version = new File(hdfsDir, "current/VERSION");
     assertFalse("Check version should not exist", version.exists());
   }
-
-  private static class ExitException extends SecurityException {
-    private static final long serialVersionUID = 1L;
-    public final int status;
-
-    public ExitException(int status) {
-      super("There is no escape!");
-      this.status = status;
-    }
-  }
-
-  private static class NoExitSecurityManager extends SecurityManager {
-    @Override
-    public void checkPermission(Permission perm) {
-      // allow anything.
-    }
-
-    @Override
-    public void checkPermission(Permission perm, Object context) {
-      // allow anything.
-    }
-
-    @Override
-    public void checkExit(int status) {
-      super.checkExit(status);
-      throw new ExitException(status);
-    }
-  }
 }
\ No newline at end of file
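
TestClusterId drops its hand-rolled NoExitSecurityManager in favor of Hadoop's ExitUtil: after ExitUtil.disableSystemExit(), code that terminates through ExitUtil throws ExitUtil.ExitException instead of exiting the JVM, so a test can catch it and inspect the status. Note this only intercepts exits routed through ExitUtil, not direct System.exit() calls. A sketch of the pattern, assuming ExitUtil behaves as this hunk's imports suggest:

    import org.apache.hadoop.util.ExitUtil;
    import org.apache.hadoop.util.ExitUtil.ExitException;

    public class ExitUtilSketch {
      public static void main(String[] args) {
        ExitUtil.disableSystemExit();   // terminate() now throws rather than exits
        try {
          ExitUtil.terminate(1);        // stands in for the code path under test
        } catch (ExitException e) {
          System.out.println("caught exit, status=" + e.status);
        }
      }
    }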

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java Fri Aug  3 19:00:15 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertTrue;
+
 import java.net.URL;
 import java.util.Collection;
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Fri Aug  3 19:00:15 2012
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.concurrent.TimeoutException;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Test;
 
@@ -64,10 +66,10 @@ public class TestDeadDatanode {
    */
   private void waitForDatanodeState(String nodeID, boolean alive, int waitTime)
       throws TimeoutException, InterruptedException {
-    long stopTime = System.currentTimeMillis() + waitTime;
+    long stopTime = Time.now() + waitTime;
     FSNamesystem namesystem = cluster.getNamesystem();
     String state = alive ? "alive" : "dead";
-    while (System.currentTimeMillis() < stopTime) {
+    while (Time.now() < stopTime) {
       final DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(
           namesystem, nodeID);
       if (dd.isAlive == alive) {
@@ -120,7 +122,7 @@ public class TestDeadDatanode {
     // Ensure blockReceived call from dead datanode is rejected with IOException
     try {
       dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
-      Assert.fail("Expected IOException is not thrown");
+      fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected
     }
@@ -131,7 +133,7 @@ public class TestDeadDatanode {
         new long[] { 0L, 0L, 0L }) };
     try {
       dnp.blockReport(reg, poolId, report);
-      Assert.fail("Expected IOException is not thrown");
+      fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected
     }
@@ -141,8 +143,8 @@ public class TestDeadDatanode {
     StorageReport[] rep = { new StorageReport(reg.getStorageID(), false, 0, 0,
         0, 0) };
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0, 0, 0).getCommands();
-    Assert.assertEquals(1, cmd.length);
-    Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
+    assertEquals(1, cmd.length);
+    assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());
   }
 }
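
Besides swapping junit.framework.Assert for static org.junit.Assert imports, TestDeadDatanode's waitForDatanodeState() illustrates the deadline-polling idiom after the Time.now() switch: compute a stop time once, then poll until the condition holds or the deadline passes. A condensed sketch (conditionMet() and the sleep interval are placeholders):

    import java.util.concurrent.TimeoutException;

    import org.apache.hadoop.util.Time;

    public class DeadlinePollSketch {
      static void waitFor(long waitTimeMs)
          throws TimeoutException, InterruptedException {
        long stopTime = Time.now() + waitTimeMs;   // deadline, as in waitForDatanodeState()
        while (Time.now() < stopTime) {
          if (conditionMet()) {
            return;
          }
          Thread.sleep(100);                       // back off between polls
        }
        throw new TimeoutException("condition not met within " + waitTimeMs + " ms");
      }

      static boolean conditionMet() { return false; }   // placeholder condition
    }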

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Fri Aug  3 19:00:15 2012
@@ -17,20 +17,35 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintWriter;
+import java.io.RandomAccessFile;
+import java.io.StringWriter;
 import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Arrays;
+import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.Random;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -41,36 +56,32 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.*;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.aspectj.util.FileUtil;
-
-import org.mockito.Mockito;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
-import static org.apache.hadoop.test.MetricsAsserts.*;
-
 /**
  * This class tests the creation and validation of a checkpoint.
  */
-public class TestEditLog extends TestCase {
+public class TestEditLog {
   
   static {
     ((Log4JLogger)FSEditLog.LOG).getLogger().setLevel(Level.ALL);
@@ -129,6 +140,7 @@ public class TestEditLog extends TestCas
     }
 
     // add a bunch of transactions.
+    @Override
     public void run() {
       PermissionStatus p = namesystem.createFsOwnerPermissions(
                                           new FsPermission((short)0777));
@@ -161,6 +173,7 @@ public class TestEditLog extends TestCas
   /**
    * Test case for an empty edit log from a prior version of Hadoop.
    */
+  @Test
   public void testPreTxIdEditLogNoEdits() throws Exception {
     FSNamesystem namesys = Mockito.mock(FSNamesystem.class);
     namesys.dir = Mockito.mock(FSDirectory.class);
@@ -174,6 +187,7 @@ public class TestEditLog extends TestCas
    * Test case for loading a very simple edit log from a format
    * prior to the inclusion of edit transaction IDs in the log.
    */
+  @Test
   public void testPreTxidEditLogWithEdits() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
@@ -202,6 +216,7 @@ public class TestEditLog extends TestCas
   /**
    * Simple test for writing to and rolling the edit log.
    */
+  @Test
   public void testSimpleEditLog() throws IOException {
     // start a cluster 
     Configuration conf = new HdfsConfiguration();
@@ -246,6 +261,7 @@ public class TestEditLog extends TestCas
   /**
    * Tests transaction logging in dfs.
    */
+  @Test
   public void testMultiThreadedEditLog() throws IOException {
     testEditLog(2048);
     // force edit buffer to automatically sync on each log of edit log entry
@@ -363,6 +379,7 @@ public class TestEditLog extends TestCas
     final String filename) throws Exception
   {
     exec.submit(new Callable<Void>() {
+      @Override
       public Void call() {
         log.logSetReplication(filename, (short)1);
         return null;
@@ -374,6 +391,7 @@ public class TestEditLog extends TestCas
     throws Exception
   {
     exec.submit(new Callable<Void>() {
+      @Override
       public Void call() {
         log.logSync();
         return null;
@@ -385,6 +403,7 @@ public class TestEditLog extends TestCas
     throws Exception
   {
     exec.submit(new Callable<Void>() {
+      @Override
       public Void call() throws Exception {
         log.logSyncAll();
         return null;
@@ -392,6 +411,7 @@ public class TestEditLog extends TestCas
     }).get();
   }
 
+  @Test
   public void testSyncBatching() throws Exception {
     // start a cluster 
     Configuration conf = new HdfsConfiguration();
@@ -454,6 +474,7 @@ public class TestEditLog extends TestCas
    * This sequence is legal and can occur if enterSafeMode() is closely
    * followed by saveNamespace.
    */
+  @Test
   public void testBatchedSyncWithClosedLogs() throws Exception {
     // start a cluster 
     Configuration conf = new HdfsConfiguration();
@@ -493,6 +514,7 @@ public class TestEditLog extends TestCas
     }
   }
   
+  @Test
   public void testEditChecksum() throws Exception {
     // start a cluster 
     Configuration conf = new HdfsConfiguration();
@@ -544,6 +566,7 @@ public class TestEditLog extends TestCas
    * Test what happens if the NN crashes when it has has started but
    * had no transactions written.
    */
+  @Test
   public void testCrashRecoveryNoTransactions() throws Exception {
     testCrashRecovery(0);
   }
@@ -552,6 +575,7 @@ public class TestEditLog extends TestCas
    * Test what happens if the NN crashes when it has has started and
    * had a few transactions written
    */
+  @Test
   public void testCrashRecoveryWithTransactions() throws Exception {
     testCrashRecovery(150);
   }
@@ -661,22 +685,26 @@ public class TestEditLog extends TestCas
   }
   
   // should succeed - only one corrupt log dir
+  @Test
   public void testCrashRecoveryEmptyLogOneDir() throws Exception {
     doTestCrashRecoveryEmptyLog(false, true, true);
   }
   
   // should fail - seen_txid updated to 3, but no log dir contains txid 3
+  @Test
   public void testCrashRecoveryEmptyLogBothDirs() throws Exception {
     doTestCrashRecoveryEmptyLog(true, true, false);
   }
 
   // should succeed - only one corrupt log dir
+  @Test
   public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId() 
       throws Exception {
     doTestCrashRecoveryEmptyLog(false, false, true);
   }
   
   // should succeed - both log dirs corrupt, but seen_txid never updated
+  @Test
   public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId()
       throws Exception {
     doTestCrashRecoveryEmptyLog(true, false, true);
@@ -824,6 +852,7 @@ public class TestEditLog extends TestCas
     }
   }
 
+  @Test
   public void testFailedOpen() throws Exception {
     File logDir = new File(TEST_DIR, "testFailedOpen");
     logDir.mkdirs();
@@ -845,6 +874,7 @@ public class TestEditLog extends TestCas
    * Regression test for HDFS-1112/HDFS-3020. Ensures that, even if
    * logSync isn't called periodically, the edit log will sync itself.
    */
+  @Test
   public void testAutoSync() throws Exception {
     File logDir = new File(TEST_DIR, "testAutoSync");
     logDir.mkdirs();
@@ -1144,6 +1174,7 @@ public class TestEditLog extends TestCas
     final long endGapTxId = 2*TXNS_PER_ROLL;
 
     File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+        @Override
         public boolean accept(File dir, String name) {
           if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, 
                                   endGapTxId))) {
@@ -1278,6 +1309,7 @@ public class TestEditLog extends TestCas
     final long endErrorTxId = 2*TXNS_PER_ROLL;
 
     File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+        @Override
         public boolean accept(File dir, String name) {
           if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, 
                                   endErrorTxId))) {
@@ -1316,6 +1348,7 @@ public class TestEditLog extends TestCas
     final long endErrorTxId = 2*TXNS_PER_ROLL;
 
     File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+        @Override
         public boolean accept(File dir, String name) {
           if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, 
                                   endErrorTxId))) {
@@ -1382,7 +1415,7 @@ public class TestEditLog extends TestCas
     }
 
     // How long does it take to read through all these edit logs?
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.now();
     try {
       cluster = new MiniDFSCluster.Builder(conf).
           numDataNodes(NUM_DATA_NODES).build();
@@ -1392,7 +1425,7 @@ public class TestEditLog extends TestCas
         cluster.shutdown();
       }
     }
-    long endTime = System.currentTimeMillis();
+    long endTime = Time.now();
     double delta = ((float)(endTime - startTime)) / 1000.0;
     LOG.info(String.format("loaded %d edit log segments in %.2f seconds",
         NUM_EDIT_LOG_ROLLS, delta));
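
The hunks above show the two mechanical changes applied across TestEditLog: test methods gain explicit @Test annotations (JUnit 4 discovers tests by annotation rather than by the JUnit 3 "test" name prefix), and timing code moves from System.currentTimeMillis() to Hadoop's Time.now() wrapper. A minimal sketch of the combined pattern, assuming JUnit 4 and hadoop-common on the classpath (the class and method names are illustrative, not from the patch):

    import static org.junit.Assert.assertTrue;

    import org.apache.hadoop.util.Time;
    import org.junit.Test;

    public class ElapsedTimeSketch {            // note: no "extends TestCase"
      @Test                                     // annotation-based discovery
      public void testElapsedTime() throws Exception {
        long start = Time.now();                // wraps System.currentTimeMillis()
        Thread.sleep(50);
        long end = Time.now();
        assertTrue("clock should advance", end - start >= 40);  // allow timer slack
      }
    }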

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java Fri Aug  3 19:00:15 2012
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
 import java.io.OutputStream;
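
This file, like several below, replaces a wildcard static import (import static org.junit.Assert.*) with the specific assertions the test uses, so readers and tools can see exactly which members are referenced. A compilable sketch of the convention (class name illustrative):

    import static org.junit.Assert.assertEquals;  // explicit and greppable,
    import static org.junit.Assert.fail;          // replacing org.junit.Assert.*

    import org.junit.Test;

    public class ExplicitImportSketch {
      @Test
      public void testSum() {
        int sum = 1 + 2;
        if (sum != 3) {
          fail("unexpected sum: " + sum);
        }
        assertEquals(3, sum);
      }
    }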

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java Fri Aug  3 19:00:15 2012
@@ -19,14 +19,11 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.anyInt;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.atLeast;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
 
 import java.io.File;
 import java.io.IOException;
@@ -39,18 +36,19 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
-import org.mockito.verification.VerificationMode;
 
 public class TestEditLogJournalFailures {
 
   private int editsPerformed = 0;
   private MiniDFSCluster cluster;
   private FileSystem fs;
-  private Runtime runtime;
 
   /**
    * Create the mini cluster for testing and sub in a custom runtime so that
@@ -64,23 +62,23 @@ public class TestEditLogJournalFailures 
   public void setUpMiniCluster(Configuration conf, boolean manageNameDfsDirs)
       throws IOException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-        .manageNameDfsDirs(manageNameDfsDirs).build();
+        .manageNameDfsDirs(manageNameDfsDirs).checkExitOnShutdown(false).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    
-    runtime = Runtime.getRuntime();
-    runtime = spy(runtime);
-    doNothing().when(runtime).exit(anyInt());
-    
-    cluster.getNameNode().getFSImage().getEditLog().setRuntimeForTesting(runtime);
   }
   
   @After
   public void shutDownMiniCluster() throws IOException {
     if (fs != null)
       fs.close();
-    if (cluster != null)
-      cluster.shutdown();
+    if (cluster != null) {
+      try {
+        cluster.shutdown();
+      } catch (ExitException ee) {
+        // Ignore ExitExceptions as the tests may result in the
+        // NameNode doing an immediate shutdown.
+      }
+    }
   }
    
   @Test
@@ -88,11 +86,9 @@ public class TestEditLogJournalFailures 
     assertTrue(doAnEdit());
     // Invalidate one edits journal.
     invalidateEditsDirAtIndex(0, true, false);
-    // Make sure runtime.exit(...) hasn't been called at all yet.
-    assertExitInvocations(0);
+    // The NN has not terminated (no ExitException thrown)
     assertTrue(doAnEdit());
-    // A single journal failure should not result in a call to runtime.exit(...).
-    assertExitInvocations(0);
+    // A single journal failure should not result in a call to terminate
     assertFalse(cluster.getNameNode().isInSafeMode());
   }
    
@@ -102,12 +98,17 @@ public class TestEditLogJournalFailures 
     // Invalidate both edits journals.
     invalidateEditsDirAtIndex(0, true, false);
     invalidateEditsDirAtIndex(1, true, false);
-    // Make sure runtime.exit(...) hasn't been called at all yet.
-    assertExitInvocations(0);
-    assertTrue(doAnEdit());
-    // The previous edit could not be synced to any persistent storage, should
-    // have halted the NN.
-    assertExitInvocations(1);
+    // No ExitException thrown so far; the next edit should terminate the NN.
+    try {
+      doAnEdit();
+      fail("The previous edit could not be synced to any persistent storage, "
+          + "should have halted the NN");
+    } catch (RemoteException re) {
+      assertTrue(re.getClassName().contains("ExitException"));
+      GenericTestUtils.assertExceptionContains(
+          "Could not sync enough journals to persistent storage. " +
+          "Unsynced transactions: 1", re);
+    }
   }
   
   @Test
@@ -116,12 +117,18 @@ public class TestEditLogJournalFailures 
     // Invalidate both edits journals.
     invalidateEditsDirAtIndex(0, true, true);
     invalidateEditsDirAtIndex(1, true, true);
-    // Make sure runtime.exit(...) hasn't been called at all yet.
-    assertExitInvocations(0);
-    assertTrue(doAnEdit());
-    // The previous edit could not be synced to any persistent storage, should
-    // have halted the NN.
-    assertExitInvocations(atLeast(1));
+    // No ExitException thrown so far; the next edit should terminate the NN.
+    try {
+      doAnEdit();
+      fail("The previous edit could not be synced to any persistent storage, "
+          + " should have halted the NN");
+    } catch (RemoteException re) {
+      assertTrue(re.getClassName().contains("ExitException"));
+      GenericTestUtils.assertExceptionContains(
+          "Could not sync enough journals to persistent storage due to " +
+          "No journals available to flush. " +
+          "Unsynced transactions: 1", re);
+    }
   }
   
   @Test
@@ -129,11 +136,9 @@ public class TestEditLogJournalFailures 
     assertTrue(doAnEdit());
     // Invalidate one edits journal.
     invalidateEditsDirAtIndex(0, false, false);
-    // Make sure runtime.exit(...) hasn't been called at all yet.
-    assertExitInvocations(0);
+    // The NN has not terminated (no ExitException thrown)
     assertTrue(doAnEdit());
-    // A single journal failure should not result in a call to runtime.exit(...).
-    assertExitInvocations(0);
+    // A single journal failure should not result in a call to terminate
     assertFalse(cluster.getNameNode().isInSafeMode());
   }
   
@@ -157,15 +162,19 @@ public class TestEditLogJournalFailures 
     EditLogFileOutputStream nonRequiredSpy =
       spyOnStream(nonRequiredJas);
     
-    // Make sure runtime.exit(...) hasn't been called at all yet.
-    assertExitInvocations(0);
+    // Make sure the NN has not terminated (no ExitException thrown)...
     
     // ..and that the other stream is active.
     assertTrue(nonRequiredJas.isActive());
     
-    // This will actually return true in the tests, since the NN will not in
-    // fact call Runtime.exit();
-    doAnEdit();
+    try {
+      doAnEdit();
+      fail("A single failure of a required journal should have halted the NN");
+    } catch (RemoteException re) {
+      assertTrue(re.getClassName().contains("ExitException"));
+      GenericTestUtils.assertExceptionContains(
+          "setReadyToFlush failed for required journal", re);
+    }
     
     // Since the required directory failed setReadyToFlush, and that
     // directory was listed prior to the non-required directory,
@@ -173,10 +182,6 @@ public class TestEditLogJournalFailures 
     // directory. Regression test for HDFS-2874.
     Mockito.verify(nonRequiredSpy, Mockito.never()).setReadyToFlush();
     assertFalse(nonRequiredJas.isActive());
-    
-    // A single failure of a required journal should result in a call to
-    // runtime.exit(...).
-    assertExitInvocations(atLeast(1));
   }
   
   @Test
@@ -201,28 +206,32 @@ public class TestEditLogJournalFailures 
     
     // All journals active.
     assertTrue(doAnEdit());
-    assertExitInvocations(0);
+    // The NN has not terminated (no ExitException thrown)
     
     // Invalidate 1/4 of the redundant journals.
     invalidateEditsDirAtIndex(0, false, false);
     assertTrue(doAnEdit());
-    assertExitInvocations(0);
+    // The NN has not terminated (no ExitException thrown)
 
     // Invalidate 2/4 of the redundant journals.
     invalidateEditsDirAtIndex(1, false, false);
     assertTrue(doAnEdit());
-    assertExitInvocations(0);
+    // The NN has not terminated (no ExitException thrown)
     
     // Invalidate 3/4 of the redundant journals.
     invalidateEditsDirAtIndex(2, false, false);
-    
-    // This will actually return true in the tests, since the NN will not in
-    // fact call Runtime.exit();
-    doAnEdit();
-    
-    // A failure of more than the minimum number of redundant journals should
-    // result in a call to runtime.exit(...).
-    assertExitInvocations(atLeast(1));
+
+    try {
+      doAnEdit();
+      fail("A failure of more than the minimum number of redundant journals "
+          + "should have halted ");
+    } catch (RemoteException re) {
+      assertTrue(re.getClassName().contains("ExitException"));
+      GenericTestUtils.assertExceptionContains(
+          "Could not sync enough journals to persistent storage due to " +
+          "setReadyToFlush failed for too many journals. " +
+          "Unsynced transactions: 1", re);
+    }
   }
 
   /**
@@ -275,25 +284,4 @@ public class TestEditLogJournalFailures 
   private boolean doAnEdit() throws IOException {
     return fs.mkdirs(new Path("/tmp", Integer.toString(editsPerformed++)));
   }
-  
-  /**
-   * Make sure that Runtime.exit(...) has been called exactly
-   * <code>expectedExits<code> number of times.
-   * 
-   * @param expectedExits the exact number of times Runtime.exit(...) should
-   *                      have been called.
-   */
-  private void assertExitInvocations(int expectedExits) {
-    assertExitInvocations(times(expectedExits));
-  }
-
-  /**
-   * Make sure that Runtime.exit(...) has been called
-   * <code>expectedExits<code> number of times.
-   * 
-   * @param expectedExits the number of times Runtime.exit(...) should have been called.
-   */
-  private void assertExitInvocations(VerificationMode expectedExits) {
-    verify(runtime, expectedExits).exit(anyInt());
-  }
 }
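
The rewrite above drops the Mockito spy on Runtime.exit() entirely: the NameNode now terminates through ExitUtil, the builder's checkExitOnShutdown(false) keeps the expected exit from failing cluster teardown, and the tests assert on the ExitException that arrives wrapped in a RemoteException at the RPC client. A minimal sketch of the ExitUtil side of this pattern, independent of the mini cluster and with an illustrative message string (assumes hadoop-common and its test artifact on the classpath):

    import org.apache.hadoop.test.GenericTestUtils;
    import org.apache.hadoop.util.ExitUtil;
    import org.apache.hadoop.util.ExitUtil.ExitException;

    public class ExitUtilSketch {
      public static void main(String[] args) {
        ExitUtil.disableSystemExit();      // terminate() now throws instead of exiting
        try {
          ExitUtil.terminate(1, "simulated fatal journal failure");
        } catch (ExitException ee) {
          // ee.status carries the would-be exit code; the JVM is still alive.
          GenericTestUtils.assertExceptionContains("simulated fatal", ee);
          System.out.println("caught exit, status=" + ee.status);
        }
      }
    }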

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Fri Aug  3 19:00:15 2012
@@ -17,40 +17,41 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.commons.logging.Log;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
 
-import java.io.*;
+import java.io.File;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.*;
-
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
-
-import static org.junit.Assert.*;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import static org.mockito.Mockito.*;
-
 /**
  * This class tests various synchronization bugs in FSEditLog rolling
  * and namespace saving.
@@ -111,6 +112,7 @@ public class TestEditLogRace {
     }
 
     // add a bunch of transactions.
+    @Override
     public void run() {
       thr = Thread.currentThread();
       PermissionStatus p = namesystem.createFsOwnerPermissions(
@@ -367,6 +369,7 @@ public class TestEditLogRace {
       final CountDownLatch waitToEnterFlush = new CountDownLatch(1);
       
       final Thread doAnEditThread = new Thread() {
+        @Override
         public void run() {
           try {
             LOG.info("Starting mkdirs");
@@ -410,9 +413,9 @@ public class TestEditLogRace {
       LOG.info("Trying to enter safe mode.");
       LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
       
-      long st = System.currentTimeMillis();
+      long st = Time.now();
       namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      long et = System.currentTimeMillis();
+      long et = Time.now();
       LOG.info("Entered safe mode");
       // Make sure we really waited for the flush to complete!
       assertTrue(et - st > (BLOCK_TIME - 1)*1000);
@@ -462,6 +465,7 @@ public class TestEditLogRace {
       final CountDownLatch waitToEnterSync = new CountDownLatch(1);
       
       final Thread doAnEditThread = new Thread() {
+        @Override
         public void run() {
           try {
             LOG.info("Starting mkdirs");
@@ -503,9 +507,9 @@ public class TestEditLogRace {
       LOG.info("Trying to enter safe mode.");
       LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits");
       
-      long st = System.currentTimeMillis();
+      long st = Time.now();
       namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      long et = System.currentTimeMillis();
+      long et = Time.now();
       LOG.info("Entered safe mode");
       // Make sure we really waited for the flush to complete!
       assertTrue(et - st > (BLOCK_TIME - 1)*1000);
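
The safe-mode tests above coordinate a background edit thread with the main thread through a CountDownLatch and then measure how long setSafeMode() blocked. A self-contained sketch of that latch-then-measure pattern, with a sleep standing in for the blocked flush (assumes hadoop-common for Time):

    import java.util.concurrent.CountDownLatch;

    import org.apache.hadoop.util.Time;

    public class LatchRaceSketch {
      public static void main(String[] args) throws Exception {
        final CountDownLatch entered = new CountDownLatch(1);
        Thread editor = new Thread() {
          @Override                        // compiler-checked, as added in this commit
          public void run() {
            entered.countDown();           // signal: the slow phase has begun
            try {
              Thread.sleep(2000);          // stands in for the edit holding the log
            } catch (InterruptedException ignored) {
            }
          }
        };
        editor.start();
        entered.await();                   // don't start the clock too early
        long st = Time.now();
        editor.join();                     // stands in for the call expected to block
        long et = Time.now();
        System.out.println("blocked for " + (et - st) + " ms (expect ~2000)");
      }
    }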

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java Fri Aug  3 19:00:15 2012
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Fri Aug  3 19:00:15 2012
@@ -21,6 +21,8 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import java.io.BufferedInputStream;
 import java.io.File;
@@ -30,7 +32,6 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
 import java.util.Map;
-import java.util.Set;
 import java.util.SortedMap;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -44,20 +45,14 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
 import com.google.common.io.Files;
 
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.spy;
-
 public class TestFSEditLogLoader {
   
   static {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java Fri Aug  3 19:00:15 2012
@@ -17,19 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
-
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.junit.Test;
 
 public class TestFSImageStorageInspector {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java Fri Aug  3 19:00:15 2012
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
 import java.net.URI;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java Fri Aug  3 19:00:15 2012
@@ -17,34 +17,36 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.net.URI;
 import java.util.Collections;
-import java.util.List;
 import java.util.Iterator;
+import java.util.List;
 import java.util.PriorityQueue;
 
-import java.io.RandomAccessFile;
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import org.junit.Test;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
+import org.junit.Test;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
 
 public class TestFileJournalManager {
   static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
@@ -296,6 +298,7 @@ public class TestFileJournalManager {
     final long startGapTxId = 3*TXNS_PER_ROLL + 1;
     final long endGapTxId = 4*TXNS_PER_ROLL;
     File[] files = new File(f, "current").listFiles(new FilenameFilter() {
+        @Override
         public boolean accept(File dir, String name) {
           if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId))) {
             return true;
@@ -327,6 +330,7 @@ public class TestFileJournalManager {
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
 
     File[] files = new File(f, "current").listFiles(new FilenameFilter() {
+        @Override
         public boolean accept(File dir, String name) {
           if (name.startsWith("edits_inprogress")) {
             return true;
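
The @Override annotations added to these anonymous FilenameFilter implementations make the compiler reject a method that fails to override its interface counterpart, for example after a signature typo. A standalone sketch of the filter idiom used above, with an illustrative directory path:

    import java.io.File;
    import java.io.FilenameFilter;

    public class EditsFilterSketch {
      public static void main(String[] args) {
        File current = new File("current");      // illustrative path
        File[] inProgress = current.listFiles(new FilenameFilter() {
          @Override                              // compiler-checked override
          public boolean accept(File dir, String name) {
            return name.startsWith("edits_inprogress");
          }
        });
        System.out.println(inProgress == null ? "no such directory"
            : inProgress.length + " in-progress segment(s)");
      }
    }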

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java Fri Aug  3 19:00:15 2012
@@ -17,27 +17,28 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.junit.Test;
 
 
 /**
  * This class tests that a file system adheres to the limit of
  * maximum number of files that is configured.
  */
-public class TestFileLimit extends TestCase {
+public class TestFileLimit {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
   boolean simulatedStorage = false;
@@ -75,6 +76,7 @@ public class TestFileLimit extends TestC
   /**
    * Test that file data becomes available before file is closed.
    */
+  @Test
   public void testFileLimit() throws IOException {
     Configuration conf = new HdfsConfiguration();
     int maxObjects = 5;
@@ -166,6 +168,7 @@ public class TestFileLimit extends TestC
     }
   }
 
+  @Test
   public void testFileLimitSimulated() throws IOException {
     simulatedStorage = true;
     testFileLimit();
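
TestFileLimit keeps its JUnit 3 shape of one test re-running another after flipping a fixture flag; this still works under JUnit 4 because @Test methods are ordinary public methods that can be called directly. A sketch of the pattern, with illustrative names:

    import static org.junit.Assert.assertTrue;

    import org.junit.Test;

    public class FlagReuseSketch {
      private boolean simulatedStorage = false;

      @Test
      public void testLimit() {
        runScenario();                     // real-storage variant
      }

      @Test
      public void testLimitSimulated() {
        simulatedStorage = true;           // flip the fixture flag...
        runScenario();                     // ...and reuse the same body
      }

      private void runScenario() {
        // stand-in for the actual file-limit assertions
        assertTrue("scenario should run in either mode",
            simulatedStorage || !simulatedStorage);
      }
    }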

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Fri Aug  3 19:00:15 2012
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.mock;
@@ -30,12 +31,11 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import org.junit.Before;
 import org.junit.Test;