Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/04/20 04:28:21 UTC

svn commit: r1095253 [4/4] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/had...

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1095253&r1=1095252&r2=1095253&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Apr 20 02:28:19 2011
@@ -554,7 +554,7 @@ public class NNThroughputBenchmark {
       // dummyActionNoSynch(fileIdx);
       nameNode.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
                       clientName, new EnumSetWritable<CreateFlag>(EnumSet
-              .of(CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE);
+              .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE);
       long end = System.currentTimeMillis();
       for(boolean written = !closeUponCreate; !written; 
         written = nameNode.complete(fileNames[daemonId][inputIdx],
@@ -967,7 +967,7 @@ public class NNThroughputBenchmark {
       for(int idx=0; idx < nrFiles; idx++) {
         String fileName = nameGenerator.getNextFileName("ThroughputBench");
         nameNode.create(fileName, FsPermission.getDefault(), clientName,
-            new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.OVERWRITE)), true, replication,
+            new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
             BLOCK_SIZE);
         Block lastBlock = addBlocks(fileName, clientName);
         nameNode.complete(fileName, clientName, lastBlock);
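
Both hunks in this file fix the same call: ClientProtocol#create with
EnumSet.of(CreateFlag.OVERWRITE) alone is rejected when the target file does
not exist yet, because OVERWRITE by itself only permits replacing an existing
file. Adding CreateFlag.CREATE covers both the first write and any later
overwrite. A minimal sketch of the corrected call, reusing the benchmark's
fields (src here is a hypothetical path variable standing in for the
generated file names):

    import java.util.EnumSet;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.io.EnumSetWritable;

    // CREATE: succeed when the file is absent; OVERWRITE: truncate it when
    // present. OVERWRITE alone throws for a file that does not exist.
    nameNode.create(src, FsPermission.getDefault(), clientName,
        new EnumSetWritable<CreateFlag>(
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)),
        true, replication, BLOCK_SIZE);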

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java?rev=1095253&r1=1095252&r2=1095253&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java Wed Apr 20 02:28:19 2011
@@ -76,7 +76,7 @@ public class TestBlocksWithNotEnoughRack
       String newRacks[] = {"/rack2"} ;
       cluster.startDataNodes(conf, 1, true, null, newRacks);
 
-      while ( (numRacks < 2) || (curReplicas < REPLICATION_FACTOR) ||
+      while ( (numRacks < 2) || (curReplicas != REPLICATION_FACTOR) ||
               (neededReplicationSize > 0) ) {
         LOG.info("Waiting for replication");
         Thread.sleep(600);
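
The switch from "<" to "!=" makes the loop also wait out transient
over-replication: after the /rack2 datanode joins, the block can briefly hold
more than REPLICATION_FACTOR replicas until the excess is pruned, and the old
condition would let the test proceed during that window. The exit condition,
restated as a sketch (the three values are refreshed from the namesystem on
each iteration, as in the test):

    // Done only when the block spans at least two racks with exactly the
    // target number of replicas and no replication work still queued.
    boolean done = numRacks >= 2
        && curReplicas == REPLICATION_FACTOR
        && neededReplicationSize == 0;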

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1095253&r1=1095252&r2=1095253&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Apr 20 02:28:19 2011
@@ -20,12 +20,15 @@ package org.apache.hadoop.hdfs.server.na
 import junit.framework.TestCase;
 import java.io.*;
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.*;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -143,12 +146,7 @@ public class TestEditLog extends TestCas
   
   private int testLoad(byte[] data, FSNamesystem namesys) throws IOException {
     FSEditLogLoader loader = new FSEditLogLoader(namesys);
-    EditLogInputStream mockStream = Mockito.mock(EditLogInputStream.class);
-    ByteArrayInputStream bais = new ByteArrayInputStream(data);
-    Mockito.doReturn(new DataInputStream(bais))
-      .when(mockStream).getDataInputStream();
-    
-    return loader.loadFSEdits(mockStream, 1);
+    return loader.loadFSEdits(new EditLogByteInputStream(data), 1);
   }
 
   /**
@@ -375,6 +373,39 @@ public class TestEditLog extends TestCas
     }
   }
   
+  public void testEditChecksum() throws Exception {
+    // start a cluster 
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
+    cluster.waitActive();
+    fileSys = cluster.getFileSystem();
+    final FSNamesystem namesystem = cluster.getNamesystem();
+
+    FSImage fsimage = namesystem.getFSImage();
+    final FSEditLog editLog = fsimage.getEditLog();
+    fileSys.mkdirs(new Path("/tmp"));
+    File editFile = editLog.getFsEditName();
+    editLog.close();
+    cluster.shutdown();
+    long fileLen = editFile.length();
+    System.out.println("File name: " + editFile + " len: " + fileLen);
+    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+    rwf.seek(fileLen-4); // seek to checksum bytes
+    int b = rwf.readInt();
+    rwf.seek(fileLen-4);
+    rwf.writeInt(b+1);
+    rwf.close();
+    
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
+      fail("should not be able to start");
+    } catch (ChecksumException e) {
+      // expected
+    }
+  }
+
   public void testGetValidLength() throws Exception {
     assertTrue(TEST_DIR.mkdirs() || TEST_DIR.exists());
 
@@ -420,4 +451,45 @@ public class TestEditLog extends TestCas
       file.delete();
     }
   }
+
+  private static class EditLogByteInputStream extends EditLogInputStream {
+    private InputStream input;
+    private long len;
+
+    public EditLogByteInputStream(byte[] data) {
+      len = data.length;
+      input = new ByteArrayInputStream(data);
+    }
+
+    public int available() throws IOException {
+      return input.available();
+    }
+    
+    public int read() throws IOException {
+      return input.read();
+    }
+    
+    public long length() throws IOException {
+      return len;
+    }
+    
+    public int read(byte[] b, int off, int len) throws IOException {
+      return input.read(b, off, len);
+    }
+
+    public void close() throws IOException {
+      input.close();
+    }
+
+    @Override // JournalStream
+    public String getName() {
+      return "AnonEditLogByteInputStream";
+    }
+
+    @Override // JournalStream
+    public JournalType getType() {
+      return JournalType.FILE;
+    }
+  }
+
 }
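
Two things change in this file. First, testLoad() now hands the loader a real
EditLogInputStream (the EditLogByteInputStream defined at the bottom of the
file) instead of a Mockito mock that only stubbed getDataInputStream();
presumably the checksum-aware loader now needs the full stream contract,
length() included, which the mock did not provide. Second, testEditChecksum()
verifies that a namenode refuses to start from an edits file whose trailing
checksum has been tampered with. A hypothetical helper distilling that
corruption step (java.io only; the name is invented for illustration):

    static void corruptTrailingChecksum(File editFile) throws IOException {
      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
      try {
        long len = rwf.length();
        rwf.seek(len - 4);          // the last 4 bytes hold the checksum
        int stored = rwf.readInt(); // readInt() advances the file pointer,
        rwf.seek(len - 4);          // so seek back before rewriting
        rwf.writeInt(stored + 1);   // any change invalidates the checksum
      } finally {
        rwf.close();
      }
    }

Reloading the corrupted file is then expected to fail with a
ChecksumException, which the test catches as success.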

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1095253&r1=1095252&r2=1095253&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Wed Apr 20 02:28:19 2011
@@ -33,9 +33,8 @@ import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.cli.CmdFactoryDFS;
-import org.apache.hadoop.cli.util.CLITestData;
-import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.cli.CLITestCmdDFS;
+import org.apache.hadoop.cli.util.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -350,10 +349,9 @@ public class TestStorageRestore extends 
 
       String cmd = "-fs NAMENODE -restoreFailedStorage false";
       String namenode = config.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
-      CommandExecutor executor = 
-        CmdFactoryDFS.getCommandExecutor(
-          new CLITestData.TestCmd(cmd, CLITestData.TestCmd.CommandType.DFSADMIN),
-          namenode);
+      CommandExecutor executor =
+          new CLITestCmdDFS(cmd, new CLICommandDFSAdmin()).getExecutor(namenode);
+
       executor.executeCommand(cmd);
       restore = fsi.getStorage().getRestoreFailedStorage();
       assertFalse("After set true call restore is " + restore, restore);

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1095253&r1=1095252&r2=1095253&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Wed Apr 20 02:28:19 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.junit.Assert.*;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 
@@ -120,4 +121,33 @@ public class TestINodeFile {
                                   0L, 0L, preferredBlockSize);
   }
 
+  @Test
+  public void testGetFullPathName() {
+    PermissionStatus perms = new PermissionStatus(
+      userName, null, FsPermission.getDefault());
+
+    replication = 3;
+    preferredBlockSize = 128*1024*1024;
+    INodeFile inf = new INodeFile(perms, null, replication,
+                                  0L, 0L, preferredBlockSize);
+    inf.setLocalName("f");
+
+    INodeDirectory root = new INodeDirectory(INodeDirectory.ROOT_NAME, perms);
+    INodeDirectory dir = new INodeDirectory("d", perms);
+
+    assertEquals("f", inf.getFullPathName());
+    assertEquals("", inf.getLocalParentDir());
+
+    dir.addChild(inf, false, false);
+    assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName());
+    assertEquals("d", inf.getLocalParentDir());
+    
+    root.addChild(dir, false, false);
+    assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName());
+    assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());
+
+    assertEquals(Path.SEPARATOR, root.getFullPathName());
+    assertEquals(Path.SEPARATOR, root.getLocalParentDir());
+    
+  }
 }
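
The new testGetFullPathName() pins down how the INode path accessors behave as
a file is attached to a directory tree. The assertions above reduce to this
summary (tree root -> d -> f, with Path.SEPARATOR being "/"):

    // Detached file "f":      getFullPathName() == "f",    getLocalParentDir() == ""
    // After d.addChild(f):    getFullPathName() == "d/f",  getLocalParentDir() == "d"
    // After root.addChild(d): getFullPathName() == "/d/f", dir full path == "/d"
    // The root itself:        getFullPathName() == "/",    getLocalParentDir() == "/"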

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Apr 20 02:28:19 2011
@@ -2,3 +2,4 @@
 /hadoop/core/trunk/src/webapps/datanode:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
+/hadoop/hdfs/trunk/src/webapps/datanode:1086482-1095244

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Apr 20 02:28:19 2011
@@ -2,3 +2,4 @@
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
+/hadoop/hdfs/trunk/src/webapps/hdfs:1086482-1095244

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Apr 20 02:28:19 2011
@@ -2,3 +2,4 @@
 /hadoop/core/trunk/src/webapps/secondary:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
+/hadoop/hdfs/trunk/src/webapps/secondary:1086482-1095244