You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/10/08 21:19:14 UTC

svn commit: r1395730 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/test/java/org/apache/hadoop/hdfs/server/datanode/ src/test/java/org/apache/hadoop/hdfs/server/namenode/ src/test/unit/org/apache/hadoop/hdfs/server/datan...

Author: todd
Date: Mon Oct  8 19:19:13 2012
New Revision: 1395730

URL: http://svn.apache.org/viewvc?rev=1395730&view=rev
Log:
HDFS-4007. Rehabilitate bit-rotted unit tests under hadoop-hdfs-project/hadoop-hdfs/src/test/unit/. Contributed by Colin Patrick McCabe.

Added:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
Removed:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1395730&r1=1395729&r2=1395730&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Oct  8 19:19:13 2012
@@ -42,6 +42,9 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-4008. TestBalancerWithEncryptedTransfer needs a timeout. (eli)
 
+    HDFS-4007. Rehabilitate bit-rotted unit tests under
+    hadoop-hdfs-project/hadoop-hdfs/src/test/unit/ (Colin Patrick McCabe via todd)
+
   OPTIMIZATIONS
 
   BUG FIXES 

Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java?rev=1395730&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java Mon Oct  8 19:19:13 2012
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.*;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+import static org.apache.hadoop.test.MockitoMaker.*;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+public class TestDataDirs {
+
+  /**
+   * Verifies DataNode.getDataDirsFromURIs: of three candidate dirs, the
+   * mocked FileStatus reports a bad permission for the third, so only one
+   * valid data dir should survive, with permissions fixed on the first two.
+   */
+  @Test public void testGetDataDirsFromURIs() throws Throwable {
+    File localDir = make(stub(File.class).returning(true).from.exists());
+    when(localDir.mkdir()).thenReturn(true);
+    FsPermission normalPerm = new FsPermission("700");
+    FsPermission badPerm = new FsPermission("000");
+    FileStatus stat = make(stub(FileStatus.class)
+        .returning(normalPerm, normalPerm, badPerm).from.getPermission());
+    when(stat.isDirectory()).thenReturn(true);
+    LocalFileSystem fs = make(stub(LocalFileSystem.class)
+        .returning(stat).from.getFileStatus(any(Path.class)));
+    when(fs.pathToFile(any(Path.class))).thenReturn(localDir);
+    Collection<URI> uris = Arrays.asList(new URI("file:/p1/"),
+        new URI("file:/p2/"), new URI("file:/p3/"));
+
+    List<File> dirs = DataNode.getDataDirsFromURIs(uris, fs, normalPerm);
+
+    verify(fs, times(2)).setPermission(any(Path.class), eq(normalPerm));
+    verify(fs, times(6)).getFileStatus(any(Path.class));
+    // JUnit's assertEquals takes (message, expected, actual) — expected
+    // value (1) must come before the computed actual value.
+    assertEquals("number of valid data dirs", 1, dirs.size());
+  }
+}

Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1395730&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Mon Oct  8 19:19:13 2012
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+
+import org.junit.Test;
+
+public class TestINodeFile {
+
+  // Lower 48 bits of the header hold the preferred block size; see INodeFile.
+  static final short BLOCKBITS = 48;
+  static final long BLKSIZE_MAXVALUE = ~(0xffffL << BLOCKBITS);
+
+  private String userName = "Test";
+  private short replication;
+  private long preferredBlockSize;
+
+  /**
+   * Test for the Replication value. Sets a value and checks if it was set
+   * correct.
+   */
+  @Test
+  public void testReplication () {
+    replication = 3;
+    preferredBlockSize = 128*1024*1024;
+    INodeFile inf = new INodeFile(new PermissionStatus(userName, null, 
+                                  FsPermission.getDefault()), null, replication,
+                                  0L, 0L, preferredBlockSize);
+    assertEquals("True has to be returned in this case", replication,
+                 inf.getReplication());
+  }
+
+  /**
+   * IllegalArgumentException is expected for setting below lower bound
+   * for Replication.
+   * @throws IllegalArgumentException as the result
+   */
+  @Test(expected=IllegalArgumentException.class)
+  public void testReplicationBelowLowerBound ()
+              throws IllegalArgumentException {
+    replication = -1;
+    preferredBlockSize = 128*1024*1024;
+    new INodeFile(new PermissionStatus(userName, null,
+                                  FsPermission.getDefault()), null, replication,
+                                  0L, 0L, preferredBlockSize);
+  }
+
+  /**
+   * Test for the PreferredBlockSize value. Sets a value and checks if it was
+   * set correct.
+   */
+  @Test
+  public void testPreferredBlockSize () {
+    replication = 3;
+    preferredBlockSize = 128*1024*1024;
+    INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
+                                  FsPermission.getDefault()), null, replication,
+                                  0L, 0L, preferredBlockSize);
+    assertEquals("True has to be returned in this case", preferredBlockSize,
+           inf.getPreferredBlockSize());
+  }
+
+  /** Preferred block size at the maximum encodable value must round-trip. */
+  @Test
+  public void testPreferredBlockSizeUpperBound () {
+    replication = 3;
+    preferredBlockSize = BLKSIZE_MAXVALUE;
+    INodeFile inf = new INodeFile(new PermissionStatus(userName, null, 
+                                  FsPermission.getDefault()), null, replication,
+                                  0L, 0L, preferredBlockSize);
+    assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
+                 inf.getPreferredBlockSize());
+  }
+
+  /**
+   * IllegalArgumentException is expected for setting below lower bound
+   * for PreferredBlockSize.
+   * @throws IllegalArgumentException as the result
+   */
+  @Test(expected=IllegalArgumentException.class)
+  public void testPreferredBlockSizeBelowLowerBound ()
+              throws IllegalArgumentException {
+    replication = 3;
+    preferredBlockSize = -1;
+    new INodeFile(new PermissionStatus(userName, null, 
+                                  FsPermission.getDefault()), null, replication,
+                                  0L, 0L, preferredBlockSize);
+  } 
+
+  /**
+   * IllegalArgumentException is expected for setting above upper bound
+   * for PreferredBlockSize.
+   * @throws IllegalArgumentException as the result
+   */
+  @Test(expected=IllegalArgumentException.class)
+  public void testPreferredBlockSizeAboveUpperBound ()
+              throws IllegalArgumentException {
+    replication = 3;
+    preferredBlockSize = BLKSIZE_MAXVALUE+1;
+    new INodeFile(new PermissionStatus(userName, null, 
+                                  FsPermission.getDefault()), null, replication,
+                                  0L, 0L, preferredBlockSize);
+  }
+
+  /** Full path name must reflect the inode's position as it is attached
+   *  to parent directories. */
+  @Test
+  public void testGetFullPathName() {
+    PermissionStatus perms = new PermissionStatus(
+      userName, null, FsPermission.getDefault());
+
+    replication = 3;
+    preferredBlockSize = 128*1024*1024;
+    INodeFile inf = new INodeFile(perms, null, replication,
+                                  0L, 0L, preferredBlockSize);
+    inf.setLocalName("f");
+
+    INodeDirectory root = new INodeDirectory(INodeDirectory.ROOT_NAME, perms);
+    INodeDirectory dir = new INodeDirectory("d", perms);
+
+    assertEquals("f", inf.getFullPathName());
+    assertEquals("", inf.getLocalParentDir());
+
+    dir.addChild(inf, false);
+    assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName());
+    assertEquals("d", inf.getLocalParentDir());
+    
+    root.addChild(dir, false);
+    assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName());
+    assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());
+
+    assertEquals(Path.SEPARATOR, root.getFullPathName());
+    assertEquals(Path.SEPARATOR, root.getLocalParentDir());
+    
+  }
+  
+  /** Appending four one-block files to a one-block file yields five blocks. */
+  @Test
+  public void testAppendBlocks() {
+    INodeFile origFile = createINodeFiles(1, "origfile")[0];
+    // assertEquals takes (message, expected, actual): expected count first.
+    assertEquals("Number of blocks didn't match", 1L, origFile.numBlocks());
+
+    INodeFile[] appendFiles =   createINodeFiles(4, "appendfile");
+    origFile.appendBlocks(appendFiles, getTotalBlocks(appendFiles));
+    assertEquals("Number of blocks didn't match", 5L, origFile.numBlocks());
+  }
+
+  /** 
+   * Gives the count of blocks for a given number of files
+   * @param files Array of INode files
+   * @return total count of blocks
+   */
+  private int getTotalBlocks(INodeFile[] files) {
+    int nBlocks=0;
+    for(int i=0; i < files.length; i++) {
+       nBlocks += files[i].numBlocks();
+    }
+    return nBlocks;
+  }
+  
+  /** 
+   * Creates the required number of files with one block each
+   * @param nCount Number of INodes to create
+   * @return Array of INode files
+   */
+  private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
+    if(nCount <= 0)
+      return new INodeFile[1];
+
+    replication = 3;
+    preferredBlockSize = 128 * 1024 * 1024;
+    INodeFile[] iNodes = new INodeFile[nCount];
+    for (int i = 0; i < nCount; i++) {
+      PermissionStatus perms = new PermissionStatus(userName, null,
+          FsPermission.getDefault());
+      iNodes[i] = new INodeFile(perms, null, replication, 0L, 0L,
+          preferredBlockSize);
+      iNodes[i].setLocalName(fileNamePrefix +  Integer.toString(i));
+      BlockInfo newblock = new BlockInfo(replication);
+      iNodes[i].addBlock(newblock);
+    }
+    
+    return iNodes;
+  }
+}