You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by mo...@apache.org on 2011/07/24 03:43:06 UTC
svn commit: r1150247 - in /hadoop/common/trunk/hdfs: CHANGES.txt
src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
Author: molkov
Date: Sun Jul 24 01:43:05 2011
New Revision: 1150247
URL: http://svn.apache.org/viewvc?rev=1150247&view=rev
Log:
HDFS-1776. Bug in Concat code. Contributed by Bharath Mundlapudi
Modified:
hadoop/common/trunk/hdfs/CHANGES.txt
hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/common/trunk/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
Modified: hadoop/common/trunk/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/CHANGES.txt?rev=1150247&r1=1150246&r2=1150247&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hdfs/CHANGES.txt Sun Jul 24 01:43:05 2011
@@ -879,6 +879,8 @@ Trunk (unreleased changes)
HDFS-2114. re-commission of a decommissioned node does not delete
excess replicas. (John George via mattf)
+ HDFS-1776. Bug in Concat code. (Bharath Mundlapudi via Dmytro Molkov)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1150247&r1=1150246&r2=1150247&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Sun Jul 24 01:43:05 2011
@@ -127,7 +127,7 @@ public class INodeFile extends INode {
size += in.blocks.length;
}
- for(BlockInfo bi: this.blocks) {
+ for(BlockInfo bi: newlist) {
bi.setINode(this);
}
this.blocks = newlist;
Modified: hadoop/common/trunk/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1150247&r1=1150246&r2=1150247&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/trunk/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Sun Jul 24 01:43:05 2011
@@ -23,6 +23,7 @@ import static org.junit.Assert.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.junit.Test;
@@ -150,4 +151,56 @@ public class TestINodeFile {
assertEquals(Path.SEPARATOR, root.getLocalParentDir());
}
+
+ /**
+  * Verifies that appendBlocks() transfers block ownership during concat:
+  * after appending four single-block files onto a one-block file, the target
+  * must hold five blocks and every BlockInfo must point back to the target.
+  * Guards the HDFS-1776 regression fixed in this commit, where setINode()
+  * was applied to the pre-concat this.blocks list instead of newlist.
+  */
+ @Test
+ public void testAppendBlocks() {
+ INodeFile origFile = createINodeFiles(1, "origfile")[0];
+ assertEquals("Number of blocks didn't match", origFile.numBlocks(), 1L);
+
+ INodeFile[] appendFiles = createINodeFiles(4, "appendfile");
+ origFile.appendBlocks(appendFiles, getTotalBlocks(appendFiles));
+ assertEquals("Number of blocks didn't match", origFile.numBlocks(), 5L);
+
+ // Every block in the concatenated file must report origFile as its owner.
+ for(int i=0; i< origFile.numBlocks(); i++) {
+ assertSame("INodeFiles didn't Match", origFile, origFile.getBlocks()[i].getINode());
+ }
+ }
+
+ /**
+  * Sums the block counts of the given INode files.
+  * @param files Array of INode files (must be non-null with non-null entries)
+  * @return total count of blocks across all files
+  */
+ private int getTotalBlocks(INodeFile[] files) {
+ int nBlocks=0;
+ for(int i=0; i < files.length; i++) {
+ nBlocks += files[i].numBlocks();
+ }
+ return nBlocks;
+ }
+
+ /**
+  * Creates the required number of INodeFiles, each with exactly one block.
+  * @param nCount Number of INodes to create
+  * @param fileNamePrefix Prefix for each file's local name; the index is appended
+  * @return Array of {@code nCount} initialized INode files
+  */
+ private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
+ // NOTE(review): for nCount <= 0 this returns an array containing a single
+ // null element, not an empty array — callers would NPE on use. Looks like
+ // a latent bug; harmless here since tests only pass positive counts.
+ if(nCount <= 0)
+ return new INodeFile[1];
+
+ // replication and preferredBlockSize are fields of the enclosing test class.
+ replication = 3;
+ preferredBlockSize = 128 * 1024 * 1024;
+ INodeFile[] iNodes = new INodeFile[nCount];
+ for (int i = 0; i < nCount; i++) {
+ PermissionStatus perms = new PermissionStatus(userName, null,
+ FsPermission.getDefault());
+ iNodes[i] = new INodeFile(perms, null, replication, 0L, 0L,
+ preferredBlockSize);
+ iNodes[i].setLocalName(fileNamePrefix + Integer.toString(i));
+ BlockInfo newblock = new BlockInfo(replication);
+ iNodes[i].addBlock(newblock);
+ }
+
+ return iNodes;
+ }
}