Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/06/03 01:47:35 UTC
[03/50] [abbrv] hadoop git commit: HDFS-8482. Rename BlockInfoContiguous to BlockInfo. Contributed by Zhe Zhang.
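The rename is mechanical: call sites keep the same arguments and only the class name (and import) changes. A minimal before/after sketch, based on the newBlock helper updated in the hunks below; the blockId, numBytes, genStamp and replication values are illustrative placeholders, not part of this commit:

    // before HDFS-8482
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;

    BlockInfoContiguous bi =
        new BlockInfoContiguous(new Block(blockId, numBytes, genStamp), replication);

    // after HDFS-8482: same constructor arguments, new class name
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;

    BlockInfo bi =
        new BlockInfo(new Block(blockId, numBytes, genStamp), replication);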
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b88df72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
index f6b18e6..0d726e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
@@ -100,7 +100,7 @@ public class TestTruncateQuotaUpdate {
@Test
public void testTruncateWithSnapshotAndDivergence() {
INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
- BlockInfoContiguous[] blocks = new BlockInfoContiguous
+ BlockInfo[] blocks = new BlockInfo
[file.getBlocks().length];
System.arraycopy(file.getBlocks(), 0, blocks, 0, blocks.length);
addSnapshotFeature(file, blocks);
@@ -130,11 +130,11 @@ public class TestTruncateQuotaUpdate {
}
private INodeFile createMockFile(long size, short replication) {
- ArrayList<BlockInfoContiguous> blocks = new ArrayList<>();
+ ArrayList<BlockInfo> blocks = new ArrayList<>();
long createdSize = 0;
while (createdSize < size) {
long blockSize = Math.min(BLOCKSIZE, size - createdSize);
- BlockInfoContiguous bi = newBlock(blockSize, replication);
+ BlockInfo bi = newBlock(blockSize, replication);
blocks.add(bi);
createdSize += BLOCKSIZE;
}
@@ -142,16 +142,16 @@ public class TestTruncateQuotaUpdate {
.createImmutable((short) 0x1ff));
return new INodeFile(
++nextMockINodeId, new byte[0], perm, 0, 0,
- blocks.toArray(new BlockInfoContiguous[blocks.size()]), replication,
+ blocks.toArray(new BlockInfo[blocks.size()]), replication,
BLOCKSIZE);
}
- private BlockInfoContiguous newBlock(long size, short replication) {
+ private BlockInfo newBlock(long size, short replication) {
Block b = new Block(++nextMockBlockId, size, ++nextMockGenstamp);
- return new BlockInfoContiguous(b, replication);
+ return new BlockInfo(b, replication);
}
- private static void addSnapshotFeature(INodeFile file, BlockInfoContiguous[] blocks) {
+ private static void addSnapshotFeature(INodeFile file, BlockInfo[] blocks) {
FileDiff diff = mock(FileDiff.class);
when(diff.getBlocks()).thenReturn(blocks);
FileDiffList diffList = new FileDiffList();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b88df72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
index 11b19f3..a1abd08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
@@ -176,7 +176,7 @@ public class SnapshotTestHelper {
*
* Specific information for different types of INode:
* {@link INodeDirectory}:childrenSize
- * {@link INodeFile}: fileSize, block list. Check {@link BlockInfoContiguous#toString()}
+ * {@link INodeFile}: fileSize, block list. Check {@link BlockInfo#toString()}
* and {@link BlockInfoContiguousUnderConstruction#toString()} for detailed information.
* {@link FileWithSnapshot}: next link
* </pre>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b88df72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
index 8b9ebea..ac81488 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
import com.google.common.collect.Lists;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -49,8 +49,8 @@ public class TestFileWithSnapshotFeature {
FileDiff diff = mock(FileDiff.class);
BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
- BlockInfoContiguous[] blocks = new BlockInfoContiguous[] {
- new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1)
+ BlockInfo[] blocks = new BlockInfo[] {
+ new BlockInfo(new Block(1, BLOCK_SIZE, 1), REPL_1)
};
// No snapshot
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b88df72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
index 85072d1..7bffb33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -108,14 +108,14 @@ public class TestSnapshotBlocksMap {
final FSDirectory dir, final BlockManager blkManager) throws Exception {
final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
assertEquals(numBlocks, file.getBlocks().length);
- for(BlockInfoContiguous b : file.getBlocks()) {
+ for(BlockInfo b : file.getBlocks()) {
assertBlockCollection(blkManager, file, b);
}
return file;
}
static void assertBlockCollection(final BlockManager blkManager,
- final INodeFile file, final BlockInfoContiguous b) {
+ final INodeFile file, final BlockInfo b) {
Assert.assertSame(b, blkManager.getStoredBlock(b));
Assert.assertSame(file, blkManager.getBlockCollection(b));
Assert.assertSame(file, b.getBlockCollection());
@@ -146,10 +146,10 @@ public class TestSnapshotBlocksMap {
{
final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir,
blockmanager);
- BlockInfoContiguous[] blocks = f2.getBlocks();
+ BlockInfo[] blocks = f2.getBlocks();
hdfs.delete(sub2, true);
// The INode should have been removed from the blocksMap
- for(BlockInfoContiguous b : blocks) {
+ for(BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
}
@@ -177,7 +177,7 @@ public class TestSnapshotBlocksMap {
// Check the block information for file0
final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir,
blockmanager);
- BlockInfoContiguous[] blocks0 = f0.getBlocks();
+ BlockInfo[] blocks0 = f0.getBlocks();
// Also check the block information for snapshot of file0
Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
@@ -187,7 +187,7 @@ public class TestSnapshotBlocksMap {
// Delete file0
hdfs.delete(file0, true);
// Make sure the blocks of file0 is still in blocksMap
- for(BlockInfoContiguous b : blocks0) {
+ for(BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
@@ -201,7 +201,7 @@ public class TestSnapshotBlocksMap {
hdfs.deleteSnapshot(sub1, "s1");
// Make sure the first block of file0 is still in blocksMap
- for(BlockInfoContiguous b : blocks0) {
+ for(BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
@@ -293,7 +293,7 @@ public class TestSnapshotBlocksMap {
hdfs.append(bar);
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
- BlockInfoContiguous[] blks = barNode.getBlocks();
+ BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
@@ -331,7 +331,7 @@ public class TestSnapshotBlocksMap {
hdfs.append(bar);
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
- BlockInfoContiguous[] blks = barNode.getBlocks();
+ BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
cluster.getNameNodeRpc()
@@ -370,7 +370,7 @@ public class TestSnapshotBlocksMap {
hdfs.append(bar);
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
- BlockInfoContiguous[] blks = barNode.getBlocks();
+ BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
cluster.getNameNodeRpc()
@@ -421,7 +421,7 @@ public class TestSnapshotBlocksMap {
out.write(testData);
out.close();
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
- BlockInfoContiguous[] blks = barNode.getBlocks();
+ BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
assertEquals(testData.length, blks[0].getNumBytes());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b88df72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index cdd655e..139a37e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -262,12 +262,12 @@ public class TestSnapshotDeletion {
DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(
tempFile.toString(), 1, fsdir, blockmanager);
- BlockInfoContiguous[] blocks = temp.getBlocks();
+ BlockInfo[] blocks = temp.getBlocks();
hdfs.delete(tempDir, true);
// check dir's quota usage
checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
// check blocks of tempFile
- for (BlockInfoContiguous b : blocks) {
+ for (BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
@@ -344,7 +344,7 @@ public class TestSnapshotDeletion {
// while deletion, we add diff for subsub and metaChangeFile1, and remove
// newFile
checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
- for (BlockInfoContiguous b : blocks) {
+ for (BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
@@ -481,7 +481,7 @@ public class TestSnapshotDeletion {
final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
.assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
- BlockInfoContiguous[] blocks = toDeleteFileNode.getBlocks();
+ BlockInfo[] blocks = toDeleteFileNode.getBlocks();
// create snapshot s0 on dir
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
@@ -507,7 +507,7 @@ public class TestSnapshotDeletion {
// metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
// metaChangeFile's replication factor decreases
checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
- for (BlockInfoContiguous b : blocks) {
+ for (BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
@@ -801,7 +801,7 @@ public class TestSnapshotDeletion {
FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(
file14_s2.toString(), 1, fsdir, blockmanager);
- BlockInfoContiguous[] blocks_14 = file14Node.getBlocks();
+ BlockInfo[] blocks_14 = file14Node.getBlocks();
TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir,
blockmanager);
@@ -838,7 +838,7 @@ public class TestSnapshotDeletion {
modDirStr + "file15");
assertFalse(hdfs.exists(file14_s1));
assertFalse(hdfs.exists(file15_s1));
- for (BlockInfoContiguous b : blocks_14) {
+ for (BlockInfo b : blocks_14) {
assertNull(blockmanager.getBlockCollection(b));
}