Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/01/13 16:33:40 UTC
[11/14] hadoop git commit: HDFS-7056. Snapshot support for truncate.
Contributed by Konstantin Shvachko and Plamen Jeliazkov.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 16f534f..e348231 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -21,6 +21,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -154,18 +155,19 @@ public class FileWithSnapshotFeature implements INode.Feature {
AclStorage.removeAclFeature(aclFeature);
}
}
-
- collectBlocksAndClear(file, collectedBlocks, removedINodes);
-
+
+ getDiffs().combineAndCollectSnapshotBlocks(
+ file, removed, collectedBlocks, removedINodes);
+
long dsDelta = oldDiskspace - file.diskspaceConsumed();
return Quota.Counts.newInstance(0, dsDelta);
}
-
+
/**
* If some blocks at the end of the block list no longer belong to
* any inode, collect them and update the block list.
*/
- private void collectBlocksAndClear(final INodeFile file,
+ public void collectBlocksAndClear(final INodeFile file,
final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
// check if everything is deleted.
if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
@@ -174,13 +176,19 @@ public class FileWithSnapshotFeature implements INode.Feature {
}
// find max file size.
final long max;
+ FileDiff diff = getDiffs().getLast();
if (isCurrentFileDeleted()) {
- final FileDiff last = getDiffs().getLast();
- max = last == null? 0: last.getFileSize();
+ max = diff == null? 0: diff.getFileSize();
} else {
max = file.computeFileSize();
}
- file.collectBlocksBeyondMax(max, info);
+ // Collect blocks that should be deleted
+ FileDiff last = diffs.getLast();
+ BlockInfo[] snapshotBlocks = last == null ? null : last.getBlocks();
+ if(snapshotBlocks == null)
+ file.collectBlocksBeyondMax(max, info);
+ else
+ file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
}
}
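The heart of the change above is the new dispatch in collectBlocksAndClear: when the last FileDiff saved a block list for a snapshot, truncation must keep exactly those blocks, and only when no snapshot recorded blocks does the old length cutoff apply. A minimal, self-contained sketch of that rule follows; SimpleDiff and SimpleFile are simplified stand-ins for FileDiff and INodeFile (the real collection works on byte lengths and BlockInfo objects, while this sketch counts whole blocks):

    import java.util.ArrayList;
    import java.util.List;

    class SimpleDiff {
      final long[] snapshotBlocks;   // block ids saved by a snapshot, or null
      SimpleDiff(long[] blocks) { this.snapshotBlocks = blocks; }
    }

    class SimpleFile {
      final List<Long> blockIds = new ArrayList<>();

      // Length-based trim: collect and drop every block past keepCount.
      List<Long> collectBeyondMax(int keepCount) {
        List<Long> tail = blockIds.subList(keepCount, blockIds.size());
        List<Long> collected = new ArrayList<>(tail);
        tail.clear();                // update the block list in place
        return collected;
      }

      // Snapshot-based trim: keep exactly the blocks the snapshot recorded.
      List<Long> collectBeyondSnapshot(long[] snapshotBlocks) {
        return collectBeyondMax(snapshotBlocks.length);
      }
    }

    class CollectBlocksSketch {
      // Mirrors the dispatch in collectBlocksAndClear: prefer the last
      // diff's saved blocks; fall back to the plain length cutoff.
      static List<Long> collect(SimpleFile file, SimpleDiff last, int keepCount) {
        long[] snapshotBlocks = (last == null) ? null : last.snapshotBlocks;
        return (snapshotBlocks == null)
            ? file.collectBeyondMax(keepCount)
            : file.collectBeyondSnapshot(snapshotBlocks);
      }
    }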
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
index c512038..ced3296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -53,8 +54,8 @@ public class BlockRecoveryCommand extends DatanodeCommand {
@InterfaceAudience.Private
@InterfaceStability.Evolving
public static class RecoveringBlock extends LocatedBlock {
- private boolean truncate;
private final long newGenerationStamp;
+ private final Block recoveryBlock;
/**
* Create RecoveringBlock.
@@ -62,15 +63,17 @@ public class BlockRecoveryCommand extends DatanodeCommand {
public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS) {
super(b, locs, -1, false); // startOffset is unknown
this.newGenerationStamp = newGS;
+ this.recoveryBlock = null;
}
/**
- * RecoveryingBlock with truncate option.
+ * Create RecoveringBlock with copy-on-truncate option.
*/
- public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS,
- boolean truncate) {
- this(b, locs, newGS);
- this.truncate = truncate;
+ public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs,
+ Block recoveryBlock) {
+ super(b, locs, -1, false); // startOffset is unknown
+ this.newGenerationStamp = recoveryBlock.getGenerationStamp();
+ this.recoveryBlock = recoveryBlock;
}
/**
@@ -82,10 +85,10 @@ public class BlockRecoveryCommand extends DatanodeCommand {
}
/**
- * Return whether to truncate the block to the ExtendedBlock's length.
+ * Return the new block.
*/
- public boolean getTruncateFlag() {
- return truncate;
+ public Block getNewBlock() {
+ return recoveryBlock;
}
}
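The rework above replaces RecoveringBlock's boolean truncate flag with an optional recovery Block: a null recoveryBlock means ordinary recovery, a non-null one means copy-on-truncate, and the new generation stamp then comes from that block. A self-contained sketch of the convention; MiniBlock and MiniRecoveringBlock are illustrative stand-ins, not the real classes:

    class MiniBlock {
      final long blockId;
      final long genStamp;
      MiniBlock(long blockId, long genStamp) {
        this.blockId = blockId;
        this.genStamp = genStamp;
      }
    }

    class MiniRecoveringBlock {
      final long newGenerationStamp;
      final MiniBlock recoveryBlock;   // null => plain recovery

      // Plain recovery: caller supplies the new genstamp directly.
      MiniRecoveringBlock(long newGS) {
        this.newGenerationStamp = newGS;
        this.recoveryBlock = null;
      }

      // Copy-on-truncate: the genstamp is taken from the new block,
      // mirroring the second constructor in the diff above.
      MiniRecoveringBlock(MiniBlock recoveryBlock) {
        this.newGenerationStamp = recoveryBlock.genStamp;
        this.recoveryBlock = recoveryBlock;
      }

      boolean isCopyOnTruncate() {
        return recoveryBlock != null;  // presence, not a flag, selects the mode
      }
    }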
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
index 62915b4..72cb0c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
@@ -67,5 +67,6 @@ public interface InterDatanodeProtocol {
* Update replica with the new generation stamp and length.
*/
String updateReplicaUnderRecovery(ExtendedBlock oldBlock, long recoveryId,
- long newLength) throws IOException;
+ long newBlockId, long newLength)
+ throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
index 47f79be..1a21777 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
@@ -59,6 +59,8 @@ message UpdateReplicaUnderRecoveryRequestProto {
required ExtendedBlockProto block = 1; // Block identifier
required uint64 recoveryId = 2; // New genstamp of the replica
required uint64 newLength = 3; // New length of the replica
+ // New blockId for copy (truncate), default is 0.
+ optional uint64 newBlockId = 4 [default = 0];
}
/**
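Because newBlockId is optional with default 0, a request from an older client deserializes with the field at 0 and is read as "no block copy". A hedged sketch of how a receiver can fold that default back into the existing block id (the field name matches the proto above; the resolve helper itself is illustrative):

    final class NewBlockIdSketch {
      // 0 is the proto default, assumed to mean "recover in place,
      // keep the old block id"; anything else names the copy target.
      static long resolveTargetBlockId(long oldBlockId, long newBlockIdField) {
        return (newBlockIdField == 0) ? oldBlockId : newBlockIdField;
      }

      public static void main(String[] args) {
        // Old client, field unset: recovery stays on block 1001.
        System.out.println(resolveTargetBlockId(1001L, 0L));    // 1001
        // Copy-on-truncate: recovery moves to the new block 2002.
        System.out.println(resolveTargetBlockId(1001L, 2002L)); // 2002
      }
    }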
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index 588f6c8..643a034 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -270,6 +270,7 @@ message SnapshotDiffSection {
optional uint64 fileSize = 2;
optional bytes name = 3;
optional INodeSection.INodeFile snapshotCopy = 4;
+ repeated BlockProto blocks = 5;
}
message DiffEntry {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index d989c0a..97906b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -554,9 +554,9 @@ enum ReplicaStateProto {
* Block that needs to be recovered at a given location
*/
message RecoveringBlockProto {
- required uint64 newGenStamp = 1; // New genstamp post recovery
- required LocatedBlockProto block = 2; // Block to be recovered
- optional bool truncateFlag = 3; // Block needs to be truncated
+ required uint64 newGenStamp = 1; // New genstamp post recovery
+ required LocatedBlockProto block = 2; // Block to be recovered
+ optional BlockProto truncateBlock = 3; // New block for recovery (truncate)
}
/**
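RecoveringBlockProto makes the same move at the message level: the boolean truncateFlag gives way to an optional truncateBlock, so field presence selects the recovery mode. A small self-contained sketch of the conversion convention; MiniProto stands in for the generated class, whose real accessors would follow the usual protobuf hasTruncateBlock()/getTruncateBlock() naming:

    class MiniProto {
      final long newGenStamp;
      final Long truncateBlockId;    // null when the optional field is absent
      MiniProto(long newGenStamp, Long truncateBlockId) {
        this.newGenStamp = newGenStamp;
        this.truncateBlockId = truncateBlockId;
      }
      boolean hasTruncateBlock() { return truncateBlockId != null; }
    }

    class RecoveringBlockConvertSketch {
      // Presence of truncateBlock picks the copy-on-truncate constructor;
      // absence picks plain recovery (compare RecoveringBlock above).
      static String describe(MiniProto p) {
        return p.hasTruncateBlock()
            ? "copy-on-truncate to block " + p.truncateBlockId
            : "plain recovery to genstamp " + p.newGenStamp;
      }
    }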
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index e4834d6..7a4960e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1239,7 +1239,7 @@ public class TestReplicationPolicy {
when(mbc.setLastBlock((BlockInfo) any(), (DatanodeStorageInfo[]) any()))
.thenReturn(ucBlock);
- bm.convertLastBlockToUnderConstruction(mbc);
+ bm.convertLastBlockToUnderConstruction(mbc, 0L);
// Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
// from QUEUE_VERY_UNDER_REPLICATED.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index e03b756..78eedf9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -1106,6 +1106,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FsDatasetSpi
public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
long recoveryId,
+ long newBlockId,
long newlength) {
// Caller does not care about the exact Storage UUID returned.
return datanodeUuid;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 987b480..9bf5e52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -219,10 +218,10 @@ public class TestBlockRecovery {
syncList.add(record1);
syncList.add(record2);
- when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
- anyLong())).thenReturn("storage1");
- when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
- anyLong())).thenReturn("storage2");
+ when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
+ anyLong(), anyLong())).thenReturn("storage1");
+ when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
+ anyLong(), anyLong())).thenReturn("storage2");
dn.syncBlock(rBlock, syncList);
}
@@ -245,8 +244,10 @@ public class TestBlockRecovery {
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
- verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
- verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+ verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+ REPLICA_LEN1);
+ verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+ REPLICA_LEN1);
// two finalized replicas have different length
replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
@@ -284,8 +285,10 @@ public class TestBlockRecovery {
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
- verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
- verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+ verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+ REPLICA_LEN1);
+ verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+ REPLICA_LEN1);
// rbw replica has a different length from the finalized one
replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
@@ -297,9 +300,10 @@ public class TestBlockRecovery {
dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
- verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+ verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+ REPLICA_LEN1);
verify(dn2, never()).updateReplicaUnderRecovery(
- block, RECOVERY_ID, REPLICA_LEN1);
+ block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
/**
@@ -323,9 +327,10 @@ public class TestBlockRecovery {
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
- verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+ verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+ REPLICA_LEN1);
verify(dn2, never()).updateReplicaUnderRecovery(
- block, RECOVERY_ID, REPLICA_LEN1);
+ block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
// rbw replica has a different length from the finalized one
replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
@@ -337,9 +342,10 @@ public class TestBlockRecovery {
dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
- verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+ verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+ REPLICA_LEN1);
verify(dn2, never()).updateReplicaUnderRecovery(
- block, RECOVERY_ID, REPLICA_LEN1);
+ block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
/**
@@ -362,8 +368,8 @@ public class TestBlockRecovery {
long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
- verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
- verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
+ verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
+ verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
/**
@@ -385,9 +391,9 @@ public class TestBlockRecovery {
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
- verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+ verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
verify(dn2, never()).updateReplicaUnderRecovery(
- block, RECOVERY_ID, REPLICA_LEN1);
+ block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
/**
@@ -411,8 +417,8 @@ public class TestBlockRecovery {
long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
- verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
- verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
+ verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
+ verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
@@ -513,7 +519,7 @@ public class TestBlockRecovery {
}
DataNode spyDN = spy(dn);
doThrow(new IOException()).when(spyDN).updateReplicaUnderRecovery(
- block, RECOVERY_ID, block.getNumBytes());
+ block, RECOVERY_ID, BLOCK_ID, block.getNumBytes());
try {
spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
fail("Sync should fail");
@@ -634,7 +640,8 @@ public class TestBlockRecovery {
recoveryInitResult.get());
dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock()
- .getGenerationStamp() + 1, block.getBlockSize());
+ .getGenerationStamp() + 1, block.getBlock().getBlockId(),
+ block.getBlockSize());
} finally {
if (null != cluster) {
cluster.shutdown();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
index 65a5176..3609684 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
@@ -198,7 +198,8 @@ public class TestInterDatanodeProtocol {
//verify updateBlock
ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
- idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
+ idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(),
+ newblock.getNumBytes());
checkMetaInfo(newblock, datanode);
// Verify correct null response trying to init recovery for a missing block
@@ -368,7 +369,8 @@ public class TestInterDatanodeProtocol {
.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
try {
//update should fail
- fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
+ fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
+ tmp.getBlockId(), newlength);
Assert.fail();
} catch(IOException ioe) {
System.out.println("GOOD: getting " + ioe);
@@ -377,7 +379,8 @@ public class TestInterDatanodeProtocol {
//update
final String storageID = fsdataset.updateReplicaUnderRecovery(
- new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
+ new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
+ rri.getBlockId(), newlength);
assertTrue(storageID != null);
} finally {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index d0502b3..eae65cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -71,6 +71,7 @@ public class TestCommitBlockSynchronization {
doReturn(true).when(file).isUnderConstruction();
doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
+ doReturn(blockInfo).when(file).getLastBlock();
doReturn("").when(namesystemSpy).closeFileCommitBlocks(
any(INodeFile.class), any(BlockInfo.class));
doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
@@ -105,6 +106,7 @@ public class TestCommitBlockSynchronization {
completedBlockInfo.setGenerationStamp(genStamp);
doReturn(completedBlockInfo).when(namesystemSpy)
.getStoredBlock(any(Block.class));
+ doReturn(completedBlockInfo).when(file).getLastBlock();
// Repeat the call to make sure it does not throw
namesystemSpy.commitBlockSynchronization(
@@ -176,6 +178,7 @@ public class TestCommitBlockSynchronization {
completedBlockInfo.setGenerationStamp(genStamp);
doReturn(completedBlockInfo).when(namesystemSpy)
.getStoredBlock(any(Block.class));
+ doReturn(completedBlockInfo).when(file).getLastBlock();
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, true, false, newTargets, null);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index ba9d04e..1f854d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -18,14 +18,22 @@
package org.apache.hadoop.hdfs.server.namenode;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
-import java.net.InetAddress;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -39,14 +47,14 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -57,6 +65,7 @@ public class TestFileTruncate {
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.ALL);
}
+ static final Log LOG = LogFactory.getLog(TestFileTruncate.class);
static final int BLOCK_SIZE = 4;
static final short REPLICATION = 3;
static final int DATANODE_NUM = 3;
@@ -129,6 +138,287 @@ public class TestFileTruncate {
fs.delete(parent, true);
}
+ @Test
+ public void testSnapshotWithAppendTruncate() throws IOException {
+ testSnapshotWithAppendTruncate(0, 1, 2);
+ testSnapshotWithAppendTruncate(0, 2, 1);
+ testSnapshotWithAppendTruncate(1, 0, 2);
+ testSnapshotWithAppendTruncate(1, 2, 0);
+ testSnapshotWithAppendTruncate(2, 0, 1);
+ testSnapshotWithAppendTruncate(2, 1, 0);
+ }
+
+ /**
+ * Create three snapshots of an appended and truncated file.
+ * Delete snapshots in the specified order and verify that
+ * remaining snapshots are still readable.
+ */
+ void testSnapshotWithAppendTruncate(int ... deleteOrder) throws IOException {
+ FSDirectory fsDir = cluster.getNamesystem().getFSDirectory();
+ Path parent = new Path("/test");
+ fs.mkdirs(parent);
+ fs.setQuota(parent, 100, 1000);
+ fs.allowSnapshot(parent);
+ String truncateFile = "testSnapshotWithAppendTruncate";
+ final Path src = new Path(parent, truncateFile);
+ int[] length = new int[4];
+ length[0] = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
+ DFSTestUtil.createFile(fs, src, 64, length[0], BLOCK_SIZE, REPLICATION, 0L);
+ Block firstBlk = getLocatedBlocks(src).get(0).getBlock().getLocalBlock();
+ Path[] snapshotFiles = new Path[4];
+
+ // Diskspace consumed should be 10 bytes * 3. [blk 1,2,3]
+ ContentSummary contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(30L));
+
+ // Add file to snapshot and append
+ String[] ss = new String[] {"ss0", "ss1", "ss2", "ss3"};
+ Path snapshotDir = fs.createSnapshot(parent, ss[0]);
+ snapshotFiles[0] = new Path(snapshotDir, truncateFile);
+ length[1] = length[2] = length[0] + BLOCK_SIZE + 1;
+ DFSTestUtil.appendFile(fs, src, BLOCK_SIZE + 1);
+ Block lastBlk = getLocatedBlocks(src).getLastLocatedBlock()
+ .getBlock().getLocalBlock();
+
+ // Diskspace consumed should be 15 bytes * 3. [blk 1,2,3,4]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(45L));
+
+ // Create another snapshot without changes
+ snapshotDir = fs.createSnapshot(parent, ss[1]);
+ snapshotFiles[1] = new Path(snapshotDir, truncateFile);
+
+ // Create another snapshot and append
+ snapshotDir = fs.createSnapshot(parent, ss[2]);
+ snapshotFiles[2] = new Path(snapshotDir, truncateFile);
+ DFSTestUtil.appendFile(fs, src, BLOCK_SIZE - 1 + BLOCK_SIZE / 2);
+ Block appendedBlk = getLocatedBlocks(src).getLastLocatedBlock()
+ .getBlock().getLocalBlock();
+
+ // Diskspace consumed should be 20 bytes * 3. [blk 1,2,3,4,5]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(60L));
+
+ // Truncate to block boundary
+ int newLength = length[0] + BLOCK_SIZE / 2;
+ boolean isReady = fs.truncate(src, newLength);
+ assertTrue("Recovery is not expected.", isReady);
+ assertFileLength(snapshotFiles[2], length[2]);
+ assertFileLength(snapshotFiles[1], length[1]);
+ assertFileLength(snapshotFiles[0], length[0]);
+ assertBlockNotPresent(appendedBlk);
+
+ // Diskspace consumed should be 16 bytes * 3. [blk 1,2,3 SS:4]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(48L));
+
+ // Truncate full block again
+ newLength = length[0] - BLOCK_SIZE / 2;
+ isReady = fs.truncate(src, newLength);
+ assertTrue("Recovery is not expected.", isReady);
+ assertFileLength(snapshotFiles[2], length[2]);
+ assertFileLength(snapshotFiles[1], length[1]);
+ assertFileLength(snapshotFiles[0], length[0]);
+
+ // Diskspace consumed should be 16 bytes * 3. [blk 1,2 SS:3,4]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(48L));
+
+ // Truncate half of the last block
+ newLength -= BLOCK_SIZE / 2;
+ isReady = fs.truncate(src, newLength);
+ assertFalse("Recovery is expected.", isReady);
+ checkBlockRecovery(src);
+ assertFileLength(snapshotFiles[2], length[2]);
+ assertFileLength(snapshotFiles[1], length[1]);
+ assertFileLength(snapshotFiles[0], length[0]);
+ Block replacedBlk = getLocatedBlocks(src).getLastLocatedBlock()
+ .getBlock().getLocalBlock();
+
+ // Diskspace consumed should be 18 bytes * 3. [blk 1,6 SS:2,3,4]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(54L));
+
+ snapshotDir = fs.createSnapshot(parent, ss[3]);
+ snapshotFiles[3] = new Path(snapshotDir, truncateFile);
+ length[3] = newLength;
+
+ // Delete file. Should still be able to read snapshots
+ int numINodes = fsDir.getInodeMapSize();
+ isReady = fs.delete(src, false);
+ assertTrue("Delete failed.", isReady);
+ assertFileLength(snapshotFiles[3], length[3]);
+ assertFileLength(snapshotFiles[2], length[2]);
+ assertFileLength(snapshotFiles[1], length[1]);
+ assertFileLength(snapshotFiles[0], length[0]);
+ assertEquals("Number of INodes should not change",
+ numINodes, fsDir.getInodeMapSize());
+
+ fs.deleteSnapshot(parent, ss[3]);
+
+ assertBlockExists(firstBlk);
+ assertBlockExists(lastBlk);
+ assertBlockNotPresent(replacedBlk);
+
+ // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(48L));
+
+ // delete snapshots in the specified order
+ fs.deleteSnapshot(parent, ss[deleteOrder[0]]);
+ assertFileLength(snapshotFiles[deleteOrder[1]], length[deleteOrder[1]]);
+ assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
+ assertBlockExists(firstBlk);
+ assertBlockExists(lastBlk);
+ assertEquals("Number of INodes should not change",
+ numINodes, fsDir.getInodeMapSize());
+
+ // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(48L));
+
+ fs.deleteSnapshot(parent, ss[deleteOrder[1]]);
+ assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
+ assertBlockExists(firstBlk);
+ contentSummary = fs.getContentSummary(parent);
+ if(fs.exists(snapshotFiles[0])) {
+ // Diskspace consumed should be 12 bytes * 3. [SS:1,2,3]
+ assertBlockNotPresent(lastBlk);
+ assertThat(contentSummary.getSpaceConsumed(), is(36L));
+ } else {
+ // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
+ assertThat(contentSummary.getSpaceConsumed(), is(48L));
+ }
+ assertEquals("Number of INodes should not change",
+ numINodes, fsDir.getInodeMapSize());
+
+ fs.deleteSnapshot(parent, ss[deleteOrder[2]]);
+ assertBlockNotPresent(firstBlk);
+ assertBlockNotPresent(lastBlk);
+
+ // Diskspace consumed should be 0 bytes * 3. []
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(0L));
+ assertNotEquals("Number of INodes should change",
+ numINodes, fsDir.getInodeMapSize());
+ }
+
+ /**
+ * Create three snapshots of a file truncated 3 times.
+ * Delete snapshots in the specified order and verify that
+ * remaining snapshots are still readable.
+ */
+ @Test
+ public void testSnapshotWithTruncates() throws IOException {
+ testSnapshotWithTruncates(0, 1, 2);
+ testSnapshotWithTruncates(0, 2, 1);
+ testSnapshotWithTruncates(1, 0, 2);
+ testSnapshotWithTruncates(1, 2, 0);
+ testSnapshotWithTruncates(2, 0, 1);
+ testSnapshotWithTruncates(2, 1, 0);
+ }
+
+ void testSnapshotWithTruncates(int ... deleteOrder) throws IOException {
+ Path parent = new Path("/test");
+ fs.mkdirs(parent);
+ fs.setQuota(parent, 100, 1000);
+ fs.allowSnapshot(parent);
+ String truncateFile = "testSnapshotWithTruncates";
+ final Path src = new Path(parent, truncateFile);
+ int[] length = new int[3];
+ length[0] = 3 * BLOCK_SIZE;
+ DFSTestUtil.createFile(fs, src, 64, length[0], BLOCK_SIZE, REPLICATION, 0L);
+ Block firstBlk = getLocatedBlocks(src).get(0).getBlock().getLocalBlock();
+ Block lastBlk = getLocatedBlocks(src).getLastLocatedBlock()
+ .getBlock().getLocalBlock();
+ Path[] snapshotFiles = new Path[3];
+
+ // Diskspace consumed should be 12 bytes * 3. [blk 1,2,3]
+ ContentSummary contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(36L));
+
+ // Add file to snapshot and append
+ String[] ss = new String[] {"ss0", "ss1", "ss2"};
+ Path snapshotDir = fs.createSnapshot(parent, ss[0]);
+ snapshotFiles[0] = new Path(snapshotDir, truncateFile);
+ length[1] = 2 * BLOCK_SIZE;
+ boolean isReady = fs.truncate(src, 2 * BLOCK_SIZE);
+ assertTrue("Recovery is not expected.", isReady);
+
+ // Diskspace consumed should be 12 bytes * 3. [blk 1,2 SS:3]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(36L));
+ snapshotDir = fs.createSnapshot(parent, ss[1]);
+ snapshotFiles[1] = new Path(snapshotDir, truncateFile);
+
+ // Create another snapshot with truncate
+ length[2] = BLOCK_SIZE + BLOCK_SIZE / 2;
+ isReady = fs.truncate(src, BLOCK_SIZE + BLOCK_SIZE / 2);
+ assertFalse("Recovery is expected.", isReady);
+ checkBlockRecovery(src);
+ snapshotDir = fs.createSnapshot(parent, ss[2]);
+ snapshotFiles[2] = new Path(snapshotDir, truncateFile);
+ assertFileLength(snapshotFiles[0], length[0]);
+ assertBlockExists(lastBlk);
+
+ // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(42L));
+
+ fs.deleteSnapshot(parent, ss[deleteOrder[0]]);
+ assertFileLength(snapshotFiles[deleteOrder[1]], length[deleteOrder[1]]);
+ assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
+ assertFileLength(src, length[2]);
+ assertBlockExists(firstBlk);
+
+ contentSummary = fs.getContentSummary(parent);
+ if(fs.exists(snapshotFiles[0])) {
+ // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
+ assertThat(contentSummary.getSpaceConsumed(), is(42L));
+ assertBlockExists(lastBlk);
+ } else {
+ // Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2]
+ assertThat(contentSummary.getSpaceConsumed(), is(30L));
+ assertBlockNotPresent(lastBlk);
+ }
+
+ fs.deleteSnapshot(parent, ss[deleteOrder[1]]);
+ assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
+ assertFileLength(src, length[2]);
+ assertBlockExists(firstBlk);
+
+ contentSummary = fs.getContentSummary(parent);
+ if(fs.exists(snapshotFiles[0])) {
+ // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
+ assertThat(contentSummary.getSpaceConsumed(), is(42L));
+ assertBlockExists(lastBlk);
+ } else if(fs.exists(snapshotFiles[1])) {
+ // Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2]
+ assertThat(contentSummary.getSpaceConsumed(), is(30L));
+ assertBlockNotPresent(lastBlk);
+ } else {
+ // Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:]
+ assertThat(contentSummary.getSpaceConsumed(), is(18L));
+ assertBlockNotPresent(lastBlk);
+ }
+
+ fs.deleteSnapshot(parent, ss[deleteOrder[2]]);
+ assertFileLength(src, length[2]);
+ assertBlockExists(firstBlk);
+
+ // Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:]
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(18L));
+ assertThat(contentSummary.getLength(), is(6L));
+
+ fs.delete(src, false);
+ assertBlockNotPresent(firstBlk);
+
+ // Diskspace consumed should be 0 bytes * 3. []
+ contentSummary = fs.getContentSummary(parent);
+ assertThat(contentSummary.getSpaceConsumed(), is(0L));
+ }
+
/**
* Failure / recovery test for truncate.
* In this failure the DNs fail to recover the blocks and the NN triggers
@@ -159,8 +449,6 @@ public class TestFileTruncate {
boolean isReady = fs.truncate(p, newLength);
assertThat("truncate should have triggered block recovery.",
isReady, is(false));
- FileStatus fileStatus = fs.getFileStatus(p);
- assertThat(fileStatus.getLen(), is((long) newLength));
boolean recoveryTriggered = false;
for(int i = 0; i < RECOVERY_ATTEMPTS; i++) {
@@ -168,8 +456,6 @@ public class TestFileTruncate {
NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(),
p.toUri().getPath());
if(leaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
- cluster.startDataNodes(conf, DATANODE_NUM, true,
- HdfsServerConstants.StartupOption.REGULAR, null);
recoveryTriggered = true;
break;
}
@@ -177,6 +463,9 @@ public class TestFileTruncate {
}
assertThat("lease recovery should have occurred in ~" +
SLEEP * RECOVERY_ATTEMPTS + " ms.", recoveryTriggered, is(true));
+ cluster.startDataNodes(conf, DATANODE_NUM, true,
+ StartupOption.REGULAR, null);
+ cluster.waitActive();
checkBlockRecovery(p);
@@ -184,10 +473,10 @@ public class TestFileTruncate {
.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
HdfsConstants.LEASE_HARDLIMIT_PERIOD);
- fileStatus = fs.getFileStatus(p);
+ FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
- AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
+ checkFullFile(p, newLength, contents);
fs.delete(p, false);
}
@@ -198,10 +487,9 @@ public class TestFileTruncate {
public void testTruncateEditLogLoad() throws IOException {
int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
int toTruncate = 1;
-
+ final String s = "/testTruncateEditLogLoad";
+ final Path p = new Path(s);
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
-
- final Path p = new Path("/testTruncateEditLogLoad");
writeContents(contents, startingFileSize, p);
int newLength = startingFileSize - toTruncate;
@@ -209,54 +497,183 @@ public class TestFileTruncate {
assertThat("truncate should have triggered block recovery.",
isReady, is(false));
- checkBlockRecovery(p);
-
cluster.restartNameNode();
+ String holder = UserGroupInformation.getCurrentUser().getUserName();
+ cluster.getNamesystem().recoverLease(s, holder, "");
+
+ checkBlockRecovery(p);
+
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
- AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
+ checkFullFile(p, newLength, contents);
fs.delete(p, false);
}
/**
+ * Upgrade, rollback, and restart test for truncate.
+ */
+ @Test
+ public void testUpgradeAndRestart() throws IOException {
+ Path parent = new Path("/test");
+ fs.mkdirs(parent);
+ fs.setQuota(parent, 100, 1000);
+ fs.allowSnapshot(parent);
+ String truncateFile = "testUpgrade";
+ final Path p = new Path(parent, truncateFile);
+ int startingFileSize = 2 * BLOCK_SIZE;
+ int toTruncate = 1;
+ byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
+ writeContents(contents, startingFileSize, p);
+
+ Path snapshotDir = fs.createSnapshot(parent, "ss0");
+ Path snapshotFile = new Path(snapshotDir, truncateFile);
+
+ int newLengthBeforeUpgrade = startingFileSize - toTruncate;
+ boolean isReady = fs.truncate(p, newLengthBeforeUpgrade);
+ assertThat("truncate should have triggered block recovery.",
+ isReady, is(false));
+
+ checkBlockRecovery(p);
+
+ checkFullFile(p, newLengthBeforeUpgrade, contents);
+ assertFileLength(snapshotFile, startingFileSize);
+ long totalBlockBefore = cluster.getNamesystem().getBlocksTotal();
+
+ restartCluster(StartupOption.UPGRADE);
+
+ assertThat("SafeMode should be OFF",
+ cluster.getNamesystem().isInSafeMode(), is(false));
+ assertThat("NameNode should be performing upgrade.",
+ cluster.getNamesystem().isUpgradeFinalized(), is(false));
+ FileStatus fileStatus = fs.getFileStatus(p);
+ assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));
+
+ int newLengthAfterUpgrade = newLengthBeforeUpgrade - toTruncate;
+ Block oldBlk = getLocatedBlocks(p).getLastLocatedBlock()
+ .getBlock().getLocalBlock();
+ isReady = fs.truncate(p, newLengthAfterUpgrade);
+ assertThat("truncate should have triggered block recovery.",
+ isReady, is(false));
+ fileStatus = fs.getFileStatus(p);
+ assertThat(fileStatus.getLen(), is((long) newLengthAfterUpgrade));
+ assertThat("Should copy on truncate during upgrade",
+ getLocatedBlocks(p).getLastLocatedBlock().getBlock()
+ .getLocalBlock().getBlockId(), is(not(equalTo(oldBlk.getBlockId()))));
+
+ checkBlockRecovery(p);
+
+ checkFullFile(p, newLengthAfterUpgrade, contents);
+ assertThat("Total block count should be unchanged from copy-on-truncate",
+ cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
+
+ restartCluster(StartupOption.ROLLBACK);
+
+ assertThat("File does not exist " + p, fs.exists(p), is(true));
+ fileStatus = fs.getFileStatus(p);
+ assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));
+ checkFullFile(p, newLengthBeforeUpgrade, contents);
+ assertThat("Total block count should be unchanged from rolling back",
+ cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
+
+ restartCluster(StartupOption.REGULAR);
+ assertThat("Total block count should be unchanged from start-up",
+ cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
+ checkFullFile(p, newLengthBeforeUpgrade, contents);
+ assertFileLength(snapshotFile, startingFileSize);
+
+ // empty edits and restart
+ fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ fs.saveNamespace();
+ cluster.restartNameNode(true);
+ assertThat("Total block count should be unchanged from start-up",
+ cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
+ checkFullFile(p, newLengthBeforeUpgrade, contents);
+ assertFileLength(snapshotFile, startingFileSize);
+
+ fs.deleteSnapshot(parent, "ss0");
+ fs.delete(parent, true);
+ assertThat("File " + p + " shouldn't exist", fs.exists(p), is(false));
+ }
+
+ /**
* Check truncate recovery.
*/
@Test
- public void testTruncateLastBlock() throws IOException {
+ public void testTruncateRecovery() throws IOException {
FSNamesystem fsn = cluster.getNamesystem();
-
- String src = "/file";
+ String client = "client";
+ String clientMachine = "clientMachine";
+ Path parent = new Path("/test");
+ String src = "/test/testTruncateRecovery";
Path srcPath = new Path(src);
byte[] contents = AppendTestUtil.initBuffer(BLOCK_SIZE);
writeContents(contents, BLOCK_SIZE, srcPath);
- INodeFile inode = fsn.getFSDirectory().getINode(src).asFile();
- long oldGenstamp = GenerationStamp.LAST_RESERVED_STAMP;
- DatanodeDescriptor dn = DFSTestUtil.getLocalDatanodeDescriptor();
- DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo(
- dn.getDatanodeUuid(), InetAddress.getLocalHost().getHostAddress());
- dn.isAlive = true;
-
- BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
- new Block(0, 1, oldGenstamp), (short) 1,
- HdfsServerConstants.BlockUCState.BEING_TRUNCATED,
- new DatanodeStorageInfo[] {storage});
+ INodesInPath iip = fsn.getFSDirectory().getINodesInPath4Write(src, true);
+ INodeFile file = iip.getLastINode().asFile();
+ long initialGenStamp = file.getLastBlock().getGenerationStamp();
+ // Test that prepareFileForTruncate sets up in-place truncate.
+ fsn.writeLock();
+ try {
+ Block oldBlock = file.getLastBlock();
+ Block truncateBlock =
+ fsn.prepareFileForTruncate(iip, client, clientMachine, 1, null);
+ // In-place truncate uses old block id with new genStamp.
+ assertThat(truncateBlock.getBlockId(),
+ is(equalTo(oldBlock.getBlockId())));
+ assertThat(truncateBlock.getNumBytes(),
+ is(oldBlock.getNumBytes()));
+ assertThat(truncateBlock.getGenerationStamp(),
+ is(fsn.getBlockIdManager().getGenerationStampV2()));
+ assertThat(file.getLastBlock().getBlockUCState(),
+ is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
+ long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
+ .getBlockRecoveryId();
+ assertThat(blockRecoveryId, is(initialGenStamp + 1));
+ fsn.getEditLog().logTruncate(
+ src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
+ } finally {
+ fsn.writeUnlock();
+ }
- inode.setBlocks(new BlockInfo[] {blockInfo});
+ // Re-create file and ensure we are ready to copy on truncate
+ writeContents(contents, BLOCK_SIZE, srcPath);
+ fs.allowSnapshot(parent);
+ fs.createSnapshot(parent, "ss0");
+ iip = fsn.getFSDirectory().getINodesInPath(src, true);
+ file = iip.getLastINode().asFile();
+ file.recordModification(iip.getLatestSnapshotId(), true);
+ assertThat(file.isBlockInLatestSnapshot(file.getLastBlock()), is(true));
+ initialGenStamp = file.getLastBlock().getGenerationStamp();
+ // Test that prepareFileForTruncate sets up copy-on-write truncate
fsn.writeLock();
try {
- fsn.initializeBlockRecovery(inode);
- assertThat(inode.getLastBlock().getBlockUCState(),
- is(HdfsServerConstants.BlockUCState.BEING_TRUNCATED));
- long blockRecoveryId = ((BlockInfoUnderConstruction) inode.getLastBlock())
+ Block oldBlock = file.getLastBlock();
+ Block truncateBlock =
+ fsn.prepareFileForTruncate(iip, client, clientMachine, 1, null);
+ // Copy-on-write truncate makes new block with new id and genStamp
+ assertThat(truncateBlock.getBlockId(),
+ is(not(equalTo(oldBlock.getBlockId()))));
+ assertThat(truncateBlock.getNumBytes() < oldBlock.getNumBytes(),
+ is(true));
+ assertThat(truncateBlock.getGenerationStamp(),
+ is(fsn.getBlockIdManager().getGenerationStampV2()));
+ assertThat(file.getLastBlock().getBlockUCState(),
+ is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
+ long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
.getBlockRecoveryId();
- assertThat(blockRecoveryId, is(oldGenstamp + 2));
+ assertThat(blockRecoveryId, is(initialGenStamp + 1));
+ fsn.getEditLog().logTruncate(
+ src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
} finally {
fsn.writeUnlock();
}
+ checkBlockRecovery(srcPath);
+ fs.deleteSnapshot(parent, "ss0");
+ fs.delete(parent, true);
}
static void writeContents(byte[] contents, int fileLength, Path p)
@@ -286,4 +703,38 @@ public class TestFileTruncate {
static LocatedBlocks getLocatedBlocks(Path src) throws IOException {
return fs.getClient().getLocatedBlocks(src.toString(), 0, Long.MAX_VALUE);
}
+
+ static void assertBlockExists(Block blk) {
+ assertNotNull("BlocksMap does not contain block: " + blk,
+ cluster.getNamesystem().getStoredBlock(blk));
+ }
+
+ static void assertBlockNotPresent(Block blk) {
+ assertNull("BlocksMap should not contain block: " + blk,
+ cluster.getNamesystem().getStoredBlock(blk));
+ }
+
+ static void assertFileLength(Path file, long length) throws IOException {
+ byte[] data = DFSTestUtil.readFileBuffer(fs, file);
+ assertEquals("Wrong data size in snapshot.", length, data.length);
+ }
+
+ static void checkFullFile(Path p, int newLength, byte[] contents)
+ throws IOException {
+ AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
+ }
+
+ static void restartCluster(StartupOption o)
+ throws IOException {
+ cluster.shutdown();
+ if(StartupOption.ROLLBACK == o)
+ NameNode.doRollback(conf, false);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM)
+ .format(false)
+ .nameNodePort(NameNode.DEFAULT_PORT)
+ .startupOption(o==StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
+ .dnStartupOption(o!=StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
+ .build();
+ fs = cluster.getFileSystem();
+ }
}
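The two halves of testTruncateRecovery above pin down the contract of prepareFileForTruncate: with no snapshot holding the last block, the block keeps its id and length and only the generation stamp advances (in-place truncate); once the block is captured in a snapshot, a fresh block id is allocated for the truncated copy. A compact sketch of that rule, with nextBlockId and nextGenStamp as illustrative stand-ins for the real block-id and generation-stamp managers:

    final class TruncateModeSketch {
      static long nextBlockId = 5000;
      static long nextGenStamp = 100;

      static final class Blk {
        final long id, genStamp, numBytes;
        Blk(long id, long genStamp, long numBytes) {
          this.id = id; this.genStamp = genStamp; this.numBytes = numBytes;
        }
        @Override public String toString() {
          return "blk_" + id + "_gs" + genStamp + "_len" + numBytes;
        }
      }

      // A last block captured in a snapshot forces copy-on-truncate
      // (fresh id, truncated length); otherwise recovery is in place
      // (same id, same length, only the genstamp advances).
      static Blk prepareTruncateBlock(Blk last, long newLen,
                                      boolean blockInSnapshot) {
        long gs = ++nextGenStamp;
        return blockInSnapshot
            ? new Blk(++nextBlockId, gs, newLen)
            : new Blk(last.id, gs, last.numBytes);
      }

      public static void main(String[] args) {
        Blk old = new Blk(42, 7, 4096);
        System.out.println(prepareTruncateBlock(old, 1024, false)); // blk_42...
        System.out.println(prepareTruncateBlock(old, 1024, true));  // blk_5001...
      }
    }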
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index 002c506..dce3f47 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9953e921/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index f61c075..6e8078b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<EDITS>
- <EDITS_VERSION>-60</EDITS_VERSION>
+ <EDITS_VERSION>-61</EDITS_VERSION>
<RECORD>
<OPCODE>OP_START_LOG_SEGMENT</OPCODE>
<DATA>
@@ -13,8 +13,8 @@
<TXID>2</TXID>
<DELEGATION_KEY>
<KEY_ID>1</KEY_ID>
- <EXPIRY_DATE>1421822547136</EXPIRY_DATE>
- <KEY>24319c7d1f7c0828</KEY>
+ <EXPIRY_DATE>1421826999207</EXPIRY_DATE>
+ <KEY>ca9a0c8b240570b3</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
@@ -24,8 +24,8 @@
<TXID>3</TXID>
<DELEGATION_KEY>
<KEY_ID>2</KEY_ID>
- <EXPIRY_DATE>1421822547140</EXPIRY_DATE>
- <KEY>254b1207021431f4</KEY>
+ <EXPIRY_DATE>1421826999210</EXPIRY_DATE>
+ <KEY>833c25a6fb2b0a6f</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
@@ -37,19 +37,19 @@
<INODEID>16386</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131348286</MTIME>
- <ATIME>1421131348286</ATIME>
+ <MTIME>1421135800328</MTIME>
+ <ATIME>1421135800328</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>6</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>9</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -60,14 +60,14 @@
<INODEID>0</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131348328</MTIME>
- <ATIME>1421131348286</ATIME>
+ <MTIME>1421135800357</MTIME>
+ <ATIME>1421135800328</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<OVERWRITE>false</OVERWRITE>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -88,9 +88,9 @@
<LENGTH>0</LENGTH>
<SRC>/file_create</SRC>
<DST>/file_moved</DST>
- <TIMESTAMP>1421131348343</TIMESTAMP>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>9</RPC_CALLID>
+ <TIMESTAMP>1421135800368</TIMESTAMP>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>12</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -99,9 +99,9 @@
<TXID>8</TXID>
<LENGTH>0</LENGTH>
<PATH>/file_moved</PATH>
- <TIMESTAMP>1421131348353</TIMESTAMP>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>10</RPC_CALLID>
+ <TIMESTAMP>1421135800377</TIMESTAMP>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>13</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -111,9 +111,9 @@
<LENGTH>0</LENGTH>
<INODEID>16387</INODEID>
<PATH>/directory_mkdir</PATH>
- <TIMESTAMP>1421131348366</TIMESTAMP>
+ <TIMESTAMP>1421135800394</TIMESTAMP>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>493</MODE>
</PERMISSION_STATUS>
@@ -146,8 +146,8 @@
<TXID>13</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>15</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>18</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -157,8 +157,8 @@
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
<SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>16</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>19</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -167,8 +167,8 @@
<TXID>15</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>17</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>20</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -179,19 +179,19 @@
<INODEID>16388</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131348401</MTIME>
- <ATIME>1421131348401</ATIME>
+ <MTIME>1421135800442</MTIME>
+ <ATIME>1421135800442</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>18</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>21</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -202,14 +202,14 @@
<INODEID>0</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131348405</MTIME>
- <ATIME>1421131348401</ATIME>
+ <MTIME>1421135800445</MTIME>
+ <ATIME>1421135800442</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<OVERWRITE>false</OVERWRITE>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -265,10 +265,10 @@
<LENGTH>0</LENGTH>
<SRC>/file_create</SRC>
<DST>/file_moved</DST>
- <TIMESTAMP>1421131348436</TIMESTAMP>
+ <TIMESTAMP>1421135800485</TIMESTAMP>
<OPTIONS>NONE</OPTIONS>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>25</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>28</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -279,19 +279,19 @@
<INODEID>16389</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131348443</MTIME>
- <ATIME>1421131348443</ATIME>
+ <MTIME>1421135800495</MTIME>
+ <ATIME>1421135800495</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>27</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>30</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -396,8 +396,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131348998</MTIME>
- <ATIME>1421131348443</ATIME>
+ <MTIME>1421135801050</MTIME>
+ <ATIME>1421135800495</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@@ -418,7 +418,7 @@
<GENSTAMP>1003</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -432,19 +432,19 @@
<INODEID>16390</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131349001</MTIME>
- <ATIME>1421131349001</ATIME>
+ <MTIME>1421135801053</MTIME>
+ <ATIME>1421135801053</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>38</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>41</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -549,8 +549,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131349032</MTIME>
- <ATIME>1421131349001</ATIME>
+ <MTIME>1421135801091</MTIME>
+ <ATIME>1421135801053</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@@ -571,7 +571,7 @@
<GENSTAMP>1006</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -585,19 +585,19 @@
<INODEID>16391</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131349036</MTIME>
- <ATIME>1421131349036</ATIME>
+ <MTIME>1421135801095</MTIME>
+ <ATIME>1421135801095</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>47</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>50</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -702,8 +702,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131349060</MTIME>
- <ATIME>1421131349036</ATIME>
+ <MTIME>1421135801126</MTIME>
+ <ATIME>1421135801095</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@@ -724,7 +724,7 @@
<GENSTAMP>1009</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -736,13 +736,13 @@
<TXID>57</TXID>
<LENGTH>0</LENGTH>
<TRG>/file_concat_target</TRG>
- <TIMESTAMP>1421131349064</TIMESTAMP>
+ <TIMESTAMP>1421135801130</TIMESTAMP>
<SOURCES>
<SOURCE1>/file_concat_0</SOURCE1>
<SOURCE2>/file_concat_1</SOURCE2>
</SOURCES>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>55</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>58</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -753,19 +753,19 @@
<INODEID>16392</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131349068</MTIME>
- <ATIME>1421131349068</ATIME>
+ <MTIME>1421135810102</MTIME>
+ <ATIME>1421135810102</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>57</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>63</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -837,8 +837,8 @@
<INODEID>0</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131349085</MTIME>
- <ATIME>1421131349068</ATIME>
+ <MTIME>1421135810122</MTIME>
+ <ATIME>1421135810102</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@@ -854,7 +854,7 @@
<GENSTAMP>1011</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -865,10 +865,10 @@
<DATA>
<TXID>66</TXID>
<SRC>/file_create</SRC>
- <CLIENTNAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENTNAME>
+ <CLIENTNAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENTNAME>
<CLIENTMACHINE>127.0.0.1</CLIENTMACHINE>
<NEWLENGTH>512</NEWLENGTH>
- <TIMESTAMP>1421131349088</TIMESTAMP>
+ <TIMESTAMP>1421135810125</TIMESTAMP>
</DATA>
</RECORD>
<RECORD>
@@ -879,15 +879,15 @@
<INODEID>16393</INODEID>
<PATH>/file_symlink</PATH>
<VALUE>/file_concat_target</VALUE>
- <MTIME>1421131349095</MTIME>
- <ATIME>1421131349095</ATIME>
+ <MTIME>1421135810132</MTIME>
+ <ATIME>1421135810132</ATIME>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>511</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>64</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>70</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -898,19 +898,19 @@
<INODEID>16394</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131349098</MTIME>
- <ATIME>1421131349098</ATIME>
+ <MTIME>1421135810135</MTIME>
+ <ATIME>1421135810135</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_240777107_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>65</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>71</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -966,7 +966,7 @@
<OPCODE>OP_REASSIGN_LEASE</OPCODE>
<DATA>
<TXID>74</TXID>
- <LEASEHOLDER>DFSClient_NONMAPREDUCE_526346936_1</LEASEHOLDER>
+ <LEASEHOLDER>DFSClient_NONMAPREDUCE_240777107_1</LEASEHOLDER>
<PATH>/hard-lease-recovery-test</PATH>
<NEWHOLDER>HDFS_NameNode</NEWHOLDER>
</DATA>
@@ -979,8 +979,8 @@
<INODEID>0</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1421131351230</MTIME>
- <ATIME>1421131349098</ATIME>
+ <MTIME>1421135812235</MTIME>
+ <ATIME>1421135810135</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@@ -991,7 +991,7 @@
<GENSTAMP>1013</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>plamenjeliazkov</USERNAME>
+ <USERNAME>shv</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -1002,13 +1002,13 @@
<DATA>
<TXID>76</TXID>
<POOLNAME>pool1</POOLNAME>
- <OWNERNAME>plamenjeliazkov</OWNERNAME>
- <GROUPNAME>staff</GROUPNAME>
+ <OWNERNAME>shv</OWNERNAME>
+ <GROUPNAME>shv</GROUPNAME>
<MODE>493</MODE>
<LIMIT>9223372036854775807</LIMIT>
<MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>72</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>78</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -1017,8 +1017,8 @@
<TXID>77</TXID>
<POOLNAME>pool1</POOLNAME>
<LIMIT>99</LIMIT>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>73</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>79</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -1029,9 +1029,9 @@
<PATH>/path</PATH>
<REPLICATION>1</REPLICATION>
<POOL>pool1</POOL>
- <EXPIRATION>2305844430345046085</EXPIRATION>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>74</RPC_CALLID>
+ <EXPIRATION>2305844430349507141</EXPIRATION>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>80</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -1040,8 +1040,8 @@
<TXID>79</TXID>
<ID>1</ID>
<REPLICATION>2</REPLICATION>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>75</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>81</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -1049,8 +1049,8 @@
<DATA>
<TXID>80</TXID>
<ID>1</ID>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>76</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>82</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -1058,8 +1058,8 @@
<DATA>
<TXID>81</TXID>
<POOLNAME>pool1</POOLNAME>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>77</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>83</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -1105,8 +1105,8 @@
<NAME>a1</NAME>
<VALUE>0x313233</VALUE>
</XATTR>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>79</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>85</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -1119,8 +1119,8 @@
<NAME>a2</NAME>
<VALUE>0x373839</VALUE>
</XATTR>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>80</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>86</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@@ -1132,22 +1132,22 @@
<NAMESPACE>USER</NAMESPACE>
<NAME>a2</NAME>
</XATTR>
- <RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
- <RPC_CALLID>81</RPC_CALLID>
+ <RPC_CLIENTID>cb20a92a-2c2f-4305-a838-2a01c6e73e18</RPC_CLIENTID>
+ <RPC_CALLID>87</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ROLLING_UPGRADE_START</OPCODE>
<DATA>
<TXID>86</TXID>
- <STARTTIME>1421131352186</STARTTIME>
+ <STARTTIME>1421135813268</STARTTIME>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ROLLING_UPGRADE_FINALIZE</OPCODE>
<DATA>
<TXID>87</TXID>
- <FINALIZETIME>1421131352186</FINALIZETIME>
+ <FINALIZETIME>1421135813268</FINALIZETIME>
</DATA>
</RECORD>
<RECORD>