You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by ji...@apache.org on 2013/06/06 22:15:08 UTC
svn commit: r1490421 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/
src/test/java/org/apache/hadoop/hdfs/server/namenode...
Author: jing9
Date: Thu Jun 6 20:15:07 2013
New Revision: 1490421
URL: http://svn.apache.org/r1490421
Log:
HDFS-4877. Snapshot: fix the scenario where a directory is renamed under its prior descendant. Contributed by Jing Zhao.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jun 6 20:15:07 2013
@@ -1056,6 +1056,9 @@ Release 2.1.0-beta - UNRELEASED
HDFS-4850. Fix OfflineImageViewer to work on fsimages with empty files or
snapshots. (jing9)
+ HDFS-4877. Snapshot: fix the scenario where a directory is renamed under
+ its prior descendant. (jing9)
+
Release 2.0.5-alpha - UNRELEASED
INCOMPATIBLE CHANGES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu Jun 6 20:15:07 2013
@@ -678,7 +678,7 @@ public class FSDirectory implements Clos
Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
newSrcCounts.subtract(oldSrcCounts);
srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
- newSrcCounts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID);
+ newSrcCounts.get(Quota.DISKSPACE), false);
}
return true;
@@ -944,8 +944,8 @@ public class FSDirectory implements Clos
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ArrayList<INode>();
filesDeleted = removedDst.cleanSubtree(null,
- dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes).get(
- Quota.NAMESPACE);
+ dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true)
+ .get(Quota.NAMESPACE);
getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
removedINodes);
}
@@ -963,7 +963,7 @@ public class FSDirectory implements Clos
Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
newSrcCounts.subtract(oldSrcCounts);
srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
- newSrcCounts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID);
+ newSrcCounts.get(Quota.DISKSPACE), false);
}
return filesDeleted >= 0;
@@ -1408,9 +1408,9 @@ public class FSDirectory implements Clos
targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
} else {
Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, true);
parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
- -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+ -counts.get(Quota.DISKSPACE), true);
removed = counts.get(Quota.NAMESPACE);
}
if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -1797,7 +1797,8 @@ public class FSDirectory implements Clos
final INode[] inodes = inodesInPath.getINodes();
for(int i=0; i < numOfINodes; i++) {
if (inodes[i].isQuotaSet()) { // a directory with quota
- INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
+ INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i]
+ .asDirectory();
node.addSpaceConsumed2Cache(nsDelta, dsDelta);
}
}
@@ -2033,7 +2034,8 @@ public class FSDirectory implements Clos
}
if (inodes[i].isQuotaSet()) { // a directory with quota
try {
- ((INodeDirectoryWithQuota)inodes[i]).verifyQuota(nsDelta, dsDelta);
+ ((INodeDirectoryWithQuota) inodes[i].asDirectory()).verifyQuota(
+ nsDelta, dsDelta);
} catch (QuotaExceededException e) {
e.setPathName(getFullPathName(inodes, i));
throw e;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Thu Jun 6 20:15:07 2013
@@ -342,12 +342,13 @@ public abstract class INode implements D
* deletion/update will be added to the given map.
* @param removedINodes
* INodes collected from the descents for further cleaning up of
- * inodeMap
+ * inodeMap
* @return quota usage delta when deleting a snapshot
*/
public abstract Quota.Counts cleanSubtree(final Snapshot snapshot,
Snapshot prior, BlocksMapUpdateInfo collectedBlocks,
- List<INode> removedINodes) throws QuotaExceededException;
+ List<INode> removedINodes, boolean countDiffChange)
+ throws QuotaExceededException;
/**
* Destroy self and clear everything! If the INode is a file, this method
@@ -388,17 +389,10 @@ public abstract class INode implements D
* Check and add namespace/diskspace consumed to itself and the ancestors.
* @throws QuotaExceededException if quota is violated.
*/
- public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify,
- int snapshotId) throws QuotaExceededException {
- if (parent != null) {
- parent.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
- }
- }
-
- public void addSpaceConsumedToRenameSrc(long nsDelta, long dsDelta,
- boolean verify, int snapshotId) throws QuotaExceededException {
+ public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify)
+ throws QuotaExceededException {
if (parent != null) {
- parent.addSpaceConsumedToRenameSrc(nsDelta, dsDelta, verify, snapshotId);
+ parent.addSpaceConsumed(nsDelta, dsDelta, verify);
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Thu Jun 6 20:15:07 2013
@@ -23,6 +23,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -499,7 +500,8 @@ public class INodeDirectory extends INod
/** Call cleanSubtree(..) recursively down the subtree. */
public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) throws QuotaExceededException {
+ final List<INode> removedINodes, final Map<INode, INode> excludedNodes,
+ final boolean countDiffChange) throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance();
// in case of deletion snapshot, since this call happens after we modify
// the diff list, the snapshot to be deleted has been combined or renamed
@@ -508,9 +510,14 @@ public class INodeDirectory extends INod
// INodeDirectoryWithSnapshot#cleanSubtree)
Snapshot s = snapshot != null && prior != null ? prior : snapshot;
for (INode child : getChildrenList(s)) {
- Quota.Counts childCounts = child.cleanSubtree(snapshot, prior,
- collectedBlocks, removedINodes);
- counts.add(childCounts);
+ if (snapshot != null && excludedNodes != null
+ && excludedNodes.containsKey(child)) {
+ continue;
+ } else {
+ Quota.Counts childCounts = child.cleanSubtree(snapshot, prior,
+ collectedBlocks, removedINodes, countDiffChange);
+ counts.add(childCounts);
+ }
}
return counts;
}
@@ -527,8 +534,9 @@ public class INodeDirectory extends INod
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
- final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) throws QuotaExceededException {
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, final boolean countDiffChange)
+ throws QuotaExceededException {
if (prior == null && snapshot == null) {
// destroy the whole subtree and collect blocks that should be deleted
Quota.Counts counts = Quota.Counts.newInstance();
@@ -538,7 +546,7 @@ public class INodeDirectory extends INod
} else {
// process recursively down the subtree
Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, null, countDiffChange);
if (isQuotaSet()) {
((INodeDirectoryWithQuota) this).addSpaceConsumed2Cache(
-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java Thu Jun 6 20:15:07 2013
@@ -132,7 +132,7 @@ public class INodeDirectoryWithQuota ext
@Override
public final void addSpaceConsumed(final long nsDelta, final long dsDelta,
- boolean verify, int snapshotId) throws QuotaExceededException {
+ boolean verify) throws QuotaExceededException {
if (isQuotaSet()) {
// The following steps are important:
// check quotas in this inode and all ancestors before changing counts
@@ -143,11 +143,11 @@ public class INodeDirectoryWithQuota ext
verifyQuota(nsDelta, dsDelta);
}
// (2) verify quota and then add count in ancestors
- super.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
+ super.addSpaceConsumed(nsDelta, dsDelta, verify);
// (3) add count in this inode
addSpaceConsumed2Cache(nsDelta, dsDelta);
} else {
- super.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
+ super.addSpaceConsumed(nsDelta, dsDelta, verify);
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Thu Jun 6 20:15:07 2013
@@ -271,7 +271,8 @@ public class INodeFile extends INodeWith
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
- final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance();
if (snapshot == null && prior == null) {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java Thu Jun 6 20:15:07 2013
@@ -113,8 +113,8 @@ public class INodeMap {
@Override
public Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
- BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes)
- throws QuotaExceededException {
+ BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
+ boolean countDiffChange) throws QuotaExceededException {
return null;
}
};
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java Thu Jun 6 20:15:07 2013
@@ -254,10 +254,10 @@ public abstract class INodeReference ext
@Override // used by WithCount
public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
- BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
- throws QuotaExceededException {
+ BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes,
+ final boolean countDiffChange) throws QuotaExceededException {
return referred.cleanSubtree(snapshot, prior, collectedBlocks,
- removedINodes);
+ removedINodes, countDiffChange);
}
@Override // used by WithCount
@@ -396,29 +396,6 @@ public abstract class INodeReference ext
return withNameList.get(-i - 2);
}
}
-
- @Override
- public final void addSpaceConsumed(long nsDelta, long dsDelta,
- boolean verify, int snapshotId) throws QuotaExceededException {
- INodeReference parentRef = getParentReference();
- if (parentRef != null) {
- parentRef.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
- }
- addSpaceConsumedToRenameSrc(nsDelta, dsDelta, verify, snapshotId);
- }
-
- @Override
- public final void addSpaceConsumedToRenameSrc(long nsDelta, long dsDelta,
- boolean verify, int snapshotId) throws QuotaExceededException {
- if (snapshotId != Snapshot.INVALID_ID) {
- for (INodeReference.WithName withName : withNameList) {
- if (withName.getLastSnapshotId() >= snapshotId) {
- withName.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
- break;
- }
- }
- }
- }
}
/** A reference with a fixed name. */
@@ -476,15 +453,20 @@ public abstract class INodeReference ext
Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId);
final INode referred = this.getReferredINode().asReference()
.getReferredINode();
- // we cannot use cache for the referred node since its cached quota may
- // have already been updated by changes in the current tree
- return referred.computeQuotaUsage(counts, false, this.lastSnapshotId);
+ // We will continue the quota usage computation using the same snapshot id
+ // as time line (if the given snapshot id is valid). Also, we cannot use
+ // cache for the referred node since its cached quota may have already
+ // been updated by changes in the current tree.
+ int id = lastSnapshotId > Snapshot.INVALID_ID ?
+ lastSnapshotId : this.lastSnapshotId;
+ return referred.computeQuotaUsage(counts, false, id);
}
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) throws QuotaExceededException {
+ final List<INode> removedINodes, final boolean countDiffChange)
+ throws QuotaExceededException {
// since WithName node resides in deleted list acting as a snapshot copy,
// the parameter snapshot must be non-null
Preconditions.checkArgument(snapshot != null);
@@ -500,11 +482,19 @@ public abstract class INodeReference ext
}
Quota.Counts counts = getReferredINode().cleanSubtree(snapshot, prior,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, false);
INodeReference ref = getReferredINode().getParentReference();
if (ref != null) {
ref.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
- -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+ -counts.get(Quota.DISKSPACE), true);
+ }
+
+ if (snapshot.getId() < lastSnapshotId) {
+ // for a WithName node, when we compute its quota usage, we only count
+ // in all the nodes existing at the time of the corresponding rename op.
+ // Thus if we are deleting a snapshot before/at the snapshot associated
+ // with lastSnapshotId, we do not need to update the quota upwards.
+ counts = Quota.Counts.newInstance();
}
return counts;
}
@@ -529,16 +519,17 @@ public abstract class INodeReference ext
// 1. create snapshot s1 on /test
// 2. rename /test/foo/bar to /test/foo2/bar
// 3. create snapshot s2 on /test
- // 4. delete snapshot s2
+ // 4. rename foo2 again
+ // 5. delete snapshot s2
return;
}
try {
Quota.Counts counts = referred.cleanSubtree(snapshot, prior,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, false);
INodeReference ref = getReferredINode().getParentReference();
if (ref != null) {
ref.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
- -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+ -counts.get(Quota.DISKSPACE), true);
}
} catch (QuotaExceededException e) {
LOG.error("should not exceed quota while snapshot deletion", e);
@@ -588,8 +579,8 @@ public abstract class INodeReference ext
@Override
public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
- BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes)
- throws QuotaExceededException {
+ BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
+ final boolean countDiffChange) throws QuotaExceededException {
if (snapshot == null && prior == null) {
Quota.Counts counts = Quota.Counts.newInstance();
this.computeQuotaUsage(counts, true);
@@ -609,7 +600,7 @@ public abstract class INodeReference ext
return Quota.Counts.newInstance();
}
return getReferredINode().cleanSubtree(snapshot, prior,
- collectedBlocks, removedINodes);
+ collectedBlocks, removedINodes, countDiffChange);
}
}
@@ -648,8 +639,11 @@ public abstract class INodeReference ext
sfile.deleteCurrentFile();
if (snapshot != null) {
try {
+ // when calling cleanSubtree of the referred node, since we
+ // compute quota usage updates before calling this destroy
+ // function, we use true for countDiffChange
referred.cleanSubtree(snapshot, prior, collectedBlocks,
- removedINodes);
+ removedINodes, true);
} catch (QuotaExceededException e) {
LOG.error("should not exceed quota while snapshot deletion", e);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java Thu Jun 6 20:15:07 2013
@@ -76,7 +76,8 @@ public class INodeSymlink extends INodeW
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
- final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, final boolean countDiffChange) {
if (snapshot == null && prior == null) {
destroyAndCollectBlocks(collectedBlocks, removedINodes);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java Thu Jun 6 20:15:07 2013
@@ -68,7 +68,8 @@ abstract class AbstractINodeDiffList<N e
*/
public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
Snapshot prior, final N currentINode,
- final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, boolean countDiffChange)
throws QuotaExceededException {
int snapshotIndex = Collections.binarySearch(diffs, snapshot.getId());
@@ -80,14 +81,14 @@ abstract class AbstractINodeDiffList<N e
diffs.get(snapshotIndex).setSnapshot(prior);
} else {
removed = diffs.remove(0);
- counts.add(Quota.NAMESPACE, 1);
- // We add 1 to the namespace quota usage since we delete a diff.
- // The quota change will be propagated to
- // 1) ancestors in the current tree, and
- // 2) src tree of any renamed ancestor.
- // Because for 2) we do not calculate the number of diff for quota
- // usage, we need to compensate this diff change for 2)
- currentINode.addSpaceConsumedToRenameSrc(1, 0, false, snapshot.getId());
+ if (countDiffChange) {
+ counts.add(Quota.NAMESPACE, 1);
+ } else {
+ // the currentINode must be a descendant of a WithName node, which set
+ // countDiffChange to false. In that case we should count in the diff
+ // change when updating the quota usage in the current tree
+ currentINode.addSpaceConsumed(-1, 0, false);
+ }
counts.add(removed.destroyDiffAndCollectBlocks(currentINode,
collectedBlocks, removedINodes));
}
@@ -98,8 +99,11 @@ abstract class AbstractINodeDiffList<N e
} else {
// combine the to-be-removed diff with its previous diff
removed = diffs.remove(snapshotIndex);
- counts.add(Quota.NAMESPACE, 1);
- currentINode.addSpaceConsumedToRenameSrc(1, 0, false, snapshot.getId());
+ if (countDiffChange) {
+ counts.add(Quota.NAMESPACE, 1);
+ } else {
+ currentINode.addSpaceConsumed(-1, 0, false);
+ }
if (previous.snapshotINode == null) {
previous.snapshotINode = removed.snapshotINode;
} else if (removed.snapshotINode != null) {
@@ -117,7 +121,7 @@ abstract class AbstractINodeDiffList<N e
/** Add an {@link AbstractINodeDiff} for the given snapshot. */
final D addDiff(Snapshot latest, N currentINode)
throws QuotaExceededException {
- currentINode.addSpaceConsumed(1, 0, true, Snapshot.INVALID_ID);
+ currentINode.addSpaceConsumed(1, 0, true);
return addLast(createDiff(latest, currentINode));
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java Thu Jun 6 20:15:07 2013
@@ -319,21 +319,23 @@ public class INodeDirectorySnapshottable
+ " from path " + this.getFullPathName()
+ ": the snapshot does not exist.");
} else {
- final Snapshot snapshot = snapshotsByNames.remove(i);
+ final Snapshot snapshot = snapshotsByNames.get(i);
Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot);
try {
Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks,
- removedINodes);
+ removedINodes, true);
INodeDirectory parent = getParent();
if (parent != null) {
// there will not be any WithName node corresponding to the deleted
// snapshot, thus only update the quota usage in the current tree
parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
- -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+ -counts.get(Quota.DISKSPACE), true);
}
} catch(QuotaExceededException e) {
LOG.error("BUG: removeSnapshot increases namespace usage.", e);
}
+ // remove from snapshotsByNames after successfully cleaning the subtree
+ snapshotsByNames.remove(i);
return snapshot;
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Thu Jun 6 20:15:07 2013
@@ -23,8 +23,10 @@ import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
@@ -658,7 +660,7 @@ public class INodeDirectoryWithSnapshot
if (added && !removeDeletedChild) {
final Quota.Counts counts = deletedChild.computeQuotaUsage();
addSpaceConsumed(counts.get(Quota.NAMESPACE),
- counts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID);
+ counts.get(Quota.DISKSPACE), false);
}
}
@@ -692,9 +694,12 @@ public class INodeDirectoryWithSnapshot
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
- final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance();
+ Map<INode, INode> priorCreated = null;
+ Map<INode, INode> priorDeleted = null;
if (snapshot == null) { // delete the current directory
recordModification(prior, null);
// delete everything in created list
@@ -706,8 +711,28 @@ public class INodeDirectoryWithSnapshot
} else {
// update prior
prior = getDiffs().updatePrior(snapshot, prior);
+ // if there is a snapshot diff associated with prior, we need to record
+ // its original created and deleted list before deleting post
+ if (prior != null) {
+ DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
+ if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+ List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
+ List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
+ priorCreated = new HashMap<INode, INode>(cList.size());
+ for (INode cNode : cList) {
+ priorCreated.put(cNode, cNode);
+ }
+ priorDeleted = new HashMap<INode, INode>(dList.size());
+ for (INode dNode : dList) {
+ priorDeleted.put(dNode, dNode);
+ }
+ }
+ }
+
counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, this,
- collectedBlocks, removedINodes));
+ collectedBlocks, removedINodes, countDiffChange));
+
+ // check priorDiff again since it may be created during the diff deletion
if (prior != null) {
DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
@@ -716,11 +741,17 @@ public class INodeDirectoryWithSnapshot
// use null as prior in the cleanSubtree call. Files/directories that
// were created before "prior" will be covered by the later
// cleanSubtreeRecursively call.
- for (INode cNode : priorDiff.getChildrenDiff().getList(
- ListType.CREATED)) {
- counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks,
- removedINodes));
+ if (priorCreated != null) {
+ // we only check the node originally in prior's created list
+ for (INode cNode : priorDiff.getChildrenDiff().getList(
+ ListType.CREATED)) {
+ if (priorCreated.containsKey(cNode)) {
+ counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks,
+ removedINodes, countDiffChange));
+ }
+ }
}
+
// When a directory is moved from the deleted list of the posterior
// diff to the deleted list of this diff, we need to destroy its
// descendants that were 1) created after taking this diff and 2)
@@ -728,16 +759,19 @@ public class INodeDirectoryWithSnapshot
// For files moved from posterior's deleted list, we also need to
// delete its snapshot copy associated with the posterior snapshot.
+
for (INode dNode : priorDiff.getChildrenDiff().getList(
ListType.DELETED)) {
- counts.add(cleanDeletedINode(dNode, snapshot, prior,
- collectedBlocks, removedINodes));
+ if (priorDeleted == null || !priorDeleted.containsKey(dNode)) {
+ counts.add(cleanDeletedINode(dNode, snapshot, prior,
+ collectedBlocks, removedINodes, countDiffChange));
+ }
}
}
}
}
counts.add(cleanSubtreeRecursively(snapshot, prior, collectedBlocks,
- removedINodes));
+ removedINodes, priorDeleted, countDiffChange));
if (isQuotaSet()) {
this.addSpaceConsumed2Cache(-counts.get(Quota.NAMESPACE),
@@ -755,9 +789,11 @@ public class INodeDirectoryWithSnapshot
* @param collectedBlocks Used to collect blocks for later deletion.
* @return Quota usage update.
*/
- private static Quota.Counts cleanDeletedINode(INode inode, final Snapshot post,
- final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
- final List<INode> removedINodes) throws QuotaExceededException {
+ private static Quota.Counts cleanDeletedINode(INode inode,
+ final Snapshot post, final Snapshot prior,
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, final boolean countDiffChange)
+ throws QuotaExceededException {
Quota.Counts counts = Quota.Counts.newInstance();
Deque<INode> queue = new ArrayDeque<INode>();
queue.addLast(inode);
@@ -766,7 +802,8 @@ public class INodeDirectoryWithSnapshot
if (topNode instanceof INodeReference.WithName) {
INodeReference.WithName wn = (INodeReference.WithName) topNode;
if (wn.getLastSnapshotId() >= post.getId()) {
- wn.cleanSubtree(post, prior, collectedBlocks, removedINodes);
+ wn.cleanSubtree(post, prior, collectedBlocks, removedINodes,
+ countDiffChange);
}
// For DstReference node, since the node is not in the created list of
// prior, we should treat it as regular file/dir
@@ -774,20 +811,28 @@ public class INodeDirectoryWithSnapshot
&& topNode.asFile() instanceof FileWithSnapshot) {
FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile();
counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior,
- topNode.asFile(), collectedBlocks, removedINodes));
+ topNode.asFile(), collectedBlocks, removedINodes, countDiffChange));
} else if (topNode.isDirectory()) {
INodeDirectory dir = topNode.asDirectory();
+ ChildrenDiff priorChildrenDiff = null;
if (dir instanceof INodeDirectoryWithSnapshot) {
// delete files/dirs created after prior. Note that these
// files/dirs, along with inode, were deleted right after post.
INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir;
DirectoryDiff priorDiff = sdir.getDiffs().getDiff(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
- counts.add(priorDiff.diff.destroyCreatedList(sdir,
+ priorChildrenDiff = priorDiff.getChildrenDiff();
+ counts.add(priorChildrenDiff.destroyCreatedList(sdir,
collectedBlocks, removedINodes));
}
}
+
for (INode child : dir.getChildrenList(prior)) {
+ if (priorChildrenDiff != null
+ && priorChildrenDiff.search(ListType.DELETED,
+ child.getLocalNameBytes()) != null) {
+ continue;
+ }
queue.addLast(child);
}
}
@@ -864,29 +909,39 @@ public class INodeDirectoryWithSnapshot
if (inode instanceof INodeReference.WithName && snapshot != null) {
// this inode has been renamed before the deletion of the DstReference
// subtree
- inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes);
+ inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes,
+ true);
} else {
// for DstReference node, continue this process to its subtree
destroyDstSubtree(inode.asReference().getReferredINode(), snapshot,
prior, collectedBlocks, removedINodes);
}
} else if (inode.isFile() && snapshot != null) {
- inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes);
+ inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true);
} else if (inode.isDirectory()) {
+ Map<INode, INode> excludedNodes = null;
if (inode instanceof INodeDirectoryWithSnapshot) {
INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) inode;
DirectoryDiffList diffList = sdir.getDiffs();
if (snapshot != null) {
diffList.deleteSnapshotDiff(snapshot, prior, sdir, collectedBlocks,
- removedINodes);
+ removedINodes, true);
}
DirectoryDiff priorDiff = diffList.getDiff(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
priorDiff.diff.destroyCreatedList(sdir, collectedBlocks,
removedINodes);
+ List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
+ excludedNodes = new HashMap<INode, INode>(dList.size());
+ for (INode dNode : dList) {
+ excludedNodes.put(dNode, dNode);
+ }
}
}
for (INode child : inode.asDirectory().getChildrenList(prior)) {
+ if (excludedNodes != null && excludedNodes.containsKey(child)) {
+ continue;
+ }
destroyDstSubtree(child, snapshot, prior, collectedBlocks,
removedINodes);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java Thu Jun 6 20:15:07 2013
@@ -104,7 +104,8 @@ public class INodeFileUnderConstructionW
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
- final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
if (snapshot == null) { // delete the current file
recordModification(prior, null);
@@ -114,7 +115,7 @@ public class INodeFileUnderConstructionW
} else { // delete a snapshot
prior = getDiffs().updatePrior(snapshot, prior);
return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
- removedINodes);
+ removedINodes, countDiffChange);
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java Thu Jun 6 20:15:07 2013
@@ -91,7 +91,8 @@ public class INodeFileWithSnapshot exten
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
- final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
if (snapshot == null) { // delete the current file
recordModification(prior, null);
@@ -101,7 +102,7 @@ public class INodeFileWithSnapshot exten
} else { // delete a snapshot
prior = getDiffs().updatePrior(snapshot, prior);
return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
- removedINodes);
+ removedINodes, countDiffChange);
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1490421&r1=1490420&r2=1490421&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Thu Jun 6 20:15:07 2013
@@ -454,7 +454,7 @@ public class TestRenameWithSnapshots {
// delete snapshot s5. The diff of s5 should be combined to s4
hdfs.deleteSnapshot(sdir1, "s5");
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(bar2_s5));
final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
"foo/bar2");
@@ -492,7 +492,7 @@ public class TestRenameWithSnapshots {
assertFalse(hdfs.exists(bar3_s2));
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// delete snapshot s2.
hdfs.deleteSnapshot(sdir2, "s2");
@@ -500,14 +500,15 @@ public class TestRenameWithSnapshots {
assertFalse(hdfs.exists(bar2_s2));
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1, "s3");
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1, "s1");
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
}
- private void restartClusterAndCheckImage() throws IOException {
+ private void restartClusterAndCheckImage(boolean compareQuota)
+ throws IOException {
File fsnBefore = new File(testDir, "dumptree_before");
File fsnMiddle = new File(testDir, "dumptree_middle");
File fsnAfter = new File(testDir, "dumptree_after");
@@ -538,8 +539,10 @@ public class TestRenameWithSnapshots {
// dump the namespace loaded from fsimage
SnapshotTestHelper.dumpTree2File(fsdir, fsnAfter);
- SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle, true);
- SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
+ SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle,
+ compareQuota);
+ SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter,
+ compareQuota);
}
/**
@@ -579,7 +582,7 @@ public class TestRenameWithSnapshots {
// delete snapshot s5.
hdfs.deleteSnapshot(sdir1, "s5");
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(foo_s5));
status = hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1, status.getReplication());
@@ -604,18 +607,18 @@ public class TestRenameWithSnapshots {
.getLocalName());
// restart cluster
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// delete snapshot s2.
hdfs.deleteSnapshot(sdir2, "s2");
assertFalse(hdfs.exists(foo_s2));
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1, "s3");
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1, "s1");
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
}
/**
@@ -650,7 +653,7 @@ public class TestRenameWithSnapshots {
hdfs.rename(bar2_dir1, bar2_dir2);
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// modification on /dir2/foo and /dir2/bar
final Path bar1_dir2 = new Path(foo_dir2, "bar1");
@@ -686,7 +689,7 @@ public class TestRenameWithSnapshots {
hdfs.rename(bar2_dir2, bar2_dir3);
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// modification on /dir3/foo and /dir3/bar
final Path bar1_dir3 = new Path(foo_dir3, "bar1");
@@ -718,7 +721,7 @@ public class TestRenameWithSnapshots {
hdfs.rename(bar2_dir3, bar2_dir2);
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// modification on /dir2/foo
hdfs.setReplication(bar1_dir2, REPL);
@@ -773,14 +776,14 @@ public class TestRenameWithSnapshots {
.getLocalName());
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// delete foo
hdfs.delete(foo_dir1, true);
hdfs.delete(bar2_dir1, true);
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// check
assertTrue(hdfs.exists(bar1_s1));
@@ -844,7 +847,7 @@ public class TestRenameWithSnapshots {
hdfs.setReplication(bar_dir2, REPL_1);
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// create snapshots
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s11");
@@ -863,7 +866,7 @@ public class TestRenameWithSnapshots {
hdfs.setReplication(bar_dir3, REPL_2);
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// create snapshots
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s111");
@@ -917,7 +920,7 @@ public class TestRenameWithSnapshots {
hdfs.setReplication(bar_dir2, REPL);
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// create snapshots
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1111");
@@ -999,14 +1002,14 @@ public class TestRenameWithSnapshots {
assertEquals("s1", barDiffs.get(0).snapshot.getRoot().getLocalName());
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// delete foo
hdfs.delete(foo_dir1, true);
hdfs.delete(bar_dir1, true);
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// check
final Path bar1_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111",
@@ -1127,7 +1130,7 @@ public class TestRenameWithSnapshots {
hdfs.rename(foo, newfoo);
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
final Path bar2 = new Path(newfoo, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
@@ -1145,7 +1148,7 @@ public class TestRenameWithSnapshots {
// delete snapshot s4. The diff of s4 should be combined to s3
hdfs.deleteSnapshot(sdir1, "s4");
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
assertFalse(hdfs.exists(bar_s3));
@@ -1175,18 +1178,18 @@ public class TestRenameWithSnapshots {
assertEquals("s2", diffs.get(0).snapshot.getRoot().getLocalName());
// restart the cluster and check fsimage
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// delete snapshot s2.
hdfs.deleteSnapshot(sdir2, "s2");
assertFalse(hdfs.exists(bar_s2));
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
// make sure the whole referred subtree has been destroyed
assertEquals(4, fsdir.getRoot().getNamespace());
assertEquals(0, fsdir.getRoot().getDiskspace());
hdfs.deleteSnapshot(sdir1, "s1");
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
assertEquals(3, fsdir.getRoot().getNamespace());
assertEquals(0, fsdir.getRoot().getDiskspace());
}
@@ -1232,7 +1235,7 @@ public class TestRenameWithSnapshots {
INode fooNode = fooRef.asFile();
assertTrue(fooNode instanceof INodeFileWithSnapshot);
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
}
/**
@@ -1724,7 +1727,7 @@ public class TestRenameWithSnapshots {
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPL).build();
cluster.waitActive();
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
}
/**
@@ -1784,7 +1787,108 @@ public class TestRenameWithSnapshots {
final Path foo2 = new Path(bar2, "foo");
hdfs.rename(foo, foo2);
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
+
+ // delete snap1
+ hdfs.deleteSnapshot(sdir1, snap1);
+
+ restartClusterAndCheckImage(true);
+ }
+
+ /**
+ * move a directory to its prior descendant
+ */
+ @Test
+ public void testRename2PreDescendant_2() throws Exception {
+ final Path root = new Path("/");
+ final Path sdir1 = new Path("/dir1");
+ final Path sdir2 = new Path("/dir2");
+ final Path foo = new Path(sdir1, "foo");
+ final Path bar = new Path(foo, "bar");
+ final Path file1InBar = new Path(bar, "file1");
+ final Path file2InBar = new Path(bar, "file2");
+ hdfs.mkdirs(bar);
+ hdfs.mkdirs(sdir2);
+ DFSTestUtil.createFile(hdfs, file1InBar, BLOCKSIZE, REPL, SEED);
+ DFSTestUtil.createFile(hdfs, file2InBar, BLOCKSIZE, REPL, SEED);
+
+ hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+ hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+ hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+ hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+
+ // create snapshot on root
+ SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
+ // delete file1InBar
+ hdfs.delete(file1InBar, true);
+
+ // create another snapshot on root
+ SnapshotTestHelper.createSnapshot(hdfs, root, snap2);
+ // delete file2InBar
+ hdfs.delete(file2InBar, true);
+
+ // /dir1/foo/bar -> /dir2/bar2
+ final Path bar2 = new Path(sdir2, "bar2");
+ hdfs.rename(bar, bar2);
+
+ // /dir1/foo -> /dir2/bar2/foo2
+ final Path foo2 = new Path(bar2, "foo2");
+ hdfs.rename(foo, foo2);
+
+ restartClusterAndCheckImage(true);
+
+ // delete snapshot snap2
+ hdfs.deleteSnapshot(root, snap2);
+
+ // after deleting snap2, the WithName node "bar", which originally was
+ // stored in the deleted list of "foo" for snap2, is moved to its deleted
+ // list for snap1. In that case, it will not be counted when calculating
+ // quota for "foo". However, we do not update this quota usage change while
+ // deleting snap2.
+ restartClusterAndCheckImage(false);
+ }
+
+ /**
+ * move a directory to its prior descendant
+ */
+ @Test
+ public void testRename2PreDescendant_3() throws Exception {
+ final Path root = new Path("/");
+ final Path sdir1 = new Path("/dir1");
+ final Path sdir2 = new Path("/dir2");
+ final Path foo = new Path(sdir1, "foo");
+ final Path bar = new Path(foo, "bar");
+ final Path fileInBar = new Path(bar, "file");
+ hdfs.mkdirs(bar);
+ hdfs.mkdirs(sdir2);
+ DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED);
+
+ hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+ hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+ hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+ hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+
+ // create snapshot on root
+ SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
+ // delete fileInBar
+ hdfs.delete(fileInBar, true);
+ // create another snapshot on root
+ SnapshotTestHelper.createSnapshot(hdfs, root, snap2);
+
+ // /dir1/foo/bar -> /dir2/bar2
+ final Path bar2 = new Path(sdir2, "bar2");
+ hdfs.rename(bar, bar2);
+
+ // /dir1/foo -> /dir2/bar2/foo2
+ final Path foo2 = new Path(bar2, "foo2");
+ hdfs.rename(foo, foo2);
+
+ restartClusterAndCheckImage(true);
+
+ // delete snapshot snap1
+ hdfs.deleteSnapshot(root, snap1);
+
+ restartClusterAndCheckImage(true);
}
/**
@@ -1851,7 +1955,7 @@ public class TestRenameWithSnapshots {
assertEquals(0, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
}
/**
@@ -1928,7 +2032,7 @@ public class TestRenameWithSnapshots {
assertSame(wc, wc2);
assertSame(fooRef2, wc.getParentReference());
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
}
/**
@@ -2041,7 +2145,7 @@ public class TestRenameWithSnapshots {
.isEmpty());
assertTrue(barNode.getChildrenList(null).isEmpty());
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
}
/**
@@ -2137,6 +2241,6 @@ public class TestRenameWithSnapshots {
assertSame(fooNode.asReference().getReferredINode(),
fooNode_s2.getReferredINode());
- restartClusterAndCheckImage();
+ restartClusterAndCheckImage(true);
}
}