Posted to common-commits@hadoop.apache.org by ha...@apache.org on 2017/05/31 22:42:48 UTC

[13/50] [abbrv] hadoop git commit: Revert "HDFS-10797. Disk usage summary of snapshots causes renamed blocks to get counted twice. Contributed by Sean Mackrory."

Revert "HDFS-10797. Disk usage summary of snapshots causes renamed blocks to get counted twice. Contributed by Sean Mackrory."

This reverts commit 6a38d118d86b7907009bcec34f1b788d076f1d1c.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8b69d79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8b69d79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8b69d79

Branch: refs/heads/YARN-1011
Commit: b8b69d797aed8dfeb65ea462c2856f62e9aa1023
Parents: 2cba561
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Wed May 24 17:21:22 2017 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Wed May 24 17:21:22 2017 -0700

----------------------------------------------------------------------
 .../ContentSummaryComputationContext.java       |  94 +--------
 .../hadoop/hdfs/server/namenode/INode.java      |   1 -
 .../hdfs/server/namenode/INodeDirectory.java    |  11 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 -
 .../hdfs/server/namenode/INodeReference.java    |   2 -
 .../hdfs/server/namenode/INodeSymlink.java      |   1 -
 .../snapshot/DirectorySnapshottableFeature.java |   9 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |  14 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   1 -
 .../snapshot/TestRenameWithSnapshots.java       | 199 -------------------
 10 files changed, 26 insertions(+), 307 deletions(-)
----------------------------------------------------------------------
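
For context on what is being reverted: HDFS-10797 addressed du/getContentSummary
counting a renamed file's blocks twice, once from the live tree and once via the
snapshot diff's DELETED entry. A minimal sketch of that scenario, not part of
this commit; the class name and the /demo path are hypothetical, and it assumes
fs.defaultFS points at a cluster where /demo exists and is snapshottable:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RenameDoubleCountDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/demo");

        // Snapshot the directory, then rename a file inside it. The rename
        // shows up in the snapshot diff as DELETED (old name) plus CREATED
        // (new name), even though the underlying blocks never changed.
        fs.createSnapshot(dir, "s0");
        fs.rename(new Path(dir, "original"), new Path(dir, "renamed"));

        // With the HDFS-10797 patch in place the renamed blocks are counted
        // once; with it reverted they can be tallied from both the live
        // tree and the snapshot diff.
        ContentSummary cs = fs.getContentSummary(dir);
        System.out.println("spaceConsumed = " + cs.getSpaceConsumed());
      }
    }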


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 53fa552..b35270d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -21,10 +21,6 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-
-import java.util.HashSet;
-import java.util.Set;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -39,8 +35,6 @@ public class ContentSummaryComputationContext {
   private long yieldCount = 0;
   private long sleepMilliSec = 0;
   private int sleepNanoSec = 0;
-  private Set<INode> includedNodes = new HashSet<>();
-  private Set<INode> deletedSnapshottedNodes = new HashSet<>();
 
   /**
    * Constructor
@@ -57,8 +51,8 @@ public class ContentSummaryComputationContext {
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
     this.nextCountLimit = limitPerRun;
-    setCounts(new ContentCounts.Builder().build());
-    setSnapshotCounts(new ContentCounts.Builder().build());
+    this.counts = new ContentCounts.Builder().build();
+    this.snapshotCounts = new ContentCounts.Builder().build();
     this.sleepMilliSec = sleepMicroSec/1000;
     this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
   }
@@ -88,7 +82,6 @@ public class ContentSummaryComputationContext {
     }
 
     // Have we reached the limit?
-    ContentCounts counts = getCounts();
     long currentCount = counts.getFileCount() +
         counts.getSymlinkCount() +
         counts.getDirectoryCount() +
@@ -130,22 +123,14 @@ public class ContentSummaryComputationContext {
   }
 
   /** Get the content counts */
-  public synchronized ContentCounts getCounts() {
+  public ContentCounts getCounts() {
     return counts;
   }
 
-  private synchronized void setCounts(ContentCounts counts) {
-    this.counts = counts;
-  }
-
   public ContentCounts getSnapshotCounts() {
     return snapshotCounts;
   }
 
-  private void setSnapshotCounts(ContentCounts snapshotCounts) {
-    this.snapshotCounts = snapshotCounts;
-  }
-
   public BlockStoragePolicySuite getBlockStoragePolicySuite() {
     Preconditions.checkState((bsps != null || fsn != null),
         "BlockStoragePolicySuite must be either initialized or available via" +
@@ -153,77 +138,4 @@ public class ContentSummaryComputationContext {
     return (bsps != null) ? bsps:
         fsn.getBlockManager().getStoragePolicySuite();
   }
-
-  /**
-   * If the node is an INodeReference, resolves it to the actual inode.
-   * Snapshot diffs represent renamed / moved files as different
-   * INodeReferences, but the underlying INode they refer to is the same.
-   *
-   * @param node
-   * @return The referred INode if there is one, else returns the input
-   * unmodified.
-   */
-  private INode resolveINodeReference(INode node) {
-    if (node.isReference() && node instanceof INodeReference) {
-      return ((INodeReference)node).getReferredINode();
-    }
-    return node;
-  }
-
-  /**
-   * Reports that a node is about to be included in this summary. Can be used
-   * either to simply report that a node has been included, or check whether
-   * a node has already been included.
-   *
-   * @param node
-   * @return true if node has already been included
-   */
-  public boolean nodeIncluded(INode node) {
-    INode resolvedNode = resolveINodeReference(node);
-    synchronized (includedNodes) {
-      if (!includedNodes.contains(resolvedNode)) {
-        includedNodes.add(resolvedNode);
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Schedules a node that is listed as DELETED in a snapshot's diff to be
-   * included in the summary at the end of computation. See
-   * {@link #tallyDeletedSnapshottedINodes()} for more context.
-   *
-   * @param node
-   */
-  public void reportDeletedSnapshottedNode(INode node) {
-    deletedSnapshottedNodes.add(node);
-  }
-
-  /**
-   * Finalizes the computation by including all nodes that were reported as
-   * deleted by a snapshot but have not already been included due to other
-   * references.
-   * <p>
-   * Nodes that get renamed are listed in the snapshot's diff as both DELETED
-   * under the old name and CREATED under the new name. The computation
-   * relies on nodes to report themselves as being included (via
-   * {@link #nodeIncluded(INode)}) as the only reliable way to determine which
-   * nodes were renamed within the tree being summarized and which were
-   * removed (either by deletion or being renamed outside of the tree).
-   */
-  public synchronized void tallyDeletedSnapshottedINodes() {
-    /* Temporarily create a new counts object so these results can then be
-    added to both counts and snapshotCounts */
-    ContentCounts originalCounts = getCounts();
-    setCounts(new ContentCounts.Builder().build());
-    for (INode node : deletedSnapshottedNodes) {
-      if (!nodeIncluded(node)) {
-        node.computeContentSummary(Snapshot.CURRENT_STATE_ID, this);
-      }
-    }
-    originalCounts.addContents(getCounts());
-    snapshotCounts.addContents(getCounts());
-    setCounts(originalCounts);
-  }
 }
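
The machinery removed above deduplicated by resolving each INodeReference to its
referred INode before the membership check, so the multiple references a rename
creates collapse to one entry. A simplified, self-contained sketch of that
pattern, using stand-in Node/Ref types rather than the real HDFS classes:

    import java.util.HashSet;
    import java.util.Set;

    class DedupContext {
      interface Node { }

      /** Stand-in for INodeReference: a wrapper pointing at a real node. */
      static final class Ref implements Node {
        final Node referred;
        Ref(Node referred) { this.referred = referred; }
      }

      private final Set<Node> included = new HashSet<>();

      /** Collapse a reference to its target, as resolveINodeReference did. */
      private Node resolve(Node n) {
        return (n instanceof Ref) ? ((Ref) n).referred : n;
      }

      /** Returns true if the node was already counted; records it otherwise. */
      synchronized boolean nodeIncluded(Node n) {
        return !included.add(resolve(n)); // Set.add is false when present
      }
    }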

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index e1db990..c6258a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -429,7 +429,6 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public final ContentSummary computeAndConvertContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
     computeContentSummary(snapshotId, summary);
-    summary.tallyDeletedSnapshottedINodes();
     final ContentCounts counts = summary.getCounts();
     final ContentCounts snapshotCounts = summary.getSnapshotCounts();
     final QuotaCounts q = getQuotaCounts();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index a4098bc..a29a118 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -630,10 +630,17 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
-    summary.nodeIncluded(this);
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
-      sf.computeContentSummary4Snapshot(summary);
+      final ContentCounts counts = new ContentCounts.Builder().build();
+      // if the getContentSummary call is against a non-snapshot path, the
+      // computation should include all the deleted files/directories
+      sf.computeContentSummary4Snapshot(summary.getBlockStoragePolicySuite(),
+          counts);
+      summary.getCounts().addContents(counts);
+      // Also add ContentSummary to snapshotCounts (So we can extract it
+      // later from the ContentSummary of all).
+      summary.getSnapshotCounts().addContents(counts);
     }
     final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
     if (q != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
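
The hunk above restores the pre-HDFS-10797 directory path: deleted snapshot
items are summed into a scratch ContentCounts and folded into both the overall
and the snapshot-only tallies. A toy schematic of that accumulate-then-merge
shape; Counts and Summary here are illustrative stand-ins, not Hadoop API:

    class Counts {
      long blocks;
      void addContents(Counts other) { blocks += other.blocks; }
    }

    class Summary {
      final Counts counts = new Counts();          // everything under the path
      final Counts snapshotCounts = new Counts();  // snapshot-only portion

      void includeDeletedSubtree(Counts deleted) {
        counts.addContents(deleted);          // total includes deleted items
        snapshotCounts.addContents(deleted);  // kept separately so callers
                                              // can report the snapshot share
      }
    }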

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 9cdac06..fcdb064 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -840,7 +840,6 @@ public class INodeFile extends INodeWithAdditionalFields
   @Override
   public final ContentSummaryComputationContext computeContentSummary(
       int snapshotId, final ContentSummaryComputationContext summary) {
-    summary.nodeIncluded(this);
     final ContentCounts counts = summary.getCounts();
     counts.addContent(Content.FILE, 1);
     final long fileLen = computeFileSize(snapshotId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 56aaf8c..1b85237 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -315,7 +315,6 @@ public abstract class INodeReference extends INode {
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
-    summary.nodeIncluded(this);
     return referred.computeContentSummary(snapshotId, summary);
   }
 
@@ -505,7 +504,6 @@ public abstract class INodeReference extends INode {
     @Override
     public final ContentSummaryComputationContext computeContentSummary(
         int snapshotId, ContentSummaryComputationContext summary) {
-      summary.nodeIncluded(this);
       final int s = snapshotId < lastSnapshotId ? snapshotId : lastSnapshotId;
       // only count storagespace for WithName
       final QuotaCounts q = computeQuotaUsage(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index 1223f4e..c76bea0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -96,7 +96,6 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       final ContentSummaryComputationContext summary) {
-    summary.nodeIncluded(this);
     summary.getCounts().addContent(Content.SYMLINK, 1);
     return summary;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 3039ad3..fbfc278 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -30,9 +30,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.Content;
 import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
-import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode;
@@ -233,12 +233,11 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
   }
 
   @Override
-  public void computeContentSummary4Snapshot(ContentSummaryComputationContext
-                                                   context) {
-    ContentCounts counts = context.getCounts();
+  public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
+      final ContentCounts counts) {
     counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
     counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
-    super.computeContentSummary4Snapshot(context);
+    super.computeContentSummary4Snapshot(bsps, counts);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 9addbfa..0111b3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.AclStorage;
+import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -628,13 +629,18 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
     return counts;
   }
 
-  public void computeContentSummary4Snapshot(
-      ContentSummaryComputationContext context) {
+  public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
+      final ContentCounts counts) {
+    // Create a new blank summary context for blocking processing of subtree.
+    ContentSummaryComputationContext summary = 
+        new ContentSummaryComputationContext(bsps);
     for(DirectoryDiff d : diffs) {
-      for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
-        context.reportDeletedSnapshottedNode(deletedNode);
+      for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
+        deleted.computeContentSummary(Snapshot.CURRENT_STATE_ID, summary);
       }
     }
+    // Add the counts from deleted trees.
+    counts.addContents(summary.getCounts());
   }
   
   /**
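
Also restored above: each DirectoryDiff's DELETED children are summarized into a
blank, call-local context, with no dedup across diffs or directories. That is
exactly the duplication HDFS-10797 had removed. A schematic of the loop with
simplified stand-in types (not the real INode API):

    import java.util.List;

    class DeletedDiffTally {
      interface Node { long blocks(); }

      static long tally(List<List<Node>> deletedPerDiff) {
        long sum = 0;
        // A node appearing in the DELETED list of two diffs contributes
        // twice; nothing here remembers what was already counted.
        for (List<Node> deleted : deletedPerDiff) {
          for (Node n : deleted) {
            sum += n.blocks();
          }
        }
        return sum;
      }
    }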

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index 832a339..e98e766 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -177,7 +177,6 @@ public class Snapshot implements Comparable<byte[]> {
     @Override
     public ContentSummaryComputationContext computeContentSummary(
         int snapshotId, ContentSummaryComputationContext summary) {
-      summary.nodeIncluded(this);
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index d1b3aa6..91eec78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -36,10 +36,8 @@ import java.util.Random;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -2413,201 +2411,4 @@ public class TestRenameWithSnapshots {
     assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", "newDir/file2"));
     assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", "newDir/file1"));
   }
-
-  private void checkSpaceConsumed(String message, Path directory,
-                                  long expectedSpace) throws Exception {
-    ContentSummary summary = hdfs.getContentSummary(directory);
-    assertEquals(message, expectedSpace, summary.getSpaceConsumed());
-  }
-
-  /**
-   * Runs through various combinations of renames, deletes, appends and other
-   * operations in a snapshotted directory and ensures disk usage summaries
-   * (e.g. du -s) are computed correctly.
-   *
-   * @throws Exception
-   */
-  @Test (timeout=300000)
-  public void testDu() throws Exception {
-    File tempFile = File.createTempFile("testDu-", ".tmp");
-    tempFile.deleteOnExit();
-
-    final FileSystem localfs = FileSystem.getLocal(conf);
-    final Path localOriginal = new Path(tempFile.getPath());
-    final Path dfsRoot = new Path("/testDu");
-    final Path dfsOriginal = new Path(dfsRoot, "original");
-    final Path dfsRenamed1 = new Path(dfsRoot, "renamed1");
-    final Path dfsRenamed2 = new Path(dfsRoot, "renamed2");
-    final Path dfsAppended = new Path(dfsRoot, "appended");
-
-    /* We will test with a single block worth of data. If we don't at least use
-    a multiple of BLOCKSIZE, append operations will modify snapshotted blocks
-    and other factors will come into play here that we'll have to account for */
-    final long spaceIncrement = BLOCKSIZE * REPL;
-    final byte[] appendData = new byte[(int) BLOCKSIZE];
-    DFSTestUtil.createFile(localfs, localOriginal, BLOCKSIZE, REPL, SEED);
-
-    FSDataOutputStream out = null;
-    long expectedSpace = 0;
-
-    hdfs.mkdirs(dfsRoot);
-    checkSpaceConsumed("Du is wrong immediately",
-        dfsRoot, 0L);
-
-    hdfs.copyFromLocalFile(localOriginal, dfsOriginal);
-    expectedSpace += spaceIncrement;
-    checkSpaceConsumed("Du is wrong after creating / copying file",
-        dfsRoot, expectedSpace);
-
-    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s0");
-    checkSpaceConsumed("Du is wrong after snapshotting",
-        dfsRoot, expectedSpace);
-
-    hdfs.rename(dfsOriginal, dfsRenamed1);
-    checkSpaceConsumed("Du is wrong after 1 rename",
-        dfsRoot, expectedSpace);
-
-    hdfs.rename(dfsRenamed1, dfsRenamed2);
-    checkSpaceConsumed("Du is wrong after 2 renames",
-        dfsRoot, expectedSpace);
-
-    hdfs.delete(dfsRenamed2, false);
-    checkSpaceConsumed("Du is wrong after deletion",
-        dfsRoot, expectedSpace);
-
-    hdfs.copyFromLocalFile(localOriginal, dfsOriginal);
-    expectedSpace += spaceIncrement;
-    checkSpaceConsumed("Du is wrong after replacing a renamed file",
-        dfsRoot, expectedSpace);
-
-    hdfs.copyFromLocalFile(localOriginal, dfsAppended);
-    expectedSpace += spaceIncrement;
-    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s1");
-
-    out = hdfs.append(dfsAppended);
-    out.write(appendData);
-    out.close();
-    expectedSpace += spaceIncrement;
-    checkSpaceConsumed("Du is wrong after 1 snapshot + append",
-        dfsRoot, expectedSpace);
-
-    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s2");
-    out = hdfs.append(dfsAppended);
-    out.write(appendData);
-    out.close();
-    expectedSpace += spaceIncrement;
-    checkSpaceConsumed("Du is wrong after 2 snapshot + appends",
-        dfsRoot, expectedSpace);
-
-    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s3");
-    out = hdfs.append(dfsAppended);
-    out.write(appendData);
-    out.close();
-    expectedSpace += spaceIncrement;
-    hdfs.rename(dfsAppended, dfsRenamed1);
-    checkSpaceConsumed("Du is wrong after snapshot, append, & rename",
-        dfsRoot, expectedSpace);
-    hdfs.delete(dfsRenamed1, false);
-    // everything but the last append is snapshotted
-    expectedSpace -= spaceIncrement;
-    checkSpaceConsumed("Du is wrong after snapshot, append, delete & rename",
-        dfsRoot, expectedSpace);
-
-    hdfs.delete(dfsOriginal, false);
-    hdfs.deleteSnapshot(dfsRoot, "s0");
-    hdfs.deleteSnapshot(dfsRoot, "s1");
-    hdfs.deleteSnapshot(dfsRoot, "s2");
-    hdfs.deleteSnapshot(dfsRoot, "s3");
-    expectedSpace = 0;
-    checkSpaceConsumed("Du is wrong after deleting all files and snapshots",
-        dfsRoot, expectedSpace);
-  }
-
-  /**
-   * Runs through various combinations of renames, deletes, appends and other
-   * operations between two snapshotted directories and ensures disk usage
-   * summaries (e.g. du -s) are computed correctly.
-   *
-   * This test currently assumes some incorrect behavior when files have been
-   * moved between subdirectories of the one being queried. In the cases
-   * below, only 1 block worth of data should ever actually be used. However
-   * if there are 2 - 3 subdirectories that contain, or contained when
-   * snapshotted, the same file, that file will be counted 2-3 times,
-   * respectively, since each directory is computed independently recursively.
-   *
-   * @throws Exception
-   */
-  @Test (timeout=300000)
-  public void testDuMultipleDirs() throws Exception {
-    File tempFile = File.createTempFile("testDuMultipleDirs-", "" + ".tmp");
-    tempFile.deleteOnExit();
-
-    final FileSystem localfs = FileSystem.getLocal(conf);
-    final Path localOriginal = new Path(tempFile.getPath());
-    final Path dfsRoot = new Path("/testDuMultipleDirs");
-    final Path snapshottable1 = new Path(dfsRoot, "snapshottable1");
-    final Path snapshottable2 = new Path(dfsRoot, "snapshottable2");
-    final Path nonsnapshottable = new Path(dfsRoot, "nonsnapshottable");
-    final Path subdirectory = new Path(snapshottable1, "subdirectory");
-    final Path dfsOriginal = new Path(snapshottable1, "file");
-    final Path renamedNonsnapshottable = new Path(nonsnapshottable, "file");
-    final Path renamedSnapshottable = new Path(snapshottable2, "file");
-    final Path renamedSubdirectory = new Path(subdirectory, "file");
-
-    /* We will test with a single block worth of data. If we don't at least use
-    a multiple of BLOCKSIZE, append operations will modify snapshotted blocks
-    and other factors will come into play here that we'll have to account for */
-    final long spaceConsumed = BLOCKSIZE * REPL;
-    DFSTestUtil.createFile(localfs, localOriginal, BLOCKSIZE, REPL, SEED);
-
-    hdfs.mkdirs(snapshottable1);
-    hdfs.mkdirs(snapshottable2);
-    hdfs.mkdirs(nonsnapshottable);
-    hdfs.mkdirs(subdirectory);
-    checkSpaceConsumed("Du is wrong immediately",
-        dfsRoot, 0L);
-
-    hdfs.copyFromLocalFile(localOriginal, dfsOriginal);
-    checkSpaceConsumed("Du is wrong after creating / copying file",
-        snapshottable1, spaceConsumed);
-
-    SnapshotTestHelper.createSnapshot(hdfs, snapshottable1, "s1");
-    checkSpaceConsumed("Du is wrong in original dir after 1st snapshot",
-        snapshottable1, spaceConsumed);
-
-    hdfs.rename(dfsOriginal, renamedNonsnapshottable);
-    checkSpaceConsumed("Du is wrong in original dir after 1st rename",
-        snapshottable1, spaceConsumed);
-    checkSpaceConsumed("Du is wrong in non-snapshottable dir after 1st rename",
-        nonsnapshottable, spaceConsumed);
-    checkSpaceConsumed("Du is wrong in root dir after 1st rename",
-        dfsRoot, spaceConsumed);
-
-    hdfs.rename(renamedNonsnapshottable, renamedSnapshottable);
-    checkSpaceConsumed("Du is wrong in original dir after 2nd rename",
-        snapshottable1, spaceConsumed);
-    checkSpaceConsumed("Du is wrong in non-snapshottable dir after 2nd rename",
-        nonsnapshottable, 0);
-    checkSpaceConsumed("Du is wrong in snapshottable dir after 2nd rename",
-        snapshottable2, spaceConsumed);
-    checkSpaceConsumed("Du is wrong in root dir after 2nd rename",
-        dfsRoot, spaceConsumed);
-
-    SnapshotTestHelper.createSnapshot(hdfs, snapshottable2, "s2");
-    hdfs.rename(renamedSnapshottable, renamedSubdirectory);
-    checkSpaceConsumed("Du is wrong in original dir after 3rd rename",
-        snapshottable1, spaceConsumed);
-    checkSpaceConsumed("Du is wrong in snapshottable dir after 3rd rename",
-        snapshottable2, spaceConsumed);
-    checkSpaceConsumed("Du is wrong in original subdirectory after 3rd rename",
-        subdirectory, spaceConsumed);
-    checkSpaceConsumed("Du is wrong in root dir after 3rd rename",
-        dfsRoot, spaceConsumed);
-
-    hdfs.delete(renamedSubdirectory, false);
-    hdfs.deleteSnapshot(snapshottable1, "s1");
-    hdfs.deleteSnapshot(snapshottable2, "s2");
-    checkSpaceConsumed("Du is wrong after deleting all files and snapshots",
-        dfsRoot, 0);
-  }
 }
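
The removed tests assert the fixed behavior: each mutation is followed by a
getSpaceConsumed() check against a running expectation, and renames are expected
to leave du unchanged. A hypothetical condensed form of the testDu flow, reusing
the test class's hdfs, BLOCKSIZE, REPL and SEED fields; under the reverted code
the renamed blocks are counted twice, which is why the tests go with the fix:

      @Test(timeout = 300000)
      public void testDuStableAcrossRename() throws Exception {
        final Path root = new Path("/duRename");
        final Path original = new Path(root, "original");
        final Path renamed = new Path(root, "renamed");

        hdfs.mkdirs(root);
        DFSTestUtil.createFile(hdfs, original, BLOCKSIZE, REPL, SEED);
        final long expected = BLOCKSIZE * REPL;  // one fully replicated block

        SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
        hdfs.rename(original, renamed);

        // A rename moves blocks without adding any, so du should be flat.
        assertEquals("du is wrong after rename", expected,
            hdfs.getContentSummary(root).getSpaceConsumed());
      }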

