You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by su...@apache.org on 2016/10/25 02:26:15 UTC
[05/50] [abbrv] hadoop git commit: HDFS-10976. Report erasure coding policy of EC files in Fsck. Contributed by Wei-Chiu Chuang.
HDFS-10976. Report erasure coding policy of EC files in Fsck. Contributed by Wei-Chiu Chuang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e83a21c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e83a21c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e83a21c
Branch: refs/heads/YARN-2915
Commit: 5e83a21cb66c78e89ac5af9a130ab0aee596a9f4
Parents: 3fbf4cd
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Thu Oct 20 13:02:16 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Thu Oct 20 13:06:43 2016 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/server/namenode/NamenodeFsck.java | 14 ++++++++++++--
.../apache/hadoop/hdfs/server/namenode/TestFsck.java | 14 +++++++++++++-
2 files changed, 25 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e83a21c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 8302035..a2e249d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -540,11 +541,20 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
res.totalFiles++;
res.totalSize += fileLen;
res.totalBlocks += blocks.locatedBlockCount();
+ String redundancyPolicy;
+ ErasureCodingPolicy ecPolicy = file.getErasureCodingPolicy();
+ if (ecPolicy == null) { // a replicated file
+ redundancyPolicy = "replicated: replication=" +
+ file.getReplication() + ",";
+ } else {
+ redundancyPolicy = "erasure-coded: policy=" + ecPolicy.getName() + ",";
+ }
+
if (showOpenFiles && isOpen) {
- out.print(path + " " + fileLen + " bytes, " +
+ out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
} else if (showFiles) {
- out.print(path + " " + fileLen + " bytes, " +
+ out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
blocks.locatedBlockCount() + " block(s): ");
} else if (showprogress) {
out.print('.');
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e83a21c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index aa41e9b..254a86c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1700,9 +1700,21 @@ public class TestFsck {
// restart the cluster; bring up namenode but not the data nodes
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).format(false).build();
- outStr = runFsck(conf, 1, true, "/");
+ outStr = runFsck(conf, 1, true, "/", "-files", "-blocks");
// expect the result is corrupt
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+ String[] outLines = outStr.split("\\r?\\n");
+ for (String line: outLines) {
+ if (line.contains(largeFilePath.toString())) {
+ final HdfsFileStatus file = cluster.getNameNode().getRpcServer().
+ getFileInfo(largeFilePath.toString());
+ assertTrue(line.contains("policy=" +
+ file.getErasureCodingPolicy().getName()));
+ } else if (line.contains(replFilePath.toString())) {
+ assertTrue(line.contains("replication=" + cluster.getFileSystem().
+ getFileStatus(replFilePath).getReplication()));
+ }
+ }
System.out.println(outStr);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org