Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/03/02 18:15:27 UTC
[11/50] [abbrv] hadoop git commit: HDFS-7537. Add "UNDER MIN REPL'D BLOCKS" count to fsck. Contributed by GAO Rui
HDFS-7537. Add "UNDER MIN REPL'D BLOCKS" count to fsck. Contributed by GAO Rui
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0127820a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0127820a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0127820a
Branch: refs/heads/HDFS-7285
Commit: 0127820a0dc6f0d4f741e276a1048117f0e78fc5
Parents: 21e9e91
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Thu Feb 26 11:45:56 2015 +0800
Committer: Zhe Zhang <zh...@cloudera.com>
Committed: Mon Mar 2 09:13:51 2015 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hdfs/server/namenode/NamenodeFsck.java | 36 +++++++--
.../hadoop/hdfs/server/namenode/TestFsck.java | 81 +++++++++++++++++++-
3 files changed, 111 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
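In short, this change makes fsck count blocks whose number of live replicas
has fallen below dfs.namenode.replication.min and print that count in the
same banner section of the report as corrupt files. Judging from the
assertions in the new test below, the added lines look roughly like this for
a one-block file on a cluster with minReplication=2 (illustrative rendering,
not captured from a real run; the report separates label and value with tabs):

   ********************************
   UNDER MIN REPL'D BLOCKS:    1 (100.0 %)
   DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY:    2
   ********************************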
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0127820a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6dc7a0f..4523bf4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -673,6 +673,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt()
     (cmccabe)
 
+    HDFS-7537. Add "UNDER MIN REPL'D BLOCKS" count to fsck. (GAO Rui via
+    szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0127820a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index f36b773..3c7918f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -513,6 +513,9 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       res.totalReplicas += liveReplicas;
       short targetFileReplication = file.getReplication();
       res.numExpectedReplicas += targetFileReplication;
+      if(liveReplicas<minReplication){
+        res.numUnderMinReplicatedBlocks++;
+      }
       if (liveReplicas > targetFileReplication) {
         res.excessiveReplicas += (liveReplicas - targetFileReplication);
         res.numOverReplicatedBlocks += 1;
@@ -859,6 +862,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     long corruptBlocks = 0L;
     long excessiveReplicas = 0L;
     long missingReplicas = 0L;
+    long numUnderMinReplicatedBlocks=0L;
     long numOverReplicatedBlocks = 0L;
     long numUnderReplicatedBlocks = 0L;
     long numMisReplicatedBlocks = 0L; // blocks that do not satisfy block placement policy
@@ -875,10 +879,13 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     long totalReplicas = 0L;
 
     final short replication;
+    final int minReplication;
 
     Result(Configuration conf) {
       this.replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
           DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+      this.minReplication = (short)conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
+          DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
     }
 
     /**
@@ -926,15 +933,28 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
res.append(" (Total open file blocks (not validated): ").append(
totalOpenFilesBlocks).append(")");
}
- if (corruptFiles > 0) {
- res.append("\n ********************************").append(
- "\n CORRUPT FILES:\t").append(corruptFiles);
- if (missingSize > 0) {
- res.append("\n MISSING BLOCKS:\t").append(missingIds.size()).append(
- "\n MISSING SIZE:\t\t").append(missingSize).append(" B");
+ if (corruptFiles > 0 || numUnderMinReplicatedBlocks>0) {
+ res.append("\n ********************************");
+ if(numUnderMinReplicatedBlocks>0){
+ res.append("\n UNDER MIN REPL'D BLOCKS:\t").append(numUnderMinReplicatedBlocks);
+ if(totalBlocks>0){
+ res.append(" (").append(
+ ((float) (numUnderMinReplicatedBlocks * 100) / (float) totalBlocks))
+ .append(" %)");
+ }
+ res.append("\n ").append("DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY:\t")
+ .append(minReplication);
}
- if (corruptBlocks > 0) {
- res.append("\n CORRUPT BLOCKS: \t").append(corruptBlocks);
+ if(corruptFiles>0) {
+ res.append(
+ "\n CORRUPT FILES:\t").append(corruptFiles);
+ if (missingSize > 0) {
+ res.append("\n MISSING BLOCKS:\t").append(missingIds.size()).append(
+ "\n MISSING SIZE:\t\t").append(missingSize).append(" B");
+ }
+ if (corruptBlocks > 0) {
+ res.append("\n CORRUPT BLOCKS: \t").append(corruptBlocks);
+ }
}
res.append("\n ********************************");
}
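A note on the percentage printed above: it is a raw float division with no
rounding or format string, which is why the new test can match the literal
text "1 (100.0 %)". A minimal standalone sketch of that arithmetic (the class
and method names are hypothetical, not part of the Hadoop API):

    // Minimal sketch: mirrors the computation in the hunk above.
    class FsckPercentSketch {
      // Scale the counter by 100, then divide as float by the total block
      // count. For 1 under-min-replicated block out of 1 total this yields
      // exactly 100.0f, which StringBuilder renders as "100.0".
      static float underMinReplicatedPercent(long underMinBlocks, long totalBlocks) {
        return (float) (underMinBlocks * 100) / (float) totalBlocks;
      }
    }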
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0127820a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 7cdf5ec..33de692 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -693,7 +693,86 @@ public class TestFsck {
       if (cluster != null) {cluster.shutdown();}
     }
   }
-
+
+  @Test
+  public void testUnderMinReplicatedBlock() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    // Set minReplication to 2
+    short minReplication=2;
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,minReplication);
+    FileSystem fs = null;
+    DFSClient dfsClient = null;
+    LocatedBlocks blocks = null;
+    int replicaCount = 0;
+    Random random = new Random();
+    String outStr = null;
+    short factor = 1;
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      Path file1 = new Path("/testUnderMinReplicatedBlock");
+      DFSTestUtil.createFile(fs, file1, 1024, minReplication, 0);
+      // Wait until file replication has completed
+      DFSTestUtil.waitReplication(fs, file1, minReplication);
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
+
+      // Make sure filesystem is in healthy state
+      outStr = runFsck(conf, 0, true, "/");
+      System.out.println(outStr);
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+      // corrupt the first replica
+      File blockFile = cluster.getBlockFile(0, block);
+      if (blockFile != null && blockFile.exists()) {
+        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+        FileChannel channel = raFile.getChannel();
+        String badString = "BADBAD";
+        int rand = random.nextInt((int) channel.size()/2);
+        raFile.seek(rand);
+        raFile.write(badString.getBytes());
+        raFile.close();
+      }
+
+      dfsClient = new DFSClient(new InetSocketAddress("localhost",
+          cluster.getNameNodePort()), conf);
+      blocks = dfsClient.getNamenode().
+          getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      replicaCount = blocks.get(0).getLocations().length;
+      while (replicaCount != factor) {
+        try {
+          Thread.sleep(100);
+          // Read the file to trigger reportBadBlocks
+          try {
+            IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
+                true);
+          } catch (IOException ie) {
+            // Ignore exception
+          }
+          System.out.println("sleep in try: replicaCount="+replicaCount+" factor="+factor);
+        } catch (InterruptedException ignore) {
+        }
+        blocks = dfsClient.getNamenode().
+            getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+        replicaCount = blocks.get(0).getLocations().length;
+      }
+
+      // Check if fsck reports the same
+      outStr = runFsck(conf, 0, true, "/");
+      System.out.println(outStr);
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+      assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
+      assertTrue(outStr.contains("DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY:\t2"));
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+
+
   /** Test if fsck can return -1 in case of failure
    *
    * @throws Exception