You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ge...@apache.org on 2015/03/02 10:03:36 UTC
hadoop git commit: HDFS-7789. DFSck should resolve the path to
support cross-FS symlinks. (gera)
Repository: hadoop
Updated Branches:
refs/heads/trunk 67ed59348 -> cbb492578
HDFS-7789. DFSck should resolve the path to support cross-FS symlinks. (gera)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbb49257
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbb49257
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbb49257
Branch: refs/heads/trunk
Commit: cbb492578ef09300821b7199de54c6508f9d7fe8
Parents: 67ed593
Author: Gera Shegalov <ge...@apache.org>
Authored: Thu Feb 12 04:32:43 2015 -0800
Committer: Gera Shegalov <ge...@apache.org>
Committed: Mon Mar 2 00:55:35 2015 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../org/apache/hadoop/hdfs/tools/DFSck.java | 31 +++++++++++++-------
.../hadoop/hdfs/server/namenode/TestFsck.java | 14 ++++++---
.../namenode/TestFsckWithMultipleNameNodes.java | 20 +++++++++++++
4 files changed, 53 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb49257/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5ca16af..d5208da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7439. Add BlockOpResponseProto's message to the exception messages.
(Takanobu Asanuma via szetszwo)
+ HDFS-7789. DFSck should resolve the path to support cross-FS symlinks.
+ (gera)
+
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb49257/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index ec83a90..dc6d9d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -225,6 +225,14 @@ public class DFSck extends Configured implements Tool {
return errCode;
}
+
+ private Path getResolvedPath(String dir) throws IOException {
+ Configuration conf = getConf();
+ Path dirPath = new Path(dir);
+ FileSystem fs = dirPath.getFileSystem(conf);
+ return fs.resolvePath(dirPath);
+ }
+
/**
* Derive the namenode http address from the current file system,
* either default or as set by "-fs" in the generic options.
@@ -236,19 +244,12 @@ public class DFSck extends Configured implements Tool {
Configuration conf = getConf();
//get the filesystem object to verify it is an HDFS system
- final FileSystem fs;
- try {
- fs = target.getFileSystem(conf);
- } catch (IOException ioe) {
- System.err.println("FileSystem is inaccessible due to:\n"
- + StringUtils.stringifyException(ioe));
- return null;
- }
+ final FileSystem fs = target.getFileSystem(conf);
if (!(fs instanceof DistributedFileSystem)) {
System.err.println("FileSystem is " + fs.getUri());
return null;
}
-
+
return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
DFSUtil.getHttpClientScheme(conf));
}
@@ -303,8 +304,16 @@ public class DFSck extends Configured implements Tool {
dir = "/";
}
- final Path dirpath = new Path(dir);
- final URI namenodeAddress = getCurrentNamenodeAddress(dirpath);
+ Path dirpath = null;
+ URI namenodeAddress = null;
+ try {
+ dirpath = getResolvedPath(dir);
+ namenodeAddress = getCurrentNamenodeAddress(dirpath);
+ } catch (IOException ioe) {
+ System.err.println("FileSystem is inaccessible due to:\n"
+ + StringUtils.stringifyException(ioe));
+ }
+
if (namenodeAddress == null) {
//Error message already output in {@link #getCurrentNamenodeAddress()}
System.err.println("DFSck exiting.");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb49257/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 1053b5f..409fffc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -211,10 +211,16 @@ public class TestFsck {
try {
// Audit log should contain one getfileinfo and one fsck
reader = new BufferedReader(new FileReader(auditLogFile));
- String line = reader.readLine();
- assertNotNull(line);
- assertTrue("Expected getfileinfo event not found in audit log",
- getfileinfoPattern.matcher(line).matches());
+ String line;
+
+      // One extra getfileinfo audit event stems from resolving the path
+      // before the fsck request itself is issued.
+ for (int i = 0; i < 2; i++) {
+ line = reader.readLine();
+ assertNotNull(line);
+ assertTrue("Expected getfileinfo event not found in audit log",
+ getfileinfoPattern.matcher(line).matches());
+ }
line = reader.readLine();
assertNotNull(line);
assertTrue("Expected fsck event not found in audit log", fsckPattern
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb49257/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
index f4cb624..124b301 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
+import java.net.URI;
import java.util.Random;
import java.util.concurrent.TimeoutException;
@@ -26,6 +27,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -119,6 +122,23 @@ public class TestFsckWithMultipleNameNodes {
LOG.info("result=" + result);
Assert.assertTrue(result.contains("Status: HEALTHY"));
}
+
+      // Test fsck through viewfs: mount links that resolve to each namenode,
+      // exercising cross-FS symlink resolution.
+ LOG.info("RUN_TEST 3");
+ final String[] vurls = new String[nNameNodes];
+ for (int i = 0; i < vurls.length; i++) {
+ String link = "/mount/nn_" + i + FILE_NAME;
+ ConfigUtil.addLink(conf, link, new URI(urls[i]));
+ vurls[i] = "viewfs:" + link;
+ }
+
+ for(int i = 0; i < vurls.length; i++) {
+ LOG.info("vurls[" + i + "]=" + vurls[i]);
+ final String result = TestFsck.runFsck(conf, 0, false, vurls[i]);
+ LOG.info("result=" + result);
+ Assert.assertTrue(result.contains("Status: HEALTHY"));
+ }
} finally {
cluster.shutdown();
}