Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2015/07/08 17:16:29 UTC
[01/12] hadoop git commit: HADOOP-5732. Add SFTPFileSystem. Contributed by Ramtin Boustani and Inigo Goiri
Repository: hadoop
Updated Branches:
refs/heads/HADOOP-12111 adbacf701 -> 8243608fd
HADOOP-5732. Add SFTPFileSystem. Contributed by Ramtin Boustani and Inigo Goiri
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/559425dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/559425dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/559425dc
Branch: refs/heads/HADOOP-12111
Commit: 559425dcb9302861c3c28b759492a68f8d597092
Parents: e0febce
Author: Chris Douglas <cd...@apache.org>
Authored: Tue Jul 7 10:07:07 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Tue Jul 7 10:08:59 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
hadoop-common-project/hadoop-common/pom.xml | 5 +
.../hadoop/fs/sftp/SFTPConnectionPool.java | 303 +++++++++
.../apache/hadoop/fs/sftp/SFTPFileSystem.java | 671 +++++++++++++++++++
.../apache/hadoop/fs/sftp/SFTPInputStream.java | 130 ++++
.../org/apache/hadoop/fs/sftp/package-info.java | 19 +
.../hadoop/fs/sftp/TestSFTPFileSystem.java | 308 +++++++++
hadoop-project/pom.xml | 5 +
8 files changed, 1444 insertions(+)
----------------------------------------------------------------------
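For orientation, a minimal sketch (not part of this commit) of how a client could use the new scheme; it assumes the fs.sftp.impl binding that TestSFTPFileSystem below also sets, and the host, credentials, and path are placeholders:

import java.io.InputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.sftp.SFTPFileSystem;

public class SFTPUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // bind the sftp:// scheme to the new implementation
    conf.setClass("fs.sftp.impl", SFTPFileSystem.class, FileSystem.class);
    // credentials may also come from fs.sftp.user.<host> and
    // fs.sftp.password.<host>.<user> instead of the URI
    FileSystem fs = FileSystem.get(
        URI.create("sftp://user:password@example.com"), conf);
    try (InputStream in = fs.open(new Path("/remote/file.txt"))) {
      System.out.println("first byte: " + in.read());
    }
    fs.close();
  }
}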
http://git-wip-us.apache.org/repos/asf/hadoop/blob/559425dc/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ee96eee..194e2e3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -526,6 +526,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only.
(Kengo Seki via aajisaka)
+ HADOOP-5732. Add SFTP FileSystem. (Ramtin Boustani and Inigo Goiri via
+ cdouglas)
+
IMPROVEMENTS
HADOOP-6842. "hadoop fs -text" does not give a useful text representation
http://git-wip-us.apache.org/repos/asf/hadoop/blob/559425dc/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index c3a306b..6b1388a 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -238,6 +238,11 @@
<artifactId>jsr305</artifactId>
<scope>compile</scope>
</dependency>
+ <dependency>
+ <groupId>org.apache.sshd</groupId>
+ <artifactId>sshd-core</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/559425dc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
new file mode 100644
index 0000000..c7fae7b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.sftp;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.StringUtils;
+
+import com.jcraft.jsch.ChannelSftp;
+import com.jcraft.jsch.JSch;
+import com.jcraft.jsch.JSchException;
+import com.jcraft.jsch.Session;
+
+/** Pool of SFTP connections, allowing multiple concurrent channels. */
+class SFTPConnectionPool {
+
+ public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+ // Maximum number of allowed live connections. This doesn't mean we cannot
+ // have more live connections. It means that when we have more
+ // live connections than this threshold, any unused connection will be
+ // closed.
+ private int maxConnection;
+ private int liveConnectionCount = 0;
+ private HashMap<ConnectionInfo, HashSet<ChannelSftp>> idleConnections =
+ new HashMap<ConnectionInfo, HashSet<ChannelSftp>>();
+ private HashMap<ChannelSftp, ConnectionInfo> con2infoMap =
+ new HashMap<ChannelSftp, ConnectionInfo>();
+
+ SFTPConnectionPool(int maxConnection) {
+ this.maxConnection = maxConnection;
+ }
+
+ synchronized ChannelSftp getFromPool(ConnectionInfo info) throws IOException {
+ Set<ChannelSftp> cons = idleConnections.get(info);
+ ChannelSftp channel;
+
+ if (cons != null && cons.size() > 0) {
+ Iterator<ChannelSftp> it = cons.iterator();
+ if (it.hasNext()) {
+ channel = it.next();
+ it.remove(); // take only this channel; leave other idle ones pooled
+ return channel;
+ } else {
+ throw new IOException("Connection pool error.");
+ }
+ }
+ return null;
+ }
+
+ /** Return an idle channel to the pool.
+ * @param channel the channel being returned
+ */
+ synchronized void returnToPool(ChannelSftp channel) {
+ ConnectionInfo info = con2infoMap.get(channel);
+ HashSet<ChannelSftp> cons = idleConnections.get(info);
+ if (cons == null) {
+ cons = new HashSet<ChannelSftp>();
+ idleConnections.put(info, cons);
+ }
+ cons.add(channel);
+
+ }
+
+ /** Shutdown the connection pool and close all open connections. */
+ synchronized void shutdown() {
+ if (this.con2infoMap == null){
+ return; // pool already shut down; nothing to do
+ }
+ LOG.info("Inside shutdown, con2infoMap size=" + con2infoMap.size());
+
+ this.maxConnection = 0;
+ Set<ChannelSftp> cons = con2infoMap.keySet();
+ if (cons != null && cons.size() > 0) {
+ // make a copy since we need to modify the underlying Map
+ Set<ChannelSftp> copy = new HashSet<ChannelSftp>(cons);
+ // Initiate disconnect from all outstanding connections
+ for (ChannelSftp con : copy) {
+ try {
+ disconnect(con);
+ } catch (IOException ioe) {
+ ConnectionInfo info = con2infoMap.get(con);
+ LOG.error(
+ "Error encountered while closing connection to " + info.getHost(),
+ ioe);
+ }
+ }
+ }
+ // make sure no further connections can be returned.
+ this.idleConnections = null;
+ this.con2infoMap = null;
+ }
+
+ public synchronized int getMaxConnection() {
+ return maxConnection;
+ }
+
+ public synchronized void setMaxConnection(int maxConn) {
+ this.maxConnection = maxConn;
+ }
+
+ public ChannelSftp connect(String host, int port, String user,
+ String password, String keyFile) throws IOException {
+ // get connection from pool
+ ConnectionInfo info = new ConnectionInfo(host, port, user);
+ ChannelSftp channel = getFromPool(info);
+
+ if (channel != null) {
+ if (channel.isConnected()) {
+ return channel;
+ } else {
+ synchronized (this) {
+ --liveConnectionCount;
+ // remove the stale channel before dropping the reference
+ con2infoMap.remove(channel);
+ }
+ channel = null;
+ }
+ }
+
+ // create a new connection and add to pool
+ JSch jsch = new JSch();
+ Session session = null;
+ try {
+ if (user == null || user.length() == 0) {
+ user = System.getProperty("user.name");
+ }
+
+ if (password == null) {
+ password = "";
+ }
+
+ if (keyFile != null && keyFile.length() > 0) {
+ jsch.addIdentity(keyFile);
+ }
+
+ if (port <= 0) {
+ session = jsch.getSession(user, host);
+ } else {
+ session = jsch.getSession(user, host, port);
+ }
+
+ session.setPassword(password);
+
+ java.util.Properties config = new java.util.Properties();
+ config.put("StrictHostKeyChecking", "no");
+ session.setConfig(config);
+
+ session.connect();
+ channel = (ChannelSftp) session.openChannel("sftp");
+ channel.connect();
+
+ synchronized (this) {
+ con2infoMap.put(channel, info);
+ liveConnectionCount++;
+ }
+
+ return channel;
+
+ } catch (JSchException e) {
+ throw new IOException(StringUtils.stringifyException(e));
+ }
+ }
+
+ void disconnect(ChannelSftp channel) throws IOException {
+ if (channel != null) {
+ // close connection if too many active connections
+ boolean closeConnection = false;
+ synchronized (this) {
+ if (liveConnectionCount > maxConnection) {
+ --liveConnectionCount;
+ con2infoMap.remove(channel);
+ closeConnection = true;
+ }
+ }
+ if (closeConnection) {
+ if (channel.isConnected()) {
+ try {
+ Session session = channel.getSession();
+ channel.disconnect();
+ session.disconnect();
+ } catch (JSchException e) {
+ throw new IOException(StringUtils.stringifyException(e));
+ }
+ }
+
+ } else {
+ returnToPool(channel);
+ }
+ }
+ }
+
+ public int getIdleCount() {
+ return this.idleConnections.size();
+ }
+
+ public int getLiveConnCount() {
+ return this.liveConnectionCount;
+ }
+
+ public int getConnPoolSize() {
+ return this.con2infoMap.size();
+ }
+
+ /**
+ * Class to capture the minimal set of information that distinguish
+ * between different connections.
+ */
+ static class ConnectionInfo {
+ private String host = "";
+ private int port;
+ private String user = "";
+
+ ConnectionInfo(String hst, int prt, String usr) {
+ this.host = hst;
+ this.port = prt;
+ this.user = usr;
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public void setHost(String hst) {
+ this.host = hst;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public void setPort(int prt) {
+ this.port = prt;
+ }
+
+ public String getUser() {
+ return user;
+ }
+
+ public void setUser(String usr) {
+ this.user = usr;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+
+ if (obj instanceof ConnectionInfo) {
+ ConnectionInfo con = (ConnectionInfo) obj;
+
+ boolean ret = true;
+ if (this.host == null || !this.host.equalsIgnoreCase(con.host)) {
+ ret = false;
+ }
+ if (this.port >= 0 && this.port != con.port) {
+ ret = false;
+ }
+ if (this.user == null || !this.user.equalsIgnoreCase(con.user)) {
+ ret = false;
+ }
+ return ret;
+ } else {
+ return false;
+ }
+
+ }
+
+ @Override
+ public int hashCode() {
+ int hashCode = 0;
+ if (host != null) {
+ hashCode += host.hashCode();
+ }
+ hashCode += port;
+ if (user != null) {
+ hashCode += user.hashCode();
+ }
+ return hashCode;
+ }
+
+ }
+}
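The pool treats maxConnection as a soft cap: disconnect() closes a channel outright only while the live count exceeds the cap, and otherwise returns it to the idle set. A fragment showing how the cap can be raised through the key this patch defines (the value 10 is an arbitrary example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.sftp.SFTPFileSystem;

Configuration conf = new Configuration();
// above this cap, disconnect() really closes channels instead of
// pooling them; DEFAULT_MAX_CONNECTION is 5
conf.setInt(SFTPFileSystem.FS_SFTP_CONNECTION_MAX, 10);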
http://git-wip-us.apache.org/repos/asf/hadoop/blob/559425dc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
new file mode 100644
index 0000000..8b6267a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
@@ -0,0 +1,671 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.sftp;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Vector;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
+
+import com.jcraft.jsch.ChannelSftp;
+import com.jcraft.jsch.ChannelSftp.LsEntry;
+import com.jcraft.jsch.SftpATTRS;
+import com.jcraft.jsch.SftpException;
+
+/** SFTP FileSystem. */
+public class SFTPFileSystem extends FileSystem {
+
+ public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+
+ private SFTPConnectionPool connectionPool;
+ private URI uri;
+
+ private static final int DEFAULT_SFTP_PORT = 22;
+ private static final int DEFAULT_MAX_CONNECTION = 5;
+ public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
+ public static final int DEFAULT_BLOCK_SIZE = 4 * 1024;
+ public static final String FS_SFTP_USER_PREFIX = "fs.sftp.user.";
+ public static final String FS_SFTP_PASSWORD_PREFIX = "fs.sftp.password.";
+ public static final String FS_SFTP_HOST = "fs.sftp.host";
+ public static final String FS_SFTP_HOST_PORT = "fs.sftp.host.port";
+ public static final String FS_SFTP_KEYFILE = "fs.sftp.keyfile";
+ public static final String FS_SFTP_CONNECTION_MAX = "fs.sftp.connection.max";
+ public static final String E_SAME_DIRECTORY_ONLY =
+ "only same directory renames are supported";
+ public static final String E_HOST_NULL = "Invalid host specified";
+ public static final String E_USER_NULL =
+ "No user specified for sftp connection. Expand URI or credential file.";
+ public static final String E_PATH_DIR = "Path %s is a directory.";
+ public static final String E_FILE_STATUS = "Failed to get file status";
+ public static final String E_FILE_NOTFOUND = "File %s does not exist.";
+ public static final String E_FILE_EXIST = "File already exists: %s";
+ public static final String E_CREATE_DIR =
+ "create(): Mkdirs failed to create: %s";
+ public static final String E_DIR_CREATE_FROMFILE =
+ "Can't make directory for path %s since it is a file.";
+ public static final String E_MAKE_DIR_FORPATH =
+ "Can't make directory for path \"%s\" under \"%s\".";
+ public static final String E_DIR_NOTEMPTY = "Directory: %s is not empty.";
+ public static final String E_FILE_CHECK_FAILED = "File check failed";
+ public static final String E_NOT_SUPPORTED = "Not supported";
+ public static final String E_SPATH_NOTEXIST = "Source path %s does not exist";
+ public static final String E_DPATH_EXIST =
+ "Destination path %s already exist, cannot rename!";
+ public static final String E_FAILED_GETHOME = "Failed to get home directory";
+ public static final String E_FAILED_DISCONNECT = "Failed to disconnect";
+
+ /**
+ * Set configuration from URI.
+ *
+ * @param uriInfo
+ * @param conf
+ * @throws IOException
+ */
+ private void setConfigurationFromURI(URI uriInfo, Configuration conf)
+ throws IOException {
+
+ // get host information from URI
+ String host = uriInfo.getHost();
+ host = (host == null) ? conf.get(FS_SFTP_HOST, null) : host;
+ if (host == null) {
+ throw new IOException(E_HOST_NULL);
+ }
+ conf.set(FS_SFTP_HOST, host);
+
+ int port = uriInfo.getPort();
+ port = (port == -1)
+ ? conf.getInt(FS_SFTP_HOST_PORT, DEFAULT_SFTP_PORT)
+ : port;
+ conf.setInt(FS_SFTP_HOST_PORT, port);
+
+ // get user/password information from URI
+ String userAndPwdFromUri = uriInfo.getUserInfo();
+ if (userAndPwdFromUri != null) {
+ String[] userPasswdInfo = userAndPwdFromUri.split(":");
+ String user = userPasswdInfo[0];
+ user = URLDecoder.decode(user, "UTF-8");
+ conf.set(FS_SFTP_USER_PREFIX + host, user);
+ if (userPasswdInfo.length > 1) {
+ conf.set(FS_SFTP_PASSWORD_PREFIX + host + "." +
+ user, userPasswdInfo[1]);
+ }
+ }
+
+ String user = conf.get(FS_SFTP_USER_PREFIX + host);
+ if (user == null || user.equals("")) {
+ throw new IllegalStateException(E_USER_NULL);
+ }
+
+ int connectionMax =
+ conf.getInt(FS_SFTP_CONNECTION_MAX, DEFAULT_MAX_CONNECTION);
+ connectionPool = new SFTPConnectionPool(connectionMax);
+ }
+
+ /**
+ * Connect using configuration parameters.
+ *
+ * @return a connected ChannelSftp instance
+ * @throws IOException
+ */
+ private ChannelSftp connect() throws IOException {
+ Configuration conf = getConf();
+
+ String host = conf.get(FS_SFTP_HOST, null);
+ int port = conf.getInt(FS_SFTP_HOST_PORT, DEFAULT_SFTP_PORT);
+ String user = conf.get(FS_SFTP_USER_PREFIX + host, null);
+ String pwd = conf.get(FS_SFTP_PASSWORD_PREFIX + host + "." + user, null);
+ String keyFile = conf.get(FS_SFTP_KEYFILE, null);
+
+ ChannelSftp channel =
+ connectionPool.connect(host, port, user, pwd, keyFile);
+
+ return channel;
+ }
+
+ /**
+ * Logout and disconnect the given channel.
+ *
+ * @param channel
+ * @throws IOException
+ */
+ private void disconnect(ChannelSftp channel) throws IOException {
+ connectionPool.disconnect(channel);
+ }
+
+ /**
+ * Resolve against given working directory.
+ *
+ * @param workDir
+ * @param path
+ * @return absolute path
+ */
+ private Path makeAbsolute(Path workDir, Path path) {
+ if (path.isAbsolute()) {
+ return path;
+ }
+ return new Path(workDir, path);
+ }
+
+ /**
+ * Convenience method, so that we don't open a new connection when using this
+ * method from within another method. Otherwise every API invocation incurs
+ * the overhead of opening/closing a TCP connection.
+ * @throws IOException
+ */
+ private boolean exists(ChannelSftp channel, Path file) throws IOException {
+ try {
+ getFileStatus(channel, file);
+ return true;
+ } catch (FileNotFoundException fnfe) {
+ return false;
+ } catch (IOException ioe) {
+ throw new IOException(E_FILE_STATUS, ioe);
+ }
+ }
+
+ /**
+ * Convenience method, so that we don't open a new connection when using this
+ * method from within another method. Otherwise every API invocation incurs
+ * the overhead of opening/closing a TCP connection.
+ */
+ @SuppressWarnings("unchecked")
+ private FileStatus getFileStatus(ChannelSftp client, Path file)
+ throws IOException {
+ FileStatus fileStat = null;
+ Path workDir;
+ try {
+ workDir = new Path(client.pwd());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ Path absolute = makeAbsolute(workDir, file);
+ Path parentPath = absolute.getParent();
+ if (parentPath == null) { // root directory
+ long length = -1; // Length of root directory on server not known
+ boolean isDir = true;
+ int blockReplication = 1;
+ long blockSize = DEFAULT_BLOCK_SIZE; // Block Size not known.
+ long modTime = -1; // Modification time of root directory not known.
+ Path root = new Path("/");
+ return new FileStatus(length, isDir, blockReplication, blockSize,
+ modTime,
+ root.makeQualified(this.getUri(), this.getWorkingDirectory()));
+ }
+ String pathName = parentPath.toUri().getPath();
+ Vector<LsEntry> sftpFiles;
+ try {
+ sftpFiles = (Vector<LsEntry>) client.ls(pathName);
+ } catch (SftpException e) {
+ throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
+ }
+ if (sftpFiles != null) {
+ for (LsEntry sftpFile : sftpFiles) {
+ if (sftpFile.getFilename().equals(file.getName())) {
+ // file found in directory
+ fileStat = getFileStatus(client, sftpFile, parentPath);
+ break;
+ }
+ }
+ if (fileStat == null) {
+ throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
+ }
+ } else {
+ throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
+ }
+ return fileStat;
+ }
+
+ /**
+ * Convert the file information in LsEntry to a {@link FileStatus} object.
+ *
+ * @param sftpFile
+ * @param parentPath
+ * @return file status
+ * @throws IOException
+ */
+ private FileStatus getFileStatus(ChannelSftp channel, LsEntry sftpFile,
+ Path parentPath) throws IOException {
+
+ SftpATTRS attr = sftpFile.getAttrs();
+ long length = attr.getSize();
+ boolean isDir = attr.isDir();
+ boolean isLink = attr.isLink();
+ if (isLink) {
+ String link = parentPath.toUri().getPath() + "/" + sftpFile.getFilename();
+ try {
+ link = channel.realpath(link);
+
+ Path linkParent = new Path("/", link);
+
+ FileStatus fstat = getFileStatus(channel, linkParent);
+ isDir = fstat.isDirectory();
+ length = fstat.getLen();
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ }
+ int blockReplication = 1;
+ // Using default block size since there is no way in SFTP channel to know of
+ // block sizes on server. The assumption could be less than ideal.
+ long blockSize = DEFAULT_BLOCK_SIZE;
+ long modTime = attr.getMTime() * 1000; // convert to milliseconds
+ long accessTime = 0;
+ FsPermission permission = getPermissions(sftpFile);
+ // user/group names are not retrievable over the SFTP channel, so expose
+ // the numeric user and group ids instead
+ String user = Integer.toString(attr.getUId());
+ String group = Integer.toString(attr.getGId());
+ Path filePath = new Path(parentPath, sftpFile.getFilename());
+
+ return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
+ accessTime, permission, user, group, filePath.makeQualified(
+ this.getUri(), this.getWorkingDirectory()));
+ }
+
+ /**
+ * Return file permission.
+ *
+ * @param sftpFile
+ * @return file permission
+ */
+ private FsPermission getPermissions(LsEntry sftpFile) {
+ return new FsPermission((short) sftpFile.getAttrs().getPermissions());
+ }
+
+ /**
+ * Convenience method, so that we don't open a new connection when using this
+ * method from within another method. Otherwise every API invocation incurs
+ * the overhead of opening/closing a TCP connection.
+ */
+ private boolean mkdirs(ChannelSftp client, Path file, FsPermission permission)
+ throws IOException {
+ boolean created = true;
+ Path workDir;
+ try {
+ workDir = new Path(client.pwd());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ Path absolute = makeAbsolute(workDir, file);
+ String pathName = absolute.getName();
+ if (!exists(client, absolute)) {
+ Path parent = absolute.getParent();
+ created =
+ (parent == null || mkdirs(client, parent, FsPermission.getDefault()));
+ if (created) {
+ String parentDir = parent.toUri().getPath();
+ boolean succeeded = true;
+ try {
+ client.cd(parentDir);
+ client.mkdir(pathName);
+ } catch (SftpException e) {
+ throw new IOException(String.format(E_MAKE_DIR_FORPATH, pathName,
+ parentDir));
+ }
+ created = created & succeeded;
+ }
+ } else if (isFile(client, absolute)) {
+ throw new IOException(String.format(E_DIR_CREATE_FROMFILE, absolute));
+ }
+ return created;
+ }
+
+ /**
+ * Convenience method, so that we don't open a new connection when using this
+ * method from within another method. Otherwise every API invocation incurs
+ * the overhead of opening/closing a TCP connection.
+ * @throws IOException
+ */
+ private boolean isFile(ChannelSftp channel, Path file) throws IOException {
+ try {
+ return !getFileStatus(channel, file).isDirectory();
+ } catch (FileNotFoundException e) {
+ return false; // file does not exist
+ } catch (IOException ioe) {
+ throw new IOException(E_FILE_CHECK_FAILED, ioe);
+ }
+ }
+
+ /**
+ * Convenience method, so that we don't open a new connection when using this
+ * method from within another method. Otherwise every API invocation incurs
+ * the overhead of opening/closing a TCP connection.
+ */
+ private boolean delete(ChannelSftp channel, Path file, boolean recursive)
+ throws IOException {
+ Path workDir;
+ try {
+ workDir = new Path(channel.pwd());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ Path absolute = makeAbsolute(workDir, file);
+ String pathName = absolute.toUri().getPath();
+ FileStatus fileStat = null;
+ try {
+ fileStat = getFileStatus(channel, absolute);
+ } catch (FileNotFoundException e) {
+ // file not found; nothing to delete, so report failure
+ return false;
+ }
+ if (!fileStat.isDirectory()) {
+ boolean status = true;
+ try {
+ channel.rm(pathName);
+ } catch (SftpException e) {
+ status = false;
+ }
+ return status;
+ } else {
+ boolean status = true;
+ FileStatus[] dirEntries = listStatus(channel, absolute);
+ if (dirEntries != null && dirEntries.length > 0) {
+ if (!recursive) {
+ throw new IOException(String.format(E_DIR_NOTEMPTY, file));
+ }
+ for (int i = 0; i < dirEntries.length; ++i) {
+ delete(channel, new Path(absolute, dirEntries[i].getPath()),
+ recursive);
+ }
+ }
+ try {
+ channel.rmdir(pathName);
+ } catch (SftpException e) {
+ status = false;
+ }
+ return status;
+ }
+ }
+
+ /**
+ * Convenience method, so that we don't open a new connection when using this
+ * method from within another method. Otherwise every API invocation incurs
+ * the overhead of opening/closing a TCP connection.
+ */
+ @SuppressWarnings("unchecked")
+ private FileStatus[] listStatus(ChannelSftp client, Path file)
+ throws IOException {
+ Path workDir;
+ try {
+ workDir = new Path(client.pwd());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ Path absolute = makeAbsolute(workDir, file);
+ FileStatus fileStat = getFileStatus(client, absolute);
+ if (!fileStat.isDirectory()) {
+ return new FileStatus[] {fileStat};
+ }
+ Vector<LsEntry> sftpFiles;
+ try {
+ sftpFiles = (Vector<LsEntry>) client.ls(absolute.toUri().getPath());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ ArrayList<FileStatus> fileStats = new ArrayList<FileStatus>();
+ for (int i = 0; i < sftpFiles.size(); i++) {
+ LsEntry entry = sftpFiles.get(i);
+ String fname = entry.getFilename();
+ // skip current and parent directory, i.e. "." and ".."
+ if (!".".equalsIgnoreCase(fname) && !"..".equalsIgnoreCase(fname)) {
+ fileStats.add(getFileStatus(client, entry, absolute));
+ }
+ }
+ return fileStats.toArray(new FileStatus[fileStats.size()]);
+ }
+
+ /**
+ * Convenience method, so that we don't open a new connection when using this
+ * method from within another method. Otherwise every API invocation incurs
+ * the overhead of opening/closing a TCP connection.
+ *
+ * @param channel
+ * @param src
+ * @param dst
+ * @return rename successful?
+ * @throws IOException
+ */
+ private boolean rename(ChannelSftp channel, Path src, Path dst)
+ throws IOException {
+ Path workDir;
+ try {
+ workDir = new Path(channel.pwd());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ Path absoluteSrc = makeAbsolute(workDir, src);
+ Path absoluteDst = makeAbsolute(workDir, dst);
+
+ if (!exists(channel, absoluteSrc)) {
+ throw new IOException(String.format(E_SPATH_NOTEXIST, src));
+ }
+ if (exists(channel, absoluteDst)) {
+ throw new IOException(String.format(E_DPATH_EXIST, dst));
+ }
+ boolean renamed = true;
+ try {
+ channel.cd("/");
+ channel.rename(src.toUri().getPath(), dst.toUri().getPath());
+ } catch (SftpException e) {
+ renamed = false;
+ }
+ return renamed;
+ }
+
+ @Override
+ public void initialize(URI uriInfo, Configuration conf) throws IOException {
+ super.initialize(uriInfo, conf);
+
+ setConfigurationFromURI(uriInfo, conf);
+ setConf(conf);
+ this.uri = uriInfo;
+ }
+
+ @Override
+ public URI getUri() {
+ return uri;
+ }
+
+ @Override
+ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+ ChannelSftp channel = connect();
+ Path workDir;
+ try {
+ workDir = new Path(channel.pwd());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ Path absolute = makeAbsolute(workDir, f);
+ FileStatus fileStat = getFileStatus(channel, absolute);
+ if (fileStat.isDirectory()) {
+ disconnect(channel);
+ throw new IOException(String.format(E_PATH_DIR, f));
+ }
+ InputStream is;
+ try {
+ // the path could be a symbolic link, so get the real path
+ absolute = new Path("/", channel.realpath(absolute.toUri().getPath()));
+
+ is = channel.get(absolute.toUri().getPath());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+
+ FSDataInputStream fis =
+ new FSDataInputStream(new SFTPInputStream(is, channel, statistics));
+ return fis;
+ }
+
+ /**
+ * A stream obtained via this call must be closed before using other APIs of
+ * this class or else the invocation will block.
+ */
+ @Override
+ public FSDataOutputStream create(Path f, FsPermission permission,
+ boolean overwrite, int bufferSize, short replication, long blockSize,
+ Progressable progress) throws IOException {
+ final ChannelSftp client = connect();
+ Path workDir;
+ try {
+ workDir = new Path(client.pwd());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ Path absolute = makeAbsolute(workDir, f);
+ if (exists(client, f)) {
+ if (overwrite) {
+ delete(client, f, false);
+ } else {
+ disconnect(client);
+ throw new IOException(String.format(E_FILE_EXIST, f));
+ }
+ }
+ Path parent = absolute.getParent();
+ if (parent == null || !mkdirs(client, parent, FsPermission.getDefault())) {
+ parent = (parent == null) ? new Path("/") : parent;
+ disconnect(client);
+ throw new IOException(String.format(E_CREATE_DIR, parent));
+ }
+ OutputStream os;
+ try {
+ client.cd(parent.toUri().getPath());
+ os = client.put(f.getName());
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ FSDataOutputStream fos = new FSDataOutputStream(os, statistics) {
+ @Override
+ public void close() throws IOException {
+ super.close();
+ disconnect(client);
+ }
+ };
+
+ return fos;
+ }
+
+ @Override
+ public FSDataOutputStream append(Path f, int bufferSize,
+ Progressable progress)
+ throws IOException {
+ throw new IOException(E_NOT_SUPPORTED);
+ }
+
+ /*
+ * The parents of source and destination can be different. It is supposed
+ * to work like 'move'.
+ */
+ @Override
+ public boolean rename(Path src, Path dst) throws IOException {
+ ChannelSftp channel = connect();
+ try {
+ boolean success = rename(channel, src, dst);
+ return success;
+ } finally {
+ disconnect(channel);
+ }
+ }
+
+ @Override
+ public boolean delete(Path f, boolean recursive) throws IOException {
+ ChannelSftp channel = connect();
+ try {
+ boolean success = delete(channel, f, recursive);
+ return success;
+ } finally {
+ disconnect(channel);
+ }
+ }
+
+ @Override
+ public FileStatus[] listStatus(Path f) throws IOException {
+ ChannelSftp client = connect();
+ try {
+ FileStatus[] stats = listStatus(client, f);
+ return stats;
+ } finally {
+ disconnect(client);
+ }
+ }
+
+ @Override
+ public void setWorkingDirectory(Path newDir) {
+ // we do not maintain the working directory state
+ }
+
+ @Override
+ public Path getWorkingDirectory() {
+ // Return home directory always since we do not maintain state.
+ return getHomeDirectory();
+ }
+
+ @Override
+ public Path getHomeDirectory() {
+ ChannelSftp channel = null;
+ try {
+ channel = connect();
+ Path homeDir = new Path(channel.pwd());
+ return homeDir;
+ } catch (Exception ioe) {
+ return null;
+ } finally {
+ try {
+ disconnect(channel);
+ } catch (IOException ioe) {
+ return null;
+ }
+ }
+ }
+
+ @Override
+ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
+ ChannelSftp client = connect();
+ try {
+ boolean success = mkdirs(client, f, permission);
+ return success;
+ } finally {
+ disconnect(client);
+ }
+ }
+
+ @Override
+ public FileStatus getFileStatus(Path f) throws IOException {
+ ChannelSftp channel = connect();
+ try {
+ FileStatus status = getFileStatus(channel, f);
+ return status;
+ } finally {
+ disconnect(channel);
+ }
+ }
+}
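Instead of embedding credentials in the URI, the class also reads per-host keys from the configuration; a sketch with placeholder host, user, and paths:

Configuration conf = new Configuration();
conf.set("fs.sftp.host", "example.com");                // FS_SFTP_HOST
conf.setInt("fs.sftp.host.port", 2222);                 // FS_SFTP_HOST_PORT
conf.set("fs.sftp.user.example.com", "alice");          // FS_SFTP_USER_PREFIX + host
conf.set("fs.sftp.password.example.com.alice",          // FS_SFTP_PASSWORD_PREFIX
    "secret");                                          //   + host + "." + user
conf.set("fs.sftp.keyfile", "/home/alice/.ssh/id_rsa"); // FS_SFTP_KEYFILE (optional)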
http://git-wip-us.apache.org/repos/asf/hadoop/blob/559425dc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java
new file mode 100644
index 0000000..ece2c1c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.sftp;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.util.StringUtils;
+
+import com.jcraft.jsch.ChannelSftp;
+import com.jcraft.jsch.JSchException;
+import com.jcraft.jsch.Session;
+
+/** SFTP FileSystem input stream. */
+class SFTPInputStream extends FSInputStream {
+
+ public static final String E_SEEK_NOTSUPPORTED = "Seek not supported";
+ public static final String E_CLIENT_NULL =
+ "SFTP client null or not connected";
+ public static final String E_NULL_INPUTSTREAM = "Null InputStream";
+ public static final String E_STREAM_CLOSED = "Stream closed";
+ public static final String E_CLIENT_NOTCONNECTED = "Client not connected";
+
+ private InputStream wrappedStream;
+ private ChannelSftp channel;
+ private FileSystem.Statistics stats;
+ private boolean closed;
+ private long pos;
+
+ SFTPInputStream(InputStream stream, ChannelSftp channel,
+ FileSystem.Statistics stats) {
+
+ if (stream == null) {
+ throw new IllegalArgumentException(E_NULL_INPUTSTREAM);
+ }
+ if (channel == null || !channel.isConnected()) {
+ throw new IllegalArgumentException(E_CLIENT_NULL);
+ }
+ this.wrappedStream = stream;
+ this.channel = channel;
+ this.stats = stats;
+
+ this.pos = 0;
+ this.closed = false;
+ }
+
+ @Override
+ public void seek(long position) throws IOException {
+ throw new IOException(E_SEEK_NOTSUPPORTED);
+ }
+
+ @Override
+ public boolean seekToNewSource(long targetPos) throws IOException {
+ throw new IOException(E_SEEK_NOTSUPPORTED);
+ }
+
+ @Override
+ public long getPos() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public synchronized int read() throws IOException {
+ if (closed) {
+ throw new IOException(E_STREAM_CLOSED);
+ }
+
+ int byteRead = wrappedStream.read();
+ if (byteRead >= 0) {
+ pos++;
+ }
+ if (stats != null && byteRead >= 0) {
+ stats.incrementBytesRead(1);
+ }
+ return byteRead;
+ }
+
+ public synchronized int read(byte[] buf, int off, int len)
+ throws IOException {
+ if (closed) {
+ throw new IOException(E_STREAM_CLOSED);
+ }
+
+ int result = wrappedStream.read(buf, off, len);
+ if (result > 0) {
+ pos += result;
+ }
+ if (stats != null && result > 0) {
+ stats.incrementBytesRead(result);
+ }
+
+ return result;
+ }
+
+ public synchronized void close() throws IOException {
+ if (closed) {
+ return;
+ }
+ super.close();
+ closed = true;
+ if (!channel.isConnected()) {
+ throw new IOException(E_CLIENT_NOTCONNECTED);
+ }
+
+ try {
+ Session session = channel.getSession();
+ channel.disconnect();
+ session.disconnect();
+ } catch (JSchException e) {
+ throw new IOException(StringUtils.stringifyException(e));
+ }
+ }
+}
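Because seek() and seekToNewSource() throw, consumers of this stream must read strictly sequentially; a sketch of draining a remote file (fs as in the earlier sketch, the path a placeholder):

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.io.IOUtils;

try (FSDataInputStream in = fs.open(new Path("/remote/file.txt"))) {
  // copy everything to stdout in 4 KB chunks without closing stdout
  IOUtils.copyBytes(in, System.out, 4096, false);
}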
http://git-wip-us.apache.org/repos/asf/hadoop/blob/559425dc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/package-info.java
new file mode 100644
index 0000000..1427e48
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/package-info.java
@@ -0,0 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/** SFTP FileSystem package. */
+package org.apache.hadoop.fs.sftp;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/559425dc/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
new file mode 100644
index 0000000..06d9bf0
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.sftp;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Shell;
+
+import org.apache.sshd.SshServer;
+import org.apache.sshd.common.NamedFactory;
+import org.apache.sshd.server.Command;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.UserAuth;
+import org.apache.sshd.server.auth.UserAuthPassword;
+import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
+import org.apache.sshd.server.sftp.SftpSubsystem;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
+
+public class TestSFTPFileSystem {
+
+ private static final String TEST_SFTP_DIR = "testsftp";
+ private static final String TEST_ROOT_DIR =
+ System.getProperty("test.build.data", "build/test/data");
+
+ @Rule public TestName name = new TestName();
+
+ private static final String connection = "sftp://user:password@localhost";
+ private static Path localDir = null;
+ private static FileSystem localFs = null;
+ private static FileSystem sftpFs = null;
+ private static SshServer sshd = null;
+ private static int port;
+
+ private static void startSshdServer() throws IOException {
+ sshd = SshServer.setUpDefaultServer();
+ // ask OS to assign a port
+ sshd.setPort(0);
+ sshd.setKeyPairProvider(new SimpleGeneratorHostKeyProvider());
+
+ List<NamedFactory<UserAuth>> userAuthFactories =
+ new ArrayList<NamedFactory<UserAuth>>();
+ userAuthFactories.add(new UserAuthPassword.Factory());
+
+ sshd.setUserAuthFactories(userAuthFactories);
+
+ sshd.setPasswordAuthenticator(new PasswordAuthenticator() {
+ @Override
+ public boolean authenticate(String username, String password,
+ ServerSession session) {
+ if (username.equals("user") && password.equals("password")) {
+ return true;
+ }
+ return false;
+ }
+ });
+
+ sshd.setSubsystemFactories(
+ Arrays.<NamedFactory<Command>>asList(new SftpSubsystem.Factory()));
+
+ sshd.start();
+ port = sshd.getPort();
+ }
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ // skip all tests if running on Windows
+ assumeTrue(!Shell.WINDOWS);
+
+ startSshdServer();
+
+ Configuration conf = new Configuration();
+ conf.setClass("fs.sftp.impl", SFTPFileSystem.class, FileSystem.class);
+ conf.setInt("fs.sftp.host.port", port);
+ conf.setBoolean("fs.sftp.impl.disable.cache", true);
+
+ localFs = FileSystem.getLocal(conf);
+ localDir = localFs.makeQualified(new Path(TEST_ROOT_DIR, TEST_SFTP_DIR));
+ if (localFs.exists(localDir)) {
+ localFs.delete(localDir, true);
+ }
+ localFs.mkdirs(localDir);
+
+ sftpFs = FileSystem.get(URI.create(connection), conf);
+ }
+
+ @AfterClass
+ public static void tearDown() {
+ if (localFs != null) {
+ try {
+ localFs.delete(localDir, true);
+ localFs.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ if (sftpFs != null) {
+ try {
+ sftpFs.close();
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ if (sshd != null) {
+ try {
+ sshd.stop(true);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ }
+
+ private static final Path touch(FileSystem fs, String filename)
+ throws IOException {
+ return touch(fs, filename, null);
+ }
+
+ private static final Path touch(FileSystem fs, String filename, byte[] data)
+ throws IOException {
+ Path lPath = new Path(localDir.toUri().getPath(), filename);
+ FSDataOutputStream out = null;
+ try {
+ out = fs.create(lPath);
+ if (data != null) {
+ out.write(data);
+ }
+ } finally {
+ if (out != null) {
+ out.close();
+ }
+ }
+ return lPath;
+ }
+
+ /**
+ * Creates a file and deletes it.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testCreateFile() throws Exception {
+ Path file = touch(sftpFs, name.getMethodName().toLowerCase());
+ assertTrue(localFs.exists(file));
+ assertTrue(sftpFs.delete(file, false));
+ assertFalse(localFs.exists(file));
+ }
+
+ /**
+ * Checks that a newly created file exists.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testFileExists() throws Exception {
+ Path file = touch(localFs, name.getMethodName().toLowerCase());
+ assertTrue(sftpFs.exists(file));
+ assertTrue(localFs.exists(file));
+ assertTrue(sftpFs.delete(file, false));
+ assertFalse(sftpFs.exists(file));
+ assertFalse(localFs.exists(file));
+ }
+
+ /**
+ * Test writing to a file and reading its value.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testReadFile() throws Exception {
+ byte[] data = "yaks".getBytes();
+ Path file = touch(localFs, name.getMethodName().toLowerCase(), data);
+ FSDataInputStream is = null;
+ try {
+ is = sftpFs.open(file);
+ byte[] b = new byte[data.length];
+ is.read(b);
+ assertArrayEquals(data, b);
+ } finally {
+ if (is != null) {
+ is.close();
+ }
+ }
+ assertTrue(sftpFs.delete(file, false));
+ }
+
+ /**
+ * Test getting the status of a file.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testStatFile() throws Exception {
+ byte[] data = "yaks".getBytes();
+ Path file = touch(localFs, name.getMethodName().toLowerCase(), data);
+
+ FileStatus lstat = localFs.getFileStatus(file);
+ FileStatus sstat = sftpFs.getFileStatus(file);
+ assertNotNull(sstat);
+
+ assertEquals(lstat.getPath().toUri().getPath(),
+ sstat.getPath().toUri().getPath());
+ assertEquals(data.length, sstat.getLen());
+ assertEquals(lstat.getLen(), sstat.getLen());
+ assertTrue(sftpFs.delete(file, false));
+ }
+
+ /**
+ * Test deleting a non-empty directory.
+ *
+ * @throws Exception
+ */
+ @Test(expected=java.io.IOException.class)
+ public void testDeleteNonEmptyDir() throws Exception {
+ Path file = touch(localFs, name.getMethodName().toLowerCase());
+ sftpFs.delete(localDir, false);
+ }
+
+ /**
+ * Test deleting a file that does not exist.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testDeleteNonExistFile() throws Exception {
+ Path file = new Path(localDir, name.getMethodName().toLowerCase());
+ assertFalse(sftpFs.delete(file, false));
+ }
+
+ /**
+ * Test renaming a file.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testRenameFile() throws Exception {
+ byte[] data = "dingos".getBytes();
+ Path file1 = touch(localFs, name.getMethodName().toLowerCase() + "1");
+ Path file2 = new Path(localDir, name.getMethodName().toLowerCase() + "2");
+
+ assertTrue(sftpFs.rename(file1, file2));
+
+ assertTrue(sftpFs.exists(file2));
+ assertFalse(sftpFs.exists(file1));
+
+ assertTrue(localFs.exists(file2));
+ assertFalse(localFs.exists(file1));
+
+ assertTrue(sftpFs.delete(file2, false));
+ }
+
+ /**
+ * Test renaming a file that does not exist.
+ *
+ * @throws Exception
+ */
+ @Test(expected=java.io.IOException.class)
+ public void testRenameNonExistFile() throws Exception {
+ Path file1 = new Path(localDir, name.getMethodName().toLowerCase() + "1");
+ Path file2 = new Path(localDir, name.getMethodName().toLowerCase() + "2");
+ sftpFs.rename(file1, file2);
+ }
+
+ /**
+ * Test renaming a file onto an existing file.
+ *
+ * @throws Exception
+ */
+ @Test(expected=java.io.IOException.class)
+ public void testRenamingFileOntoExistingFile() throws Exception {
+ Path file1 = touch(localFs, name.getMethodName().toLowerCase() + "1");
+ Path file2 = touch(localFs, name.getMethodName().toLowerCase() + "2");
+ sftpFs.rename(file1, file2);
+ }
+
+}
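Note that setConfigurationFromURI splits the URI user info on ':' and URL-decodes the user name, so reserved characters in the user name should be percent-encoded; a hypothetical example with an '@' in the account name:

// "alice@corp" must be written as alice%40corp inside the authority
URI uri = URI.create("sftp://alice%40corp:password@example.com:2222/");
FileSystem fs = FileSystem.get(uri, conf);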
http://git-wip-us.apache.org/repos/asf/hadoop/blob/559425dc/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 16d2058..e010de1 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -666,6 +666,11 @@
<version>2.0.0-M5</version>
</dependency>
<dependency>
+ <groupId>org.apache.sshd</groupId>
+ <artifactId>sshd-core</artifactId>
+ <version>0.14.0</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.ftpserver</groupId>
<artifactId>ftplet-api</artifactId>
<version>1.0.0</version>
[11/12] hadoop git commit: Revert "HADOOP-11807. add a lint mode to releasedocmaker (ramtin via aw)"
Posted by aw...@apache.org.
Revert "HADOOP-11807. add a lint mode to releasedocmaker (ramtin via aw)"
This reverts commit 79ed0f959ffc490414ca56a73e026500c24e7078.
Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98e59261
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98e59261
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98e59261
Branch: refs/heads/HADOOP-12111
Commit: 98e59261b7fb6f9d6d10a5c897f62222da7d2a1f
Parents: bd4e109
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Jul 8 08:06:30 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Jul 8 08:06:30 2015 -0700
----------------------------------------------------------------------
dev-support/releasedocmaker.py | 76 +++-----------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 -
2 files changed, 10 insertions(+), 68 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/98e59261/dev-support/releasedocmaker.py
----------------------------------------------------------------------
diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
index 8e68b3c..2ccc1c0 100755
--- a/dev-support/releasedocmaker.py
+++ b/dev-support/releasedocmaker.py
@@ -87,15 +87,8 @@ def notableclean(str):
str=str.rstrip()
return str
-# clean output dir
-def cleanOutputDir(dir):
- files = os.listdir(dir)
- for name in files:
- os.remove(os.path.join(dir,name))
- os.rmdir(dir)
-
def mstr(obj):
- if (obj is None):
+ if (obj == None):
return ""
return unicode(obj)
@@ -155,7 +148,7 @@ class Jira:
return mstr(self.fields['description'])
def getReleaseNote(self):
- if (self.notes is None):
+ if (self.notes == None):
field = self.parent.fieldIdMap['Release Note']
if (self.fields.has_key(field)):
self.notes=mstr(self.fields[field])
@@ -166,14 +159,14 @@ class Jira:
def getPriority(self):
ret = ""
pri = self.fields['priority']
- if(pri is not None):
+ if(pri != None):
ret = pri['name']
return mstr(ret)
def getAssignee(self):
ret = ""
mid = self.fields['assignee']
- if(mid is not None):
+ if(mid != None):
ret = mid['displayName']
return mstr(ret)
@@ -189,21 +182,21 @@ class Jira:
def getType(self):
ret = ""
mid = self.fields['issuetype']
- if(mid is not None):
+ if(mid != None):
ret = mid['name']
return mstr(ret)
def getReporter(self):
ret = ""
mid = self.fields['reporter']
- if(mid is not None):
+ if(mid != None):
ret = mid['displayName']
return mstr(ret)
def getProject(self):
ret = ""
mid = self.fields['project']
- if(mid is not None):
+ if(mid != None):
ret = mid['key']
return mstr(ret)
@@ -221,7 +214,7 @@ class Jira:
return False
def getIncompatibleChange(self):
- if (self.incompat is None):
+ if (self.incompat == None):
field = self.parent.fieldIdMap['Hadoop Flags']
self.reviewed=False
self.incompat=False
@@ -234,24 +227,6 @@ class Jira:
self.reviewed=True
return self.incompat
- def checkMissingComponent(self):
- if (len(self.fields['components'])>0):
- return False
- return True
-
- def checkMissingAssignee(self):
- if (self.fields['assignee'] is not None):
- return False
- return True
-
- def checkVersionString(self):
- field = self.parent.fieldIdMap['Fix Version/s']
- for h in self.fields[field]:
- found = re.match('^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', h['name'])
- if not found:
- return True
- return False
-
def getReleaseDate(self,version):
for j in range(len(self.fields['fixVersions'])):
if self.fields['fixVersions'][j]==version:
@@ -364,11 +339,9 @@ def main():
help="build an index file")
parser.add_option("-u","--usetoday", dest="usetoday", action="store_true",
help="use current date for unreleased versions")
- parser.add_option("-n","--lint", dest="lint", action="store_true",
- help="use lint flag to exit on failures")
(options, args) = parser.parse_args()
- if (options.versions is None):
+ if (options.versions == None):
options.versions = []
if (len(args) > 2):
@@ -423,9 +396,6 @@ def main():
reloutputs.writeAll(relhead)
choutputs.writeAll(chhead)
- errorCount=0
- warningCount=0
- lintMessage=""
incompatlist=[]
buglist=[]
improvementlist=[]
@@ -438,14 +408,6 @@ def main():
for jira in sorted(jlist):
if jira.getIncompatibleChange():
incompatlist.append(jira)
- if (len(jira.getReleaseNote())==0):
- warningCount+=1
-
- if jira.checkVersionString():
- warningCount+=1
-
- if jira.checkMissingComponent() or jira.checkMissingAssignee():
- errorCount+=1
elif jira.getType() == "Bug":
buglist.append(jira)
elif jira.getType() == "Improvement":
@@ -469,33 +431,15 @@ def main():
reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
reloutputs.writeKeyRaw(jira.getProject(), line)
line ='\n**WARNING: No release note provided for this incompatible change.**\n\n'
- lintMessage += "\nWARNING: incompatible change %s lacks release notes." % (notableclean(jira.getId()))
+ print 'WARNING: incompatible change %s lacks release notes.' % (notableclean(jira.getId()))
reloutputs.writeKeyRaw(jira.getProject(), line)
- if jira.checkVersionString():
- lintMessage += "\nWARNING: Version string problem for %s " % jira.getId()
-
- if (jira.checkMissingComponent() or jira.checkMissingAssignee()):
- errorMessage=[]
- jira.checkMissingComponent() and errorMessage.append("component")
- jira.checkMissingAssignee() and errorMessage.append("assignee")
- lintMessage += "\nERROR: missing %s for %s " % (" and ".join(errorMessage) , jira.getId())
-
if (len(jira.getReleaseNote())>0):
reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
reloutputs.writeKeyRaw(jira.getProject(), line)
line ='\n%s\n\n' % (tableclean(jira.getReleaseNote()))
reloutputs.writeKeyRaw(jira.getProject(), line)
- if (options.lint is True):
- print lintMessage
- print "======================================="
- print "Error:%d, Warning:%d \n" % (errorCount, warningCount)
-
- if (errorCount>0):
- cleanOutputDir(version)
- sys.exit(1)
-
reloutputs.writeAll("\n\n")
reloutputs.close()
http://git-wip-us.apache.org/repos/asf/hadoop/blob/98e59261/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 859e58a..8ab109d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -230,8 +230,6 @@ Trunk (Unreleased)
HADOOP-11142. Remove hdfs dfs reference from file system shell
documentation (Kengo Seki via aw)
- HADOOP-11807. add a lint mode to releasedocmaker (ramtin via aw)
-
HADOOP-12149. copy all of test-patch BINDIR prior to re-exec (aw)
BUG FIXES
[04/12] hadoop git commit: HADOOP-12193. Rename Touchz.java to Touch.java.
Posted by aw...@apache.org.
HADOOP-12193. Rename Touchz.java to Touch.java.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa96a8c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa96a8c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa96a8c0
Branch: refs/heads/HADOOP-12111
Commit: aa96a8c0c5d82a21e9b511517bc621c41841bc53
Parents: 7e2fe8c
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Jul 7 11:12:29 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Jul 7 11:12:56 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 +
.../java/org/apache/hadoop/fs/shell/Touch.java | 84 ++++++++++++++++++++
.../java/org/apache/hadoop/fs/shell/Touchz.java | 84 --------------------
3 files changed, 86 insertions(+), 84 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa96a8c0/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 194e2e3..af6e3fe 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -684,6 +684,8 @@ Release 2.8.0 - UNRELEASED
HADOOP-11974. Fix FIONREAD #include on Solaris (Alan Burlison via Colin P.
McCabe)
+ HADOOP-12193. Rename Touchz.java to Touch.java. (wang)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa96a8c0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
new file mode 100644
index 0000000..72a463a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import java.io.IOException;
+import java.util.LinkedList;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.PathIsDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+
+/**
+ * Unix touch like commands
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+
+class Touch extends FsCommand {
+ public static void registerCommands(CommandFactory factory) {
+ factory.addClass(Touchz.class, "-touchz");
+ }
+
+ /**
+ * (Re)create zero-length file at the specified path.
+ * This will be replaced by a more UNIX-like touch when files may be
+ * modified.
+ */
+ public static class Touchz extends Touch {
+ public static final String NAME = "touchz";
+ public static final String USAGE = "<path> ...";
+ public static final String DESCRIPTION =
+ "Creates a file of zero length " +
+ "at <path> with current time as the timestamp of that <path>. " +
+ "An error is returned if the file exists with non-zero length\n";
+
+ @Override
+ protected void processOptions(LinkedList<String> args) {
+ CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE);
+ cf.parse(args);
+ }
+
+ @Override
+ protected void processPath(PathData item) throws IOException {
+ if (item.stat.isDirectory()) {
+ // TODO: handle this
+ throw new PathIsDirectoryException(item.toString());
+ }
+ if (item.stat.getLen() != 0) {
+ throw new PathIOException(item.toString(), "Not a zero-length file");
+ }
+ touchz(item);
+ }
+
+ @Override
+ protected void processNonexistentPath(PathData item) throws IOException {
+ if (!item.parentExists()) {
+ throw new PathNotFoundException(item.toString());
+ }
+ touchz(item);
+ }
+
+ private void touchz(PathData item) throws IOException {
+ item.fs.create(item.path).close();
+ }
+ }
+}
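The renamed command keeps touchz's exact semantics: refuse directories and
non-empty files, otherwise (re)create a zero-length file with a bare
create-and-close against the target FileSystem. A minimal standalone sketch
of the same behavior (the class name and the path argument are illustrative,
not part of the patch):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TouchzSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path(args[0]);
        if (fs.exists(p)) {
          if (fs.getFileStatus(p).isDirectory()) {
            throw new IOException(p + " is a directory");
          }
          if (fs.getFileStatus(p).getLen() != 0) {
            throw new IOException(p + ": not a zero-length file");
          }
        }
        // Same call Touch.Touchz#touchz(PathData) makes above.
        fs.create(p).close();
      }
    }

From the shell the command itself is unchanged: hadoop fs -touchz <path> ...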
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa96a8c0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java
deleted file mode 100644
index 7925a0f..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.shell;
-
-import java.io.IOException;
-import java.util.LinkedList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.PathIOException;
-import org.apache.hadoop.fs.PathIsDirectoryException;
-import org.apache.hadoop.fs.PathNotFoundException;
-
-/**
- * Unix touch like commands
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-
-class Touch extends FsCommand {
- public static void registerCommands(CommandFactory factory) {
- factory.addClass(Touchz.class, "-touchz");
- }
-
- /**
- * (Re)create zero-length file at the specified path.
- * This will be replaced by a more UNIX-like touch when files may be
- * modified.
- */
- public static class Touchz extends Touch {
- public static final String NAME = "touchz";
- public static final String USAGE = "<path> ...";
- public static final String DESCRIPTION =
- "Creates a file of zero length " +
- "at <path> with current time as the timestamp of that <path>. " +
- "An error is returned if the file exists with non-zero length\n";
-
- @Override
- protected void processOptions(LinkedList<String> args) {
- CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE);
- cf.parse(args);
- }
-
- @Override
- protected void processPath(PathData item) throws IOException {
- if (item.stat.isDirectory()) {
- // TODO: handle this
- throw new PathIsDirectoryException(item.toString());
- }
- if (item.stat.getLen() != 0) {
- throw new PathIOException(item.toString(), "Not a zero-length file");
- }
- touchz(item);
- }
-
- @Override
- protected void processNonexistentPath(PathData item) throws IOException {
- if (!item.parentExists()) {
- throw new PathNotFoundException(item.toString());
- }
- touchz(item);
- }
-
- private void touchz(PathData item) throws IOException {
- item.fs.create(item.path).close();
- }
- }
-}
[06/12] hadoop git commit: HDFS-8711. setSpaceQuota command should print the available storage types when the input storage type is wrong. Contributed by Brahma Reddy Battula.
Posted by aw...@apache.org.
HDFS-8711. setSpaceQuota command should print the available storage types when the input storage type is wrong. Contributed by Brahma Reddy Battula.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b68701b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b68701b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b68701b7
Branch: refs/heads/HADOOP-12111
Commit: b68701b7b2a9597b4183e0ba19b1551680d543a1
Parents: 3dc92e8
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Jul 7 13:50:49 2015 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Jul 7 13:50:49 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 10 ++++++++--
.../java/org/apache/hadoop/hdfs/TestQuota.java | 21 ++++++++++++++++++++
3 files changed, 32 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b68701b7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7294cab..1e1e6bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -701,6 +701,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8709. Clarify automatic sync in FSEditLog#logEdit. (wang)
+ HDFS-8711. setSpaceQuota command should print the available storage type
+ when input storage type is wrong. (Brahma Reddy Battula via xyao)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b68701b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 4640bb3..014637b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -290,9 +290,15 @@ public class DFSAdmin extends FsShell {
String storageTypeString =
StringUtils.popOptionWithArgument("-storageType", parameters);
if (storageTypeString != null) {
- this.type = StorageType.parseStorageType(storageTypeString);
+ try {
+ this.type = StorageType.parseStorageType(storageTypeString);
+ } catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException("Storage type "
+ + storageTypeString
+ + " is not available. Available storage types are "
+ + StorageType.getTypesSupportingQuota());
+ }
}
-
this.args = parameters.toArray(new String[parameters.size()]);
}
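The pattern above is worth noting: DFSAdmin catches the
IllegalArgumentException thrown by the enum-backed parser and rethrows it
with the set of values a quota can actually be set on, turning an opaque
parse failure into an actionable message. A distilled sketch of the same idea
(the class and method names here are illustrative; StorageType.parseStorageType
and StorageType.getTypesSupportingQuota are the real calls used in the patch):

    import org.apache.hadoop.fs.StorageType;

    public class StorageTypeParseSketch {
      // Translate a parse failure into a message listing the valid choices.
      static StorageType parseOrExplain(String s) {
        try {
          return StorageType.parseStorageType(s);
        } catch (IllegalArgumentException e) {
          throw new IllegalArgumentException("Storage type " + s
              + " is not available. Available storage types are "
              + StorageType.getTypesSupportingQuota());
        }
      }

      public static void main(String[] args) {
        System.out.println(parseOrExplain("SSD")); // parses cleanly
        parseOrExplain("COLD");                    // throws with the full list
      }
    }

The new TestQuota case below exercises exactly this path: it passes the bogus
type COLD and asserts that the supported-type list shows up on stderr.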
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b68701b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 4541e69..e339049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -17,18 +17,22 @@
*/
package org.apache.hadoop.hdfs;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
+import java.io.PrintStream;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -41,6 +45,8 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
import org.junit.Test;
+import com.google.common.base.Charsets;
+
/** A class for testing quota-related commands */
public class TestQuota {
@@ -986,4 +992,19 @@ public class TestQuota {
cluster.shutdown();
}
}
+
+ @Test
+ public void testSetSpaceQuotaWhenStorageTypeIsWrong() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
+ DFSAdmin admin = new DFSAdmin(conf);
+ ByteArrayOutputStream err = new ByteArrayOutputStream();
+ System.setErr(new PrintStream(err));
+ String[] args = { "-setSpaceQuota", "100", "-storageType", "COLD",
+ "/testDir" };
+ admin.run(args);
+ String errOutput = new String(err.toByteArray(), Charsets.UTF_8);
+ assertTrue(errOutput.contains(StorageType.getTypesSupportingQuota()
+ .toString()));
+ }
}
[07/12] hadoop git commit: HDFS-8620. Clean up the checkstyle warnings about ClientProtocol. Contributed by Takanobu Asanuma.
Posted by aw...@apache.org.
HDFS-8620. Clean up the checkstyle warnings about ClientProtocol. Contributed by Takanobu Asanuma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0b8e4e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0b8e4e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0b8e4e5
Branch: refs/heads/HADOOP-12111
Commit: c0b8e4e5b5083631ed22d8d36c8992df7d34303c
Parents: b68701b
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Jul 7 14:01:19 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Jul 7 14:01:19 2015 -0700
----------------------------------------------------------------------
.../hdfs/client/HdfsClientConfigKeys.java | 3 +-
.../hadoop/hdfs/protocol/ClientProtocol.java | 606 +++++++++----------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
3 files changed, 288 insertions(+), 324 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0b8e4e5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index e6d579b..600c7ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -39,7 +39,8 @@ public interface HdfsClientConfigKeys {
String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
- String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY = "dfs.namenode.kerberos.principal";
+ String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
+ "dfs.namenode.kerberos.principal";
/** dfs.client.retry configuration properties */
interface Retry {
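Two things make the large ClientProtocol diff that follows behavior-neutral.
First, members of a Java interface are implicitly public (fields are also
static final, methods abstract), so dropping the redundant modifiers from
versionID, the GET_STATS_* constants, and every method changes nothing at
runtime. Second, collapsing the long throws lists down to IOException is safe
because every listed exception extends IOException, and over RPC the specific
type arrives wrapped in a RemoteException anyway; callers that need it can
still recover it with RemoteException#unwrapRemoteException. A small sketch of
the first point (both forms compile to identical member signatures):

    import java.io.IOException;

    interface Before {
      public static final long versionID = 69L;   // redundant modifiers
      public abstract boolean recoverLease(String src, String clientName)
          throws IOException;
    }

    interface After {
      long versionID = 69L;                        // implicitly public static final
      boolean recoverLease(String src, String clientName) // implicitly public abstract
          throws IOException;
    }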
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0b8e4e5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index ab41911..381be30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -77,32 +77,33 @@ public interface ClientProtocol {
/**
* Until version 69, this class ClientProtocol served as both
- * the client interface to the NN AND the RPC protocol used to
+ * the client interface to the NN AND the RPC protocol used to
* communicate with the NN.
- *
- * This class is used by both the DFSClient and the
+ *
+ * This class is used by both the DFSClient and the
* NN server side to insulate from the protocol serialization.
- *
- * If you are adding/changing this interface then you need to
+ *
+ * If you are adding/changing this interface then you need to
* change both this class and ALSO related protocol buffer
* wire protocol definition in ClientNamenodeProtocol.proto.
- *
- * For more details on protocol buffer wire protocol, please see
+ *
+ * For more details on protocol buffer wire protocol, please see
* .../org/apache/hadoop/hdfs/protocolPB/overview.html
- *
+ *
* The log of historical changes can be retrieved from the svn).
* 69: Eliminate overloaded method names.
- *
+ *
* 69L is the last version id when this class was used for protocols
- * serialization. DO not update this version any further.
+ * serialization. DO not update this version any further.
*/
- public static final long versionID = 69L;
-
+ long versionID = 69L;
+
///////////////////////////////////////
// File contents
///////////////////////////////////////
/**
- * Get locations of the blocks of the specified file within the specified range.
+ * Get locations of the blocks of the specified file
+ * within the specified range.
* DataNode locations for each block are sorted by
* the proximity to the client.
* <p>
@@ -111,9 +112,9 @@ public interface ClientProtocol {
* DataNode locations for each block are sorted by
* the distance to the client's address.
* <p>
- * The client will then have to contact
+ * The client will then have to contact
* one of the indicated DataNodes to obtain the actual data.
- *
+ *
* @param src file name
* @param offset range start offset
* @param length range length
@@ -126,11 +127,8 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public LocatedBlocks getBlockLocations(String src,
- long offset,
- long length)
- throws AccessControlException, FileNotFoundException,
- UnresolvedLinkException, IOException;
+ LocatedBlocks getBlockLocations(String src, long offset, long length)
+ throws IOException;
/**
* Get server default values for a number of configuration params.
@@ -138,7 +136,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
- public FsServerDefaults getServerDefaults() throws IOException;
+ FsServerDefaults getServerDefaults() throws IOException;
/**
* Create a new file entry in the namespace.
@@ -148,36 +146,36 @@ public interface ClientProtocol {
* The name-node does not have a notion of "current" directory for a client.
* <p>
* Once created, the file is visible and available for read to other clients.
- * Although, other clients cannot {@link #delete(String, boolean)}, re-create or
- * {@link #rename(String, String)} it until the file is completed
+ * Although, other clients cannot {@link #delete(String, boolean)}, re-create
+ * or {@link #rename(String, String)} it until the file is completed
* or explicitly as a result of lease expiration.
* <p>
* Blocks have a maximum size. Clients that intend to create
- * multi-block files must also use
+ * multi-block files must also use
* {@link #addBlock}
*
* @param src path of the file being created.
* @param masked masked permission.
* @param clientName name of the current client.
- * @param flag indicates whether the file should be
+ * @param flag indicates whether the file should be
* overwritten if it already exists or create if it does not exist or append.
* @param createParent create missing parent directory if true
* @param replication block replication factor.
* @param blockSize maximum block size.
* @param supportedVersions CryptoProtocolVersions supported by the client
- *
+ *
* @return the status of the created file, it could be null if the server
* doesn't support returning the file status
* @throws AccessControlException If access is denied
* @throws AlreadyBeingCreatedException if the path does not exist.
- * @throws DSQuotaExceededException If file creation violates disk space
+ * @throws DSQuotaExceededException If file creation violates disk space
* quota restriction
* @throws FileAlreadyExistsException If file <code>src</code> already exists
* @throws FileNotFoundException If parent of <code>src</code> does not exist
* and <code>createParent</code> is false
* @throws ParentNotDirectoryException If parent of <code>src</code> is not a
* directory.
- * @throws NSQuotaExceededException If file creation violates name space
+ * @throws NSQuotaExceededException If file creation violates name space
* quota restriction
* @throws SafeModeException create not allowed in safemode
* @throws UnresolvedLinkException If <code>src</code> contains a symlink
@@ -190,33 +188,29 @@ public interface ClientProtocol {
* <em>Note that create with {@link CreateFlag#OVERWRITE} is idempotent.</em>
*/
@AtMostOnce
- public HdfsFileStatus create(String src, FsPermission masked,
+ HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
- boolean createParent, short replication, long blockSize,
+ boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions)
- throws AccessControlException, AlreadyBeingCreatedException,
- DSQuotaExceededException, FileAlreadyExistsException,
- FileNotFoundException, NSQuotaExceededException,
- ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
- SnapshotAccessControlException, IOException;
+ throws IOException;
/**
- * Append to the end of the file.
+ * Append to the end of the file.
* @param src path of the file being created.
* @param clientName name of the current client.
* @param flag indicates whether the data is appended to a new block.
* @return wrapper with information about the last partial block and file
* status if any
- * @throws AccessControlException if permission to append file is
- * denied by the system. As usually on the client side the exception will
+ * @throws AccessControlException if permission to append file is
+ * denied by the system. As usually on the client side the exception will
* be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
* Allows appending to an existing file if the server is
* configured with the parameter dfs.support.append set to true, otherwise
* throws an IOException.
- *
+ *
* @throws AccessControlException If permission to append to file is denied
* @throws FileNotFoundException If file <code>src</code> is not found
- * @throws DSQuotaExceededException If append violates disk space quota
+ * @throws DSQuotaExceededException If append violates disk space quota
* restriction
* @throws SafeModeException append not allowed in safemode
* @throws UnresolvedLinkException If <code>src</code> contains a symlink
@@ -227,27 +221,25 @@ public interface ClientProtocol {
* @throws UnsupportedOperationException if append is not supported
*/
@AtMostOnce
- public LastBlockWithStatus append(String src, String clientName,
- EnumSetWritable<CreateFlag> flag) throws AccessControlException,
- DSQuotaExceededException, FileNotFoundException, SafeModeException,
- UnresolvedLinkException, SnapshotAccessControlException, IOException;
+ LastBlockWithStatus append(String src, String clientName,
+ EnumSetWritable<CreateFlag> flag) throws IOException;
/**
* Set replication for an existing file.
* <p>
* The NameNode sets replication to the new value and returns.
- * The actual block replication is not expected to be performed during
- * this method call. The blocks will be populated or removed in the
+ * The actual block replication is not expected to be performed during
+ * this method call. The blocks will be populated or removed in the
* background as the result of the routine block maintenance procedures.
- *
+ *
* @param src file name
* @param replication new replication
- *
+ *
* @return true if successful;
* false if file does not exist or is a directory
*
* @throws AccessControlException If access is denied
- * @throws DSQuotaExceededException If replication violates disk space
+ * @throws DSQuotaExceededException If replication violates disk space
* quota restriction
* @throws FileNotFoundException If file <code>src</code> is not found
* @throws SafeModeException not allowed in safemode
@@ -256,21 +248,19 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public boolean setReplication(String src, short replication)
- throws AccessControlException, DSQuotaExceededException,
- FileNotFoundException, SafeModeException, UnresolvedLinkException,
- SnapshotAccessControlException, IOException;
+ boolean setReplication(String src, short replication)
+ throws IOException;
/**
* Get all the available block storage policies.
* @return All the in-use block storage policies currently.
*/
@Idempotent
- public BlockStoragePolicy[] getStoragePolicies() throws IOException;
+ BlockStoragePolicy[] getStoragePolicies() throws IOException;
/**
- * Set the storage policy for a file/directory
- * @param src Path of an existing file/directory.
+ * Set the storage policy for a file/directory.
+ * @param src Path of an existing file/directory.
* @param policyName The name of the storage policy
* @throws SnapshotAccessControlException If access is denied
* @throws UnresolvedLinkException if <code>src</code> contains a symlink
@@ -278,13 +268,12 @@ public interface ClientProtocol {
* @throws QuotaExceededException If changes violate the quota restriction
*/
@Idempotent
- public void setStoragePolicy(String src, String policyName)
- throws SnapshotAccessControlException, UnresolvedLinkException,
- FileNotFoundException, QuotaExceededException, IOException;
+ void setStoragePolicy(String src, String policyName)
+ throws IOException;
/**
* Set permissions for an existing file/directory.
- *
+ *
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If file <code>src</code> is not found
* @throws SafeModeException not allowed in safemode
@@ -293,9 +282,8 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public void setPermission(String src, FsPermission permission)
- throws AccessControlException, FileNotFoundException, SafeModeException,
- UnresolvedLinkException, SnapshotAccessControlException, IOException;
+ void setPermission(String src, FsPermission permission)
+ throws IOException;
/**
* Set Owner of a path (i.e. a file or a directory).
@@ -312,16 +300,15 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public void setOwner(String src, String username, String groupname)
- throws AccessControlException, FileNotFoundException, SafeModeException,
- UnresolvedLinkException, SnapshotAccessControlException, IOException;
+ void setOwner(String src, String username, String groupname)
+ throws IOException;
/**
* The client can give up on a block by calling abandonBlock().
- * The client can then either obtain a new block, or complete or abandon the
+ * The client can then either obtain a new block, or complete or abandon the
* file.
* Any partial writes to the block will be discarded.
- *
+ *
* @param b Block to abandon
* @param fileId The id of the file where the block resides. Older clients
* will pass GRANDFATHER_INODE_ID here.
@@ -334,19 +321,18 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public void abandonBlock(ExtendedBlock b, long fileId,
+ void abandonBlock(ExtendedBlock b, long fileId,
String src, String holder)
- throws AccessControlException, FileNotFoundException,
- UnresolvedLinkException, IOException;
+ throws IOException;
/**
- * A client that wants to write an additional block to the
+ * A client that wants to write an additional block to the
* indicated filename (which must currently be open for writing)
- * should call addBlock().
+ * should call addBlock().
*
* addBlock() allocates a new block and datanodes the block data
* should be replicated to.
- *
+ *
* addBlock() also commits the previous block by reporting
* to the name-node the actual generation stamp and the length
* of the block that the client has transmitted to data-nodes.
@@ -372,16 +358,14 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public LocatedBlock addBlock(String src, String clientName,
- ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
+ LocatedBlock addBlock(String src, String clientName,
+ ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
String[] favoredNodes)
- throws AccessControlException, FileNotFoundException,
- NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
- IOException;
+ throws IOException;
- /**
+ /**
* Get a datanode for an existing pipeline.
- *
+ *
* @param src the file being written
* @param fileId the ID of the file being written
* @param blk the block being written
@@ -389,9 +373,9 @@ public interface ClientProtocol {
* @param excludes the excluded nodes
* @param numAdditionalNodes number of additional datanodes
* @param clientName the name of the client
- *
+ *
* @return the located block.
- *
+ *
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If file <code>src</code> is not found
* @throws SafeModeException create not allowed in safemode
@@ -399,22 +383,21 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public LocatedBlock getAdditionalDatanode(final String src,
+ LocatedBlock getAdditionalDatanode(final String src,
final long fileId, final ExtendedBlock blk,
final DatanodeInfo[] existings,
final String[] existingStorageIDs,
final DatanodeInfo[] excludes,
final int numAdditionalNodes, final String clientName
- ) throws AccessControlException, FileNotFoundException,
- SafeModeException, UnresolvedLinkException, IOException;
+ ) throws IOException;
/**
- * The client is done writing data to the given filename, and would
- * like to complete it.
+ * The client is done writing data to the given filename, and would
+ * like to complete it.
*
* The function returns whether the file has been closed successfully.
* If the function returns false, the caller should try again.
- *
+ *
* close() also commits the last block of file by reporting
* to the name-node the actual generation stamp and the length
* of the block that the client has transmitted to data-nodes.
@@ -434,14 +417,13 @@ public interface ClientProtocol {
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If file <code>src</code> is not found
* @throws SafeModeException create not allowed in safemode
- * @throws UnresolvedLinkException If <code>src</code> contains a symlink
+ * @throws UnresolvedLinkException If <code>src</code> contains a symlink
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public boolean complete(String src, String clientName,
+ boolean complete(String src, String clientName,
ExtendedBlock last, long fileId)
- throws AccessControlException, FileNotFoundException, SafeModeException,
- UnresolvedLinkException, IOException;
+ throws IOException;
/**
* The client wants to report corrupted blocks (blocks with specified
@@ -449,7 +431,7 @@ public interface ClientProtocol {
* @param blocks Array of located blocks to report
*/
@Idempotent
- public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
+ void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
///////////////////////////////////////
// Namespace management
@@ -460,17 +442,17 @@ public interface ClientProtocol {
* @param dst new name.
* @return true if successful, or false if the old name does not exist
* or if the new name already belongs to the namespace.
- *
+ *
* @throws SnapshotAccessControlException if path is in RO snapshot
- * @throws IOException an I/O error occurred
+ * @throws IOException an I/O error occurred
*/
@AtMostOnce
- public boolean rename(String src, String dst)
- throws UnresolvedLinkException, SnapshotAccessControlException, IOException;
+ boolean rename(String src, String dst)
+ throws IOException;
/**
- * Moves blocks from srcs to trg and delete srcs
- *
+ * Moves blocks from srcs to trg and delete srcs.
+ *
* @param trg existing file
* @param srcs - list of existing files (same block size, same replication)
* @throws IOException if some arguments are invalid
@@ -479,8 +461,8 @@ public interface ClientProtocol {
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
@AtMostOnce
- public void concat(String trg, String[] srcs)
- throws IOException, UnresolvedLinkException, SnapshotAccessControlException;
+ void concat(String trg, String[] srcs)
+ throws IOException;
/**
* Rename src to dst.
@@ -491,7 +473,7 @@ public interface ClientProtocol {
* </ul>
* <p>
* Without OVERWRITE option, rename fails if the dst already exists.
- * With OVERWRITE option, rename overwrites the dst, if it is a file
+ * With OVERWRITE option, rename overwrites the dst, if it is a file
* or an empty directory. Rename fails if dst is a non-empty directory.
* <p>
* This implementation of rename is atomic.
@@ -499,17 +481,17 @@ public interface ClientProtocol {
* @param src existing file or directory name.
* @param dst new name.
* @param options Rename options
- *
+ *
* @throws AccessControlException If access is denied
- * @throws DSQuotaExceededException If rename violates disk space
+ * @throws DSQuotaExceededException If rename violates disk space
* quota restriction
* @throws FileAlreadyExistsException If <code>dst</code> already exists and
- * <code>options</options> has {@link Rename#OVERWRITE} option
+ * <code>options</code> has {@link Rename#OVERWRITE} option
* false.
* @throws FileNotFoundException If <code>src</code> does not exist
- * @throws NSQuotaExceededException If rename violates namespace
+ * @throws NSQuotaExceededException If rename violates namespace
* quota restriction
- * @throws ParentNotDirectoryException If parent of <code>dst</code>
+ * @throws ParentNotDirectoryException If parent of <code>dst</code>
* is not a directory
* @throws SafeModeException rename not allowed in safemode
* @throws UnresolvedLinkException If <code>src</code> or
@@ -518,11 +500,8 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@AtMostOnce
- public void rename2(String src, String dst, Options.Rename... options)
- throws AccessControlException, DSQuotaExceededException,
- FileAlreadyExistsException, FileNotFoundException,
- NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
- UnresolvedLinkException, SnapshotAccessControlException, IOException;
+ void rename2(String src, String dst, Options.Rename... options)
+ throws IOException;
/**
* Truncate file src to new size.
@@ -550,21 +529,20 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public boolean truncate(String src, long newLength, String clientName)
- throws AccessControlException, FileNotFoundException, SafeModeException,
- UnresolvedLinkException, SnapshotAccessControlException, IOException;
+ boolean truncate(String src, long newLength, String clientName)
+ throws IOException;
/**
* Delete the given file or directory from the file system.
* <p>
- * same as delete but provides a way to avoid accidentally
- * deleting non empty directories programmatically.
+ * same as delete but provides a way to avoid accidentally
+ * deleting non empty directories programmatically.
* @param src existing name
* @param recursive if true deletes a non empty directory recursively,
* else throws an exception.
- * @return true only if the existing file or directory was actually removed
+ * @return true only if the existing file or directory was actually removed
* from the file system.
- *
+ *
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If file <code>src</code> is not found
* @throws SafeModeException create not allowed in safemode
@@ -573,10 +551,9 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@AtMostOnce
- public boolean delete(String src, boolean recursive)
- throws AccessControlException, FileNotFoundException, SafeModeException,
- UnresolvedLinkException, SnapshotAccessControlException, IOException;
-
+ boolean delete(String src, boolean recursive)
+ throws IOException;
+
/**
* Create a directory (or hierarchy of directories) with the given
* name and permission.
@@ -591,8 +568,9 @@ public interface ClientProtocol {
* @throws FileAlreadyExistsException If <code>src</code> already exists
* @throws FileNotFoundException If parent of <code>src</code> does not exist
* and <code>createParent</code> is false
- * @throws NSQuotaExceededException If file creation violates quota restriction
- * @throws ParentNotDirectoryException If parent of <code>src</code>
+ * @throws NSQuotaExceededException If file creation violates quota
+ * restriction
+ * @throws ParentNotDirectoryException If parent of <code>src</code>
* is not a directory
* @throws SafeModeException create not allowed in safemode
* @throws UnresolvedLinkException If <code>src</code> contains a symlink
@@ -603,14 +581,11 @@ public interface ClientProtocol {
* @throws InvalidPathException If <code>src</code> is invalid
*/
@Idempotent
- public boolean mkdirs(String src, FsPermission masked, boolean createParent)
- throws AccessControlException, FileAlreadyExistsException,
- FileNotFoundException, NSQuotaExceededException,
- ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
- SnapshotAccessControlException, IOException;
+ boolean mkdirs(String src, FsPermission masked, boolean createParent)
+ throws IOException;
/**
- * Get a partial listing of the indicated directory
+ * Get a partial listing of the indicated directory.
*
* @param src the directory name
* @param startAfter the name to start listing after encoded in java UTF8
@@ -624,20 +599,17 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public DirectoryListing getListing(String src,
- byte[] startAfter,
- boolean needLocation)
- throws AccessControlException, FileNotFoundException,
- UnresolvedLinkException, IOException;
-
- /**
- * Get listing of all the snapshottable directories
- *
+ DirectoryListing getListing(String src, byte[] startAfter,
+ boolean needLocation) throws IOException;
+
+ /**
+ * Get listing of all the snapshottable directories.
+ *
* @return Information about all the current snapshottable directory
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
+ SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException;
///////////////////////////////////////
@@ -646,7 +618,7 @@ public interface ClientProtocol {
/**
* Client programs can cause stateful changes in the NameNode
- * that affect other clients. A client may obtain a file and
+ * that affect other clients. A client may obtain a file and
* neither abandon nor complete it. A client might hold a series
* of locks that prevent other clients from proceeding.
* Clearly, it would be bad if a client held a bunch of locks
@@ -664,29 +636,28 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public void renewLease(String clientName) throws AccessControlException,
- IOException;
+ void renewLease(String clientName) throws IOException;
/**
* Start lease recovery.
* Lightweight NameNode operation to trigger lease recovery
- *
+ *
* @param src path of the file to start lease recovery
* @param clientName name of the current client
* @return true if the file is already closed
* @throws IOException
*/
@Idempotent
- public boolean recoverLease(String src, String clientName) throws IOException;
+ boolean recoverLease(String src, String clientName) throws IOException;
+
+ int GET_STATS_CAPACITY_IDX = 0;
+ int GET_STATS_USED_IDX = 1;
+ int GET_STATS_REMAINING_IDX = 2;
+ int GET_STATS_UNDER_REPLICATED_IDX = 3;
+ int GET_STATS_CORRUPT_BLOCKS_IDX = 4;
+ int GET_STATS_MISSING_BLOCKS_IDX = 5;
+ int GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX = 6;
- public int GET_STATS_CAPACITY_IDX = 0;
- public int GET_STATS_USED_IDX = 1;
- public int GET_STATS_REMAINING_IDX = 2;
- public int GET_STATS_UNDER_REPLICATED_IDX = 3;
- public int GET_STATS_CORRUPT_BLOCKS_IDX = 4;
- public int GET_STATS_MISSING_BLOCKS_IDX = 5;
- public int GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX = 6;
-
/**
* Get a set of statistics about the filesystem.
* Right now, only seven values are returned.
@@ -700,11 +671,11 @@ public interface ClientProtocol {
* <li> [6] contains number of blocks which have replication factor
* 1 and have lost the only replica. </li>
* </ul>
- * Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
+ * Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
* actual numbers to index into the array.
*/
@Idempotent
- public long[] getStats() throws IOException;
+ long[] getStats() throws IOException;
/**
* Get a report on the system's current datanodes.
@@ -713,14 +684,14 @@ public interface ClientProtocol {
* otherwise all datanodes if type is ALL.
*/
@Idempotent
- public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
+ DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
throws IOException;
/**
* Get a report on the current datanode storages.
*/
@Idempotent
- public DatanodeStorageReport[] getDatanodeStorageReport(
+ DatanodeStorageReport[] getDatanodeStorageReport(
HdfsConstants.DatanodeReportType type) throws IOException;
/**
@@ -728,11 +699,11 @@ public interface ClientProtocol {
* @param filename The name of the file
* @return The number of bytes in each block
* @throws IOException
- * @throws UnresolvedLinkException if the path contains a symlink.
+ * @throws UnresolvedLinkException if the path contains a symlink.
*/
@Idempotent
- public long getPreferredBlockSize(String filename)
- throws IOException, UnresolvedLinkException;
+ long getPreferredBlockSize(String filename)
+ throws IOException;
/**
* Enter, leave or get safe mode.
@@ -740,16 +711,17 @@ public interface ClientProtocol {
* Safe mode is a name node state when it
* <ol><li>does not accept changes to name space (read-only), and</li>
* <li>does not replicate or delete blocks.</li></ol>
- *
+ *
* <p>
* Safe mode is entered automatically at name node startup.
* Safe mode can also be entered manually using
- * {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean) setSafeMode(SafeModeAction.SAFEMODE_ENTER,false)}.
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
+ * setSafeMode(SafeModeAction.SAFEMODE_ENTER,false)}.
* <p>
* At startup the name node accepts data node reports collecting
* information about block locations.
* In order to leave safe mode it needs to collect a configurable
- * percentage called threshold of blocks, which satisfy the minimal
+ * percentage called threshold of blocks, which satisfy the minimal
* replication condition.
* The minimal replication condition is that each block must have at least
* <tt>dfs.namenode.replication.min</tt> replicas.
@@ -760,37 +732,40 @@ public interface ClientProtocol {
* Then the name node leaves safe mode.
* <p>
* If safe mode is turned on manually using
- * {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean) setSafeMode(SafeModeAction.SAFEMODE_ENTER,false)}
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
+ * setSafeMode(SafeModeAction.SAFEMODE_ENTER,false)}
* then the name node stays in safe mode until it is manually turned off
- * using {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean) setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false)}.
+ * using {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
+ * setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false)}.
* Current state of the name node can be verified using
- * {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean) setSafeMode(SafeModeAction.SAFEMODE_GET,false)}
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
+ * setSafeMode(SafeModeAction.SAFEMODE_GET,false)}
* <h4>Configuration parameters:</h4>
* <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
* <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
* <tt>dfs.namenode.replication.min</tt> is the minimal replication parameter.
- *
+ *
* <h4>Special cases:</h4>
- * The name node does not enter safe mode at startup if the threshold is
+ * The name node does not enter safe mode at startup if the threshold is
* set to 0 or if the name space is empty.<br>
- * If the threshold is set to 1 then all blocks need to have at least
+ * If the threshold is set to 1 then all blocks need to have at least
* minimal replication.<br>
- * If the threshold value is greater than 1 then the name node will not be
+ * If the threshold value is greater than 1 then the name node will not be
* able to turn off safe mode automatically.<br>
* Safe mode can always be turned off manually.
- *
+ *
* @param action <ul> <li>0 leave safe mode;</li>
* <li>1 enter safe mode;</li>
* <li>2 get safe mode state.</li></ul>
* @param isChecked If true then action will be done only in ActiveNN.
- *
- * @return <ul><li>0 if the safe mode is OFF or</li>
+ *
+ * @return <ul><li>0 if the safe mode is OFF or</li>
* <li>1 if the safe mode is ON.</li></ul>
- *
+ *
* @throws IOException
*/
@Idempotent
- public boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked)
+ boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked)
throws IOException;
/**
@@ -808,47 +783,45 @@ public interface ClientProtocol {
* @throws IOException if image creation failed.
*/
@AtMostOnce
- public boolean saveNamespace(long timeWindow, long txGap) throws IOException;
+ boolean saveNamespace(long timeWindow, long txGap) throws IOException;
-
/**
* Roll the edit log.
* Requires superuser privileges.
- *
+ *
* @throws AccessControlException if the superuser privilege is violated
* @throws IOException if log roll fails
* @return the txid of the new segment
*/
@Idempotent
- public long rollEdits() throws AccessControlException, IOException;
+ long rollEdits() throws IOException;
/**
* Enable/Disable restore failed storage.
* <p>
* sets flag to enable restore of failed storage replicas
- *
+ *
* @throws AccessControlException if the superuser privilege is violated.
*/
@Idempotent
- public boolean restoreFailedStorage(String arg)
- throws AccessControlException, IOException;
+ boolean restoreFailedStorage(String arg) throws IOException;
/**
- * Tells the namenode to reread the hosts and exclude files.
+ * Tells the namenode to reread the hosts and exclude files.
* @throws IOException
*/
@Idempotent
- public void refreshNodes() throws IOException;
+ void refreshNodes() throws IOException;
/**
* Finalize previous upgrade.
* Remove file system state saved during the upgrade.
* The upgrade will become irreversible.
- *
+ *
* @throws IOException
*/
@Idempotent
- public void finalizeUpgrade() throws IOException;
+ void finalizeUpgrade() throws IOException;
/**
* Rolling upgrade operations.
@@ -857,7 +830,7 @@ public interface ClientProtocol {
* progress, returns null.
*/
@Idempotent
- public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
+ RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
throws IOException;
/**
@@ -871,9 +844,9 @@ public interface ClientProtocol {
* cookie returned from the previous call.
*/
@Idempotent
- public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
+ CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
throws IOException;
-
+
/**
* Dumps namenode data structures into specified file. If the file
* already exists, then append.
@@ -881,7 +854,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
- public void metaSave(String filename) throws IOException;
+ void metaSave(String filename) throws IOException;
/**
* Tell all datanodes to use a new, non-persistent bandwidth value for
@@ -891,8 +864,8 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
- public void setBalancerBandwidth(long bandwidth) throws IOException;
-
+ void setBalancerBandwidth(long bandwidth) throws IOException;
+
/**
* Get the file info for a specific file or directory.
* @param src The string representation of the path to the file
@@ -901,29 +874,27 @@ public interface ClientProtocol {
* or null if file not found
* @throws AccessControlException permission denied
* @throws FileNotFoundException file <code>src</code> is not found
- * @throws UnresolvedLinkException if the path contains a symlink.
- * @throws IOException If an I/O error occurred
+ * @throws UnresolvedLinkException if the path contains a symlink.
+ * @throws IOException If an I/O error occurred
*/
@Idempotent
- public HdfsFileStatus getFileInfo(String src) throws AccessControlException,
- FileNotFoundException, UnresolvedLinkException, IOException;
-
+ HdfsFileStatus getFileInfo(String src) throws IOException;
+
/**
- * Get the close status of a file
+ * Get the close status of a file.
* @param src The string representation of the path to the file
*
* @return return true if file is closed
* @throws AccessControlException permission denied
* @throws FileNotFoundException file <code>src</code> is not found
* @throws UnresolvedLinkException if the path contains a symlink.
- * @throws IOException If an I/O error occurred
+ * @throws IOException If an I/O error occurred
*/
@Idempotent
- public boolean isFileClosed(String src) throws AccessControlException,
- FileNotFoundException, UnresolvedLinkException, IOException;
-
+ boolean isFileClosed(String src) throws IOException;
+
/**
- * Get the file info for a specific file or directory. If the path
+ * Get the file info for a specific file or directory. If the path
* refers to a symlink then the FileStatus of the symlink is returned.
* @param src The string representation of the path to the file
*
@@ -932,58 +903,55 @@ public interface ClientProtocol {
*
* @throws AccessControlException permission denied
* @throws UnresolvedLinkException if <code>src</code> contains a symlink
- * @throws IOException If an I/O error occurred
+ * @throws IOException If an I/O error occurred
*/
@Idempotent
- public HdfsFileStatus getFileLinkInfo(String src)
- throws AccessControlException, UnresolvedLinkException, IOException;
-
+ HdfsFileStatus getFileLinkInfo(String src) throws IOException;
+
/**
* Get {@link ContentSummary} rooted at the specified directory.
* @param path The string representation of the path
*
* @throws AccessControlException permission denied
* @throws FileNotFoundException file <code>path</code> is not found
- * @throws UnresolvedLinkException if <code>path</code> contains a symlink.
+ * @throws UnresolvedLinkException if <code>path</code> contains a symlink.
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public ContentSummary getContentSummary(String path)
- throws AccessControlException, FileNotFoundException,
- UnresolvedLinkException, IOException;
+ ContentSummary getContentSummary(String path) throws IOException;
/**
* Set the quota for a directory.
* @param path The string representation of the path to the directory
- * @param namespaceQuota Limit on the number of names in the tree rooted
+ * @param namespaceQuota Limit on the number of names in the tree rooted
* at the directory
- * @param storagespaceQuota Limit on storage space occupied all the files under
- * this directory.
+ * @param storagespaceQuota Limit on storage space occupied by all the files
+ * under this directory.
* @param type StorageType that the space quota is intended to be set on.
- * It may be null when called by traditional space/namespace quota.
- * When type is is not null, the storagespaceQuota parameter is for
- * type specified and namespaceQuota must be
+ * It may be null when called by traditional space/namespace
+ * quota. When type is not null, the storagespaceQuota
+ * parameter is for the specified type and namespaceQuota must be
* {@link HdfsConstants#QUOTA_DONT_SET}.
*
* <br><br>
- *
- * The quota can have three types of values : (1) 0 or more will set
+ *
+ * The quota can have three types of values : (1) 0 or more will set
* the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies
* the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET}
* implies the quota will be reset. Any other value is a runtime error.
- *
+ *
* @throws AccessControlException permission denied
* @throws FileNotFoundException file <code>path</code> is not found
- * @throws QuotaExceededException if the directory size
+ * @throws QuotaExceededException if the directory size
* is greater than the given quota
- * @throws UnresolvedLinkException if the <code>path</code> contains a symlink.
+ * @throws UnresolvedLinkException if the <code>path</code> contains
+ * a symlink.
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
- StorageType type) throws AccessControlException, FileNotFoundException,
- UnresolvedLinkException, SnapshotAccessControlException, IOException;
+ void setQuota(String path, long namespaceQuota, long storagespaceQuota,
+ StorageType type) throws IOException;
/**
* Write all metadata for this file into persistent storage.
@@ -992,39 +960,35 @@ public interface ClientProtocol {
* @param inodeId The inode ID, or GRANDFATHER_INODE_ID if the client is
* too old to support fsync with inode IDs.
* @param client The string representation of the client
- * @param lastBlockLength The length of the last block (under construction)
- * to be reported to NameNode
+ * @param lastBlockLength The length of the last block (under construction)
+ * to be reported to NameNode
* @throws AccessControlException permission denied
* @throws FileNotFoundException file <code>src</code> is not found
- * @throws UnresolvedLinkException if <code>src</code> contains a symlink.
+ * @throws UnresolvedLinkException if <code>src</code> contains a symlink.
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public void fsync(String src, long inodeId, String client,
- long lastBlockLength)
- throws AccessControlException, FileNotFoundException,
- UnresolvedLinkException, IOException;
+ void fsync(String src, long inodeId, String client, long lastBlockLength)
+ throws IOException;
/**
* Sets the modification and access time of the file to the specified time.
* @param src The string representation of the path
* @param mtime The number of milliseconds since Jan 1, 1970.
- * Setting mtime to -1 means that modification time should not be set
- * by this call.
+ * Setting mtime to -1 means that modification time should not
+ * be set by this call.
* @param atime The number of milliseconds since Jan 1, 1970.
* Setting atime to -1 means that access time should not be set
* by this call.
- *
+ *
* @throws AccessControlException permission denied
* @throws FileNotFoundException file <code>src</code> is not found
- * @throws UnresolvedLinkException if <code>src</code> contains a symlink.
+ * @throws UnresolvedLinkException if <code>src</code> contains a symlink.
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@Idempotent
- public void setTimes(String src, long mtime, long atime)
- throws AccessControlException, FileNotFoundException,
- UnresolvedLinkException, SnapshotAccessControlException, IOException;
+ void setTimes(String src, long mtime, long atime) throws IOException;
/**
* Create symlink to a file or directory.
@@ -1041,16 +1005,13 @@ public interface ClientProtocol {
* and <code>createParent</code> is false
* @throws ParentNotDirectoryException If parent of <code>link</code> is not a
* directory.
- * @throws UnresolvedLinkException if <code>link</target> contains a symlink.
+ * @throws UnresolvedLinkException if <code>link</code> contains a symlink.
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@AtMostOnce
- public void createSymlink(String target, String link, FsPermission dirPerm,
- boolean createParent) throws AccessControlException,
- FileAlreadyExistsException, FileNotFoundException,
- ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
- SnapshotAccessControlException, IOException;
+ void createSymlink(String target, String link, FsPermission dirPerm,
+ boolean createParent) throws IOException;
/**
* Return the target of the given symlink. If there is an intermediate
@@ -1065,28 +1026,27 @@ public interface ClientProtocol {
* or an I/O error occurred
*/
@Idempotent
- public String getLinkTarget(String path) throws AccessControlException,
- FileNotFoundException, IOException;
-
+ String getLinkTarget(String path) throws IOException;
+
/**
- * Get a new generation stamp together with an access token for
+ * Get a new generation stamp together with an access token for
* a block under construction
- *
+ *
* This method is called only when a client needs to recover a failed
* pipeline or set up a pipeline for appending to a block.
- *
+ *
* @param block a block
* @param clientName the name of the client
* @return a located block with a new generation stamp and an access token
* @throws IOException if any error occurs
*/
@Idempotent
- public LocatedBlock updateBlockForPipeline(ExtendedBlock block,
+ LocatedBlock updateBlockForPipeline(ExtendedBlock block,
String clientName) throws IOException;
/**
- * Update a pipeline for a block under construction
- *
+ * Update a pipeline for a block under construction.
+ *
* @param clientName the name of the client
* @param oldBlock the old block
* @param newBlock the new block containing new generation stamp and length
@@ -1094,104 +1054,104 @@ public interface ClientProtocol {
* @throws IOException if any error occurs
*/
@AtMostOnce
- public void updatePipeline(String clientName, ExtendedBlock oldBlock,
+ void updatePipeline(String clientName, ExtendedBlock oldBlock,
ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
throws IOException;
/**
* Get a valid Delegation Token.
- *
+ *
* @param renewer the designated renewer for the token
* @return Token<DelegationTokenIdentifier>
* @throws IOException
*/
@Idempotent
- public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
+ Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException;
/**
* Renew an existing delegation token.
- *
+ *
* @param token delegation token obtained earlier
* @return the new expiration time
* @throws IOException
*/
@Idempotent
- public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
+ long renewDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException;
-
+
/**
* Cancel an existing delegation token.
- *
+ *
* @param token delegation token
* @throws IOException
*/
@Idempotent
- public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
+ void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException;
-
+
/**
* @return encryption key so a client can encrypt data sent via the
* DataTransferProtocol to/from DataNodes.
* @throws IOException
*/
@Idempotent
- public DataEncryptionKey getDataEncryptionKey() throws IOException;
-
+ DataEncryptionKey getDataEncryptionKey() throws IOException;
+
/**
- * Create a snapshot
+ * Create a snapshot.
* @param snapshotRoot the path that is being snapshotted
* @param snapshotName name of the snapshot created
* @return the snapshot path.
* @throws IOException
*/
@AtMostOnce
- public String createSnapshot(String snapshotRoot, String snapshotName)
+ String createSnapshot(String snapshotRoot, String snapshotName)
throws IOException;
/**
- * Delete a specific snapshot of a snapshottable directory
+ * Delete a specific snapshot of a snapshottable directory.
* @param snapshotRoot The snapshottable directory
* @param snapshotName Name of the snapshot for the snapshottable directory
* @throws IOException
*/
@AtMostOnce
- public void deleteSnapshot(String snapshotRoot, String snapshotName)
+ void deleteSnapshot(String snapshotRoot, String snapshotName)
throws IOException;
-
+
/**
- * Rename a snapshot
- * @param snapshotRoot the directory path where the snapshot was taken
+ * Rename a snapshot.
+ * @param snapshotRoot the directory path where the snapshot was taken
* @param snapshotOldName old name of the snapshot
* @param snapshotNewName new name of the snapshot
* @throws IOException
*/
@AtMostOnce
- public void renameSnapshot(String snapshotRoot, String snapshotOldName,
+ void renameSnapshot(String snapshotRoot, String snapshotOldName,
String snapshotNewName) throws IOException;
-
+
/**
* Allow snapshot on a directory.
* @param snapshotRoot the directory to be snapped
* @throws IOException on error
*/
@Idempotent
- public void allowSnapshot(String snapshotRoot)
+ void allowSnapshot(String snapshotRoot)
throws IOException;
-
+
/**
* Disallow snapshot on a directory.
* @param snapshotRoot the directory to disallow snapshot
* @throws IOException on error
*/
@Idempotent
- public void disallowSnapshot(String snapshotRoot)
+ void disallowSnapshot(String snapshotRoot)
throws IOException;
-
+
/**
* Get the difference between two snapshots, or between a snapshot and the
* current tree of a directory.
- *
+ *
* @param snapshotRoot
* full path of the directory where snapshots are taken
* @param fromSnapshot
@@ -1204,93 +1164,93 @@ public interface ClientProtocol {
* @throws IOException on error
*/
@Idempotent
- public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
+ SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
String fromSnapshot, String toSnapshot) throws IOException;
/**
* Add a CacheDirective to the CacheManager.
- *
+ *
* @param directive A CacheDirectiveInfo to be added
* @param flags {@link CacheFlag}s to use for this operation.
* @return A CacheDirectiveInfo associated with the added directive
* @throws IOException if the directive could not be added
*/
@AtMostOnce
- public long addCacheDirective(CacheDirectiveInfo directive,
+ long addCacheDirective(CacheDirectiveInfo directive,
EnumSet<CacheFlag> flags) throws IOException;
/**
* Modify a CacheDirective in the CacheManager.
- *
+ *
* @param flags {@link CacheFlag}s to use for this operation.
* @throws IOException if the directive could not be modified
*/
@AtMostOnce
- public void modifyCacheDirective(CacheDirectiveInfo directive,
+ void modifyCacheDirective(CacheDirectiveInfo directive,
EnumSet<CacheFlag> flags) throws IOException;
/**
* Remove a CacheDirectiveInfo from the CacheManager.
- *
+ *
* @param id of a CacheDirectiveInfo
* @throws IOException if the cache directive could not be removed
*/
@AtMostOnce
- public void removeCacheDirective(long id) throws IOException;
+ void removeCacheDirective(long id) throws IOException;
/**
* List the set of cached paths of a cache pool. Incrementally fetches results
* from the server.
- *
+ *
* @param prevId The last listed entry ID, or -1 if this is the first call to
* listCacheDirectives.
- * @param filter Parameters to use to filter the list results,
+ * @param filter Parameters to use to filter the list results,
* or null to display all directives visible to us.
* @return A batch of CacheDirectiveEntry objects.
*/
@Idempotent
- public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
+ BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
long prevId, CacheDirectiveInfo filter) throws IOException;
/**
* Add a new cache pool.
- *
+ *
* @param info Description of the new cache pool
* @throws IOException If the request could not be completed.
*/
@AtMostOnce
- public void addCachePool(CachePoolInfo info) throws IOException;
+ void addCachePool(CachePoolInfo info) throws IOException;
/**
* Modify an existing cache pool.
*
* @param req
* The request to modify a cache pool.
- * @throws IOException
+ * @throws IOException
* If the request could not be completed.
*/
@AtMostOnce
- public void modifyCachePool(CachePoolInfo req) throws IOException;
-
+ void modifyCachePool(CachePoolInfo req) throws IOException;
+
/**
* Remove a cache pool.
- *
+ *
* @param pool name of the cache pool to remove.
* @throws IOException if the cache pool did not exist, or could not be
* removed.
*/
@AtMostOnce
- public void removeCachePool(String pool) throws IOException;
+ void removeCachePool(String pool) throws IOException;
/**
* List the set of cache pools. Incrementally fetches results from the server.
- *
- * @param prevPool name of the last pool listed, or the empty string if this is
- * the first invocation of listCachePools
+ *
+ * @param prevPool name of the last pool listed, or the empty string if this
+ * is the first invocation of listCachePools
* @return A batch of CachePoolEntry objects.
*/
@Idempotent
- public BatchedEntries<CachePoolEntry> listCachePools(String prevPool)
+ BatchedEntries<CachePoolEntry> listCachePools(String prevPool)
throws IOException;
/**
@@ -1300,7 +1260,7 @@ public interface ClientProtocol {
* changes. (Modifications are merged into the current ACL.)
*/
@Idempotent
- public void modifyAclEntries(String src, List<AclEntry> aclSpec)
+ void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException;
/**
@@ -1308,14 +1268,14 @@ public interface ClientProtocol {
* retained.
*/
@Idempotent
- public void removeAclEntries(String src, List<AclEntry> aclSpec)
+ void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException;
/**
* Removes all default ACL entries from files and directories.
*/
@Idempotent
- public void removeDefaultAcl(String src) throws IOException;
+ void removeDefaultAcl(String src) throws IOException;
/**
* Removes all but the base ACL entries of files and directories. The entries
@@ -1323,33 +1283,33 @@ public interface ClientProtocol {
* bits.
*/
@Idempotent
- public void removeAcl(String src) throws IOException;
+ void removeAcl(String src) throws IOException;
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*/
@Idempotent
- public void setAcl(String src, List<AclEntry> aclSpec) throws IOException;
+ void setAcl(String src, List<AclEntry> aclSpec) throws IOException;
/**
* Gets the ACLs of files and directories.
*/
@Idempotent
- public AclStatus getAclStatus(String src) throws IOException;
-
+ AclStatus getAclStatus(String src) throws IOException;
+
/**
- * Create an encryption zone
+ * Create an encryption zone.
*/
@AtMostOnce
- public void createEncryptionZone(String src, String keyName)
+ void createEncryptionZone(String src, String keyName)
throws IOException;
/**
* Get the encryption zone for a path.
*/
@Idempotent
- public EncryptionZone getEZForPath(String src)
+ EncryptionZone getEZForPath(String src)
throws IOException;
/**
@@ -1360,7 +1320,7 @@ public interface ClientProtocol {
* @return Batch of encryption zones.
*/
@Idempotent
- public BatchedEntries<EncryptionZone> listEncryptionZones(
+ BatchedEntries<EncryptionZone> listEncryptionZones(
long prevId) throws IOException;
/**
@@ -1376,9 +1336,9 @@ public interface ClientProtocol {
* @throws IOException
*/
@AtMostOnce
- public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+ void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
throws IOException;
-
+
/**
* Get xattrs of a file or directory. Values in xAttrs parameter are ignored.
* If xAttrs is null or empty, this is the same as getting all xattrs of the
@@ -1389,11 +1349,11 @@ public interface ClientProtocol {
*
* @param src file or directory
* @param xAttrs xAttrs to get
- * @return List<XAttr> <code>XAttr</code> list
+ * @return List<XAttr> <code>XAttr</code> list
* @throws IOException
*/
@Idempotent
- public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
+ List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
throws IOException;
/**
@@ -1408,9 +1368,9 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
- public List<XAttr> listXAttrs(String src)
+ List<XAttr> listXAttrs(String src)
throws IOException;
-
+
/**
* Remove xattr of a file or directory. Value in xAttr parameter is ignored.
* The name must be prefixed with the namespace followed by ".". For example,
@@ -1423,7 +1383,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@AtMostOnce
- public void removeXAttr(String src, XAttr xAttr) throws IOException;
+ void removeXAttr(String src, XAttr xAttr) throws IOException;
/**
* Checks if the user can access a path. The mode specifies which access
@@ -1441,7 +1401,7 @@ public interface ClientProtocol {
* @throws IOException see specific implementation
*/
@Idempotent
- public void checkAccess(String path, FsAction mode) throws IOException;
+ void checkAccess(String path, FsAction mode) throws IOException;
/**
* Get the highest txid the NameNode knows has been written to the edit
@@ -1449,12 +1409,12 @@ public interface ClientProtocol {
* the starting point for the inotify event stream.
*/
@Idempotent
- public long getCurrentEditLogTxid() throws IOException;
+ long getCurrentEditLogTxid() throws IOException;
/**
* Get an ordered list of batches of events corresponding to the edit log
* transactions for txids equal to or greater than txid.
*/
@Idempotent
- public EventBatchList getEditsFromTxid(long txid) throws IOException;
+ EventBatchList getEditsFromTxid(long txid) throws IOException;
}
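The paginated calls in the interface above (listCacheDirectives, listCachePools, listEncryptionZones) all follow the same cursor contract: pass an empty or sentinel cursor on the first call, then the last entry seen on each subsequent call, until the batch reports no more results. Here is a minimal sketch of draining listCachePools this way; the proto variable is assumed to be an already-constructed ClientProtocol proxy, which is out of scope for this patch:

import java.io.IOException;

import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class ListCachePoolsSketch {
  // Drain all cache pools batch by batch, using the name of the last
  // pool seen as the cursor for the next RPC.
  static void printAllCachePools(ClientProtocol proto) throws IOException {
    String prevPool = "";  // empty string => first invocation
    BatchedEntries<CachePoolEntry> batch;
    do {
      batch = proto.listCachePools(prevPool);
      for (int i = 0; i < batch.size(); i++) {
        CachePoolEntry entry = batch.get(i);
        prevPool = entry.getInfo().getPoolName();
        System.out.println(prevPool);
      }
    } while (batch.hasMore() && batch.size() > 0);
  }
}

The same loop shape works for listCacheDirectives and listEncryptionZones, with the numeric id of the last entry as the cursor instead of the pool name.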
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0b8e4e5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1e1e6bb..db77d0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -704,6 +704,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8711. setSpaceQuota command should print the available storage type
when input storage type is wrong. (Brahma Reddy Battula via xyao)
+ HDFS-8620. Clean up the checkstyle warnings about ClientProtocol.
+ (Takanobu Asanuma via wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
[02/12] hadoop git commit: Revert "HDFS-8652. Track BlockInfo instead
of Block in CorruptReplicasMap. Contributed by Jing Zhao."
Posted by aw...@apache.org.
Revert "HDFS-8652. Track BlockInfo instead of Block in CorruptReplicasMap. Contributed by Jing Zhao."
This reverts commit d62b63d297bff12d93de560dd50ddd48743b851d.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc99aaff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc99aaff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc99aaff
Branch: refs/heads/HADOOP-12111
Commit: bc99aaffe7b0ed13b1efc37b6a32cdbd344c2d75
Parents: 559425d
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Jul 7 10:08:30 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Jul 7 10:13:41 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 -
.../hdfs/server/blockmanagement/BlockInfo.java | 7 +-
.../blockmanagement/BlockInfoContiguous.java | 9 +-
.../BlockInfoUnderConstruction.java | 22 +--
.../BlockInfoUnderConstructionContiguous.java | 13 +-
.../server/blockmanagement/BlockManager.java | 143 ++++++++++---------
.../hdfs/server/blockmanagement/BlocksMap.java | 4 +-
.../ContiguousBlockStorageOp.java | 7 +-
.../blockmanagement/CorruptReplicasMap.java | 62 ++++----
.../hdfs/server/namenode/FSDirWriteFileOp.java | 6 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 2 +-
.../hdfs/server/namenode/NamenodeFsck.java | 12 +-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 6 +-
.../blockmanagement/BlockManagerTestUtil.java | 7 +-
.../server/blockmanagement/TestBlockInfo.java | 10 +-
.../blockmanagement/TestBlockManager.java | 10 +-
.../blockmanagement/TestCorruptReplicaInfo.java | 15 +-
17 files changed, 168 insertions(+), 169 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e40ea3d..7294cab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -701,8 +701,6 @@ Release 2.8.0 - UNRELEASED
HDFS-8709. Clarify automatic sync in FSEditLog#logEdit. (wang)
- HDFS-8652. Track BlockInfo instead of Block in CorruptReplicasMap. (jing9)
-
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 4df2f0e..5ad992b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -179,7 +179,7 @@ public abstract class BlockInfo extends Block
* information indicating the index of the block in the
* corresponding block group.
*/
- abstract void addStorage(DatanodeStorageInfo storage, Block reportedBlock);
+ abstract boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock);
/**
* Remove {@link DatanodeStorageInfo} location for a block
@@ -193,11 +193,6 @@ public abstract class BlockInfo extends Block
abstract void replaceBlock(BlockInfo newBlock);
/**
- * @return true if there is no storage storing the block
- */
- abstract boolean hasEmptyStorage();
-
- /**
* Find specified DatanodeStorageInfo.
* @return DatanodeStorageInfo or null if not found.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 561faca..de64ad8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -45,8 +45,8 @@ public class BlockInfoContiguous extends BlockInfo {
}
@Override
- void addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
- ContiguousBlockStorageOp.addStorage(this, storage);
+ boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
+ return ContiguousBlockStorageOp.addStorage(this, storage);
}
@Override
@@ -73,9 +73,4 @@ public class BlockInfoContiguous extends BlockInfo {
ucBlock.setBlockCollection(getBlockCollection());
return ucBlock;
}
-
- @Override
- boolean hasEmptyStorage() {
- return ContiguousBlockStorageOp.hasEmptyStorage(this);
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 7924709..9cd3987 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -273,17 +274,18 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
"No blocks found, lease removed.");
}
boolean allLiveReplicasTriedAsPrimary = true;
- for (ReplicaUnderConstruction replica : replicas) {
+ for (int i = 0; i < replicas.size(); i++) {
// Check if all replicas have been tried or not.
- if (replica.isAlive()) {
- allLiveReplicasTriedAsPrimary = allLiveReplicasTriedAsPrimary
- && replica.getChosenAsPrimary();
+ if (replicas.get(i).isAlive()) {
+ allLiveReplicasTriedAsPrimary =
+ (allLiveReplicasTriedAsPrimary &&
+ replicas.get(i).getChosenAsPrimary());
}
}
if (allLiveReplicasTriedAsPrimary) {
// Just set all the replicas to be chosen whether they are alive or not.
- for (ReplicaUnderConstruction replica : replicas) {
- replica.setChosenAsPrimary(false);
+ for (int i = 0; i < replicas.size(); i++) {
+ replicas.get(i).setChosenAsPrimary(false);
}
}
long mostRecentLastUpdate = 0;
@@ -343,6 +345,10 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
* Convert an under construction block to a complete block.
*
* @return a complete block.
+ * @throws IOException
+ * if the state of the block (the generation stamp and the length)
+ * has not been committed by the client or it does not have at
+ * least a minimal number of replicas reported from data-nodes.
*/
public abstract BlockInfo convertToCompleteBlock();
@@ -380,8 +386,8 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
}
private void appendUCParts(StringBuilder sb) {
- sb.append("{UCState=").append(blockUCState).append(", truncateBlock=")
- .append(truncateBlock)
+ sb.append("{UCState=").append(blockUCState)
+ .append(", truncateBlock=" + truncateBlock)
.append(", primaryNodeIndex=").append(primaryNodeIndex)
.append(", replicas=[");
if (replicas != null) {
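The loop restored in the hunk above implements an exhaustion check for lease recovery: once every live replica has already been tried as the primary, the chosen-as-primary flag is cleared on all replicas so selection can start over. A self-contained sketch of that pattern, with a hypothetical Replica class standing in for ReplicaUnderConstruction:

import java.util.Arrays;
import java.util.List;

class Replica {
  private final boolean alive;
  private boolean chosenAsPrimary;
  Replica(boolean alive) { this.alive = alive; }
  boolean isAlive() { return alive; }
  boolean getChosenAsPrimary() { return chosenAsPrimary; }
  void setChosenAsPrimary(boolean b) { chosenAsPrimary = b; }
}

public class PrimaryResetSketch {
  // If every live replica was already tried as primary, clear the flag
  // on all replicas so the next recovery round can retry them.
  static void maybeResetChosenAsPrimary(List<Replica> replicas) {
    boolean allLiveTried = true;
    for (Replica r : replicas) {
      if (r.isAlive()) {
        allLiveTried = allLiveTried && r.getChosenAsPrimary();
      }
    }
    if (allLiveTried) {
      for (Replica r : replicas) {
        r.setChosenAsPrimary(false);
      }
    }
  }

  public static void main(String[] args) {
    List<Replica> rs = Arrays.asList(new Replica(true), new Replica(false));
    rs.get(0).setChosenAsPrimary(true);
    maybeResetChosenAsPrimary(rs);
    System.out.println(rs.get(0).getChosenAsPrimary()); // false: reset
  }
}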
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
index 963f247..d3cb337 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
@@ -55,6 +55,10 @@ public class BlockInfoUnderConstructionContiguous extends
* Convert an under construction block to a complete block.
*
* @return BlockInfo - a complete block.
+ * @throws IOException if the state of the block
+ * (the generation stamp and the length) has not been committed by
+ * the client or it does not have at least a minimal number of replicas
+ * reported from data-nodes.
*/
@Override
public BlockInfoContiguous convertToCompleteBlock() {
@@ -65,8 +69,8 @@ public class BlockInfoUnderConstructionContiguous extends
}
@Override
- void addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
- ContiguousBlockStorageOp.addStorage(this, storage);
+ boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
+ return ContiguousBlockStorageOp.addStorage(this, storage);
}
@Override
@@ -85,11 +89,6 @@ public class BlockInfoUnderConstructionContiguous extends
}
@Override
- boolean hasEmptyStorage() {
- return ContiguousBlockStorageOp.hasEmptyStorage(this);
- }
-
- @Override
public void setExpectedLocations(DatanodeStorageInfo[] targets) {
int numLocations = targets == null ? 0 : targets.length;
this.replicas = new ArrayList<>(numLocations);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6ae3ee2..0b60a97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.util.ExitUtil.terminate;
import java.io.IOException;
@@ -196,7 +197,7 @@ public class BlockManager implements BlockStatsMXBean {
* notified of all block deletions that might have been pending
* when the failover happened.
*/
- private final Set<BlockInfo> postponedMisreplicatedBlocks = Sets.newHashSet();
+ private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet();
/**
* Maps a StorageID to the set of blocks that are "extra" for this
@@ -337,7 +338,8 @@ public class BlockManager implements BlockStatsMXBean {
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
this.shouldCheckForEnoughRacks =
- conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null;
+ conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
+ ? false : true;
this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
@@ -463,7 +465,8 @@ public class BlockManager implements BlockStatsMXBean {
/** Should the access keys be updated? */
boolean shouldUpdateBlockKey(final long updateTime) throws IOException {
- return isBlockTokenEnabled() && blockTokenSecretManager.updateKeys(updateTime);
+ return isBlockTokenEnabled()? blockTokenSecretManager.updateKeys(updateTime)
+ : false;
}
public void activate(Configuration conf) {
@@ -516,14 +519,14 @@ public class BlockManager implements BlockStatsMXBean {
synchronized (neededReplications) {
out.println("Metasave: Blocks waiting for replication: " +
neededReplications.size());
- for (BlockInfo block : neededReplications) {
+ for (Block block : neededReplications) {
dumpBlockMeta(block, out);
}
}
// Dump any postponed over-replicated blocks
out.println("Mis-replicated blocks that have been postponed:");
- for (BlockInfo block : postponedMisreplicatedBlocks) {
+ for (Block block : postponedMisreplicatedBlocks) {
dumpBlockMeta(block, out);
}
@@ -541,9 +544,11 @@ public class BlockManager implements BlockStatsMXBean {
* Dump the metadata for the given block in a human-readable
* form.
*/
- private void dumpBlockMeta(BlockInfo block, PrintWriter out) {
- List<DatanodeDescriptor> containingNodes = new ArrayList<>();
- List<DatanodeStorageInfo> containingLiveReplicasNodes = new ArrayList<>();
+ private void dumpBlockMeta(Block block, PrintWriter out) {
+ List<DatanodeDescriptor> containingNodes =
+ new ArrayList<DatanodeDescriptor>();
+ List<DatanodeStorageInfo> containingLiveReplicasNodes =
+ new ArrayList<>();
NumberReplicas numReplicas = new NumberReplicas();
// source node returned is not used
@@ -551,16 +556,17 @@ public class BlockManager implements BlockStatsMXBean {
containingLiveReplicasNodes, numReplicas,
UnderReplicatedBlocks.LEVEL);
- // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which
- // are not included in the numReplicas.liveReplicas() count
+ // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which are
+ // not included in the numReplicas.liveReplicas() count
assert containingLiveReplicasNodes.size() >= numReplicas.liveReplicas();
int usableReplicas = numReplicas.liveReplicas() +
numReplicas.decommissionedAndDecommissioning();
-
- BlockCollection bc = block.getBlockCollection();
- String fileName = (bc == null) ? "[orphaned]" : bc.getName();
- out.print(fileName + ": ");
-
+
+ if (block instanceof BlockInfo) {
+ BlockCollection bc = ((BlockInfo) block).getBlockCollection();
+ String fileName = (bc == null) ? "[orphaned]" : bc.getName();
+ out.print(fileName + ": ");
+ }
// l: == live:, d: == decommissioned c: == corrupt e: == excess
out.print(block + ((usableReplicas > 0)? "" : " MISSING") +
" (replicas:" +
@@ -569,8 +575,8 @@ public class BlockManager implements BlockStatsMXBean {
" c: " + numReplicas.corruptReplicas() +
" e: " + numReplicas.excessReplicas() + ") ");
- Collection<DatanodeDescriptor> corruptNodes =
- corruptReplicas.getNodes(block);
+ Collection<DatanodeDescriptor> corruptNodes =
+ corruptReplicas.getNodes(block);
for (DatanodeStorageInfo storage : getStorages(block)) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
@@ -807,8 +813,7 @@ public class BlockManager implements BlockStatsMXBean {
final long offset, final long length, final int nrBlocksToReturn,
final AccessMode mode) throws IOException {
int curBlk;
- long curPos = 0;
- long blkSize;
+ long curPos = 0, blkSize = 0;
int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
blkSize = blocks[curBlk].getNumBytes();
@@ -1199,11 +1204,10 @@ public class BlockManager implements BlockStatsMXBean {
}
/**
- * Mark a replica (of a contiguous block) or an internal block (of a striped
- * block group) as corrupt.
- * @param b Indicating the reported bad block and the corresponding BlockInfo
- * stored in blocksMap.
+ *
+ * @param b
* @param storageInfo storage that contains the block, if known. null otherwise.
+ * @throws IOException
*/
private void markBlockAsCorrupt(BlockToMarkCorrupt b,
DatanodeStorageInfo storageInfo,
@@ -1224,7 +1228,7 @@ public class BlockManager implements BlockStatsMXBean {
}
// Add this replica to corruptReplicas Map
- corruptReplicas.addToCorruptReplicasMap(b.stored, node, b.reason,
+ corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason,
b.reasonCode);
NumberReplicas numberOfReplicas = countNodes(b.stored);
@@ -1246,7 +1250,7 @@ public class BlockManager implements BlockStatsMXBean {
if (hasEnoughLiveReplicas || hasMoreCorruptReplicas
|| corruptedDuringWrite) {
// the block is over-replicated so invalidate the replicas immediately
- invalidateBlock(b, node, numberOfReplicas);
+ invalidateBlock(b, node);
} else if (namesystem.isPopulatingReplQueues()) {
// add the block to neededReplication
updateNeededReplications(b.stored, -1, 0);
@@ -1254,15 +1258,12 @@ public class BlockManager implements BlockStatsMXBean {
}
/**
- * Invalidates the given block on the given datanode. Note that before this
- * call we have already checked the current live replicas of the block and
- * make sure it's safe to invalidate the replica.
- *
- * @return true if the replica was successfully invalidated and no longer
- * associated with the DataNode.
+ * Invalidates the given block on the given datanode.
+ * @return true if the block was successfully invalidated and no longer
+ * present in the BlocksMap
*/
- private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn,
- NumberReplicas nr) throws IOException {
+ private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
+ ) throws IOException {
blockLog.info("BLOCK* invalidateBlock: {} on {}", b, dn);
DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
if (node == null) {
@@ -1271,30 +1272,35 @@ public class BlockManager implements BlockStatsMXBean {
}
// Check how many copies we have of the block
+ NumberReplicas nr = countNodes(b.stored);
if (nr.replicasOnStaleNodes() > 0) {
blockLog.info("BLOCK* invalidateBlocks: postponing " +
"invalidation of {} on {} because {} replica(s) are located on " +
"nodes with potentially out-of-date block reports", b, dn,
nr.replicasOnStaleNodes());
- postponeBlock(b.stored);
+ postponeBlock(b.corrupted);
return false;
- } else {
- // we already checked the number of replicas in the caller of this
- // function and we know there is at least one copy on a live node, so we
- // can delete it.
+ } else if (nr.liveReplicas() >= 1) {
+ // If we have at least one copy on a live node, then we can delete it.
addToInvalidates(b.corrupted, dn);
removeStoredBlock(b.stored, node);
blockLog.debug("BLOCK* invalidateBlocks: {} on {} listed for deletion.",
b, dn);
return true;
+ } else {
+ blockLog.info("BLOCK* invalidateBlocks: {} on {} is the only copy and" +
+ " was not deleted", b, dn);
+ return false;
}
}
+
public void setPostponeBlocksFromFuture(boolean postpone) {
this.shouldPostponeBlocksFromFuture = postpone;
}
- private void postponeBlock(BlockInfo blk) {
+
+ private void postponeBlock(Block blk) {
if (postponedMisreplicatedBlocks.add(blk)) {
postponedMisreplicatedBlocksCount.incrementAndGet();
}
@@ -1368,7 +1374,7 @@ public class BlockManager implements BlockStatsMXBean {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes;
DatanodeDescriptor srcNode;
- BlockCollection bc;
+ BlockCollection bc = null;
int additionalReplRequired;
int scheduledWork = 0;
@@ -1529,9 +1535,9 @@ public class BlockManager implements BlockStatsMXBean {
DatanodeStorageInfo[] targets = rw.targets;
if (targets != null && targets.length != 0) {
StringBuilder targetList = new StringBuilder("datanode(s)");
- for (DatanodeStorageInfo target : targets) {
+ for (int k = 0; k < targets.length; k++) {
targetList.append(' ');
- targetList.append(target.getDatanodeDescriptor());
+ targetList.append(targets[k].getDatanodeDescriptor());
}
blockLog.info("BLOCK* ask {} to replicate {} to {}", rw.srcNode,
rw.block, targetList);
@@ -1608,8 +1614,8 @@ public class BlockManager implements BlockStatsMXBean {
List<DatanodeDescriptor> datanodeDescriptors = null;
if (nodes != null) {
datanodeDescriptors = new ArrayList<>(nodes.size());
- for (String nodeStr : nodes) {
- DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodeStr);
+ for (int i = 0; i < nodes.size(); i++) {
+ DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodes.get(i));
if (node != null) {
datanodeDescriptors.add(node);
}
@@ -1648,7 +1654,7 @@ public class BlockManager implements BlockStatsMXBean {
* the given block
*/
@VisibleForTesting
- DatanodeDescriptor chooseSourceDatanode(BlockInfo block,
+ DatanodeDescriptor chooseSourceDatanode(Block block,
List<DatanodeDescriptor> containingNodes,
List<DatanodeStorageInfo> nodesContainingLiveReplicas,
NumberReplicas numReplicas,
@@ -1728,16 +1734,16 @@ public class BlockManager implements BlockStatsMXBean {
if (timedOutItems != null) {
namesystem.writeLock();
try {
- for (BlockInfo timedOutItem : timedOutItems) {
+ for (int i = 0; i < timedOutItems.length; i++) {
/*
* Use the blockinfo from the blocksmap to be certain we're working
* with the most up-to-date block information (e.g. genstamp).
*/
- BlockInfo bi = getStoredBlock(timedOutItem);
+ BlockInfo bi = getStoredBlock(timedOutItems[i]);
if (bi == null) {
continue;
}
- NumberReplicas num = countNodes(timedOutItem);
+ NumberReplicas num = countNodes(timedOutItems[i]);
if (isNeededReplication(bi, getReplication(bi), num.liveReplicas())) {
neededReplications.add(bi, num.liveReplicas(),
num.decommissionedAndDecommissioning(), getReplication(bi));
@@ -1754,7 +1760,7 @@ public class BlockManager implements BlockStatsMXBean {
public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
assert namesystem.hasReadLock();
- DatanodeDescriptor node;
+ DatanodeDescriptor node = null;
try {
node = datanodeManager.getDatanode(nodeReg);
} catch (UnregisteredNodeException e) {
@@ -2016,7 +2022,7 @@ public class BlockManager implements BlockStatsMXBean {
startIndex += (base+1);
}
}
- Iterator<BlockInfo> it = postponedMisreplicatedBlocks.iterator();
+ Iterator<Block> it = postponedMisreplicatedBlocks.iterator();
for (int tmp = 0; tmp < startIndex; tmp++) {
it.next();
}
@@ -2111,7 +2117,7 @@ public class BlockManager implements BlockStatsMXBean {
long oldGenerationStamp, long oldNumBytes,
DatanodeStorageInfo[] newStorages) throws IOException {
assert namesystem.hasWriteLock();
- BlockToMarkCorrupt b;
+ BlockToMarkCorrupt b = null;
if (block.getGenerationStamp() != oldGenerationStamp) {
b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
"genstamp does not match " + oldGenerationStamp
@@ -2713,7 +2719,7 @@ public class BlockManager implements BlockStatsMXBean {
" but corrupt replicas map has " + corruptReplicasCount);
}
if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication)) {
- invalidateCorruptReplicas(storedBlock, reportedBlock, num);
+ invalidateCorruptReplicas(storedBlock, reportedBlock);
}
return storedBlock;
}
@@ -2746,20 +2752,18 @@ public class BlockManager implements BlockStatsMXBean {
*
* @param blk Block whose corrupt replicas need to be invalidated
*/
- private void invalidateCorruptReplicas(BlockInfo blk, Block reported,
- NumberReplicas numberReplicas) {
+ private void invalidateCorruptReplicas(BlockInfo blk, Block reported) {
Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk);
boolean removedFromBlocksMap = true;
if (nodes == null)
return;
// make a copy of the array of nodes in order to avoid
// ConcurrentModificationException, when the block is removed from the node
- DatanodeDescriptor[] nodesCopy = nodes.toArray(
- new DatanodeDescriptor[nodes.size()]);
+ DatanodeDescriptor[] nodesCopy = nodes.toArray(new DatanodeDescriptor[0]);
for (DatanodeDescriptor node : nodesCopy) {
try {
if (!invalidateBlock(new BlockToMarkCorrupt(reported, blk, null,
- Reason.ANY), node, numberReplicas)) {
+ Reason.ANY), node)) {
removedFromBlocksMap = false;
}
} catch (IOException e) {
@@ -2809,6 +2813,7 @@ public class BlockManager implements BlockStatsMXBean {
replicationQueuesInitializer.join();
} catch (final InterruptedException e) {
LOG.warn("Interrupted while waiting for replicationQueueInitializer. Returning..");
+ return;
} finally {
replicationQueuesInitializer = null;
}
@@ -3170,7 +3175,8 @@ public class BlockManager implements BlockStatsMXBean {
CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks()
.get(new CachedBlock(storedBlock.getBlockId(), (short) 0, false));
if (cblock != null) {
- boolean removed = node.getPendingCached().remove(cblock);
+ boolean removed = false;
+ removed |= node.getPendingCached().remove(cblock);
removed |= node.getCached().remove(cblock);
removed |= node.getPendingUncached().remove(cblock);
if (removed) {
@@ -3386,7 +3392,7 @@ public class BlockManager implements BlockStatsMXBean {
int excess = 0;
int stale = 0;
Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
- for (DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+ for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) {
corrupt++;
@@ -3407,8 +3413,7 @@ public class BlockManager implements BlockStatsMXBean {
stale++;
}
}
- return new NumberReplicas(live, decommissioned, decommissioning, corrupt,
- excess, stale);
+ return new NumberReplicas(live, decommissioned, decommissioning, corrupt, excess, stale);
}
/**
@@ -3591,6 +3596,8 @@ public class BlockManager implements BlockStatsMXBean {
String src, BlockInfo[] blocks) {
for (BlockInfo b: blocks) {
if (!b.isComplete()) {
+ final BlockInfoUnderConstruction uc =
+ (BlockInfoUnderConstruction)b;
final int numNodes = b.numNodes();
final int min = getMinStorageNum(b);
final BlockUCState state = b.getBlockUCState();
@@ -3716,7 +3723,11 @@ public class BlockManager implements BlockStatsMXBean {
return blocksMap.getBlockCollection(b);
}
- public void removeBlockFromMap(BlockInfo block) {
+ public int numCorruptReplicas(Block block) {
+ return corruptReplicas.numCorruptReplicas(block);
+ }
+
+ public void removeBlockFromMap(Block block) {
removeFromExcessReplicateMap(block);
blocksMap.removeBlock(block);
// If block is removed from blocksMap remove it from corruptReplicasMap
@@ -3726,7 +3737,7 @@ public class BlockManager implements BlockStatsMXBean {
/**
* If a block is removed from blocksMap, remove it from excessReplicateMap.
*/
- private void removeFromExcessReplicateMap(BlockInfo block) {
+ private void removeFromExcessReplicateMap(Block block) {
for (DatanodeStorageInfo info : getStorages(block)) {
String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
LightWeightLinkedSet<BlockInfo> excessReplicas =
@@ -3757,14 +3768,14 @@ public class BlockManager implements BlockStatsMXBean {
/**
* Get the replicas which are corrupt for a given block.
*/
- public Collection<DatanodeDescriptor> getCorruptReplicas(BlockInfo block) {
+ public Collection<DatanodeDescriptor> getCorruptReplicas(Block block) {
return corruptReplicas.getNodes(block);
}
/**
* Get reason for certain corrupted replicas for a given block and a given dn.
*/
- public String getCorruptReason(BlockInfo block, DatanodeDescriptor node) {
+ public String getCorruptReason(Block block, DatanodeDescriptor node) {
return corruptReplicas.getCorruptReason(block, node);
}
@@ -3858,7 +3869,7 @@ public class BlockManager implements BlockStatsMXBean {
datanodeManager.clearPendingQueues();
postponedMisreplicatedBlocks.clear();
postponedMisreplicatedBlocksCount.set(0);
- }
+ };
public static LocatedBlock newLocatedBlock(
ExtendedBlock b, DatanodeStorageInfo[] storages,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 85cea5a..0dbf485 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -117,7 +117,7 @@ class BlocksMap {
* remove it from all data-node lists it belongs to;
* and remove all data-node locations associated with the block.
*/
- void removeBlock(BlockInfo block) {
+ void removeBlock(Block block) {
BlockInfo blockInfo = blocks.remove(block);
if (blockInfo == null)
return;
@@ -190,7 +190,7 @@ class BlocksMap {
// remove block from the data-node list and the node from the block info
boolean removed = node.removeBlock(info);
- if (info.hasEmptyStorage() // no datanodes left
+ if (info.getDatanode(0) == null // no datanodes left
&& info.isDeleted()) { // does not belong to a file
blocks.remove(b); // remove block from the map
}
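The condition restored in the final hunk encodes a two-part eviction rule: an entry leaves the map only when its location list is empty and the block no longer belongs to a file. A hedged, self-contained sketch of the same rule with hypothetical types:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class BlockEvictionSketch {
  static class Info {
    final Set<String> storages = new HashSet<String>(); // datanode locations
    boolean deleted;                                    // no longer in a file
  }

  private final Map<Long, Info> blocks = new HashMap<Long, Info>();

  // Remove one location; evict the entry only when both conditions hold.
  void removeNode(long blockId, String storage) {
    Info info = blocks.get(blockId);
    if (info == null) {
      return;
    }
    info.storages.remove(storage);
    if (info.storages.isEmpty()  // no datanodes left
        && info.deleted) {       // does not belong to a file
      blocks.remove(blockId);    // remove block from the map
    }
  }
}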
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
index 70251e1..092f65e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
@@ -45,12 +45,13 @@ class ContiguousBlockStorageOp {
return last;
}
- static void addStorage(BlockInfo b, DatanodeStorageInfo storage) {
+ static boolean addStorage(BlockInfo b, DatanodeStorageInfo storage) {
// find the last null node
int lastNode = ensureCapacity(b, 1);
b.setStorageInfo(lastNode, storage);
b.setNext(lastNode, null);
b.setPrevious(lastNode, null);
+ return true;
}
static boolean removeStorage(BlockInfo b,
@@ -102,8 +103,4 @@ class ContiguousBlockStorageOp {
"newBlock already exists.");
}
}
-
- static boolean hasEmptyStorage(BlockInfo b) {
- return b.getStorageInfo(0) == null;
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 9a0023d..fc2e234 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
-import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.Server;
@@ -46,12 +46,8 @@ public class CorruptReplicasMap{
CORRUPTION_REPORTED // client or datanode reported the corruption
}
- /**
- * Used to track corrupted replicas (for contiguous block) or internal blocks
- * (for striped block) and the corresponding DataNodes. For a striped block,
- * the key here is the striped block group object stored in the blocksMap.
- */
- private final SortedMap<BlockInfo, Map<DatanodeDescriptor, Reason>> corruptReplicasMap = new TreeMap<>();
+ private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
+ new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
/**
* Mark the block belonging to datanode as corrupt.
@@ -61,21 +57,21 @@ public class CorruptReplicasMap{
* @param reason a textual reason (for logging purposes)
* @param reasonCode the enum representation of the reason
*/
- void addToCorruptReplicasMap(BlockInfo blk, DatanodeDescriptor dn,
+ void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
String reason, Reason reasonCode) {
Map <DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
if (nodes == null) {
- nodes = new HashMap<>();
+ nodes = new HashMap<DatanodeDescriptor, Reason>();
corruptReplicasMap.put(blk, nodes);
}
-
+
String reasonText;
if (reason != null) {
reasonText = " because " + reason;
} else {
reasonText = "";
}
-
+
if (!nodes.keySet().contains(dn)) {
NameNode.blockStateChangeLog.info(
"BLOCK NameSystem.addToCorruptReplicasMap: {} added as corrupt on "
@@ -96,7 +92,7 @@ public class CorruptReplicasMap{
*
* @param blk Block to be removed
*/
- void removeFromCorruptReplicasMap(BlockInfo blk) {
+ void removeFromCorruptReplicasMap(Block blk) {
if (corruptReplicasMap != null) {
corruptReplicasMap.remove(blk);
}
@@ -109,13 +105,12 @@ public class CorruptReplicasMap{
* @return true if the removal is successful;
false if the replica is not in the map
*/
- boolean removeFromCorruptReplicasMap(BlockInfo blk,
- DatanodeDescriptor datanode) {
+ boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) {
return removeFromCorruptReplicasMap(blk, datanode, Reason.ANY);
}
- boolean removeFromCorruptReplicasMap(BlockInfo blk,
- DatanodeDescriptor datanode, Reason reason) {
+ boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode,
+ Reason reason) {
Map <DatanodeDescriptor, Reason> datanodes = corruptReplicasMap.get(blk);
if (datanodes==null)
return false;
@@ -144,9 +139,11 @@ public class CorruptReplicasMap{
* @param blk Block for which nodes are requested
* @return collection of nodes. Null if does not exists
*/
- Collection<DatanodeDescriptor> getNodes(BlockInfo blk) {
- Map<DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
- return nodes != null ? nodes.keySet() : null;
+ Collection<DatanodeDescriptor> getNodes(Block blk) {
+ Map <DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
+ if (nodes == null)
+ return null;
+ return nodes.keySet();
}
/**
@@ -156,12 +153,12 @@ public class CorruptReplicasMap{
* @param node DatanodeDescriptor which holds the replica
* @return true if replica is corrupt, false if does not exists in this map
*/
- boolean isReplicaCorrupt(BlockInfo blk, DatanodeDescriptor node) {
+ boolean isReplicaCorrupt(Block blk, DatanodeDescriptor node) {
Collection<DatanodeDescriptor> nodes = getNodes(blk);
return ((nodes != null) && (nodes.contains(node)));
}
- int numCorruptReplicas(BlockInfo blk) {
+ int numCorruptReplicas(Block blk) {
Collection<DatanodeDescriptor> nodes = getNodes(blk);
return (nodes == null) ? 0 : nodes.size();
}
@@ -171,9 +168,9 @@ public class CorruptReplicasMap{
}
/**
- * Return a range of corrupt replica block ids. Up to numExpectedBlocks
+ * Return a range of corrupt replica block ids. Up to numExpectedBlocks
* blocks starting at the next block after startingBlockId are returned
- * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId
+ * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId
* is null, up to numExpectedBlocks blocks are returned from the beginning.
* If startingBlockId cannot be found, null is returned.
*
@@ -184,39 +181,44 @@ public class CorruptReplicasMap{
* @return Up to numExpectedBlocks blocks from startingBlockId if it exists
*
*/
- @VisibleForTesting
long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
Long startingBlockId) {
if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
return null;
}
- Iterator<BlockInfo> blockIt = corruptReplicasMap.keySet().iterator();
+
+ Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
+
// if the starting block id was specified, iterate over keys until
// we find the matching block. If we find a matching block, break
- // to leave the iterator on the next block after the specified block.
+ // to leave the iterator on the next block after the specified block.
if (startingBlockId != null) {
boolean isBlockFound = false;
while (blockIt.hasNext()) {
- BlockInfo b = blockIt.next();
+ Block b = blockIt.next();
if (b.getBlockId() == startingBlockId) {
isBlockFound = true;
- break;
+ break;
}
}
+
if (!isBlockFound) {
return null;
}
}
- ArrayList<Long> corruptReplicaBlockIds = new ArrayList<>();
+ ArrayList<Long> corruptReplicaBlockIds = new ArrayList<Long>();
+
// append up to numExpectedBlocks blockIds to our list
for(int i=0; i<numExpectedBlocks && blockIt.hasNext(); i++) {
corruptReplicaBlockIds.add(blockIt.next().getBlockId());
}
+
long[] ret = new long[corruptReplicaBlockIds.size()];
for(int i=0; i<ret.length; i++) {
ret[i] = corruptReplicaBlockIds.get(i);
}
+
return ret;
}
@@ -227,7 +229,7 @@ public class CorruptReplicasMap{
* @param node datanode that contains this corrupted replica
* @return reason
*/
- String getCorruptReason(BlockInfo block, DatanodeDescriptor node) {
+ String getCorruptReason(Block block, DatanodeDescriptor node) {
Reason reason = null;
if(corruptReplicasMap.containsKey(block)) {
if (corruptReplicasMap.get(block).containsKey(node)) {
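The getCorruptReplicaBlockIds() body restored above is a cursor-style pager over a sorted map: skip past startingBlockId when one is given, then copy out up to numExpectedBlocks ids. A standalone sketch of that pattern over a plain TreeMap, with hypothetical names, keeping the Javadoc's null-cursor, not-found, and upper-bound semantics:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.TreeMap;

public class CursorPagerSketch {
  // Return up to 'max' keys after 'startingId' (or from the beginning
  // if startingId is null); null if startingId is given but absent.
  static long[] page(TreeMap<Long, String> map, int max, Long startingId) {
    if (max < 0 || max > 100) {  // same bounds as the restored method
      return null;
    }
    Iterator<Long> it = map.keySet().iterator();
    if (startingId != null) {
      boolean found = false;
      while (it.hasNext()) {
        if (it.next().equals(startingId)) {
          found = true;  // leave iterator on the next key
          break;
        }
      }
      if (!found) {
        return null;
      }
    }
    ArrayList<Long> ids = new ArrayList<Long>();
    for (int i = 0; i < max && it.hasNext(); i++) {
      ids.add(it.next());
    }
    long[] ret = new long[ids.size()];
    for (int i = 0; i < ret.length; i++) {
      ret[i] = ids.get(i);
    }
    return ret;
  }
}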
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index eebeac0..4830d5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -71,7 +71,7 @@ class FSDirWriteFileOp {
private FSDirWriteFileOp() {}
static boolean unprotectedRemoveBlock(
FSDirectory fsd, String path, INodesInPath iip, INodeFile fileNode,
- BlockInfo block) throws IOException {
+ Block block) throws IOException {
// modify file-> block and blocksMap
// fileNode should be under construction
BlockInfoUnderConstruction uc = fileNode.removeLastBlock(block);
@@ -136,9 +136,7 @@ class FSDirWriteFileOp {
fsd.writeLock();
try {
// Remove the block from the pending creates list
- BlockInfo storedBlock = fsd.getBlockManager().getStoredBlock(localBlock);
- if (storedBlock != null &&
- !unprotectedRemoveBlock(fsd, src, iip, file, storedBlock)) {
+ if (!unprotectedRemoveBlock(fsd, src, iip, file, localBlock)) {
return;
}
} finally {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 96d6982..63ef985 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1035,7 +1035,7 @@ public class FSEditLogLoader {
throw new IOException("Trying to remove more than one block from file "
+ path);
}
- BlockInfo oldBlock = oldBlocks[oldBlocks.length - 1];
+ Block oldBlock = oldBlocks[oldBlocks.length - 1];
boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
fsDir, path, iip, file, oldBlock);
if (!removed && !(op instanceof UpdateBlocksOp)) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 2a8231a..ab179b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -267,8 +267,10 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
out.println("No. of corrupted Replica: " +
numberReplicas.corruptReplicas());
//record datanodes that have corrupted block replica
- Collection<DatanodeDescriptor> corruptionRecord =
- bm.getCorruptReplicas(blockInfo);
+ Collection<DatanodeDescriptor> corruptionRecord = null;
+ if (bm.getCorruptReplicas(block) != null) {
+ corruptionRecord = bm.getCorruptReplicas(block);
+ }
//report block replicas status on datanodes
for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
@@ -277,7 +279,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
dn.getNetworkLocation() + " ");
if (corruptionRecord != null && corruptionRecord.contains(dn)) {
out.print(CORRUPT_STATUS+"\t ReasonCode: "+
- bm.getCorruptReason(blockInfo, dn));
+ bm.getCorruptReason(block,dn));
} else if (dn.isDecommissioned() ){
out.print(DECOMMISSIONED_STATUS);
} else if (dn.isDecommissionInProgress()) {
@@ -648,7 +650,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
LightWeightLinkedSet<BlockInfo> blocksExcess =
bm.excessReplicateMap.get(dnDesc.getDatanodeUuid());
Collection<DatanodeDescriptor> corruptReplicas =
- bm.getCorruptReplicas(storedBlock);
+ bm.getCorruptReplicas(block.getLocalBlock());
sb.append("(");
if (dnDesc.isDecommissioned()) {
sb.append("DECOMMISSIONED)");
@@ -656,7 +658,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
sb.append("DECOMMISSIONING)");
} else if (corruptReplicas != null && corruptReplicas.contains(dnDesc)) {
sb.append("CORRUPT)");
- } else if (blocksExcess != null && blocksExcess.contains(storedBlock)) {
+ } else if (blocksExcess != null && blocksExcess.contains(block.getLocalBlock())) {
sb.append("EXCESS)");
} else if (dnDesc.isStale(this.staleInterval)) {
sb.append("STALE_NODE)");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index af1e023..89ee674 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -560,8 +560,7 @@ public class DFSTestUtil {
throws TimeoutException, InterruptedException {
int count = 0;
final int ATTEMPTS = 50;
- int repls = BlockManagerTestUtil.numCorruptReplicas(ns.getBlockManager(),
- b.getLocalBlock());
+ int repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
while (repls != corruptRepls && count < ATTEMPTS) {
try {
IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
@@ -573,8 +572,7 @@ public class DFSTestUtil {
count++;
// check more often so corrupt block reports are not easily missed
for (int i = 0; i < 10; i++) {
- repls = BlockManagerTestUtil.numCorruptReplicas(ns.getBlockManager(),
- b.getLocalBlock());
+ repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
Thread.sleep(100);
if (repls == corruptRepls) {
break;
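The test helper above re-reads the file to provoke fresh corrupt-replica reports and polls the count with short sleeps. A generic bounded-poll sketch of the same shape, with hypothetical supplier and action names:

import java.util.concurrent.TimeoutException;
import java.util.function.IntSupplier;

public class PollUntilSketch {
  // Poll 'current' every 100 ms until it equals 'target' or attempts
  // run out; 'action' re-triggers whatever produces the side effect
  // being waited on (in the test above, another read of the file).
  static void pollUntil(IntSupplier current, int target, Runnable action,
      int maxAttempts) throws TimeoutException, InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (current.getAsInt() == target) {
        return;
      }
      action.run();
      Thread.sleep(100);
    }
    throw new TimeoutException("value never reached " + target);
  }
}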
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index a899891..148135b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -24,7 +24,6 @@ import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutionException;
-import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -88,7 +87,7 @@ public class BlockManagerTestUtil {
final Block b) {
final Set<String> rackSet = new HashSet<String>(0);
final Collection<DatanodeDescriptor> corruptNodes =
- getCorruptReplicas(blockManager).getNodes(blockManager.getStoredBlock(b));
+ getCorruptReplicas(blockManager).getNodes(b);
for(DatanodeStorageInfo storage : blockManager.blocksMap.getStorages(b)) {
final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
@@ -307,8 +306,4 @@ public class BlockManagerTestUtil {
throws ExecutionException, InterruptedException {
dm.getDecomManager().runMonitor();
}
-
- public static int numCorruptReplicas(BlockManager bm, Block block) {
- return bm.corruptReplicas.numCorruptReplicas(bm.getStoredBlock(block));
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index c23f3d0..bae4f1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -63,7 +63,9 @@ public class TestBlockInfo {
final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
- blockInfo.addStorage(storage, blockInfo);
+ boolean added = blockInfo.addStorage(storage, blockInfo);
+
+ Assert.assertTrue(added);
Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
}
@@ -71,7 +73,7 @@ public class TestBlockInfo {
public void testCopyConstructor() {
BlockInfo old = new BlockInfoContiguous((short) 3);
try {
- BlockInfo copy = new BlockInfoContiguous(old);
+ BlockInfo copy = new BlockInfoContiguous((BlockInfoContiguous)old);
assertEquals(old.getBlockCollection(), copy.getBlockCollection());
assertEquals(old.getCapacity(), copy.getCapacity());
} catch (Exception e) {
@@ -108,8 +110,8 @@ public class TestBlockInfo {
final int MAX_BLOCKS = 10;
DatanodeStorageInfo dd = DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
- ArrayList<Block> blockList = new ArrayList<>(MAX_BLOCKS);
- ArrayList<BlockInfo> blockInfoList = new ArrayList<>();
+ ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
+ ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
int headIndex;
int curIndex;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index f6cc747..9e31670 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -509,7 +509,7 @@ public class TestBlockManager {
+ " even if all available source nodes have reached their replication"
+ " limits below the hard limit.",
bm.chooseSourceDatanode(
- bm.getStoredBlock(aBlock),
+ aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
@@ -519,7 +519,7 @@ public class TestBlockManager {
+ " replication since all available source nodes have reached"
+ " their replication limits.",
bm.chooseSourceDatanode(
- bm.getStoredBlock(aBlock),
+ aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
@@ -532,7 +532,7 @@ public class TestBlockManager {
assertNull("Does not choose a source node for a highest-priority"
+ " replication when all available nodes exceed the hard limit.",
bm.chooseSourceDatanode(
- bm.getStoredBlock(aBlock),
+ aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
@@ -558,7 +558,7 @@ public class TestBlockManager {
+ " if all available source nodes have reached their replication"
+ " limits below the hard limit.",
bm.chooseSourceDatanode(
- bm.getStoredBlock(aBlock),
+ aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
@@ -572,7 +572,7 @@ public class TestBlockManager {
assertNull("Does not choose a source decommissioning node for a normal"
+ " replication when all available nodes exceed the hard limit.",
bm.chooseSourceDatanode(
- bm.getStoredBlock(aBlock),
+ aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc99aaff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 1a49bee..21fb54e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -48,19 +48,20 @@ public class TestCorruptReplicaInfo {
private static final Log LOG =
LogFactory.getLog(TestCorruptReplicaInfo.class);
- private final Map<Long, BlockInfo> block_map = new HashMap<>();
+ private final Map<Long, Block> block_map =
+ new HashMap<Long, Block>();
// Allow easy block creation by block id
// Return existing block if one with same block id already exists
- private BlockInfo getBlock(Long block_id) {
+ private Block getBlock(Long block_id) {
if (!block_map.containsKey(block_id)) {
- block_map.put(block_id,
- new BlockInfoContiguous(new Block(block_id, 0, 0), (short) 1));
+ block_map.put(block_id, new Block(block_id, 0, 0));
}
+
return block_map.get(block_id);
}
- private BlockInfo getBlock(int block_id) {
+ private Block getBlock(int block_id) {
return getBlock((long)block_id);
}
@@ -81,7 +82,7 @@ public class TestCorruptReplicaInfo {
// create a list of block_ids. A list is used to allow easy validation of the
// output of getCorruptReplicaBlockIds
int NUM_BLOCK_IDS = 140;
- List<Long> block_ids = new LinkedList<>();
+ List<Long> block_ids = new LinkedList<Long>();
for (int i=0;i<NUM_BLOCK_IDS;i++) {
block_ids.add((long)i);
}
@@ -129,7 +130,7 @@ public class TestCorruptReplicaInfo {
}
private static void addToCorruptReplicasMap(CorruptReplicasMap crm,
- BlockInfo blk, DatanodeDescriptor dn) {
+ Block blk, DatanodeDescriptor dn) {
crm.addToCorruptReplicasMap(blk, dn, "TEST", Reason.NONE);
}
}
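For context: getBlock(Long) above memoizes Block instances by id so repeated calls with
the same id return the same object; the explicit HashMap<Long, Block> type arguments in
the hunk suggest the branch still compiles against a pre-diamond source level. On Java 8
the same get-or-create pattern collapses to computeIfAbsent (sketch only, not the
committed code):

    // Hedged Java 8 equivalent of the memoizing getBlock(...) helper above.
    private Block getBlock(Long blockId) {
      return block_map.computeIfAbsent(blockId, id -> new Block(id, 0, 0));
    }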
[09/12] hadoop git commit: YARN-3690. [JDK8] 'mvn site' fails.
Contributed by Brahma Reddy Battula.
Posted by aw...@apache.org.
YARN-3690. [JDK8] 'mvn site' fails. Contributed by Brahma Reddy Battula.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6325745
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6325745
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6325745
Branch: refs/heads/HADOOP-12111
Commit: d6325745e26d6acb0bbdb0972ce036297d66d5b0
Parents: c9dd2ca
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Jul 8 15:41:33 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Jul 8 15:42:31 2015 +0900
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 2 ++
.../src/main/java/org/apache/hadoop/yarn/util/package-info.java | 2 --
.../org/apache/hadoop/yarn/client/api/impl/package-info.java | 4 ----
.../java/org/apache/hadoop/yarn/client/api/package-info.java | 4 ----
.../main/java/org/apache/hadoop/yarn/factories/package-info.java | 2 --
.../org/apache/hadoop/yarn/factory/providers/package-info.java | 2 --
6 files changed, 2 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
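For context: JDK8 ships a much stricter javadoc (doclint), and 'mvn site' on JDK8 was
failing over these package-info.java files; one plausible reading is that the
annotation-plus-trailing-import layout tripped it, and in any case the committed fix is
simply to drop the audience/stability annotations and their imports. After the patch
each file reduces to the license header plus a bare package statement, e.g.
(illustrative):

    // Shape of each package-info.java after this commit (license header omitted).
    package org.apache.hadoop.yarn.util;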
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6325745/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8b5cc0c..d1960e6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -619,6 +619,8 @@ Release 2.7.2 - UNRELEASED
YARN-3508. Prevent processing preemption events on the main RM dispatcher.
(Varun Saxena via wangda)
+ YARN-3690. [JDK8] 'mvn site' fails. (Brahma Reddy Battula via aajisaka)
+
Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6325745/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/package-info.java
index fd7cfb2..e4518ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/package-info.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/package-info.java
@@ -15,6 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-@InterfaceAudience.Private
package org.apache.hadoop.yarn.util;
-import org.apache.hadoop.classification.InterfaceAudience;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6325745/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/package-info.java
index 9e2d911..7d747d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/package-info.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/package-info.java
@@ -15,9 +15,5 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
package org.apache.hadoop.yarn.client.api.impl;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6325745/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/package-info.java
index a37e3d9..9130828 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/package-info.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/package-info.java
@@ -15,9 +15,5 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
package org.apache.hadoop.yarn.client.api;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6325745/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/package-info.java
index 249f335..a52771b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/package-info.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/package-info.java
@@ -15,7 +15,5 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
package org.apache.hadoop.yarn.factories;
-import org.apache.hadoop.classification.InterfaceAudience;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6325745/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/package-info.java
index 26579ae..dd0c85c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/package-info.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/package-info.java
@@ -15,7 +15,5 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
package org.apache.hadoop.yarn.factory.providers;
-import org.apache.hadoop.classification.InterfaceAudience;
[05/12] hadoop git commit: HADOOP-12195. Add annotation to
package-info.java file to workaround MCOMPILER-205.
Posted by aw...@apache.org.
HADOOP-12195. Add annotation to package-info.java file to workaround MCOMPILER-205.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3dc92e84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3dc92e84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3dc92e84
Branch: refs/heads/HADOOP-12111
Commit: 3dc92e84c2a530483c6a7693817b67d86f9c97aa
Parents: aa96a8c
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Jul 7 11:14:12 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Jul 7 11:14:12 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../src/main/java/org/apache/hadoop/jmx/package-info.java | 5 ++++-
2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
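For context: MCOMPILER-205 is the maven-compiler-plugin quirk where a package-info.java
carrying no class-retained annotation produces no package-info.class, so the plugin's
staleness check keeps seeing the source as newer than its (nonexistent) output and
recompiles the module on every build. Annotating the package makes javac emit the class
file. The workaround has this shape, which is exactly what the hunk below adds:

    // An annotation with CLASS/RUNTIME retention makes javac emit package-info.class.
    @InterfaceAudience.Private
    package org.apache.hadoop.jmx;

    import org.apache.hadoop.classification.InterfaceAudience;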
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dc92e84/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index af6e3fe..859e58a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -686,6 +686,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12193. Rename Touchz.java to Touch.java. (wang)
+ HADOOP-12195. Add annotation to package-info.java file to workaround
+ MCOMPILER-205. (wang)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dc92e84/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/package-info.java
index e09d993..ef227f0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/package-info.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/package-info.java
@@ -19,4 +19,7 @@
* This package provides access to JMX primarily through the
* {@link org.apache.hadoop.jmx.JMXJsonServlet} class.
*/
-package org.apache.hadoop.jmx;
\ No newline at end of file
+@InterfaceAudience.Private
+package org.apache.hadoop.jmx;
+
+import org.apache.hadoop.classification.InterfaceAudience;
\ No newline at end of file
[08/12] hadoop git commit: YARN-3892. Fixed NPE on
RMStateStore#serviceStop when CapacityScheduler#serviceInit fails.
Contributed by Bibin A Chundatt
Posted by aw...@apache.org.
YARN-3892. Fixed NPE on RMStateStore#serviceStop when CapacityScheduler#serviceInit fails. Contributed by Bibin A Chundatt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9dd2cad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9dd2cad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9dd2cad
Branch: refs/heads/HADOOP-12111
Commit: c9dd2cada055c0beffd04bad0ded8324f66ad1b7
Parents: c0b8e4e
Author: Jian He <ji...@apache.org>
Authored: Tue Jul 7 14:16:21 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 7 14:16:21 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../yarn/server/resourcemanager/recovery/ZKRMStateStore.java | 3 ++-
2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9dd2cad/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2d1d6a2..8b5cc0c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -598,6 +598,9 @@ Release 2.8.0 - UNRELEASED
YARN-2194. Fix bug causing CGroups functionality to fail on RHEL7.
(Wei Yan via vvasudev)
+ YARN-3892. Fixed NPE on RMStateStore#serviceStop when
+ CapacityScheduler#serviceInit fails. (Bibin A Chundatt via jianhe)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9dd2cad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index bca5348..8f096d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -40,6 +40,7 @@ import org.apache.curator.retry.RetryNTimes;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.ZKUtil;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -312,7 +313,7 @@ public class ZKRMStateStore extends RMStateStore {
verifyActiveStatusThread.interrupt();
verifyActiveStatusThread.join(1000);
}
- curatorFramework.close();
+ IOUtils.closeStream(curatorFramework);
}
@Override
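For context: when CapacityScheduler#serviceInit throws, RMStateStore#serviceStop can run
before curatorFramework was ever assigned, so the unconditional close() above threw an
NPE. The fix routes through Hadoop's IOUtils.closeStream, which is null-safe and
swallows the IOException. A hand-rolled equivalent for illustration (not the committed
code):

    // Hedged equivalent of IOUtils.closeStream(c): null-safe, exception-quiet close.
    static void closeQuietly(java.io.Closeable closeable) {
      if (closeable != null) {
        try {
          closeable.close();
        } catch (java.io.IOException ignored) {
          // best-effort close during shutdown; nothing useful to do here
        }
      }
    }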
[03/12] hadoop git commit: MAPREDUCE-6038. A boolean may be set error
in the Word Count v2.0 in MapReduce Tutorial. Contributed by Tsuyoshi Ozawa
Posted by aw...@apache.org.
MAPREDUCE-6038. A boolean may be set error in the Word Count v2.0 in MapReduce Tutorial. Contributed by Tsuyoshi Ozawa
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e2fe8c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e2fe8c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e2fe8c9
Branch: refs/heads/HADOOP-12111
Commit: 7e2fe8c9f28ec6fff32741ebf1bdbf47729d9eaf
Parents: bc99aaf
Author: Chris Douglas <cd...@apache.org>
Authored: Tue Jul 7 10:26:27 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Tue Jul 7 10:27:04 2015 -0700
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../src/site/markdown/MapReduceTutorial.md | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e2fe8c9/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 4c60174..874ecea 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -530,6 +530,9 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6418. MRApp should not shutdown LogManager during shutdown
(Chang Li via jlowe)
+ MAPREDUCE-6038. A boolean may be set error in the Word Count v2.0 in
+ MapReduce Tutorial. (Tsuyoshi Ozawa via cdouglas)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e2fe8c9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
index cd087d5..e2aaaf6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
@@ -978,7 +978,7 @@ public class WordCount2 {
InterruptedException {
conf = context.getConfiguration();
caseSensitive = conf.getBoolean("wordcount.case.sensitive", true);
- if (conf.getBoolean("wordcount.skip.patterns", true)) {
+ if (conf.getBoolean("wordcount.skip.patterns", false)) {
URI[] patternsURIs = Job.getInstance(conf).getCacheFiles();
for (URI patternsURI : patternsURIs) {
Path patternsPath = new Path(patternsURI.getPath());
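For context: Configuration#getBoolean(name, defaultValue) returns defaultValue whenever
the property is unset, so defaulting wordcount.skip.patterns to true sent every run down
the getCacheFiles() path even when no patterns file had been attached. With false, the
branch is only taken when the driver set the flag while adding the cache file, roughly
(driver wiring paraphrased from the tutorial, not quoted verbatim):

    // Hedged sketch of the driver side that pairs with the corrected default:
    // the flag and the cache file are always set together.
    if (remainingArgs.length == 4) { // wordcount <in> <out> -skip <patternsFile>
      job.addCacheFile(new Path(remainingArgs[3]).toUri());
      job.getConfiguration().setBoolean("wordcount.skip.patterns", true);
    }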
[12/12] hadoop git commit: Merge branch 'trunk' into HADOOP-12111
Posted by aw...@apache.org.
Merge branch 'trunk' into HADOOP-12111
Conflicts:
dev-support/releasedocmaker.py
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8243608f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8243608f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8243608f
Branch: refs/heads/HADOOP-12111
Commit: 8243608fdcccf8a33678d9ce66e8789e82e6025f
Parents: adbacf7 98e5926
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Jul 8 08:15:55 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Jul 8 08:15:55 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 10 +-
hadoop-common-project/hadoop-common/pom.xml | 5 +
.../hadoop/fs/sftp/SFTPConnectionPool.java | 303 +++++++++
.../apache/hadoop/fs/sftp/SFTPFileSystem.java | 671 +++++++++++++++++++
.../apache/hadoop/fs/sftp/SFTPInputStream.java | 130 ++++
.../org/apache/hadoop/fs/sftp/package-info.java | 19 +
.../java/org/apache/hadoop/fs/shell/Touch.java | 84 +++
.../java/org/apache/hadoop/fs/shell/Touchz.java | 84 ---
.../org/apache/hadoop/jmx/package-info.java | 5 +-
.../hadoop/fs/sftp/TestSFTPFileSystem.java | 308 +++++++++
.../hdfs/client/HdfsClientConfigKeys.java | 3 +-
.../hadoop/hdfs/protocol/ClientProtocol.java | 606 ++++++++---------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 9 +-
.../hdfs/server/blockmanagement/BlockInfo.java | 7 +-
.../blockmanagement/BlockInfoContiguous.java | 9 +-
.../BlockInfoUnderConstruction.java | 22 +-
.../BlockInfoUnderConstructionContiguous.java | 13 +-
.../server/blockmanagement/BlockManager.java | 143 ++--
.../hdfs/server/blockmanagement/BlocksMap.java | 4 +-
.../ContiguousBlockStorageOp.java | 7 +-
.../blockmanagement/CorruptReplicasMap.java | 62 +-
.../server/datanode/fsdataset/FsDatasetSpi.java | 149 ++--
.../server/datanode/fsdataset/FsVolumeSpi.java | 47 +-
.../hdfs/server/namenode/FSDirWriteFileOp.java | 6 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 2 +-
.../hdfs/server/namenode/NamenodeFsck.java | 12 +-
.../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 10 +-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 6 +-
.../java/org/apache/hadoop/hdfs/TestQuota.java | 21 +
.../blockmanagement/BlockManagerTestUtil.java | 7 +-
.../server/blockmanagement/TestBlockInfo.java | 10 +-
.../blockmanagement/TestBlockManager.java | 10 +-
.../blockmanagement/TestCorruptReplicaInfo.java | 15 +-
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../src/site/markdown/MapReduceTutorial.md | 2 +-
hadoop-project/pom.xml | 5 +
hadoop-yarn-project/CHANGES.txt | 5 +
.../apache/hadoop/yarn/util/package-info.java | 2 -
.../yarn/client/api/impl/package-info.java | 4 -
.../hadoop/yarn/client/api/package-info.java | 4 -
.../hadoop/yarn/factories/package-info.java | 2 -
.../yarn/factory/providers/package-info.java | 2 -
.../recovery/ZKRMStateStore.java | 3 +-
43 files changed, 2136 insertions(+), 695 deletions(-)
----------------------------------------------------------------------
[10/12] hadoop git commit: HDFS-8712. Remove 'public' and 'abstract'
modifiers in FsVolumeSpi and FsDatasetSpi (Contributed by Lei (Eddy) Xu)
Posted by aw...@apache.org.
HDFS-8712. Remove 'public' and 'abstract' modifiers in FsVolumeSpi and FsDatasetSpi (Contributed by Lei (Eddy) Xu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd4e1090
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd4e1090
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd4e1090
Branch: refs/heads/HADOOP-12111
Commit: bd4e10900cc53a2768c31cc29fdb3698684bc2a0
Parents: d632574
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jul 8 16:34:54 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Jul 8 16:34:54 2015 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../server/datanode/fsdataset/FsDatasetSpi.java | 149 ++++++++++---------
.../server/datanode/fsdataset/FsVolumeSpi.java | 47 +++---
3 files changed, 101 insertions(+), 98 deletions(-)
----------------------------------------------------------------------
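For context: every member of a Java interface is implicitly public, interface methods
are implicitly abstract, and types nested in an interface are implicitly public and
static, so the modifiers stripped throughout the diff below are redundant (checkstyle's
RedundantModifier check flags them). A short illustration (hypothetical interface, not
from the patch):

    interface Example {
      public abstract long getAvailable() throws java.io.IOException; // redundant
      // long getAvailable() throws java.io.IOException;  -- identical, idiomatic form
    }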
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd4e1090/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index db77d0b..b88b42a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -707,6 +707,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8620. Clean up the checkstyle warinings about ClientProtocol.
(Takanobu Asanuma via wheat9)
+ HDFS-8712. Remove 'public' and 'abstract' modifiers in FsVolumeSpi and
+ FsDatasetSpi (Lei (Eddy) Xu via vinayakumarb)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd4e1090/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index 76c4f02..af6a532 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -72,7 +72,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
/**
* A factory for creating {@link FsDatasetSpi} objects.
*/
- public static abstract class Factory<D extends FsDatasetSpi<?>> {
+ abstract class Factory<D extends FsDatasetSpi<?>> {
/** @return the configured factory. */
public static Factory<?> getFactory(Configuration conf) {
@SuppressWarnings("rawtypes")
@@ -182,7 +182,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* The caller must release the reference of each volume by calling
* {@link FsVolumeReferences#close()}.
*/
- public FsVolumeReferences getFsVolumeReferences();
+ FsVolumeReferences getFsVolumeReferences();
/**
* Add a new volume to the FsDataset.<p/>
@@ -193,7 +193,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @param location The storage location for the new volume.
* @param nsInfos Namespace information for the new volume.
*/
- public void addVolume(
+ void addVolume(
final StorageLocation location,
final List<NamespaceInfo> nsInfos) throws IOException;
@@ -207,20 +207,20 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @param clearFailure set true to clear the failure information about the
* volumes.
*/
- public void removeVolumes(Set<File> volumes, boolean clearFailure);
+ void removeVolumes(Set<File> volumes, boolean clearFailure);
/** @return a storage with the given storage ID */
- public DatanodeStorage getStorage(final String storageUuid);
+ DatanodeStorage getStorage(final String storageUuid);
/** @return one or more storage reports for attached volumes. */
- public StorageReport[] getStorageReports(String bpid)
+ StorageReport[] getStorageReports(String bpid)
throws IOException;
/** @return the volume that contains a replica of the block. */
- public V getVolume(ExtendedBlock b);
+ V getVolume(ExtendedBlock b);
/** @return a volume information map (name => info). */
- public Map<String, Object> getVolumeInfoMap();
+ Map<String, Object> getVolumeInfoMap();
/**
* Returns info about volume failures.
@@ -230,17 +230,17 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
VolumeFailureSummary getVolumeFailureSummary();
/** @return a list of finalized blocks for the given block pool. */
- public List<FinalizedReplica> getFinalizedBlocks(String bpid);
+ List<FinalizedReplica> getFinalizedBlocks(String bpid);
/** @return a list of finalized blocks for the given block pool. */
- public List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid);
+ List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid);
/**
* Check whether the in-memory block record matches the block on the disk,
* and, in case that they are not matched, update the record or mark it
* as corrupted.
*/
- public void checkAndUpdate(String bpid, long blockId, File diskFile,
+ void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FsVolumeSpi vol) throws IOException;
/**
@@ -249,15 +249,15 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* otherwise, return null.
* @throws IOException
*/
- public LengthInputStream getMetaDataInputStream(ExtendedBlock b
+ LengthInputStream getMetaDataInputStream(ExtendedBlock b
) throws IOException;
/**
- * Returns the specified block's on-disk length (excluding metadata)
+ * Returns the specified block's on-disk length (excluding metadata).
* @return the specified block's on-disk length (excluding metadata)
* @throws IOException on error
*/
- public long getLength(ExtendedBlock b) throws IOException;
+ long getLength(ExtendedBlock b) throws IOException;
/**
* Get reference to the replica meta info in the replicasMap.
@@ -265,47 +265,48 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return replica from the replicas map
*/
@Deprecated
- public Replica getReplica(String bpid, long blockId);
+ Replica getReplica(String bpid, long blockId);
/**
* @return replica meta information
*/
- public String getReplicaString(String bpid, long blockId);
+ String getReplicaString(String bpid, long blockId);
/**
* @return the generation stamp stored with the block.
*/
- public Block getStoredBlock(String bpid, long blkid) throws IOException;
-
+ Block getStoredBlock(String bpid, long blkid) throws IOException;
+
/**
- * Returns an input stream at specified offset of the specified block
+ * Returns an input stream at specified offset of the specified block.
* @param b block
* @param seekOffset offset with in the block to seek to
* @return an input stream to read the contents of the specified block,
* starting at the offset
* @throws IOException
*/
- public InputStream getBlockInputStream(ExtendedBlock b, long seekOffset)
+ InputStream getBlockInputStream(ExtendedBlock b, long seekOffset)
throws IOException;
/**
- * Returns an input stream at specified offset of the specified block
+ * Returns an input stream at specified offset of the specified block.
* The block is still in the tmp directory and is not finalized
* @return an input stream to read the contents of the specified block,
* starting at the offset
* @throws IOException
*/
- public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
+ ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
long ckoff) throws IOException;
/**
* Creates a temporary replica and returns the meta information of the replica
+ * .
*
* @param b block
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
- public ReplicaHandler createTemporary(StorageType storageType,
+ ReplicaHandler createTemporary(StorageType storageType,
ExtendedBlock b) throws IOException;
/**
@@ -315,11 +316,11 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
- public ReplicaHandler createRbw(StorageType storageType,
+ ReplicaHandler createRbw(StorageType storageType,
ExtendedBlock b, boolean allowLazyPersist) throws IOException;
/**
- * Recovers a RBW replica and returns the meta info of the replica
+ * Recovers a RBW replica and returns the meta info of the replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
@@ -328,7 +329,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
- public ReplicaHandler recoverRbw(ExtendedBlock b,
+ ReplicaHandler recoverRbw(ExtendedBlock b,
long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException;
/**
@@ -336,11 +337,11 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @param temporary the temporary replica being converted
* @return the result RBW
*/
- public ReplicaInPipelineInterface convertTemporaryToRbw(
+ ReplicaInPipelineInterface convertTemporaryToRbw(
ExtendedBlock temporary) throws IOException;
/**
- * Append to a finalized replica and returns the meta info of the replica
+ * Append to a finalized replica and returns the meta info of the replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
@@ -348,12 +349,12 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return the meta info of the replica which is being written to
* @throws IOException
*/
- public ReplicaHandler append(ExtendedBlock b, long newGS,
+ ReplicaHandler append(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException;
/**
- * Recover a failed append to a finalized replica
- * and returns the meta info of the replica
+ * Recover a failed append to a finalized replica and returns the meta
+ * info of the replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
@@ -361,11 +362,11 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return the meta info of the replica which is being written to
* @throws IOException
*/
- public ReplicaHandler recoverAppend(
+ ReplicaHandler recoverAppend(
ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException;
/**
- * Recover a failed pipeline close
+ * Recover a failed pipeline close.
* It bumps the replica's generation stamp and finalize it if RBW replica
*
* @param b block
@@ -374,7 +375,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return the storage uuid of the replica.
* @throws IOException
*/
- public String recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen
+ String recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen
) throws IOException;
/**
@@ -386,21 +387,21 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* block is been finalized. For instance, the block resides on an HDFS volume
* that has been removed.
*/
- public void finalizeBlock(ExtendedBlock b) throws IOException;
+ void finalizeBlock(ExtendedBlock b) throws IOException;
/**
* Unfinalizes the block previously opened for writing using writeToBlock.
* The temporary file associated with this block is deleted.
* @throws IOException
*/
- public void unfinalizeBlock(ExtendedBlock b) throws IOException;
+ void unfinalizeBlock(ExtendedBlock b) throws IOException;
/**
* Returns one block report per volume.
* @param bpid Block Pool Id
* @return - a map of DatanodeStorage to block report for the volume.
*/
- public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid);
+ Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid);
/**
* Returns the cache report - the full list of cached block IDs of a
@@ -408,10 +409,10 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @param bpid Block Pool Id
* @return the cache report - the full list of cached block IDs.
*/
- public List<Long> getCacheReport(String bpid);
+ List<Long> getCacheReport(String bpid);
/** Does the dataset contain the block? */
- public boolean contains(ExtendedBlock block);
+ boolean contains(ExtendedBlock block);
/**
* Check if a block is valid.
@@ -431,7 +432,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
*
* @throws IOException May be thrown from the methods called.
*/
- public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
+ void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
throws ReplicaNotFoundException, UnexpectedReplicaStateException,
FileNotFoundException, EOFException, IOException;
@@ -440,13 +441,13 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* Is the block valid?
* @return - true if the specified block is valid
*/
- public boolean isValidBlock(ExtendedBlock b);
+ boolean isValidBlock(ExtendedBlock b);
/**
* Is the block a valid RBW?
* @return - true if the specified block is a valid RBW
*/
- public boolean isValidRbw(ExtendedBlock b);
+ boolean isValidRbw(ExtendedBlock b);
/**
* Invalidates the specified blocks
@@ -454,21 +455,21 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @param invalidBlks - the blocks to be invalidated
* @throws IOException
*/
- public void invalidate(String bpid, Block invalidBlks[]) throws IOException;
+ void invalidate(String bpid, Block invalidBlks[]) throws IOException;
/**
* Caches the specified blocks
* @param bpid Block pool id
* @param blockIds - block ids to cache
*/
- public void cache(String bpid, long[] blockIds);
+ void cache(String bpid, long[] blockIds);
/**
* Uncaches the specified blocks
* @param bpid Block pool id
* @param blockIds - blocks ids to uncache
*/
- public void uncache(String bpid, long[] blockIds);
+ void uncache(String bpid, long[] blockIds);
/**
* Determine if the specified block is cached.
@@ -476,18 +477,18 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @param blockIds - block id
* @return true if the block is cached
*/
- public boolean isCached(String bpid, long blockId);
+ boolean isCached(String bpid, long blockId);
/**
* Check if all the data directories are healthy
* @return A set of unhealthy data directories.
*/
- public Set<File> checkDataDir();
+ Set<File> checkDataDir();
/**
* Shutdown the FSDataset
*/
- public void shutdown();
+ void shutdown();
/**
* Sets the file pointer of the checksum stream so that the last checksum
@@ -497,7 +498,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @param checksumSize number of bytes each checksum has
* @throws IOException
*/
- public void adjustCrcChannelPosition(ExtendedBlock b,
+ void adjustCrcChannelPosition(ExtendedBlock b,
ReplicaOutputStreams outs, int checksumSize) throws IOException;
/**
@@ -505,7 +506,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return true if more than the minimum number of valid volumes are left
* in the FSDataSet.
*/
- public boolean hasEnoughResource();
+ boolean hasEnoughResource();
/**
* Get visible length of the specified replica.
@@ -517,14 +518,14 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return actual state of the replica on this data-node or
* null if data-node does not have the replica.
*/
- public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock
+ ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock
) throws IOException;
/**
* Update replica's generation stamp and length and finalize it.
* @return the ID of storage that stores the block
*/
- public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
+ String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
long recoveryId, long newBlockId, long newLength) throws IOException;
/**
@@ -532,14 +533,14 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @param bpid Block pool Id
* @param conf Configuration
*/
- public void addBlockPool(String bpid, Configuration conf) throws IOException;
-
+ void addBlockPool(String bpid, Configuration conf) throws IOException;
+
/**
* Shutdown and remove the block pool from underlying storage.
* @param bpid Block pool Id to be removed
*/
- public void shutdownBlockPool(String bpid) ;
-
+ void shutdownBlockPool(String bpid);
+
/**
* Deletes the block pool directories. If force is false, directories are
* deleted only if no block files exist for the block pool. If force
@@ -551,12 +552,12 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* directory for the blockpool is deleted along with its contents.
* @throws IOException
*/
- public void deleteBlockPool(String bpid, boolean force) throws IOException;
-
+ void deleteBlockPool(String bpid, boolean force) throws IOException;
+
/**
* Get {@link BlockLocalPathInfo} for the given block.
*/
- public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b
+ BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b
) throws IOException;
/**
@@ -568,7 +569,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return metadata Metadata for the list of blocks
* @throws IOException
*/
- public HdfsBlocksMetadata getHdfsBlocksMetadata(String bpid,
+ HdfsBlocksMetadata getHdfsBlocksMetadata(String bpid,
long[] blockIds) throws IOException;
/**
@@ -576,51 +577,51 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* moved to a separate trash directory instead of being deleted immediately.
* This can be useful for example during rolling upgrades.
*/
- public void enableTrash(String bpid);
+ void enableTrash(String bpid);
/**
* Clear trash
*/
- public void clearTrash(String bpid);
+ void clearTrash(String bpid);
/**
* @return true when trash is enabled
*/
- public boolean trashEnabled(String bpid);
+ boolean trashEnabled(String bpid);
/**
* Create a marker file indicating that a rolling upgrade is in progress.
*/
- public void setRollingUpgradeMarker(String bpid) throws IOException;
+ void setRollingUpgradeMarker(String bpid) throws IOException;
/**
* Delete the rolling upgrade marker file if it exists.
* @param bpid
*/
- public void clearRollingUpgradeMarker(String bpid) throws IOException;
+ void clearRollingUpgradeMarker(String bpid) throws IOException;
/**
- * submit a sync_file_range request to AsyncDiskService
+ * submit a sync_file_range request to AsyncDiskService.
*/
- public void submitBackgroundSyncFileRangeRequest(final ExtendedBlock block,
+ void submitBackgroundSyncFileRangeRequest(final ExtendedBlock block,
final FileDescriptor fd, final long offset, final long nbytes,
final int flags);
/**
* Callback from RamDiskAsyncLazyPersistService upon async lazy persist task end
*/
- public void onCompleteLazyPersist(String bpId, long blockId,
+ void onCompleteLazyPersist(String bpId, long blockId,
long creationTime, File[] savedFiles, V targetVolume);
/**
* Callback from RamDiskAsyncLazyPersistService upon async lazy persist task fail
*/
- public void onFailLazyPersist(String bpId, long blockId);
+ void onFailLazyPersist(String bpId, long blockId);
/**
* Move block from one storage to another storage
*/
- public ReplicaInfo moveBlockAcrossStorage(final ExtendedBlock block,
+ ReplicaInfo moveBlockAcrossStorage(final ExtendedBlock block,
StorageType targetStorageType) throws IOException;
/**
@@ -629,15 +630,15 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
*
* It is a no-op when dfs.datanode.block-pinning.enabled is set to false.
*/
- public void setPinning(ExtendedBlock block) throws IOException;
+ void setPinning(ExtendedBlock block) throws IOException;
/**
* Check whether the block was pinned
*/
- public boolean getPinning(ExtendedBlock block) throws IOException;
-
+ boolean getPinning(ExtendedBlock block) throws IOException;
+
/**
* Confirm whether the block is deleting
*/
- public boolean isDeletingBlock(String bpid, long blockId);
+ boolean isDeletingBlock(String bpid, long blockId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd4e1090/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 8d1bb2a..ee01924 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -39,38 +39,38 @@ public interface FsVolumeSpi {
FsVolumeReference obtainReference() throws ClosedChannelException;
/** @return the StorageUuid of the volume */
- public String getStorageID();
+ String getStorageID();
/** @return a list of block pools. */
- public String[] getBlockPoolList();
+ String[] getBlockPoolList();
/** @return the available storage space in bytes. */
- public long getAvailable() throws IOException;
+ long getAvailable() throws IOException;
/** @return the base path to the volume */
- public String getBasePath();
+ String getBasePath();
/** @return the path to the volume */
- public String getPath(String bpid) throws IOException;
+ String getPath(String bpid) throws IOException;
/** @return the directory for the finalized blocks in the block pool. */
- public File getFinalizedDir(String bpid) throws IOException;
+ File getFinalizedDir(String bpid) throws IOException;
- public StorageType getStorageType();
+ StorageType getStorageType();
/** Returns true if the volume is NOT backed by persistent storage. */
- public boolean isTransientStorage();
+ boolean isTransientStorage();
/**
* Reserve disk space for an RBW block so a writer does not run out of
* space before the block is full.
*/
- public void reserveSpaceForRbw(long bytesToReserve);
+ void reserveSpaceForRbw(long bytesToReserve);
/**
* Release disk space previously reserved for RBW block.
*/
- public void releaseReservedSpace(long bytesToRelease);
+ void releaseReservedSpace(long bytesToRelease);
/**
* Release reserved memory for an RBW block written to transient storage
@@ -78,7 +78,7 @@ public interface FsVolumeSpi {
* bytesToRelease will be rounded down to the OS page size since locked
* memory reservation must always be a multiple of the page size.
*/
- public void releaseLockedMemory(long bytesToRelease);
+ void releaseLockedMemory(long bytesToRelease);
/**
* BlockIterator will return ExtendedBlock entries from a block pool in
@@ -90,7 +90,7 @@ public interface FsVolumeSpi {
*
* Closing the iterator does not save it. You must call save to save it.
*/
- public interface BlockIterator extends Closeable {
+ interface BlockIterator extends Closeable {
/**
* Get the next block.<p/>
*
@@ -107,17 +107,17 @@ public interface FsVolumeSpi {
* this volume. In this case, EOF will be set on
* the iterator.
*/
- public ExtendedBlock nextBlock() throws IOException;
+ ExtendedBlock nextBlock() throws IOException;
/**
* Returns true if we got to the end of the block pool.
*/
- public boolean atEnd();
+ boolean atEnd();
/**
* Repositions the iterator at the beginning of the block pool.
*/
- public void rewind();
+ void rewind();
/**
* Save this block iterator to the underlying volume.
@@ -127,7 +127,7 @@ public interface FsVolumeSpi {
* @throws IOException If there was an error when saving the block
* iterator.
*/
- public void save() throws IOException;
+ void save() throws IOException;
/**
* Set the maximum staleness of entries that we will return.<p/>
@@ -138,25 +138,25 @@ public interface FsVolumeSpi {
* to 0, consumers of this API must handle race conditions where block
* disappear before they can be processed.
*/
- public void setMaxStalenessMs(long maxStalenessMs);
+ void setMaxStalenessMs(long maxStalenessMs);
/**
* Get the wall-clock time, measured in milliseconds since the Epoch,
* when this iterator was created.
*/
- public long getIterStartMs();
+ long getIterStartMs();
/**
* Get the wall-clock time, measured in milliseconds since the Epoch,
* when this iterator was last saved. Returns iterStartMs if the
* iterator was never saved.
*/
- public long getLastSavedMs();
+ long getLastSavedMs();
/**
* Get the id of the block pool which this iterator traverses.
*/
- public String getBlockPoolId();
+ String getBlockPoolId();
}
/**
@@ -168,7 +168,7 @@ public interface FsVolumeSpi {
*
* @return The new block iterator.
*/
- public BlockIterator newBlockIterator(String bpid, String name);
+ BlockIterator newBlockIterator(String bpid, String name);
/**
* Load a saved block iterator.
@@ -180,11 +180,10 @@ public interface FsVolumeSpi {
* @throws IOException If there was an IO error loading the saved
* block iterator.
*/
- public BlockIterator loadBlockIterator(String bpid, String name)
- throws IOException;
+ BlockIterator loadBlockIterator(String bpid, String name) throws IOException;
/**
* Get the FSDatasetSpi which this volume is a part of.
*/
- public FsDatasetSpi getDataset();
+ FsDatasetSpi getDataset();
}