Posted to common-commits@hadoop.apache.org by wa...@apache.org on 2013/10/17 07:32:55 UTC
svn commit: r1532967 [3/7] - in
/hadoop/common/branches/HDFS-4949/hadoop-common-project:
hadoop-annotations/ hadoop-common/ hadoop-common/dev-support/
hadoop-common/src/main/bin/ hadoop-common/src/main/conf/
hadoop-common/src/main/docs/ hadoop-common/s...
Propchange: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1522707-1532945
Merged /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java:r1526848
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java Thu Oct 17 05:32:42 2013
@@ -264,5 +264,9 @@ public class CommonConfigurationKeysPubl
/** Default value for HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN */
public static final int HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT =
60;
+
+ // HTTP policies to be used in configuration
+ public static final String HTTP_POLICY_HTTP_ONLY = "HTTP_ONLY";
+ public static final String HTTP_POLICY_HTTPS_ONLY = "HTTPS_ONLY";
}
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java Thu Oct 17 05:32:42 2013
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.fs;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -50,26 +51,26 @@ class Globber {
this.filter = filter;
}
- private FileStatus getFileStatus(Path path) {
+ private FileStatus getFileStatus(Path path) throws IOException {
try {
if (fs != null) {
return fs.getFileStatus(path);
} else {
return fc.getFileStatus(path);
}
- } catch (IOException e) {
+ } catch (FileNotFoundException e) {
return null;
}
}
- private FileStatus[] listStatus(Path path) {
+ private FileStatus[] listStatus(Path path) throws IOException {
try {
if (fs != null) {
return fs.listStatus(path);
} else {
return fc.util().listStatus(path);
}
- } catch (IOException e) {
+ } catch (FileNotFoundException e) {
return new FileStatus[0];
}
}
@@ -83,6 +84,15 @@ class Globber {
}
/**
+ * Convert a path component that contains backslash escape sequences to a
+ * literal string. This is necessary when you want to explicitly refer to a
+ * path that contains globber metacharacters.
+ */
+ private static String unescapePathComponent(String name) {
+ return name.replaceAll("\\\\(.)", "$1");
+ }
+
+ /**
* Translate an absolute path into a list of path components.
* We merge double slashes into a single slash here.
* POSIX root path, i.e. '/', does not get an entry in the list.
@@ -165,37 +175,72 @@ class Globber {
new Path(scheme, authority, Path.SEPARATOR)));
}
- for (String component : components) {
+ for (int componentIdx = 0; componentIdx < components.size();
+ componentIdx++) {
ArrayList<FileStatus> newCandidates =
new ArrayList<FileStatus>(candidates.size());
- GlobFilter globFilter = new GlobFilter(component);
+ GlobFilter globFilter = new GlobFilter(components.get(componentIdx));
+ String component = unescapePathComponent(components.get(componentIdx));
if (globFilter.hasPattern()) {
sawWildcard = true;
}
if (candidates.isEmpty() && sawWildcard) {
+ // Optimization: if there are no more candidates left, stop examining
+ // the path components. We can only do this if we've already seen
+ // a wildcard component-- otherwise, we still need to visit all path
+ // components in case one of them is a wildcard.
break;
}
- for (FileStatus candidate : candidates) {
- FileStatus resolvedCandidate = candidate;
- if (candidate.isSymlink()) {
- // We have to resolve symlinks, because otherwise we don't know
- // whether they are directories.
- resolvedCandidate = getFileStatus(candidate.getPath());
+ if ((componentIdx < components.size() - 1) &&
+ (!globFilter.hasPattern())) {
+ // Optimization: if this is not the terminal path component, and we
+ // are not matching against a glob, assume that it exists. If it
+ // doesn't exist, we'll find out later when resolving a later glob
+ // or the terminal path component.
+ for (FileStatus candidate : candidates) {
+ candidate.setPath(new Path(candidate.getPath(), component));
}
- if (resolvedCandidate == null ||
- resolvedCandidate.isDirectory() == false) {
- continue;
- }
- FileStatus[] children = listStatus(candidate.getPath());
- for (FileStatus child : children) {
- // Set the child path based on the parent path.
- // This keeps the symlinks in our path.
- child.setPath(new Path(candidate.getPath(),
- child.getPath().getName()));
- if (globFilter.accept(child.getPath())) {
- newCandidates.add(child);
+ continue;
+ }
+ for (FileStatus candidate : candidates) {
+ if (globFilter.hasPattern()) {
+ FileStatus[] children = listStatus(candidate.getPath());
+ if (children.length == 1) {
+ // If we get back only one result, this could be either a listing
+ // of a directory with one entry, or it could reflect the fact
+ // that what we listed resolved to a file.
+ //
+ // Unfortunately, we can't just compare the returned paths to
+ // figure this out. Consider the case where you have /a/b, where
+ // b is a symlink to "..". In that case, listing /a/b will give
+ // back "/a/b" again. If we just went by returned pathname, we'd
+ // incorrectly conclude that /a/b was a file and should not match
+ // /a/*/*. So we use getFileStatus of the path we just listed to
+ // disambiguate.
+ if (!getFileStatus(candidate.getPath()).isDirectory()) {
+ continue;
+ }
}
- }
+ for (FileStatus child : children) {
+ // Set the child path based on the parent path.
+ child.setPath(new Path(candidate.getPath(),
+ child.getPath().getName()));
+ if (globFilter.accept(child.getPath())) {
+ newCandidates.add(child);
+ }
+ }
+ } else {
+ // When dealing with non-glob components, use getFileStatus
+ // instead of listStatus. This is an optimization, but it also
+ // is necessary for correctness in HDFS, since there are some
+ // special HDFS directories like .reserved and .snapshot that are
+ // not visible to listStatus, but which do exist. (See HADOOP-9877)
+ FileStatus childStatus = getFileStatus(
+ new Path(candidate.getPath(), component));
+ if (childStatus != null) {
+ newCandidates.add(childStatus);
+ }
+ }
}
candidates = newCandidates;
}
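
For illustration, a minimal sketch (not part of the commit) of what the new
unescapePathComponent does: it strips one level of backslash escaping, so a
path component written as foo\*bar is matched as the literal name foo*bar
instead of being treated as a glob pattern.

    // Sketch only: one level of backslash escaping is removed.
    public class UnescapeDemo {
      public static void main(String[] args) {
        String escaped = "foo\\*bar";                         // the characters foo\*bar
        String literal = escaped.replaceAll("\\\\(.)", "$1"); // same regex as the patch
        System.out.println(literal);                          // prints foo*bar
      }
    }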
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java Thu Oct 17 05:32:42 2013
@@ -17,20 +17,6 @@
*/
package org.apache.hadoop.fs;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URLDecoder;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.HashMap;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -40,6 +26,14 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Progressable;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URLDecoder;
+import java.util.*;
+
/**
* This is an implementation of the Hadoop Archive
* Filesystem. This archive Filesystem has index files
@@ -53,7 +47,7 @@ import org.apache.hadoop.util.Progressab
* index for ranges of hashcodes.
*/
-public class HarFileSystem extends FilterFileSystem {
+public class HarFileSystem extends FileSystem {
private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
@@ -75,11 +69,13 @@ public class HarFileSystem extends Filte
// pointer into the static metadata cache
private HarMetaData metadata;
+ private FileSystem fs;
+
/**
* public construction of harfilesystem
- *
*/
public HarFileSystem() {
+ // Must call #initialize() method to set the underlying file system
}
/**
@@ -96,10 +92,11 @@ public class HarFileSystem extends Filte
/**
* Constructor to create a HarFileSystem with an
* underlying filesystem.
- * @param fs
+ * @param fs underlying file system
*/
public HarFileSystem(FileSystem fs) {
- super(fs);
+ this.fs = fs;
+ this.statistics = fs.statistics;
}
private synchronized void initializeMetadataCache(Configuration conf) {
@@ -171,6 +168,11 @@ public class HarFileSystem extends Filte
}
}
+ @Override
+ public Configuration getConf() {
+ return fs.getConf();
+ }
+
// get the version of the filesystem from the masterindex file
// the version is currently not useful since its the first version
// of archives
@@ -236,8 +238,7 @@ public class HarFileSystem extends Filte
throw new IOException("query component in Path not supported " + rawURI);
}
- URI tmp = null;
-
+ URI tmp;
try {
// convert <scheme>-<host> to <scheme>://<host>
URI baseUri = new URI(authority.replaceFirst("-", "://"));
@@ -256,7 +257,7 @@ public class HarFileSystem extends Filte
return URLDecoder.decode(str, "UTF-8");
}
- private String decodeFileName(String fname)
+ private String decodeFileName(String fname)
throws UnsupportedEncodingException {
int version = metadata.getVersion();
if (version == 2 || version == 3){
@@ -272,19 +273,30 @@ public class HarFileSystem extends Filte
public Path getWorkingDirectory() {
return new Path(uri.toString());
}
-
+
+ @Override
+ public Path getInitialWorkingDirectory() {
+ return getWorkingDirectory();
+ }
+
+ @Override
+ public FsStatus getStatus(Path p) throws IOException {
+ return fs.getStatus(p);
+ }
+
/**
* Create a har specific auth
* har-underlyingfs:port
- * @param underLyingURI the uri of underlying
+ * @param underLyingUri the uri of underlying
* filesystem
* @return har specific auth
*/
private String getHarAuth(URI underLyingUri) {
String auth = underLyingUri.getScheme() + "-";
if (underLyingUri.getHost() != null) {
- auth += underLyingUri.getHost() + ":";
+ auth += underLyingUri.getHost();
if (underLyingUri.getPort() != -1) {
+ auth += ":";
auth += underLyingUri.getPort();
}
}
@@ -293,7 +305,21 @@ public class HarFileSystem extends Filte
}
return auth;
}
-
+
+ /**
+ * Used for delegation token related functionality. Must delegate to
+ * underlying file system.
+ */
+ @Override
+ protected URI getCanonicalUri() {
+ return fs.getCanonicalUri();
+ }
+
+ @Override
+ protected URI canonicalizeUri(URI uri) {
+ return fs.canonicalizeUri(uri);
+ }
+
/**
* Returns the uri of this filesystem.
* The uri is of the form
@@ -304,6 +330,16 @@ public class HarFileSystem extends Filte
return this.uri;
}
+ @Override
+ protected void checkPath(Path path) {
+ fs.checkPath(path);
+ }
+
+ @Override
+ public Path resolvePath(Path p) throws IOException {
+ return fs.resolvePath(p);
+ }
+
/**
* this method returns the path
* inside the har filesystem.
@@ -418,7 +454,7 @@ public class HarFileSystem extends Filte
/**
* Get block locations from the underlying fs and fix their
* offsets and lengths.
- * @param file the input filestatus to get block locations
+ * @param file the input file status to get block locations
* @param start the start of the desired range in the contained file
* @param len the length of the desired range
* @return block locations for this segment of file
@@ -440,8 +476,7 @@ public class HarFileSystem extends Filte
}
/**
- * the hash of the path p inside iniside
- * the filesystem
+ * the hash of the path p inside the filesystem
* @param p the path in the harfilesystem
* @return the hash code of the path.
*/
@@ -474,13 +509,9 @@ public class HarFileSystem extends Filte
* the parent path directory
* @param statuses
* the list to add the children filestatuses to
- * @param children
- * the string list of children for this parent
- * @param archiveIndexStat
- * the archive index filestatus
*/
- private void fileStatusesInIndex(HarStatus parent, List<FileStatus> statuses,
- List<String> children) throws IOException {
+ private void fileStatusesInIndex(HarStatus parent, List<FileStatus> statuses)
+ throws IOException {
String parentString = parent.getName();
if (!parentString.endsWith(Path.SEPARATOR)){
parentString += Path.SEPARATOR;
@@ -546,7 +577,7 @@ public class HarFileSystem extends Filte
// stored in a single line in the index files
// the format is of the form
// filename "dir"/"file" partFileName startIndex length
- // <space seperated children>
+ // <space separated children>
private class HarStatus {
boolean isDir;
String name;
@@ -665,7 +696,6 @@ public class HarFileSystem extends Filte
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
// get the fs DataInputStream for the underlying file
HarStatus hstatus = getFileHarStatus(f);
- // we got it.. woo hooo!!!
if (hstatus.isDir()) {
throw new FileNotFoundException(f + " : not a file in " +
archivePath);
@@ -674,20 +704,39 @@ public class HarFileSystem extends Filte
hstatus.getPartName()),
hstatus.getStartIndex(), hstatus.getLength(), bufferSize);
}
-
+
+ /**
+ * Used for delegation token related functionality. Must delegate to
+ * underlying file system.
+ */
+ @Override
+ public FileSystem[] getChildFileSystems() {
+ return new FileSystem[]{fs};
+ }
+
@Override
- public FSDataOutputStream create(Path f,
- FsPermission permission,
- boolean overwrite,
- int bufferSize,
- short replication,
- long blockSize,
+ public FSDataOutputStream create(Path f, FsPermission permission,
+ boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
throw new IOException("Har: create not allowed.");
}
-
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
+ int bufferSize, short replication, long blockSize, Progressable progress)
+ throws IOException {
+ throw new IOException("Har: create not allowed.");
+ }
+
+ @Override
+ public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
+ throw new IOException("Har: append not allowed.");
+ }
+
@Override
public void close() throws IOException {
+ super.close();
if (fs != null) {
try {
fs.close();
@@ -703,9 +752,19 @@ public class HarFileSystem extends Filte
*/
@Override
public boolean setReplication(Path src, short replication) throws IOException{
- throw new IOException("Har: setreplication not allowed");
+ throw new IOException("Har: setReplication not allowed");
}
-
+
+ @Override
+ public boolean rename(Path src, Path dst) throws IOException {
+ throw new IOException("Har: rename not allowed");
+ }
+
+ @Override
+ public FSDataOutputStream append(Path f) throws IOException {
+ throw new IOException("Har: append not allowed");
+ }
+
/**
* Not implemented.
*/
@@ -713,7 +772,7 @@ public class HarFileSystem extends Filte
public boolean delete(Path f, boolean recursive) throws IOException {
throw new IOException("Har: delete not allowed");
}
-
+
/**
* liststatus returns the children of a directory
* after looking up the index files.
@@ -732,7 +791,7 @@ public class HarFileSystem extends Filte
throw new FileNotFoundException("File " + f + " not found in " + archivePath);
}
if (hstatus.isDir()) {
- fileStatusesInIndex(hstatus, statuses, hstatus.children);
+ fileStatusesInIndex(hstatus, statuses);
} else {
statuses.add(toFileStatus(hstatus, null));
}
@@ -747,7 +806,7 @@ public class HarFileSystem extends Filte
public Path getHomeDirectory() {
return new Path(uri.toString());
}
-
+
@Override
public void setWorkingDirectory(Path newDir) {
//does nothing.
@@ -765,11 +824,17 @@ public class HarFileSystem extends Filte
* not implemented.
*/
@Override
- public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws
- IOException {
+ public void copyFromLocalFile(boolean delSrc, boolean overwrite,
+ Path src, Path dst) throws IOException {
throw new IOException("Har: copyfromlocalfile not allowed");
}
-
+
+ @Override
+ public void copyFromLocalFile(boolean delSrc, boolean overwrite,
+ Path[] srcs, Path dst) throws IOException {
+ throw new IOException("Har: copyfromlocalfile not allowed");
+ }
+
/**
* copies the file in the har filesystem to a local file.
*/
@@ -806,11 +871,16 @@ public class HarFileSystem extends Filte
throw new IOException("Har: setowner not allowed");
}
+ @Override
+ public void setTimes(Path p, long mtime, long atime) throws IOException {
+ throw new IOException("Har: setTimes not allowed");
+ }
+
/**
* Not implemented.
*/
@Override
- public void setPermission(Path p, FsPermission permisssion)
+ public void setPermission(Path p, FsPermission permission)
throws IOException {
throw new IOException("Har: setPermission not allowed");
}
@@ -899,7 +969,7 @@ public class HarFileSystem extends Filte
newlen = (int) (end - position);
}
// end case
- if (newlen == 0)
+ if (newlen == 0)
return ret;
ret = underLyingStream.read(b, offset, newlen);
position += ret;
@@ -936,8 +1006,8 @@ public class HarFileSystem extends Filte
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
- //do not need to implement this
- // hdfs in itself does seektonewsource
+ // do not need to implement this
+ // hdfs in itself does seektonewsource
// while reading.
return false;
}
@@ -973,14 +1043,12 @@ public class HarFileSystem extends Filte
}
@Override
- public void setReadahead(Long readahead)
- throws IOException, UnsupportedEncodingException {
+ public void setReadahead(Long readahead) throws IOException {
underLyingStream.setReadahead(readahead);
}
@Override
- public void setDropBehind(Boolean dropBehind)
- throws IOException, UnsupportedEncodingException {
+ public void setDropBehind(Boolean dropBehind) throws IOException {
underLyingStream.setDropBehind(dropBehind);
}
}
@@ -998,19 +1066,6 @@ public class HarFileSystem extends Filte
long length, int bufsize) throws IOException {
super(new HarFsInputStream(fs, p, start, length, bufsize));
}
-
- /**
- * constructor for har input stream.
- * @param fs the underlying filesystem
- * @param p the path in the underlying file system
- * @param start the start position in the part file
- * @param length the length of valid data in the part file.
- * @throws IOException
- */
- public HarFSDataInputStream(FileSystem fs, Path p, long start, long length)
- throws IOException {
- super(new HarFsInputStream(fs, p, start, length, 0));
- }
}
private class HarMetaData {
@@ -1057,7 +1112,7 @@ public class HarFileSystem extends Filte
}
private void parseMetaData() throws IOException {
- Text line;
+ Text line = new Text();
long read;
FSDataInputStream in = null;
LineReader lin = null;
@@ -1067,7 +1122,6 @@ public class HarFileSystem extends Filte
FileStatus masterStat = fs.getFileStatus(masterIndexPath);
masterIndexTimestamp = masterStat.getModificationTime();
lin = new LineReader(in, getConf());
- line = new Text();
read = lin.readLine(line);
// the first line contains the version of the index file
@@ -1081,7 +1135,7 @@ public class HarFileSystem extends Filte
}
// each line contains a hashcode range and the index file name
- String[] readStr = null;
+ String[] readStr;
while(read < masterStat.getLen()) {
int b = lin.readLine(line);
read += b;
@@ -1093,6 +1147,9 @@ public class HarFileSystem extends Filte
endHash));
line.clear();
}
+ } catch (IOException ioe) {
+ LOG.warn("Encountered exception ", ioe);
+ throw ioe;
} finally {
IOUtils.cleanup(LOG, lin, in);
}
@@ -1144,4 +1201,43 @@ public class HarFileSystem extends Filte
return size() > MAX_ENTRIES;
}
}
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public FsServerDefaults getServerDefaults() throws IOException {
+ return fs.getServerDefaults();
+ }
+
+ @Override
+ public FsServerDefaults getServerDefaults(Path f) throws IOException {
+ return fs.getServerDefaults(f);
+ }
+
+ @Override
+ public long getUsed() throws IOException{
+ return fs.getUsed();
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public long getDefaultBlockSize() {
+ return fs.getDefaultBlockSize();
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public long getDefaultBlockSize(Path f) {
+ return fs.getDefaultBlockSize(f);
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public short getDefaultReplication() {
+ return fs.getDefaultReplication();
+ }
+
+ @Override
+ public short getDefaultReplication(Path f) {
+ return fs.getDefaultReplication(f);
+ }
}
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java Thu Oct 17 05:32:42 2013
@@ -218,10 +218,13 @@ public class Path implements Comparable
*/
public static Path mergePaths(Path path1, Path path2) {
String path2Str = path2.toUri().getPath();
- if(hasWindowsDrive(path2Str)) {
- path2Str = path2Str.substring(path2Str.indexOf(':')+1);
- }
- return new Path(path1 + path2Str);
+ path2Str = path2Str.substring(startPositionWithoutWindowsDrive(path2Str));
+ // Add path components explicitly, because simply concatenating two path
+ // strings is not safe, for example:
+ // "/" + "/foo" yields "//foo", which will be parsed as authority in Path
+ return new Path(path1.toUri().getScheme(),
+ path1.toUri().getAuthority(),
+ path1.toUri().getPath() + path2Str);
}
/**
@@ -247,8 +250,8 @@ public class Path implements Comparable
}
// trim trailing slash from non-root path (ignoring windows drive)
- int minLength = hasWindowsDrive(path) ? 4 : 1;
- if (path.length() > minLength && path.endsWith("/")) {
+ int minLength = startPositionWithoutWindowsDrive(path) + 1;
+ if (path.length() > minLength && path.endsWith(SEPARATOR)) {
path = path.substring(0, path.length()-1);
}
@@ -259,6 +262,14 @@ public class Path implements Comparable
return (WINDOWS && hasDriveLetterSpecifier.matcher(path).find());
}
+ private static int startPositionWithoutWindowsDrive(String path) {
+ if (hasWindowsDrive(path)) {
+ return path.charAt(0) == SEPARATOR_CHAR ? 3 : 2;
+ } else {
+ return 0;
+ }
+ }
+
/**
* Determine whether a given path string represents an absolute path on
* Windows. e.g. "C:/a/b" is an absolute path. "C:a/b" is not.
@@ -270,13 +281,11 @@ public class Path implements Comparable
*/
public static boolean isWindowsAbsolutePath(final String pathString,
final boolean slashed) {
- int start = (slashed ? 1 : 0);
-
- return
- hasWindowsDrive(pathString) &&
- pathString.length() >= (start + 3) &&
- ((pathString.charAt(start + 2) == SEPARATOR_CHAR) ||
- (pathString.charAt(start + 2) == '\\'));
+ int start = startPositionWithoutWindowsDrive(pathString);
+ return start > 0
+ && pathString.length() > start
+ && ((pathString.charAt(start) == SEPARATOR_CHAR) ||
+ (pathString.charAt(start) == '\\'));
}
/** Convert this to a URI. */
@@ -300,7 +309,7 @@ public class Path implements Comparable
* True if the path component (i.e. directory) of this URI is absolute.
*/
public boolean isUriPathAbsolute() {
- int start = hasWindowsDrive(uri.getPath()) ? 3 : 0;
+ int start = startPositionWithoutWindowsDrive(uri.getPath());
return uri.getPath().startsWith(SEPARATOR, start);
}
@@ -334,7 +343,7 @@ public class Path implements Comparable
public Path getParent() {
String path = uri.getPath();
int lastSlash = path.lastIndexOf('/');
- int start = hasWindowsDrive(path) ? 3 : 0;
+ int start = startPositionWithoutWindowsDrive(path);
if ((path.length() == start) || // empty path
(lastSlash == start && path.length() == start+1)) { // at root
return null;
@@ -343,8 +352,7 @@ public class Path implements Comparable
if (lastSlash==-1) {
parent = CUR_DIR;
} else {
- int end = hasWindowsDrive(path) ? 3 : 0;
- parent = path.substring(0, lastSlash==end?end+1:lastSlash);
+ parent = path.substring(0, lastSlash==start?start+1:lastSlash);
}
return new Path(uri.getScheme(), uri.getAuthority(), parent);
}
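
A short illustration (not part of the commit) of the behavior these Path hunks
rely on. A raw string "//foo" would be parsed as a URI with authority "foo" and
an empty path, which is why mergePaths now rebuilds the result from scheme,
authority, and path separately; startPositionWithoutWindowsDrive, for its part,
returns 3 for "/C:/a/b", 2 for "C:/a/b", and 0 for paths without a drive letter.

    import org.apache.hadoop.fs.Path;

    // Sketch only: mergePaths keeps "//" out of URI authority parsing.
    public class MergeDemo {
      public static void main(String[] args) {
        Path merged = Path.mergePaths(new Path("/"), new Path("/foo"));
        System.out.println(merged);  // expected: /foo, not //foo
      }
    }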
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Seekable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Seekable.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Seekable.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Seekable.java Thu Oct 17 05:32:42 2013
@@ -22,7 +22,9 @@ import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-/** Stream that permits seeking. */
+/**
+ * Stream that permits seeking.
+ */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface Seekable {
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java Thu Oct 17 05:32:42 2013
@@ -84,11 +84,16 @@ abstract class CommandWithDestination ex
*/
protected void getLocalDestination(LinkedList<String> args)
throws IOException {
+ String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
try {
- String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
dst = new PathData(new URI(pathString), getConf());
} catch (URISyntaxException e) {
- throw new IOException("unexpected URISyntaxException", e);
+ if (Path.WINDOWS) {
+ // Unlike URI, PathData knows how to parse Windows drive-letter paths.
+ dst = new PathData(pathString, getConf());
+ } else {
+ throw new IOException("unexpected URISyntaxException", e);
+ }
}
}
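
For context: java.net.URI rejects backslashes outright, so a Windows
drive-letter path such as C:\Users\foo can never be parsed as a URI; the new
branch (repeated in CopyCommands below) falls back to PathData's own string
parsing on Windows. A sketch of the failure mode, not part of the commit:

    // Sketch only: the exception the Windows branch handles.
    public class UriDemo {
      public static void main(String[] args) {
        try {
          new java.net.URI("C:\\Users\\foo");      // '\' is illegal in a URI
        } catch (java.net.URISyntaxException e) {
          System.out.println("URI cannot parse it: " + e.getMessage());
          // On Windows the shell now retries with new PathData(arg, conf),
          // which understands drive-letter paths.
        }
      }
    }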
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java Thu Oct 17 05:32:42 2013
@@ -204,13 +204,18 @@ class CopyCommands {
// commands operating on local paths have no need for glob expansion
@Override
protected List<PathData> expandArgument(String arg) throws IOException {
+ List<PathData> items = new LinkedList<PathData>();
try {
- List<PathData> items = new LinkedList<PathData>();
items.add(new PathData(new URI(arg), getConf()));
- return items;
} catch (URISyntaxException e) {
- throw new IOException("unexpected URISyntaxException", e);
+ if (Path.WINDOWS) {
+ // Unlike URI, PathData knows how to parse Windows drive-letter paths.
+ items.add(new PathData(arg, getConf()));
+ } else {
+ throw new IOException("unexpected URISyntaxException", e);
+ }
}
+ return items;
}
@Override
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java Thu Oct 17 05:32:42 2013
@@ -39,11 +39,14 @@ class SetReplication extends FsCommand {
}
public static final String NAME = "setrep";
- public static final String USAGE = "[-R] [-w] <rep> <path/file> ...";
+ public static final String USAGE = "[-R] [-w] <rep> <path> ...";
public static final String DESCRIPTION =
- "Set the replication level of a file.\n" +
- "The -R flag requests a recursive change of replication level\n" +
- "for an entire tree.";
+ "Set the replication level of a file. If <path> is a directory\n" +
+ "then the command recursively changes the replication factor of\n" +
+ "all files under the directory tree rooted at <path>.\n" +
+ "The -w flag requests that the command wait for the replication\n" +
+ "to complete. This can potentially take a very long time.\n" +
+ "The -R flag is accepted for backwards compatibility. It has no effect.";
protected short newRep = 0;
protected List<PathData> waitList = new LinkedList<PathData>();
@@ -54,7 +57,7 @@ class SetReplication extends FsCommand {
CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "R", "w");
cf.parse(args);
waitOpt = cf.getOpt("w");
- setRecursive(cf.getOpt("R"));
+ setRecursive(true);
try {
newRep = Short.parseShort(args.removeFirst());
@@ -126,4 +129,4 @@ class SetReplication extends FsCommand {
out.println(" done");
}
}
-}
\ No newline at end of file
+}
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java Thu Oct 17 05:32:42 2013
@@ -68,7 +68,7 @@ class SnapshotCommands extends FsCommand
throw new IllegalArgumentException("<snapshotDir> is missing.");
}
if (args.size() > 2) {
- throw new IllegalArgumentException("Too many arguements.");
+ throw new IllegalArgumentException("Too many arguments.");
}
if (args.size() == 2) {
snapshotName = args.removeLast();
@@ -110,7 +110,7 @@ class SnapshotCommands extends FsCommand
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
if (args.size() != 2) {
- throw new IOException("args number not 2: " + args.size());
+ throw new IllegalArgumentException("Incorrect number of arguments.");
}
snapshotName = args.removeLast();
}
@@ -150,7 +150,7 @@ class SnapshotCommands extends FsCommand
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
if (args.size() != 3) {
- throw new IOException("args number not 3: " + args.size());
+ throw new IllegalArgumentException("Incorrect number of arguments.");
}
newName = args.removeLast();
oldName = args.removeLast();
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java Thu Oct 17 05:32:42 2013
@@ -568,6 +568,9 @@ public class ActiveStandbyElector implem
enterNeutralMode();
reJoinElection(0);
break;
+ case SaslAuthenticated:
+ LOG.info("Successfully authenticated to ZooKeeper using SASL.");
+ break;
default:
fatalError("Unexpected Zookeeper watch event state: "
+ event.getState());
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java Thu Oct 17 05:32:42 2013
@@ -43,13 +43,15 @@ public interface HAServiceProtocol {
public static final long versionID = 1L;
/**
- * An HA service may be in active or standby state. During
- * startup, it is in an unknown INITIALIZING state.
+ * An HA service may be in active or standby state. During startup, it is in
+ * an unknown INITIALIZING state. During shutdown, it is in the STOPPING state
+ * and can no longer return to active/standby states.
*/
public enum HAServiceState {
INITIALIZING("initializing"),
ACTIVE("active"),
- STANDBY("standby");
+ STANDBY("standby"),
+ STOPPING("stopping");
private String name;
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java Thu Oct 17 05:32:42 2013
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.http;
-import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -29,26 +28,41 @@ import org.apache.hadoop.fs.CommonConfig
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class HttpConfig {
- private static boolean sslEnabled;
+ private static Policy policy;
+ public enum Policy {
+ HTTP_ONLY,
+ HTTPS_ONLY;
+
+ public static Policy fromString(String value) {
+ if (value.equalsIgnoreCase(CommonConfigurationKeysPublic
+ .HTTP_POLICY_HTTPS_ONLY)) {
+ return HTTPS_ONLY;
+ }
+ return HTTP_ONLY;
+ }
+ }
static {
Configuration conf = new Configuration();
- sslEnabled = conf.getBoolean(
- CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
- CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
+ boolean sslEnabled = conf.getBoolean(
+ CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
+ CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
+ policy = sslEnabled ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY;
}
- @VisibleForTesting
- static void setSecure(boolean secure) {
- sslEnabled = secure;
+ public static void setPolicy(Policy policy) {
+ HttpConfig.policy = policy;
}
public static boolean isSecure() {
- return sslEnabled;
+ return policy == Policy.HTTPS_ONLY;
}
public static String getSchemePrefix() {
return (isSecure()) ? "https://" : "http://";
}
+ public static String getScheme(Policy policy) {
+ return policy == Policy.HTTPS_ONLY ? "https://" : "http://";
+ }
}
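
A usage sketch (not part of the commit) of the new Policy enum; fromString
compares case-insensitively and treats anything other than HTTPS_ONLY as
HTTP_ONLY:

    // Sketch only:
    HttpConfig.Policy p = HttpConfig.Policy.fromString("https_only");
    System.out.println(HttpConfig.getScheme(p));             // https://
    System.out.println(HttpConfig.getScheme(
        HttpConfig.Policy.fromString("anything else")));     // http://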
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java Thu Oct 17 05:32:42 2013
@@ -341,6 +341,7 @@ public class HttpServer implements Filte
}
listener.setHost(bindAddress);
listener.setPort(port);
+ LOG.info("SSL is enabled on " + toString());
} else {
listenerStartedExternally = true;
listener = connector;
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java Thu Oct 17 05:32:42 2013
@@ -34,6 +34,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.net.ConnectTimeoutException;
@@ -531,6 +532,15 @@ public class RetryPolicies {
this.maxDelayBase = maxDelayBase;
}
+ /**
+ * @return 0 if this is our first failover/retry (i.e., retry immediately);
+ * otherwise, an exponentially increasing sleep time
+ */
+ private long getFailoverOrRetrySleepTime(int times) {
+ return times == 0 ? 0 :
+ calculateExponentialTime(delayMillis, times, maxDelayBase);
+ }
+
@Override
public RetryAction shouldRetry(Exception e, int retries,
int failovers, boolean isIdempotentOrAtMostOnce) throws Exception {
@@ -546,11 +556,8 @@ public class RetryPolicies {
e instanceof StandbyException ||
e instanceof ConnectTimeoutException ||
isWrappedStandbyException(e)) {
- return new RetryAction(
- RetryAction.RetryDecision.FAILOVER_AND_RETRY,
- // retry immediately if this is our first failover, sleep otherwise
- failovers == 0 ? 0 :
- calculateExponentialTime(delayMillis, failovers, maxDelayBase));
+ return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+ getFailoverOrRetrySleepTime(failovers));
} else if (e instanceof SocketException ||
(e instanceof IOException && !(e instanceof RemoteException))) {
if (isIdempotentOrAtMostOnce) {
@@ -561,8 +568,14 @@ public class RetryPolicies {
"whether it was invoked");
}
} else {
- return fallbackPolicy.shouldRetry(e, retries, failovers,
- isIdempotentOrAtMostOnce);
+ RetriableException re = getWrappedRetriableException(e);
+ if (re != null) {
+ return new RetryAction(RetryAction.RetryDecision.RETRY,
+ getFailoverOrRetrySleepTime(retries));
+ } else {
+ return fallbackPolicy.shouldRetry(e, retries, failovers,
+ isIdempotentOrAtMostOnce);
+ }
}
}
@@ -596,4 +609,14 @@ public class RetryPolicies {
StandbyException.class);
return unwrapped instanceof StandbyException;
}
+
+ private static RetriableException getWrappedRetriableException(Exception e) {
+ if (!(e instanceof RemoteException)) {
+ return null;
+ }
+ Exception unwrapped = ((RemoteException)e).unwrapRemoteException(
+ RetriableException.class);
+ return unwrapped instanceof RetriableException ?
+ (RetriableException) unwrapped : null;
+ }
}
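
The refactored getFailoverOrRetrySleepTime is now shared by the failover path
and the new RetriableException path. Roughly (the real calculateExponentialTime
also randomizes the delay, so treat this as an approximation, not the exact
formula):

    // Approximate shape of the sleep schedule:
    //   times == 0 -> 0 ms, retry or fail over immediately
    //   times == n -> about delayMillis * 2^n, capped at maxDelayBase
    static long approxSleep(int times, long delayMillis, long maxDelayBase) {
      return times == 0 ? 0
          : Math.min(maxDelayBase, delayMillis * (1L << times));
    }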
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java Thu Oct 17 05:32:42 2013
@@ -1295,6 +1295,29 @@ public abstract class Server {
}
}
+ private Throwable getCauseForInvalidToken(IOException e) {
+ Throwable cause = e;
+ while (cause != null) {
+ if (cause instanceof RetriableException) {
+ return (RetriableException) cause;
+ } else if (cause instanceof StandbyException) {
+ return (StandbyException) cause;
+ } else if (cause instanceof InvalidToken) {
+ // FIXME: hadoop method signatures are restricting the SASL
+ // callbacks to only returning InvalidToken, but some services
+ // need to throw other exceptions (e.g. NN + StandbyException),
+ // so for now we'll tunnel the real exceptions via an
+ // InvalidToken's cause which normally is not set
+ if (cause.getCause() != null) {
+ cause = cause.getCause();
+ }
+ return cause;
+ }
+ cause = cause.getCause();
+ }
+ return e;
+ }
+
private void saslProcess(RpcSaslProto saslMessage)
throws WrappedRpcServerException, IOException, InterruptedException {
if (saslContextEstablished) {
@@ -1307,29 +1330,11 @@ public abstract class Server {
try {
saslResponse = processSaslMessage(saslMessage);
} catch (IOException e) {
- IOException sendToClient = e;
- Throwable cause = e;
- while (cause != null) {
- if (cause instanceof InvalidToken) {
- // FIXME: hadoop method signatures are restricting the SASL
- // callbacks to only returning InvalidToken, but some services
- // need to throw other exceptions (ex. NN + StandyException),
- // so for now we'll tunnel the real exceptions via an
- // InvalidToken's cause which normally is not set
- if (cause.getCause() != null) {
- cause = cause.getCause();
- }
- sendToClient = (IOException) cause;
- break;
- }
- cause = cause.getCause();
- }
rpcMetrics.incrAuthenticationFailures();
- String clientIP = this.toString();
// attempting user could be null
- AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser +
- " (" + e.getLocalizedMessage() + ")");
- throw sendToClient;
+ AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
+ + attemptingUser + " (" + e.getLocalizedMessage() + ")");
+ throw (IOException) getCauseForInvalidToken(e);
}
if (saslServer != null && saslServer.isComplete()) {
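
The effect of the extracted getCauseForInvalidToken, sketched here rather than
taken from the commit: it walks the cause chain and surfaces a
RetriableException or StandbyException (or an InvalidToken's tunneled cause) so
the client sees an actionable error instead of a generic auth failure.

    // Sketch only: a StandbyException tunneled through InvalidToken.
    IOException e = new SecretManager.InvalidToken("token check failed");
    e.initCause(new StandbyException());
    // getCauseForInvalidToken(e) returns the StandbyException, letting a
    // client with a failover proxy retry against another server.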
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java Thu Oct 17 05:32:42 2013
@@ -154,4 +154,11 @@ public class CachedDNSToSwitchMapping ex
public void reloadCachedMappings() {
cache.clear();
}
+
+ @Override
+ public void reloadCachedMappings(List<String> names) {
+ for (String name : names) {
+ cache.remove(name);
+ }
+ }
}
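
A usage sketch (not part of the commit) of the new per-node overload; the
no-argument reloadCachedMappings() clears the whole cache, while the List
variant evicts only the named hosts:

    // Sketch only:
    DNSToSwitchMapping mapping = new ScriptBasedMapping();  // any cached mapping
    mapping.reloadCachedMappings(java.util.Arrays.asList("host1.example.com"));
    // Only host1's cached rack entry is evicted; other hosts stay cached.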
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java Thu Oct 17 05:32:42 2013
@@ -59,4 +59,12 @@ public interface DNSToSwitchMapping {
* will get a chance to see the new data.
*/
public void reloadCachedMappings();
+
+ /**
+ * Reload cached mappings on specific nodes.
+ *
+ * If there is a cache on these nodes, this method will clear it, so that
+ * future accesses will see updated data.
+ */
+ public void reloadCachedMappings(List<String> names);
}
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java Thu Oct 17 05:32:42 2013
@@ -269,5 +269,11 @@ public final class ScriptBasedMapping ex
// Nothing to do here, since RawScriptBasedMapping has no cache, and
// does not inherit from CachedDNSToSwitchMapping
}
+
+ @Override
+ public void reloadCachedMappings(List<String> names) {
+ // Nothing to do here, since RawScriptBasedMapping has no cache, and
+ // does not inherit from CachedDNSToSwitchMapping
+ }
}
}
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java Thu Oct 17 05:32:42 2013
@@ -162,5 +162,12 @@ public class TableMapping extends Cached
}
}
}
+
+ @Override
+ public void reloadCachedMappings(List<String> names) {
+ // TableMapping has to reload all mappings at once, so there is no way
+ // to reload mappings for specific nodes only
+ reloadCachedMappings();
+ }
}
}
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java Thu Oct 17 05:32:42 2013
@@ -45,11 +45,13 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.Server.Connection;
+import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.token.SecretManager;
-import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.TokenIdentifier;
/**
* A utility class for dealing with SASL on RPC server
@@ -267,13 +269,15 @@ public class SaslRpcServer {
this.connection = connection;
}
- private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken {
- return encodePassword(secretManager.retrievePassword(tokenid));
+ private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken,
+ StandbyException, RetriableException, IOException {
+ return encodePassword(secretManager.retriableRetrievePassword(tokenid));
}
@Override
public void handle(Callback[] callbacks) throws InvalidToken,
- UnsupportedCallbackException {
+ UnsupportedCallbackException, StandbyException, RetriableException,
+ IOException {
NameCallback nc = null;
PasswordCallback pc = null;
AuthorizeCallback ac = null;
@@ -292,7 +296,8 @@ public class SaslRpcServer {
}
}
if (pc != null) {
- TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(), secretManager);
+ TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(),
+ secretManager);
char[] password = getPassword(tokenIdentifier);
UserGroupInformation user = null;
user = tokenIdentifier.getUser(); // may throw exception
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java Thu Oct 17 05:32:42 2013
@@ -33,6 +33,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -1333,7 +1334,14 @@ public class UserGroupInformation {
* @return Credentials of tokens associated with this user
*/
public synchronized Credentials getCredentials() {
- return new Credentials(getCredentialsInternal());
+ Credentials creds = new Credentials(getCredentialsInternal());
+ Iterator<Token<?>> iter = creds.getAllTokens().iterator();
+ while (iter.hasNext()) {
+ if (iter.next() instanceof Token.PrivateToken) {
+ iter.remove();
+ }
+ }
+ return creds;
}
/**
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java Thu Oct 17 05:32:42 2013
@@ -53,6 +53,8 @@ public class FileBasedKeyStoresFactory i
"ssl.{0}.keystore.location";
public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY =
"ssl.{0}.keystore.password";
+ public static final String SSL_KEYSTORE_KEYPASSWORD_TPL_KEY =
+ "ssl.{0}.keystore.keypassword";
public static final String SSL_KEYSTORE_TYPE_TPL_KEY =
"ssl.{0}.keystore.type";
@@ -136,7 +138,7 @@ public class FileBasedKeyStoresFactory i
conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY),
DEFAULT_KEYSTORE_TYPE);
KeyStore keystore = KeyStore.getInstance(keystoreType);
- String keystorePassword = null;
+ String keystoreKeyPassword = null;
if (requireClientCert || mode == SSLFactory.Mode.SERVER) {
String locationProperty =
resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY);
@@ -147,11 +149,17 @@ public class FileBasedKeyStoresFactory i
}
String passwordProperty =
resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY);
- keystorePassword = conf.get(passwordProperty, "");
+ String keystorePassword = conf.get(passwordProperty, "");
if (keystorePassword.isEmpty()) {
throw new GeneralSecurityException("The property '" + passwordProperty +
"' has not been set in the ssl configuration file.");
}
+ String keyPasswordProperty =
+ resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY);
+ // Key password defaults to the same value as store password for
+ // compatibility with legacy configurations that did not use a separate
+ // configuration property for key password.
+ keystoreKeyPassword = conf.get(keyPasswordProperty, keystorePassword);
LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation);
InputStream is = new FileInputStream(keystoreLocation);
@@ -167,8 +175,8 @@ public class FileBasedKeyStoresFactory i
KeyManagerFactory keyMgrFactory = KeyManagerFactory
.getInstance(SSLFactory.SSLCERTIFICATE);
- keyMgrFactory.init(keystore, (keystorePassword != null) ?
- keystorePassword.toCharArray() : null);
+ keyMgrFactory.init(keystore, (keystoreKeyPassword != null) ?
+ keystoreKeyPassword.toCharArray() : null);
keyManagers = keyMgrFactory.getKeyManagers();
//trust store
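
The new keypassword property resolves as below (sketch, SERVER mode; the
templates above expand {0} to "server" or "client"); the fallback keeps legacy
ssl-server.xml files working unchanged:

    // Sketch only: effective resolution order for the key password.
    String storePass = conf.get("ssl.server.keystore.password", "");
    String keyPass   = conf.get("ssl.server.keystore.keypassword", storePass);
    // keyPass is handed to KeyManagerFactory.init(); storePass still unlocks
    // the keystore file itself.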
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java Thu Oct 17 05:32:42 2013
@@ -29,6 +29,7 @@ import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
@@ -66,7 +67,29 @@ public abstract class SecretManager<T ex
* @return the password to use
* @throws InvalidToken the token was invalid
*/
- public abstract byte[] retrievePassword(T identifier) throws InvalidToken;
+ public abstract byte[] retrievePassword(T identifier)
+ throws InvalidToken;
+
+ /**
+   * The same functionality as {@link #retrievePassword}, except that this
+   * method can throw a {@link RetriableException} or a {@link StandbyException}
+   * to indicate that the client can retry/failover the same operation because
+   * of a temporary issue on the server side.
+ *
+ * @param identifier the identifier to validate
+ * @return the password to use
+ * @throws InvalidToken the token was invalid
+   * @throws StandbyException the server is in standby state and the client
+   * can try other servers
+   * @throws RetriableException the token was invalid, but the server thinks
+   * this may be a temporary issue and suggests that the client retry
+ * @throws IOException to allow future exceptions to be added without breaking
+ * compatibility
+ */
+ public byte[] retriableRetrievePassword(T identifier)
+ throws InvalidToken, StandbyException, RetriableException, IOException {
+ return retrievePassword(identifier);
+ }
/**
* Create an empty token identifier.
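Editor's note: because the base-class default of retriableRetrievePassword simply delegates to retrievePassword, existing SecretManager subclasses compile and behave as before; only managers that want to signal transient failures need to override it. A hedged sketch of such an override (inStandby() is a made-up probe, not part of the API):

    import java.io.IOException;
    import org.apache.hadoop.ipc.RetriableException;
    import org.apache.hadoop.ipc.StandbyException;
    import org.apache.hadoop.security.token.SecretManager;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public abstract class RetryAwareSecretManager<T extends TokenIdentifier>
        extends SecretManager<T> {

      // Hypothetical server-state probe; not part of SecretManager.
      protected abstract boolean inStandby();

      @Override
      public byte[] retriableRetrievePassword(T identifier)
          throws InvalidToken, StandbyException, RetriableException,
          IOException {
        if (inStandby()) {
          // Client should fail over to an active server.
          throw new StandbyException("server is in standby state");
        }
        try {
          return retrievePassword(identifier);
        } catch (InvalidToken e) {
          // The token may simply not have arrived yet; ask for a retry.
          throw new RetriableException(e);
        }
      }
    }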
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java Thu Oct 17 05:32:42 2013
@@ -19,31 +19,20 @@
package org.apache.hadoop.security.token;
import com.google.common.collect.Maps;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.ServiceLoader;
-
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.*;
import org.apache.hadoop.util.ReflectionUtils;
+import java.io.*;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.ServiceLoader;
+
/**
* The client-side form of the token.
*/
@@ -195,6 +184,19 @@ public class Token<T extends TokenIdenti
service = newService;
}
+ /**
+   * Marks a token as a private clone. Used by the HA failover proxy to
+   * indicate that a token should not be visible to the user via
+   * UGI.getCredentials().
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public static class PrivateToken<T extends TokenIdentifier> extends Token<T> {
+ public PrivateToken(Token<T> token) {
+ super(token);
+ }
+ }
+
@Override
public void readFields(DataInput in) throws IOException {
int len = WritableUtils.readVInt(in);
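Editor's note: a PrivateToken is an ordinary copy-constructed clone whose runtime type marks it as internal, which is exactly what the getCredentials() filter keys on. A hypothetical HA proxy could clone a logical-service token for one physical address like this (the helper and service string are illustrative):

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class PrivateTokenDemo {
      // Clones a token issued for a logical service and re-targets the
      // clone at one physical server; the clone stays user-invisible.
      static <T extends TokenIdentifier> Token<T> cloneFor(
          Token<T> logical, String physicalService) {
        Token<T> clone = new Token.PrivateToken<T>(logical);
        clone.setService(new Text(physicalService));
        return clone;
      }
    }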
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java Thu Oct 17 05:32:42 2013
@@ -45,7 +45,7 @@ import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Hive"})
@InterfaceStability.Evolving
public abstract
class AbstractDelegationTokenSecretManager<TokenIdent
@@ -289,20 +289,30 @@ extends AbstractDelegationTokenIdentifie
+ tokenRenewInterval, password, getTrackingIdIfEnabled(identifier)));
return password;
}
-
- @Override
- public synchronized byte[] retrievePassword(TokenIdent identifier)
+
+ /**
+   * Find the DelegationTokenInformation for the given token id, and verify
+   * that the token has not expired. Note that this method must be called
+   * while holding the secret manager's monitor.
+ */
+ protected DelegationTokenInformation checkToken(TokenIdent identifier)
throws InvalidToken {
+ assert Thread.holdsLock(this);
DelegationTokenInformation info = currentTokens.get(identifier);
if (info == null) {
throw new InvalidToken("token (" + identifier.toString()
+ ") can't be found in cache");
}
- long now = Time.now();
- if (info.getRenewDate() < now) {
+ if (info.getRenewDate() < Time.now()) {
throw new InvalidToken("token (" + identifier.toString() + ") is expired");
}
- return info.getPassword();
+ return info;
+ }
+
+ @Override
+ public synchronized byte[] retrievePassword(TokenIdent identifier)
+ throws InvalidToken {
+ return checkToken(identifier).getPassword();
}
protected String getTrackingIdIfEnabled(TokenIdent ident) {
@@ -444,6 +454,10 @@ extends AbstractDelegationTokenIdentifie
byte[] password;
String trackingId;
+ public DelegationTokenInformation(long renewDate, byte[] password) {
+ this(renewDate, password, null);
+ }
+
public DelegationTokenInformation(long renewDate, byte[] password,
String trackingId) {
this.renewDate = renewDate;
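Editor's note: checkToken() asserts Thread.holdsLock(this), so any reuse has to happen while synchronized on the manager itself, as the rewritten retrievePassword demonstrates. A hypothetical subclass reusing it for a boolean validity probe:

    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;

    public abstract class ValidatingSecretManager<TokenIdent
        extends AbstractDelegationTokenIdentifier>
        extends AbstractDelegationTokenSecretManager<TokenIdent> {

      protected ValidatingSecretManager(long keyUpdateInterval,
          long tokenMaxLifetime, long tokenRenewInterval,
          long tokenRemoverScanInterval) {
        super(keyUpdateInterval, tokenMaxLifetime, tokenRenewInterval,
            tokenRemoverScanInterval);
      }

      // synchronized is required: checkToken asserts that the caller
      // holds the secret manager's monitor.
      protected synchronized boolean isTokenValid(TokenIdent id) {
        try {
          checkToken(id);  // throws InvalidToken if unknown or expired
          return true;
        } catch (InvalidToken e) {
          return false;
        }
      }
    }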
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java Thu Oct 17 05:32:42 2013
@@ -154,7 +154,7 @@ public class ReflectionUtils {
* @param stream the stream to print to
* @param title a string title for the stack trace
*/
- public static void printThreadInfo(PrintWriter stream,
+ public synchronized static void printThreadInfo(PrintWriter stream,
String title) {
final int STACK_DEPTH = 20;
boolean contention = threadBean.isThreadContentionMonitoringEnabled();
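Editor's note: adding synchronized to the static printThreadInfo serializes concurrent stack dumps on the ReflectionUtils class lock, so two threads writing to the same stream can no longer interleave their output. A small demo of the scenario being guarded (the title string is arbitrary):

    import java.io.PrintWriter;
    import org.apache.hadoop.util.ReflectionUtils;

    public class StackDumpDemo {
      public static void main(String[] args) throws InterruptedException {
        final PrintWriter out = new PrintWriter(System.out, true);
        Runnable dump = new Runnable() {
          @Override
          public void run() {
            // Both calls now take the ReflectionUtils class lock, so the
            // two dumps print one after the other, never interleaved.
            ReflectionUtils.printThreadInfo(out, "demo");
          }
        };
        Thread a = new Thread(dump);
        Thread b = new Thread(dump);
        a.start(); b.start();
        a.join(); b.join();
      }
    }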
Propchange: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/native.sln
------------------------------------------------------------------------------
svn:eol-style = native
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj Thu Oct 17 05:32:42 2013
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
Propchange: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
------------------------------------------------------------------------------
svn:eol-style = native
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto Thu Oct 17 05:32:42 2013
@@ -60,8 +60,8 @@ message RequestHeaderProto {
* ProtocolInfoProto) since they reuse the connection; in this case
* the declaringClassProtocolName field is set to the ProtocolInfoProto
*/
- required string declaringClassProtocolName = 3;
+ required string declaringClassProtocolName = 2;
/** protocol version of class declaring the called method */
- required uint64 clientProtocolVersion = 4;
+ required uint64 clientProtocolVersion = 3;
}
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto Thu Oct 17 05:32:42 2013
@@ -62,7 +62,7 @@ message RpcRequestHeaderProto { // the h
optional RpcKindProto rpcKind = 1;
optional OperationProto rpcOp = 2;
- required uint32 callId = 3; // a sequence number that is sent back in response
+ required sint32 callId = 3; // a sequence number that is sent back in response
required bytes clientId = 4; // Globally unique client ID
// clientId + callId uniquely identifies a request
// retry count, 1 means this is the first retry
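Editor's note: the uint32 to sint32 change matters on the wire because sint32 values are ZigZag-encoded, so the negative call ids Hadoop RPC reserves for connection-level messages stay compact single-byte varints and round-trip unambiguously as signed Java ints. The mapping itself, in a standalone sketch:

    public class ZigZag32Demo {
      // ZigZag mapping used by protobuf sint32:
      // 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
      static int encode(int n) {
        return (n << 1) ^ (n >> 31);
      }

      public static void main(String[] args) {
        System.out.println(encode(3));   // 6  (one varint byte)
        System.out.println(encode(-3));  // 5  (one varint byte)
      }
    }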
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml Thu Oct 17 05:32:42 2013
@@ -500,6 +500,11 @@
</description>
</property>
+<property>
+ <name>fs.swift.impl</name>
+ <value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value>
+ <description>The implementation class of the OpenStack Swift Filesystem</description>
+</property>
<property>
<name>fs.automatic.close</name>
@@ -1226,4 +1231,19 @@
</description>
</property>
+<property>
+ <name>nfs3.server.port</name>
+ <value>2049</value>
+ <description>
+ Specify the port number used by Hadoop NFS.
+ </description>
+</property>
+
+<property>
+ <name>nfs3.mountd.port</name>
+ <value>4242</value>
+ <description>
+ Specify the port number used by Hadoop mount daemon.
+ </description>
+</property>
</configuration>
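Editor's note: with fs.swift.impl now registered in core-default.xml, a swift:// URI resolves to SwiftNativeFileSystem through the standard FileSystem lookup. A hedged usage sketch (the container/service name is made up, and the hadoop-openstack module plus its Swift auth properties must be configured separately):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SwiftListDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();  // loads core-default.xml
        // "container.provider" is a placeholder Swift service name.
        FileSystem fs =
            FileSystem.get(URI.create("swift://container.provider/"), conf);
        for (FileStatus st : fs.listStatus(new Path("/"))) {
          System.out.println(st.getPath());
        }
      }
    }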
Propchange: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.vcxproj
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/winutils/winutils.sln
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
------------------------------------------------------------------------------
svn:eol-style = native
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm Thu Oct 17 05:32:42 2013
@@ -311,7 +311,7 @@ Hadoop MapReduce Next Generation - Clust
| | | Only applicable if log-aggregation is enabled. |
*-------------------------+-------------------------+------------------------+
| <<<yarn.nodemanager.aux-services>>> | | |
-| | mapreduce.shuffle | |
+| | mapreduce_shuffle | |
| | | Shuffle service that needs to be set for Map Reduce applications. |
*-------------------------+-------------------------+------------------------+
@@ -854,8 +854,10 @@ KVNO Timestamp Principal
| | The container process has the same Unix user as the NodeManager. |
*--------------------------------------+--------------------------------------+
| <<<LinuxContainerExecutor>>> | |
-| | Supported only on GNU/Linux, this executor runs the containers as the |
-| | user who submitted the application. It requires all user accounts to be |
+| | Supported only on GNU/Linux, this executor runs the containers as either the |
+| | YARN user who submitted the application (when full security is enabled) or |
+| | as a dedicated user (defaults to nobody) when full security is not enabled. |
+| | When full security is enabled, this executor requires all user accounts to be |
| | created on the cluster nodes where the containers are launched. It uses |
| | a <setuid> executable that is included in the Hadoop distribution. |
| | The NodeManager uses this executable to launch and kill containers. |
@@ -929,6 +931,8 @@ KVNO Timestamp Principal
*-------------------------+-------------------------+------------------------+
| <<<banned.users>>> | hdfs,yarn,mapred,bin | Banned users. |
*-------------------------+-------------------------+------------------------+
+| <<<allowed.system.users>>> | foo,bar | Allowed system users. |
+*-------------------------+-------------------------+------------------------+
| <<<min.user.id>>> | 1000 | Prevent other super-users. |
*-------------------------+-------------------------+------------------------+
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm Thu Oct 17 05:32:42 2013
@@ -381,17 +381,22 @@ rmr
setrep
- Usage: <<<hdfs dfs -setrep [-R] <path> >>>
+ Usage: <<<hdfs dfs -setrep [-R] [-w] <numReplicas> <path> >>>
- Changes the replication factor of a file.
+ Changes the replication factor of a file. If <path> is a directory then
+ the command recursively changes the replication factor of all files under
+ the directory tree rooted at <path>.
Options:
- * The -R option will recursively increase the replication factor of files within a directory.
+ * The -w flag requests that the command wait for the replication
+ to complete. This can potentially take a very long time.
+
+ * The -R flag is accepted for backwards compatibility. It has no effect.
Example:
- * <<<hdfs dfs -setrep -w 3 -R /user/hadoop/dir1>>>
+ * <<<hdfs dfs -setrep -w 3 /user/hadoop/dir1>>>
Exit Code:
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm Thu Oct 17 05:32:42 2013
@@ -140,7 +140,7 @@ Add the following configs to your <<<yar
<property>
<name>yarn.nodemanager.aux-services</name>
- <value>mapreduce.shuffle</value>
+ <value>mapreduce_shuffle</value>
<description>shuffle service that needs to be set for Map Reduce to run </description>
</property>
+---+
Propchange: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/core:r1526848
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1522707-1532945
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java Thu Oct 17 05:32:42 2013
@@ -21,11 +21,10 @@ package org.apache.hadoop.cli;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.cli.util.*;
-import org.apache.hadoop.cli.util.CLITestCmd;
-import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -369,6 +368,7 @@ public class CLITestHelper {
CLITestData td = null;
ArrayList<CLICommand> testCommands = null;
ArrayList<CLICommand> cleanupCommands = null;
+ boolean runOnWindows = true;
@Override
public void startDocument() throws SAXException {
@@ -399,6 +399,8 @@ public class CLITestHelper {
throws SAXException {
if (qName.equals("description")) {
td.setTestDesc(charString);
+ } else if (qName.equals("windows")) {
+ runOnWindows = Boolean.parseBoolean(charString);
} else if (qName.equals("test-commands")) {
td.setTestCommands(testCommands);
testCommands = null;
@@ -420,8 +422,11 @@ public class CLITestHelper {
} else if (qName.equals("expected-output")) {
comparatorData.setExpectedOutput(charString);
} else if (qName.equals("test")) {
- testsFromConfigFile.add(td);
+ if (!Shell.WINDOWS || runOnWindows) {
+ testsFromConfigFile.add(td);
+ }
td = null;
+ runOnWindows = true;
} else if (qName.equals("mode")) {
testMode = charString;
if (!testMode.equals(TESTMODE_NOCOMPARE) &&
Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java Thu Oct 17 05:32:42 2013
@@ -944,14 +944,20 @@ public abstract class FSMainOperationsBa
rename(src, dst, false, true, false, Rename.NONE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
- Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+ IOException ioException = unwrapException(e);
+ if (!(ioException instanceof FileNotFoundException)) {
+ throw ioException;
+ }
}
try {
rename(src, dst, false, true, false, Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
} catch (IOException e) {
- Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+ IOException ioException = unwrapException(e);
+ if (!(ioException instanceof FileNotFoundException)) {
+ throw ioException;
+ }
}
}
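Editor's note: the test change above swaps assertTrue(instanceof) for a rethrow of any unexpected IOException, so a failure surfaces the real exception and stack trace instead of a bare assertion message. The same pattern in an isolated JUnit sketch (the throwing statement stands in for the rename call):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.junit.Test;

    public class RethrowPatternTest {
      @Test
      public void expectFileNotFound() throws IOException {
        try {
          throw new FileNotFoundException("no such file");  // stand-in op
        } catch (IOException e) {
          // Rethrow anything unexpected so JUnit reports the original
          // exception rather than a generic assertion failure.
          if (!(e instanceof FileNotFoundException)) {
            throw e;
          }
        }
      }
    }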