Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/19 04:28:07 UTC
svn commit: r1399950 [16/27] - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: ./
hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apac...
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java Fri Oct 19 02:25:55 2012
@@ -47,6 +47,7 @@ public class LightWeightLinkedSet<T> ext
this.after = null;
}
+ @Override
public String toString() {
return super.toString();
}
@@ -79,6 +80,7 @@ public class LightWeightLinkedSet<T> ext
*
* @return true if the element was not present in the table, false otherwise
*/
+ @Override
protected boolean addElem(final T element) {
// validate element
if (element == null) {
@@ -118,6 +120,7 @@ public class LightWeightLinkedSet<T> ext
*
* @return the entry with the element if it exists; otherwise return null.
*/
+ @Override
protected DoubleLinkedElement<T> removeElem(final T key) {
DoubleLinkedElement<T> found = (DoubleLinkedElement<T>) (super
.removeElem(key));
@@ -162,6 +165,7 @@ public class LightWeightLinkedSet<T> ext
*
* @return first element
*/
+ @Override
public List<T> pollN(int n) {
if (n >= size) {
// if we need to remove all elements then do fast polling
@@ -182,6 +186,7 @@ public class LightWeightLinkedSet<T> ext
* link list, don't worry about hashtable - faster version of the parent
* method.
*/
+ @Override
public List<T> pollAll() {
List<T> retList = new ArrayList<T>(size);
while (head != null) {
@@ -212,6 +217,7 @@ public class LightWeightLinkedSet<T> ext
return a;
}
+ @Override
public Iterator<T> iterator() {
return new LinkedSetIterator();
}
@@ -251,6 +257,7 @@ public class LightWeightLinkedSet<T> ext
/**
* Clear the set. Resize it to the original capacity.
*/
+ @Override
public void clear() {
super.clear();
this.head = null;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java Fri Oct 19 02:25:55 2012
@@ -57,7 +57,9 @@ public class PersistentLongFile {
}
public void set(long newVal) throws IOException {
- writeFile(file, newVal);
+ if (value != newVal || !loaded) {
+ writeFile(file, newVal);
+ }
value = newVal;
loaded = true;
}
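
PersistentLongFile.set now skips the disk write when the cached value already matches. A minimal sketch of the same guarded-write pattern, using a hypothetical standalone class rather than the actual Hadoop source:

    // Sketch: persist a long to disk, but only when the value actually changes.
    import java.io.File;
    import java.io.FileWriter;
    import java.io.IOException;

    class CachedLongFile {
        private final File file;
        private long value;
        private boolean loaded = false;   // true once value mirrors the file

        CachedLongFile(File file) { this.file = file; }

        void set(long newVal) throws IOException {
            if (value != newVal || !loaded) {   // skip redundant writes
                FileWriter w = new FileWriter(file);
                try {
                    w.write(Long.toString(newVal) + "\n");
                } finally {
                    w.close();
                }
            }
            value = newVal;
            loaded = true;
        }
    }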
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java Fri Oct 19 02:25:55 2012
@@ -146,6 +146,7 @@ public class XMLUtils {
/**
* Convert a stanza to a human-readable string.
*/
+ @Override
public String toString() {
StringBuilder bld = new StringBuilder();
bld.append("{");
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Fri Oct 19 02:25:55 2012
@@ -29,6 +29,8 @@ import java.util.TreeMap;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
+import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.mortbay.util.ajax.JSON;
@@ -512,7 +515,21 @@ public class JsonUtil {
final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));
final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
- final MD5MD5CRC32FileChecksum checksum = new MD5MD5CRC32FileChecksum();
+ final DataChecksum.Type crcType =
+ MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName(algorithm);
+ final MD5MD5CRC32FileChecksum checksum;
+
+ // Recreate what DFSClient would have returned.
+ switch(crcType) {
+ case CRC32:
+ checksum = new MD5MD5CRC32GzipFileChecksum();
+ break;
+ case CRC32C:
+ checksum = new MD5MD5CRC32CastagnoliFileChecksum();
+ break;
+ default:
+ throw new IOException("Unknown algorithm: " + algorithm);
+ }
checksum.readFields(in);
//check algorithm name
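
The hunk above derives the CRC flavor from the algorithm name so that the WebHDFS client reconstructs the same checksum subclass a DFSClient would have produced. A hedged sketch of the dispatch idea; the helper below is illustrative only, while the real lookup is MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName:

    // Sketch: choose a checksum flavor from the algorithm-name string.
    // The substring checks are an assumption for illustration; note that
    // CRC32C must be tested before CRC32, since the former contains the latter.
    import java.io.IOException;

    enum CrcType { CRC32, CRC32C }

    static CrcType crcTypeFromAlgorithm(String algorithm) throws IOException {
        if (algorithm.contains("CRC32C")) {
            return CrcType.CRC32C;   // -> MD5MD5CRC32CastagnoliFileChecksum
        } else if (algorithm.contains("CRC32")) {
            return CrcType.CRC32;    // -> MD5MD5CRC32GzipFileChecksum
        }
        throw new IOException("Unknown algorithm: " + algorithm);
    }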
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Fri Oct 19 02:25:55 2012
@@ -30,7 +30,6 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collection;
-import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
@@ -88,6 +87,8 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
@@ -147,6 +148,7 @@ public class WebHdfsFileSystem extends F
private URI uri;
private Token<?> delegationToken;
private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
+ private RetryPolicy retryPolicy = null;
private Path workingDir;
{
@@ -179,6 +181,14 @@ public class WebHdfsFileSystem extends F
throw new IllegalArgumentException(e);
}
this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
+ this.retryPolicy =
+ RetryUtils.getDefaultRetryPolicy(
+ conf,
+ DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
+ DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
+ DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
+ DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
+ SafeModeException.class);
this.workingDir = getHomeDirectory();
if (UserGroupInformation.isSecurityEnabled()) {
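
The policy constructed here feeds the new Runner retry loop added further down in this file. A minimal sketch of how such a policy gates retries, with a simplified interface standing in for org.apache.hadoop.io.retry.RetryPolicy:

    // Sketch: retry an I/O action until the policy says stop.
    import java.io.IOException;
    import java.util.concurrent.Callable;

    interface SimplePolicy {
        // Returns a sleep in millis before retrying, or throws to give up
        // (a simplifying assumption; the real RetryPolicy returns an action).
        long retryDelayMillis(IOException e, int retries) throws IOException;
    }

    static <T> T runWithRetries(Callable<T> action, SimplePolicy policy)
            throws Exception {
        for (int retry = 0; ; retry++) {
            try {
                return action.call();
            } catch (IOException ioe) {
                Thread.sleep(policy.retryDelayMillis(ioe, retry));
            }
        }
    }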
@@ -276,39 +286,64 @@ public class WebHdfsFileSystem extends F
}
private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
- final HttpURLConnection conn) throws IOException {
+ final HttpURLConnection conn, boolean unwrapException) throws IOException {
final int code = conn.getResponseCode();
if (code != op.getExpectedHttpResponseCode()) {
final Map<?, ?> m;
try {
m = jsonParse(conn, true);
- } catch(IOException e) {
+ } catch(Exception e) {
throw new IOException("Unexpected HTTP response: code=" + code + " != "
+ op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
+ ", message=" + conn.getResponseMessage(), e);
}
- if (m.get(RemoteException.class.getSimpleName()) == null) {
+ if (m == null) {
+ throw new IOException("Unexpected HTTP response: code=" + code + " != "
+ + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
+ + ", message=" + conn.getResponseMessage());
+ } else if (m.get(RemoteException.class.getSimpleName()) == null) {
return m;
}
final RemoteException re = JsonUtil.toRemoteException(m);
- throw re.unwrapRemoteException(AccessControlException.class,
- InvalidToken.class,
- AuthenticationException.class,
- AuthorizationException.class,
- FileAlreadyExistsException.class,
- FileNotFoundException.class,
- ParentNotDirectoryException.class,
- UnresolvedPathException.class,
- SafeModeException.class,
- DSQuotaExceededException.class,
- NSQuotaExceededException.class);
+ throw unwrapException? toIOException(re): re;
}
return null;
}
/**
+   * Convert an exception to an IOException.
+ *
+ * For a non-IOException, wrap it with IOException.
+ * For a RemoteException, unwrap it.
+ * For an IOException which is not a RemoteException, return it.
+ */
+ private static IOException toIOException(Exception e) {
+ if (!(e instanceof IOException)) {
+ return new IOException(e);
+ }
+
+ final IOException ioe = (IOException)e;
+ if (!(ioe instanceof RemoteException)) {
+ return ioe;
+ }
+
+ final RemoteException re = (RemoteException)ioe;
+ return re.unwrapRemoteException(AccessControlException.class,
+ InvalidToken.class,
+ AuthenticationException.class,
+ AuthorizationException.class,
+ FileAlreadyExistsException.class,
+ FileNotFoundException.class,
+ ParentNotDirectoryException.class,
+ UnresolvedPathException.class,
+ SafeModeException.class,
+ DSQuotaExceededException.class,
+ NSQuotaExceededException.class);
+ }
+
+ /**
* Return a URL pointing to given path on the namenode.
*
* @param path to obtain the URL for
@@ -347,8 +382,7 @@ public class WebHdfsFileSystem extends F
+ Param.toSortedString("&", parameters);
final URL url;
if (op == PutOpParam.Op.RENEWDELEGATIONTOKEN
- || op == GetOpParam.Op.GETDELEGATIONTOKEN
- || op == GetOpParam.Op.GETDELEGATIONTOKENS) {
+ || op == GetOpParam.Op.GETDELEGATIONTOKEN) {
// Skip adding delegation token for getting or renewing delegation token,
// because these operations require kerberos authentication.
url = getNamenodeURL(path, query);
@@ -362,69 +396,15 @@ public class WebHdfsFileSystem extends F
}
private HttpURLConnection getHttpUrlConnection(URL url)
- throws IOException {
+ throws IOException, AuthenticationException {
final HttpURLConnection conn;
- try {
- if (ugi.hasKerberosCredentials()) {
- conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
- } else {
- conn = (HttpURLConnection)url.openConnection();
- }
- } catch (AuthenticationException e) {
- throw new IOException("Authentication failed, url=" + url, e);
+ if (ugi.hasKerberosCredentials()) {
+ conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
+ } else {
+ conn = (HttpURLConnection)url.openConnection();
}
return conn;
}
-
- private HttpURLConnection httpConnect(final HttpOpParam.Op op, final Path fspath,
- final Param<?,?>... parameters) throws IOException {
- final URL url = toUrl(op, fspath, parameters);
-
- //connect and get response
- HttpURLConnection conn = getHttpUrlConnection(url);
- try {
- conn.setRequestMethod(op.getType().toString());
- if (op.getDoOutput()) {
- conn = twoStepWrite(conn, op);
- conn.setRequestProperty("Content-Type", "application/octet-stream");
- }
- conn.setDoOutput(op.getDoOutput());
- conn.connect();
- return conn;
- } catch (IOException e) {
- conn.disconnect();
- throw e;
- }
- }
-
- /**
- * Two-step Create/Append:
- * Step 1) Submit a Http request with neither auto-redirect nor data.
- * Step 2) Submit another Http request with the URL from the Location header with data.
- *
- * The reason of having two-step create/append is for preventing clients to
- * send out the data before the redirect. This issue is addressed by the
- * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
- * Unfortunately, there are software library bugs (e.g. Jetty 6 http server
- * and Java 6 http client), which do not correctly implement "Expect:
- * 100-continue". The two-step create/append is a temporary workaround for
- * the software library bugs.
- */
- static HttpURLConnection twoStepWrite(HttpURLConnection conn,
- final HttpOpParam.Op op) throws IOException {
- //Step 1) Submit a Http request with neither auto-redirect nor data.
- conn.setInstanceFollowRedirects(false);
- conn.setDoOutput(false);
- conn.connect();
- validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn);
- final String redirect = conn.getHeaderField("Location");
- conn.disconnect();
-
- //Step 2) Submit another Http request with the URL from the Location header with data.
- conn = (HttpURLConnection)new URL(redirect).openConnection();
- conn.setRequestMethod(op.getType().toString());
- return conn;
- }
/**
* Run a http operation.
@@ -438,12 +418,161 @@ public class WebHdfsFileSystem extends F
*/
private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
final Param<?,?>... parameters) throws IOException {
- final HttpURLConnection conn = httpConnect(op, fspath, parameters);
- try {
- final Map<?, ?> m = validateResponse(op, conn);
- return m != null? m: jsonParse(conn, false);
- } finally {
- conn.disconnect();
+ return new Runner(op, fspath, parameters).run().json;
+ }
+
+ /**
+   * This class initializes an HTTP connection, connects to the server,
+   * obtains a response, and handles retries on failure.
+ */
+ class Runner {
+ private final HttpOpParam.Op op;
+ private final URL url;
+ private final boolean redirected;
+
+ private boolean checkRetry;
+ private HttpURLConnection conn = null;
+ private Map<?, ?> json = null;
+
+ Runner(final HttpOpParam.Op op, final URL url, final boolean redirected) {
+ this.op = op;
+ this.url = url;
+ this.redirected = redirected;
+ }
+
+ Runner(final HttpOpParam.Op op, final Path fspath,
+ final Param<?,?>... parameters) throws IOException {
+ this(op, toUrl(op, fspath, parameters), false);
+ }
+
+ Runner(final HttpOpParam.Op op, final HttpURLConnection conn) {
+ this(op, null, false);
+ this.conn = conn;
+ }
+
+ private void init() throws IOException {
+ checkRetry = !redirected;
+ try {
+ conn = getHttpUrlConnection(url);
+ } catch(AuthenticationException ae) {
+ checkRetry = false;
+ throw new IOException("Authentication failed, url=" + url, ae);
+ }
+ }
+
+ private void connect() throws IOException {
+ connect(op.getDoOutput());
+ }
+
+ private void connect(boolean doOutput) throws IOException {
+ conn.setRequestMethod(op.getType().toString());
+ conn.setDoOutput(doOutput);
+ conn.setInstanceFollowRedirects(false);
+ conn.connect();
+ }
+
+ private void disconnect() {
+ if (conn != null) {
+ conn.disconnect();
+ conn = null;
+ }
+ }
+
+ Runner run() throws IOException {
+ for(int retry = 0; ; retry++) {
+ try {
+ init();
+ if (op.getDoOutput()) {
+ twoStepWrite();
+ } else {
+ getResponse(op != GetOpParam.Op.OPEN);
+ }
+ return this;
+ } catch(IOException ioe) {
+ shouldRetry(ioe, retry);
+ }
+ }
+ }
+
+ private void shouldRetry(final IOException ioe, final int retry
+ ) throws IOException {
+ if (checkRetry) {
+ try {
+ final RetryPolicy.RetryAction a = retryPolicy.shouldRetry(
+ ioe, retry, 0, true);
+ if (a.action == RetryPolicy.RetryAction.RetryDecision.RETRY) {
+ LOG.info("Retrying connect to namenode: " + nnAddr
+ + ". Already tried " + retry + " time(s); retry policy is "
+ + retryPolicy + ", delay " + a.delayMillis + "ms.");
+ Thread.sleep(a.delayMillis);
+ return;
+ }
+ } catch(Exception e) {
+ LOG.warn("Original exception is ", ioe);
+ throw toIOException(e);
+ }
+ }
+ throw toIOException(ioe);
+ }
+
+ /**
+ * Two-step Create/Append:
+ * Step 1) Submit a Http request with neither auto-redirect nor data.
+ * Step 2) Submit another Http request with the URL from the Location header with data.
+ *
+     * The reason for the two-step create/append is to prevent clients from
+     * sending out the data before the redirect. This issue is addressed by the
+ * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
+ * Unfortunately, there are software library bugs (e.g. Jetty 6 http server
+ * and Java 6 http client), which do not correctly implement "Expect:
+ * 100-continue". The two-step create/append is a temporary workaround for
+ * the software library bugs.
+ */
+ HttpURLConnection twoStepWrite() throws IOException {
+ //Step 1) Submit a Http request with neither auto-redirect nor data.
+ connect(false);
+ validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn, false);
+ final String redirect = conn.getHeaderField("Location");
+ disconnect();
+ checkRetry = false;
+
+ //Step 2) Submit another Http request with the URL from the Location header with data.
+ conn = (HttpURLConnection)new URL(redirect).openConnection();
+ conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
+ conn.setChunkedStreamingMode(32 << 10); //32kB-chunk
+ connect();
+ return conn;
+ }
+
+ FSDataOutputStream write(final int bufferSize) throws IOException {
+ return WebHdfsFileSystem.this.write(op, conn, bufferSize);
+ }
+
+ void getResponse(boolean getJsonAndDisconnect) throws IOException {
+ try {
+ connect();
+ final int code = conn.getResponseCode();
+ if (!redirected && op.getRedirect()
+ && code != op.getExpectedHttpResponseCode()) {
+ final String redirect = conn.getHeaderField("Location");
+ json = validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op),
+ conn, false);
+ disconnect();
+
+ checkRetry = false;
+ conn = (HttpURLConnection)new URL(redirect).openConnection();
+ connect();
+ }
+
+ json = validateResponse(op, conn, false);
+ if (json == null && getJsonAndDisconnect) {
+ json = jsonParse(conn, false);
+ }
+ } finally {
+ if (getJsonAndDisconnect) {
+ disconnect();
+ }
+ }
}
}
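
The Runner above folds the old httpConnect/twoStepWrite pair into a retriable state machine. Stripped of the retry bookkeeping, the two-step exchange reduces to two plain HttpURLConnection round trips; a sketch under the javadoc's assumptions (the "PUT" method and chunk size mirror the diff, everything else is illustrative):

    // Sketch: step 1 fetches the redirect without a body; step 2 sends data.
    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;

    static HttpURLConnection twoStepPut(URL namenodeUrl) throws IOException {
        HttpURLConnection conn = (HttpURLConnection) namenodeUrl.openConnection();
        conn.setRequestMethod("PUT");
        conn.setInstanceFollowRedirects(false);   // don't auto-follow the 307
        conn.setDoOutput(false);                  // no data on the first hop
        conn.connect();
        final String redirect = conn.getHeaderField("Location");
        conn.disconnect();

        conn = (HttpURLConnection) new URL(redirect).openConnection();
        conn.setRequestMethod("PUT");
        conn.setDoOutput(true);                   // the data goes to the datanode
        conn.setChunkedStreamingMode(32 << 10);   // 32kB chunks, as in the patch
        return conn;
    }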
@@ -577,7 +706,7 @@ public class WebHdfsFileSystem extends F
super.close();
} finally {
try {
- validateResponse(op, conn);
+ validateResponse(op, conn, true);
} finally {
conn.disconnect();
}
@@ -593,13 +722,14 @@ public class WebHdfsFileSystem extends F
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.CREATE;
- final HttpURLConnection conn = httpConnect(op, f,
+ return new Runner(op, f,
new PermissionParam(applyUMask(permission)),
new OverwriteParam(overwrite),
new BufferSizeParam(bufferSize),
new ReplicationParam(replication),
- new BlockSizeParam(blockSize));
- return write(op, conn, bufferSize);
+ new BlockSizeParam(blockSize))
+ .run()
+ .write(bufferSize);
}
@Override
@@ -608,9 +738,9 @@ public class WebHdfsFileSystem extends F
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PostOpParam.Op.APPEND;
- final HttpURLConnection conn = httpConnect(op, f,
- new BufferSizeParam(bufferSize));
- return write(op, conn, bufferSize);
+ return new Runner(op, f, new BufferSizeParam(bufferSize))
+ .run()
+ .write(bufferSize);
}
@SuppressWarnings("deprecation")
@@ -637,26 +767,17 @@ public class WebHdfsFileSystem extends F
}
class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
- /** The url with offset parameter */
- private URL offsetUrl;
-
OffsetUrlOpener(final URL url) {
super(url);
}
- /** Open connection with offset url. */
+    /** Set up the offset url and connect. */
@Override
- protected HttpURLConnection openConnection() throws IOException {
- return getHttpUrlConnection(offsetUrl);
- }
-
- /** Setup offset url before open connection. */
- @Override
- protected HttpURLConnection openConnection(final long offset) throws IOException {
- offsetUrl = offset == 0L? url: new URL(url + "&" + new OffsetParam(offset));
- final HttpURLConnection conn = openConnection();
- conn.setRequestMethod("GET");
- return conn;
+ protected HttpURLConnection connect(final long offset,
+ final boolean resolved) throws IOException {
+ final URL offsetUrl = offset == 0L? url
+ : new URL(url + "&" + new OffsetParam(offset));
+ return new Runner(GetOpParam.Op.OPEN, offsetUrl, resolved).run().conn;
}
}
@@ -697,12 +818,6 @@ public class WebHdfsFileSystem extends F
OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r) {
super(o, r);
}
-
- @Override
- protected void checkResponseCode(final HttpURLConnection connection
- ) throws IOException {
- validateResponse(GetOpParam.Op.OPEN, connection);
- }
/** Remove offset parameter before returning the resolved url. */
@Override
@@ -730,10 +845,9 @@ public class WebHdfsFileSystem extends F
return statuses;
}
- @SuppressWarnings("deprecation")
@Override
- public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
- ) throws IOException {
+ public Token<DelegationTokenIdentifier> getDelegationToken(
+ final String renewer) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
@@ -742,18 +856,6 @@ public class WebHdfsFileSystem extends F
}
@Override
- public List<Token<?>> getDelegationTokens(final String renewer
- ) throws IOException {
- final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKENS;
- final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
- final List<Token<?>> tokens = JsonUtil.toTokenList(m);
- for(Token<?> t : tokens) {
- SecurityUtil.setTokenService(t, nnAddr);
- }
- return tokens;
- }
-
- @Override
public Token<?> getRenewToken() {
return delegationToken;
}
@@ -835,8 +937,7 @@ public class WebHdfsFileSystem extends F
}
private static WebHdfsFileSystem getWebHdfs(
- final Token<?> token, final Configuration conf
- ) throws IOException, InterruptedException, URISyntaxException {
+ final Token<?> token, final Configuration conf) throws IOException {
final InetSocketAddress nnAddr = SecurityUtil.getTokenServiceAddr(token);
final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
@@ -850,12 +951,7 @@ public class WebHdfsFileSystem extends F
// update the kerberos credentials, if they are coming from a keytab
ugi.reloginFromKeytab();
- try {
- WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
- return webhdfs.renewDelegationToken(token);
- } catch (URISyntaxException e) {
- throw new IOException(e);
- }
+ return getWebHdfs(token, conf).renewDelegationToken(token);
}
@Override
@@ -865,12 +961,7 @@ public class WebHdfsFileSystem extends F
// update the kerberos credentials, if they are coming from a keytab
ugi.checkTGTAndReloginFromKeytab();
- try {
- final WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
- webhdfs.cancelDelegationToken(token);
- } catch (URISyntaxException e) {
- throw new IOException(e);
- }
+ getWebHdfs(token, conf).cancelDelegationToken(token);
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java Fri Oct 19 02:25:55 2012
@@ -44,6 +44,11 @@ public class DeleteOpParam extends HttpO
}
@Override
+ public boolean getRedirect() {
+ return false;
+ }
+
+ @Override
public int getExpectedHttpResponseCode() {
return expectedHttpResponseCode;
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Fri Oct 19 02:25:55 2012
@@ -23,25 +23,26 @@ import java.net.HttpURLConnection;
public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
/** Get operations. */
public static enum Op implements HttpOpParam.Op {
- OPEN(HttpURLConnection.HTTP_OK),
+ OPEN(true, HttpURLConnection.HTTP_OK),
- GETFILESTATUS(HttpURLConnection.HTTP_OK),
- LISTSTATUS(HttpURLConnection.HTTP_OK),
- GETCONTENTSUMMARY(HttpURLConnection.HTTP_OK),
- GETFILECHECKSUM(HttpURLConnection.HTTP_OK),
-
- GETHOMEDIRECTORY(HttpURLConnection.HTTP_OK),
- GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
- GETDELEGATIONTOKENS(HttpURLConnection.HTTP_OK),
+ GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
+ LISTSTATUS(false, HttpURLConnection.HTTP_OK),
+ GETCONTENTSUMMARY(false, HttpURLConnection.HTTP_OK),
+ GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK),
+
+ GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
+ GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
/** GET_BLOCK_LOCATIONS is a private unstable op. */
- GET_BLOCK_LOCATIONS(HttpURLConnection.HTTP_OK),
+ GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
- NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+ NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+ final boolean redirect;
final int expectedHttpResponseCode;
- Op(final int expectedHttpResponseCode) {
+ Op(final boolean redirect, final int expectedHttpResponseCode) {
+ this.redirect = redirect;
this.expectedHttpResponseCode = expectedHttpResponseCode;
}
@@ -56,6 +57,11 @@ public class GetOpParam extends HttpOpPa
}
@Override
+ public boolean getRedirect() {
+ return redirect;
+ }
+
+ @Override
public int getExpectedHttpResponseCode() {
return expectedHttpResponseCode;
}
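
Threading the new redirect flag through the enum constructor keeps all per-operation metadata in one place. The pattern in miniature, with made-up constants rather than the real op set:

    // Sketch: enum constants carrying per-op metadata (flag + expected code).
    import java.net.HttpURLConnection;

    enum SketchOp {
        OPEN(true, HttpURLConnection.HTTP_OK),
        GETFILESTATUS(false, HttpURLConnection.HTTP_OK);

        final boolean redirect;
        final int expectedHttpResponseCode;

        SketchOp(boolean redirect, int expectedHttpResponseCode) {
            this.redirect = redirect;
            this.expectedHttpResponseCode = expectedHttpResponseCode;
        }
    }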
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hdfs.web.resources;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
import javax.ws.rs.core.Response;
@@ -42,6 +46,9 @@ public abstract class HttpOpParam<E exte
/** @return true if the operation will do output. */
public boolean getDoOutput();
+ /** @return true if the operation will be redirected. */
+ public boolean getRedirect();
+
/** @return the expected http response code. */
public int getExpectedHttpResponseCode();
@@ -51,15 +58,25 @@ public abstract class HttpOpParam<E exte
/** Expects HTTP response 307 "Temporary Redirect". */
public static class TemporaryRedirectOp implements Op {
- static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(PutOpParam.Op.CREATE);
- static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(PostOpParam.Op.APPEND);
+ static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(
+ PutOpParam.Op.CREATE);
+ static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(
+ PostOpParam.Op.APPEND);
+ static final TemporaryRedirectOp OPEN = new TemporaryRedirectOp(
+ GetOpParam.Op.OPEN);
+ static final TemporaryRedirectOp GETFILECHECKSUM = new TemporaryRedirectOp(
+ GetOpParam.Op.GETFILECHECKSUM);
+ static final List<TemporaryRedirectOp> values
+ = Collections.unmodifiableList(Arrays.asList(
+ new TemporaryRedirectOp[]{CREATE, APPEND, OPEN, GETFILECHECKSUM}));
+
/** Get an object for the given op. */
public static TemporaryRedirectOp valueOf(final Op op) {
- if (op == CREATE.op) {
- return CREATE;
- } else if (op == APPEND.op) {
- return APPEND;
+ for(TemporaryRedirectOp t : values) {
+ if (op == t.op) {
+ return t;
+ }
}
throw new IllegalArgumentException(op + " not found.");
}
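
With OPEN and GETFILECHECKSUM added to the list, any redirecting op can now be resolved to its 307 wrapper uniformly. A hypothetical caller, not part of this patch:

    // Hypothetical usage: look up the "Temporary Redirect" variant of an op.
    HttpOpParam.Op redirected =
        HttpOpParam.TemporaryRedirectOp.valueOf(GetOpParam.Op.OPEN);
    // redirected.getExpectedHttpResponseCode() now reports 307 instead of 200.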
@@ -80,6 +97,11 @@ public abstract class HttpOpParam<E exte
return op.getDoOutput();
}
+ @Override
+ public boolean getRedirect() {
+ return false;
+ }
+
/** Override the original expected response with "Temporary Redirect". */
@Override
public int getExpectedHttpResponseCode() {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java Fri Oct 19 02:25:55 2012
@@ -44,11 +44,17 @@ public class PostOpParam extends HttpOpP
}
@Override
+ public boolean getRedirect() {
+ return true;
+ }
+
+ @Override
public int getExpectedHttpResponseCode() {
return expectedHttpResponseCode;
}
/** @return a URI query string. */
+ @Override
public String toQueryString() {
return NAME + "=" + this;
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java Fri Oct 19 02:25:55 2012
@@ -39,11 +39,11 @@ public class PutOpParam extends HttpOpPa
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
- final boolean doOutput;
+ final boolean doOutputAndRedirect;
final int expectedHttpResponseCode;
- Op(final boolean doOutput, final int expectedHttpResponseCode) {
- this.doOutput = doOutput;
+ Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) {
+ this.doOutputAndRedirect = doOutputAndRedirect;
this.expectedHttpResponseCode = expectedHttpResponseCode;
}
@@ -54,7 +54,12 @@ public class PutOpParam extends HttpOpPa
@Override
public boolean getDoOutput() {
- return doOutput;
+ return doOutputAndRedirect;
+ }
+
+ @Override
+ public boolean getRedirect() {
+ return doOutputAndRedirect;
}
@Override
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1360400-1399945
Merged /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1363593-1396941
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto Fri Oct 19 02:25:55 2012
@@ -90,6 +90,26 @@ message GetBlockLocalPathInfoResponsePro
}
/**
+ * blocks - list of ExtendedBlocks on which we are querying additional info
+ * tokens - list of access tokens corresponding to list of ExtendedBlocks
+ */
+message GetHdfsBlockLocationsRequestProto {
+ repeated ExtendedBlockProto blocks = 1;
+ repeated BlockTokenIdentifierProto tokens = 2;
+}
+
+/**
+ * volumeIds - id of each volume, potentially multiple bytes
+ * volumeIndexes - for each block, an index into volumeIds specifying the volume
+ * on which it is located. If block is not present on any volume,
+ *                 on which it is located. If a block is not present on any
+ *                 volume, the index is set to MAX_INT.
+message GetHdfsBlockLocationsResponseProto {
+ repeated bytes volumeIds = 1;
+ repeated uint32 volumeIndexes = 2;
+}
+
+/**
* Protocol used from client to the Datanode.
* See the request and response for details of rpc call.
*/
@@ -119,4 +139,11 @@ service ClientDatanodeProtocolService {
*/
rpc getBlockLocalPathInfo(GetBlockLocalPathInfoRequestProto)
returns(GetBlockLocalPathInfoResponseProto);
+
+ /**
+ * Retrieve additional HDFS-specific metadata about a set of blocks stored
+ * on the local file system.
+ */
+ rpc getHdfsBlockLocations(GetHdfsBlockLocationsRequestProto)
+ returns(GetHdfsBlockLocationsResponseProto);
}
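
The response above is a parallel-array encoding: entry i of volumeIndexes selects the volumeIds entry for block i. A hedged decoding sketch, assuming the standard protobuf-java generated accessors for the repeated fields:

    // Sketch: map each block to its volume id bytes; MAX_INT means "not found".
    import java.util.ArrayList;
    import java.util.List;

    static List<byte[]> volumesPerBlock(GetHdfsBlockLocationsResponseProto resp) {
        List<byte[]> out = new ArrayList<byte[]>();
        for (int idx : resp.getVolumeIndexesList()) {
            out.add(idx == Integer.MAX_VALUE
                ? null                                   // block on no volume
                : resp.getVolumeIds(idx).toByteArray());
        }
        return out;
    }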
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Fri Oct 19 02:25:55 2012
@@ -276,6 +276,13 @@ message SaveNamespaceRequestProto { // n
message SaveNamespaceResponseProto { // void response
}
+message RollEditsRequestProto { // no parameters
+}
+
+message RollEditsResponseProto { // response
+ required uint64 newSegmentTxId = 1;
+}
+
message RestoreFailedStorageRequestProto {
required string arg = 1;
}
@@ -296,19 +303,6 @@ message FinalizeUpgradeRequestProto { //
message FinalizeUpgradeResponseProto { // void response
}
-enum UpgradeActionProto {
- GET_STATUS = 1;
- DETAILED_STATUS = 2;
- FORCE_PROCEED = 3;
-}
-
-message DistributedUpgradeProgressRequestProto {
- required UpgradeActionProto action = 1;
-}
-message DistributedUpgradeProgressResponseProto {
- optional UpgradeStatusReportProto report = 1;
-}
-
message ListCorruptFileBlocksRequestProto {
required string path = 1;
optional string cookie = 2;
@@ -441,6 +435,12 @@ message SetBalancerBandwidthRequestProto
message SetBalancerBandwidthResponseProto { // void response
}
+message GetDataEncryptionKeyRequestProto { // no parameters
+}
+
+message GetDataEncryptionKeyResponseProto {
+ required DataEncryptionKeyProto dataEncryptionKey = 1;
+}
service ClientNamenodeProtocol {
rpc getBlockLocations(GetBlockLocationsRequestProto)
@@ -479,13 +479,13 @@ service ClientNamenodeProtocol {
returns(SetSafeModeResponseProto);
rpc saveNamespace(SaveNamespaceRequestProto)
returns(SaveNamespaceResponseProto);
+ rpc rollEdits(RollEditsRequestProto)
+ returns(RollEditsResponseProto);
rpc restoreFailedStorage(RestoreFailedStorageRequestProto)
returns(RestoreFailedStorageResponseProto);
rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
returns(FinalizeUpgradeResponseProto);
- rpc distributedUpgradeProgress(DistributedUpgradeProgressRequestProto)
- returns(DistributedUpgradeProgressResponseProto);
rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
returns(ListCorruptFileBlocksResponseProto);
rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
@@ -511,6 +511,8 @@ service ClientNamenodeProtocol {
returns(RenewDelegationTokenResponseProto);
rpc cancelDelegationToken(CancelDelegationTokenRequestProto)
returns(CancelDelegationTokenResponseProto);
- rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
+ rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
returns(SetBalancerBandwidthResponseProto);
+ rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
+ returns(GetDataEncryptionKeyResponseProto);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Fri Oct 19 02:25:55 2012
@@ -60,7 +60,7 @@ message DatanodeCommandProto {
FinalizeCommand = 3;
KeyUpdateCommand = 4;
RegisterCommand = 5;
- UpgradeCommand = 6;
+ UnusedUpgradeCommand = 6;
NullDatanodeCommand = 7;
}
@@ -74,7 +74,6 @@ message DatanodeCommandProto {
optional FinalizeCommandProto finalizeCmd = 5;
optional KeyUpdateCommandProto keyUpdateCmd = 6;
optional RegisterCommandProto registerCmd = 7;
- optional UpgradeCommandProto upgradeCmd = 8;
}
/**
@@ -132,20 +131,6 @@ message RegisterCommandProto {
}
/**
- * Generic distributed upgrade Command
- */
-message UpgradeCommandProto {
- enum Action {
- UNKNOWN = 0; // Unknown action
- REPORT_STATUS = 100; // Report upgrade status
- START_UPGRADE = 101; // Start upgrade
- }
- required Action action = 1; // Upgrade action
- required uint32 version = 2; // Version of the upgrade
- required uint32 upgradeStatus = 3; // % completed in range 0 & 100
-}
-
-/**
* registration - Information of the datanode registering with the namenode
*/
message RegisterDatanodeRequestProto {
@@ -303,20 +288,6 @@ message ErrorReportResponseProto {
}
/**
- * cmd - Upgrade command sent from datanode to namenode
- */
-message ProcessUpgradeRequestProto {
- optional UpgradeCommandProto cmd = 1;
-}
-
-/**
- * cmd - Upgrade command sent from namenode to datanode
- */
-message ProcessUpgradeResponseProto {
- optional UpgradeCommandProto cmd = 1;
-}
-
-/**
* blocks - list of blocks that are reported as corrupt
*/
message ReportBadBlocksRequestProto {
@@ -389,12 +360,6 @@ service DatanodeProtocolService {
rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);
/**
- * Generic way to send commands from datanode to namenode during
- * distributed upgrade process.
- */
- rpc processUpgrade(ProcessUpgradeRequestProto) returns(ProcessUpgradeResponseProto);
-
- /**
* Report corrupt blocks at the specified location
*/
rpc reportBadBlocks(ReportBadBlocksRequestProto) returns(ReportBadBlocksResponseProto);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Fri Oct 19 02:25:55 2012
@@ -25,6 +25,17 @@ option java_generate_equals_and_hash = t
import "hdfs.proto";
+message DataTransferEncryptorMessageProto {
+ enum DataTransferEncryptorStatus {
+ SUCCESS = 0;
+ ERROR_UNKNOWN_KEY = 1;
+ ERROR = 2;
+ }
+ required DataTransferEncryptorStatus status = 1;
+ optional bytes payload = 2;
+ optional string message = 3;
+}
+
message BaseHeaderProto {
required ExtendedBlockProto block = 1;
optional BlockTokenIdentifierProto token = 2;
@@ -43,12 +54,7 @@ message OpReadBlockProto {
message ChecksumProto {
- enum ChecksumType {
- NULL = 0;
- CRC32 = 1;
- CRC32C = 2;
- }
- required ChecksumType type = 1;
+ required ChecksumTypeProto type = 1;
required uint32 bytesPerChecksum = 2;
}
@@ -174,4 +180,5 @@ message OpBlockChecksumResponseProto {
required uint32 bytesPerCrc = 1;
required uint64 crcPerBlock = 2;
required bytes md5 = 3;
+ optional ChecksumTypeProto crcType = 4 [default = CRC32];
}
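
Because crcType is optional with [default = CRC32], responses from older datanodes that omit the field still decode to a usable value. A hedged reader-side sketch, again assuming the standard protobuf-java generated accessors:

    // Sketch: old servers never set crcType; the default keeps them compatible.
    static ChecksumTypeProto crcTypeOf(OpBlockChecksumResponseProto resp) {
        // hasCrcType() is false for pre-patch responses, yet getCrcType()
        // still returns CRC32 thanks to the declared default.
        return resp.getCrcType();
    }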
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Fri Oct 19 02:25:55 2012
@@ -126,7 +126,16 @@ message LocatedBlockProto {
// their locations are not part of this object
required BlockTokenIdentifierProto blockToken = 5;
- }
+}
+
+message DataEncryptionKeyProto {
+ required uint32 keyId = 1;
+ required string blockPoolId = 2;
+ required bytes nonce = 3;
+ required bytes encryptionKey = 4;
+ required uint64 expiryDate = 5;
+ optional string encryptionAlgorithm = 6;
+}
/**
@@ -170,6 +179,15 @@ message HdfsFileStatusProto {
}
/**
+ * Checksum algorithms/types used in HDFS
+ */
+enum ChecksumTypeProto {
+ NULL = 0;
+ CRC32 = 1;
+ CRC32C = 2;
+}
+
+/**
* HDFS Server Defaults
*/
message FsServerDefaultsProto {
@@ -178,6 +196,9 @@ message FsServerDefaultsProto {
required uint32 writePacketSize = 3;
required uint32 replication = 4; // Actually a short - only 16 bits used
required uint32 fileBufferSize = 5;
+ optional bool encryptDataTransfer = 6 [default = false];
+ optional uint64 trashInterval = 7 [default = 0];
+ optional ChecksumTypeProto checksumType = 8 [default = CRC32];
}
@@ -190,15 +211,6 @@ message DirectoryListingProto {
}
/**
- * Status of current cluster upgrade from one version to another
- */
-message UpgradeStatusReportProto {
- required uint32 version = 1;;
- required uint32 upgradeStatus = 2; // % completed in range 0 & 100
- required bool finalized = 3;
-}
-
-/**
* Common node information shared by all the nodes in the cluster
*/
message StorageInfoProto {
@@ -290,6 +302,7 @@ message BlocksWithLocationsProto {
message RemoteEditLogProto {
required uint64 startTxId = 1; // Starting available edit log transaction
required uint64 endTxId = 2; // Ending available edit log transaction
+ optional bool isInProgress = 3 [default = false];
}
/**
@@ -304,7 +317,7 @@ message RemoteEditLogManifestProto {
*/
message NamespaceInfoProto {
required string buildVersion = 1; // Software revision version (e.g. an svn or git revision)
- required uint32 distUpgradeVersion = 2; // Distributed upgrade version
+ required uint32 unused = 2; // Retained for backward compatibility
required string blockPoolID = 3; // block pool used by the namespace
required StorageInfoProto storageInfo = 4;// Node information
required string softwareVersion = 5; // Software version number (e.g. 2.0.0)
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Fri Oct 19 02:25:55 2012
@@ -41,11 +41,34 @@
</property>
<property>
+ <name>dfs.namenode.rpc-address</name>
+ <value></value>
+ <description>
+    RPC address that handles all client requests. In the case of HA/Federation where multiple namenodes exist,
+ the name service id is added to the name e.g. dfs.namenode.rpc-address.ns1
+ dfs.namenode.rpc-address.EXAMPLENAMESERVICE
+ The value of this property will take the form of hdfs://nn-host1:rpc-port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.servicerpc-address</name>
+ <value></value>
+ <description>
+    RPC address for HDFS Services communication. BackupNode, Datanodes and all other services should
+    connect to this address if it is configured. In the case of HA/Federation where multiple namenodes exist,
+    the name service id is added to the name e.g. dfs.namenode.servicerpc-address.ns1
+    dfs.namenode.servicerpc-address.EXAMPLENAMESERVICE
+ The value of this property will take the form of hdfs://nn-host1:rpc-port.
+ If the value of this property is unset the value of dfs.namenode.rpc-address will be used as the default.
+ </description>
+</property>
+
+<property>
<name>dfs.namenode.secondary.http-address</name>
<value>0.0.0.0:50090</value>
<description>
The secondary namenode http server address and port.
- If the port is 0 then the server will start on a free port.
</description>
</property>
@@ -53,8 +76,7 @@
<name>dfs.datanode.address</name>
<value>0.0.0.0:50010</value>
<description>
- The address where the datanode server will listen to.
- If the port is 0 then the server will start on a free port.
+ The datanode server address and port for data transfer.
</description>
</property>
@@ -63,7 +85,6 @@
<value>0.0.0.0:50075</value>
<description>
The datanode http server address and port.
- If the port is 0 then the server will start on a free port.
</description>
</property>
@@ -72,13 +93,12 @@
<value>0.0.0.0:50020</value>
<description>
The datanode ipc server address and port.
- If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.datanode.handler.count</name>
- <value>3</value>
+ <value>10</value>
<description>The number of server threads for the datanode.</description>
</property>
@@ -87,7 +107,6 @@
<value>0.0.0.0:50070</value>
<description>
The address and the base port on which the dfs namenode web ui will listen.
- If the port is 0 then the server will start on a free port.
</description>
</property>
@@ -241,6 +260,11 @@
</property>
<property>
+ <name>dfs.namenode.edits.journal-plugin.qjournal</name>
+ <value>org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager</value>
+</property>
+
+<property>
<name>dfs.permissions.enabled</name>
<value>true</value>
<description>
@@ -334,7 +358,7 @@
<property>
<name>dfs.blocksize</name>
- <value>67108864</value>
+ <value>134217728</value>
<description>
The default block size for new files, in bytes.
You can use the following suffix (case insensitive):
@@ -636,6 +660,20 @@
edits in order to start again.
Typically each edit is on the order of a few hundred bytes, so the default
of 1 million edits should be on the order of hundreds of MBs or low GBs.
+
+      NOTE: Fewer extra edits may be retained than the value specified for this setting
+ if doing so would mean that more segments would be retained than the number
+ configured by dfs.namenode.max.extra.edits.segments.retained.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.max.extra.edits.segments.retained</name>
+ <value>10000</value>
+ <description>The maximum number of extra edit log segments which should be retained
+ beyond what is minimally necessary for a NN restart. When used in conjunction with
+ dfs.namenode.num.extra.edits.retained, this configuration property serves to cap
+ the number of extra edits files to a reasonable value.
</description>
</property>
@@ -715,6 +753,80 @@
</property>
<property>
+ <name>dfs.datanode.readahead.bytes</name>
+ <value>4193404</value>
+ <description>
+ While reading block files, if the Hadoop native libraries are available,
+ the datanode can use the posix_fadvise system call to explicitly
+ page data into the operating system buffer cache ahead of the current
+ reader's position. This can improve performance especially when
+ disks are highly contended.
+
+ This configuration specifies the number of bytes ahead of the current
+ read position which the datanode will attempt to read ahead. This
+ feature may be disabled by configuring this property to 0.
+
+ If the native libraries are not available, this configuration has no
+ effect.
+ </description>
+</property>
+
+<property>
+ <name>dfs.datanode.drop.cache.behind.reads</name>
+ <value>false</value>
+ <description>
+ In some workloads, the data read from HDFS is known to be significantly
+ large enough that it is unlikely to be useful to cache it in the
+ operating system buffer cache. In this case, the DataNode may be
+ configured to automatically purge all data from the buffer cache
+ after it is delivered to the client. This behavior is automatically
+ disabled for workloads which read only short sections of a block
+ (e.g HBase random-IO workloads).
+
+ This may improve performance for some workloads by freeing buffer
+    cache space usage for more cacheable data.
+
+ If the Hadoop native libraries are not available, this configuration
+ has no effect.
+ </description>
+</property>
+
+<property>
+ <name>dfs.datanode.drop.cache.behind.writes</name>
+ <value>false</value>
+ <description>
+ In some workloads, the data written to HDFS is known to be significantly
+ large enough that it is unlikely to be useful to cache it in the
+ operating system buffer cache. In this case, the DataNode may be
+ configured to automatically purge all data from the buffer cache
+ after it is written to disk.
+
+ This may improve performance for some workloads by freeing buffer
+    cache space usage for more cacheable data.
+
+ If the Hadoop native libraries are not available, this configuration
+ has no effect.
+ </description>
+</property>
+
+<property>
+ <name>dfs.datanode.sync.behind.writes</name>
+ <value>false</value>
+ <description>
+ If this configuration is enabled, the datanode will instruct the
+ operating system to enqueue all written data to the disk immediately
+ after it is written. This differs from the usual OS policy which
+ may wait for up to 30 seconds before triggering writeback.
+
+ This may improve performance for some workloads by smoothing the
+ IO profile for data written to disk.
+
+ If the Hadoop native libraries are not available, this configuration
+ has no effect.
+ </description>
+</property>
+
+<property>
<name>dfs.client.failover.max.attempts</name>
<value>15</value>
<description>
@@ -847,6 +959,22 @@
</property>
<property>
+ <name>dfs.client.use.datanode.hostname</name>
+ <value>false</value>
+ <description>Whether clients should use datanode hostnames when
+ connecting to datanodes.
+ </description>
+</property>
+
+<property>
+ <name>dfs.datanode.use.datanode.hostname</name>
+ <value>false</value>
+ <description>Whether datanodes should use datanode hostnames when
+ connecting to other datanodes for data transfer.
+ </description>
+</property>
+
+<property>
<name>dfs.client.local.interfaces</name>
<value></value>
<description>A comma separated list of network interface names to use
@@ -871,6 +999,60 @@
</property>
<property>
+ <name>dfs.namenode.check.stale.datanode</name>
+ <value>false</value>
+ <description>
+ Indicate whether or not to check "stale" datanodes whose
+ heartbeat messages have not been received by the namenode
+ for more than a specified time interval. If this configuration
+    parameter is set to true, the system will keep track
+ of the number of stale datanodes. The stale datanodes will be
+ moved to the end of the node list returned for reading. See
+ dfs.namenode.avoid.write.stale.datanode for details on how this
+ affects writes.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.avoid.write.stale.datanode</name>
+ <value>false</value>
+ <description>
+ Indicate whether or not to avoid writing to "stale" datanodes whose
+ heartbeat messages have not been received by the namenode
+ for more than a specified time interval. If this configuration
+    parameter and dfs.namenode.check.stale.datanode are both set to true,
+ the writing will avoid using stale datanodes unless a high number
+ of datanodes are marked as stale. See
+ dfs.namenode.write.stale.datanode.ratio for details.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.stale.datanode.interval</name>
+ <value>30000</value>
+ <description>
+ Default time interval for marking a datanode as "stale", i.e., if
+    the namenode has not received a heartbeat message from a datanode for
+    more than this time interval, the datanode will be marked and treated
+    as "stale" by default. The stale interval cannot be too small, since
+    otherwise it may cause overly frequent changes of stale state.
+    We thus set a minimum stale interval value (the default is 3 times
+    the heartbeat interval) and guarantee that the stale interval cannot be less
+ than the minimum value.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.write.stale.datanode.ratio</name>
+ <value>0.5f</value>
+ <description>
+    When the ratio of stale datanodes to total datanodes is greater than
+    this ratio, stop avoiding writing to stale nodes so as to prevent
+    causing hotspots.
+ </description>
+</property>
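
The four stale-datanode keys above work as a unit: detection must be enabled before write avoidance and the ratio have any effect. A hedged sketch of setting them programmatically, using the key strings exactly as declared:

    // Sketch: turn on stale-node detection plus write avoidance.
    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.setBoolean("dfs.namenode.check.stale.datanode", true);
    conf.setBoolean("dfs.namenode.avoid.write.stale.datanode", true);
    conf.setLong("dfs.namenode.stale.datanode.interval", 30000L);    // 30 seconds
    conf.setFloat("dfs.namenode.write.stale.datanode.ratio", 0.5f);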
+
+<property>
<name>dfs.namenode.invalidate.work.pct.per.iteration</name>
<value>0.32f</value>
<description>
@@ -909,4 +1091,97 @@
</description>
</property>
+<property>
+ <name>hadoop.fuse.connection.timeout</name>
+ <value>300</value>
+ <description>
+ The minimum number of seconds that we'll cache libhdfs connection objects
+ in fuse_dfs. Lower values will result in lower memory consumption; higher
+ values may speed up access by avoiding the overhead of creating new
+ connection objects.
+ </description>
+</property>
+
+<property>
+ <name>hadoop.fuse.timer.period</name>
+ <value>5</value>
+ <description>
+ The number of seconds between cache expiry checks in fuse_dfs. Lower values
+ will result in fuse_dfs noticing changes to Kerberos ticket caches more
+ quickly.
+ </description>
+</property>
+
+<property>
+ <name>dfs.metrics.percentiles.intervals</name>
+ <value></value>
+ <description>
+ Comma-delimited set of integers denoting the desired rollover intervals
+ (in seconds) for percentile latency metrics on the Namenode and Datanode.
+ By default, percentile latency metrics are disabled.
+ </description>
+</property>
+
+<property>
+ <name>dfs.encrypt.data.transfer</name>
+ <value>false</value>
+ <description>
+ Whether or not actual block data that is read/written from/to HDFS should
+ be encrypted on the wire. This only needs to be set on the NN and DNs,
+ clients will deduce this automatically.
+ </description>
+</property>
+
+<property>
+ <name>dfs.encrypt.data.transfer.algorithm</name>
+ <value></value>
+ <description>
+ This value may be set to either "3des" or "rc4". If nothing is set,
+ the configured JCE default on the system is used (usually 3DES). It
+ is widely believed that 3DES is more cryptographically secure, but
+ RC4 is substantially faster.
+ </description>
+</property>
+
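[A minimal sketch of enabling wire encryption programmatically, using
only the two keys above; in practice they belong in hdfs-site.xml on the
NN and DNs, and clients need no change.]

  import org.apache.hadoop.conf.Configuration;

  public class EncryptDataTransferSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.encrypt.data.transfer", true);
      // Optional: pin the cipher instead of taking the JCE default
      // (usually 3DES). "rc4" trades some security for speed.
      conf.set("dfs.encrypt.data.transfer.algorithm", "rc4");
      System.out.println(conf.get("dfs.encrypt.data.transfer.algorithm"));
    }
  }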
+<property>
+ <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
+ <value>false</value>
+ <description>
+ Boolean which enables backend datanode-side support for the
+ experimental DistributedFileSystem#getFileBlockStorageLocations API.
+ </description>
+</property>
+
+<property>
+ <name>dfs.client.file-block-storage-locations.num-threads</name>
+ <value>10</value>
+ <description>
+ Number of threads used for making parallel RPCs in
+ DistributedFileSystem#getFileBlockStorageLocations().
+ </description>
+</property>
+
+<property>
+ <name>dfs.client.file-block-storage-locations.timeout</name>
+ <value>60</value>
+ <description>
+ Timeout (in seconds) for the parallel RPCs made in
+ DistributedFileSystem#getFileBlockStorageLocations().
+ </description>
+</property>
+
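[A sketch of driving the experimental API the three properties above
configure, assuming this branch's
DistributedFileSystem#getFileBlockStorageLocations(List<BlockLocation>)
signature, that fs.defaultFS points at the cluster, and a hypothetical
/some/file path.]

  import java.io.IOException;
  import java.util.Arrays;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.BlockLocation;
  import org.apache.hadoop.fs.BlockStorageLocation;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class BlockStorageLocationsSketch {
    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();
      // Must also be enabled datanode-side, per the property above.
      conf.setBoolean("dfs.datanode.hdfs-blocks-metadata.enabled", true);
      Path file = new Path("/some/file"); // hypothetical path
      DistributedFileSystem dfs =
          (DistributedFileSystem) file.getFileSystem(conf);
      FileStatus stat = dfs.getFileStatus(file);
      BlockLocation[] blocks =
          dfs.getFileBlockLocations(stat, 0, stat.getLen());
      // Fans out parallel RPCs to the datanodes, bounded by the
      // num-threads and timeout settings above.
      BlockStorageLocation[] volumes =
          dfs.getFileBlockStorageLocations(Arrays.asList(blocks));
      System.out.println(volumes.length + " blocks resolved");
    }
  }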
+<property>
+ <name>dfs.journalnode.rpc-address</name>
+ <value>0.0.0.0:8485</value>
+ <description>
+ The JournalNode RPC server address and port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.journalnode.http-address</name>
+ <value>0.0.0.0:8480</value>
+ <description>
+ The address and port the JournalNode web UI listens on.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
+
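[The rpc-address above is what quorum-journal clients connect to; below
is a sketch of pointing an HA namenode's shared edits directory at a
three-node quorum. Hostnames jn1..jn3 and the "mycluster" journal name
are hypothetical; port 8485 matches the default above.]

  import org.apache.hadoop.conf.Configuration;

  public class QuorumJournalSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Hypothetical journalnode hosts on the default RPC port.
      conf.set("dfs.namenode.shared.edits.dir",
          "qjournal://jn1:8485;jn2:8485;jn3:8485/mycluster");
      System.out.println(conf.get("dfs.namenode.shared.edits.dir"));
    }
  }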
</configuration>
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1363593-1396941
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1360400-1399945
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1360400-1399945
Merged /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1363593-1396941
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp Fri Oct 19 02:25:55 2012
@@ -34,8 +34,7 @@
HAServiceState nnHAState = nn.getServiceState();
boolean isActive = (nnHAState == HAServiceState.ACTIVE);
String namenodeRole = nn.getRole().toString();
- String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":"
- + nn.getNameNodeAddress().getPort();
+ String namenodeLabel = nn.getNameNodeAddressHostPortString();
Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks =
fsn.listCorruptFileBlocks("/", null);
int corruptFileCount = corruptFileBlocks.size();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp Fri Oct 19 02:25:55 2012
@@ -34,7 +34,7 @@
boolean isActive = (nnHAState == HAServiceState.ACTIVE);
String namenodeRole = nn.getRole().toString();
String namenodeState = nnHAState.toString();
- String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
+ String namenodeLabel = nn.getNameNodeAddressHostPortString();
%>
<!DOCTYPE html>
@@ -60,8 +60,10 @@
<%= NamenodeJspHelper.getCorruptFilesWarning(fsn)%>
<% healthjsp.generateHealthReport(out, nn, request); %>
-<hr>
+<% healthjsp.generateJournalReport(out, nn, request); %>
+<hr/>
<% healthjsp.generateConfReport(out, nn, request); %>
+<hr>
<%
out.println(ServletUtil.htmlFooter());
%>
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp Fri Oct 19 02:25:55 2012
@@ -33,7 +33,7 @@ String namenodeRole = nn.getRole().toStr
FSNamesystem fsn = nn.getNamesystem();
HAServiceState nnHAState = nn.getServiceState();
boolean isActive = (nnHAState == HAServiceState.ACTIVE);
-String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
+String namenodeLabel = nn.getNameNodeAddressHostPortString();
%>
<!DOCTYPE html>
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1363593-1396941
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1360400-1399945
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java Fri Oct 19 02:25:55 2012
@@ -271,7 +271,6 @@ public class DataTransferTestUtil {
}
}
- /** {@inheritDoc} */
@Override
public String toString() {
return error + " " + super.toString();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java Fri Oct 19 02:25:55 2012
@@ -51,9 +51,6 @@ public class PipelinesTestUtil extends D
this.name = name;
}
- /**
- * {@inheritDoc}
- */
@Override
public void run(NodeBytes nb) throws IOException {
synchronized (rcv) {
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1363593-1396941
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1360400-1399945
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java Fri Oct 19 02:25:55 2012
@@ -17,7 +17,11 @@
*/
package org.apache.hadoop.cli;
-import org.apache.hadoop.cli.util.*;
+import org.apache.hadoop.cli.util.CLICommandDFSAdmin;
+import org.apache.hadoop.cli.util.CLICommandTypes;
+import org.apache.hadoop.cli.util.CLITestCmd;
+import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.cli.util.FSCmdExecutor;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
public class CLITestCmdDFS extends CLITestCmd {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java Fri Oct 19 02:25:55 2012
@@ -18,6 +18,8 @@
package org.apache.hadoop.cli;
+import static org.junit.Assert.assertTrue;
+
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.fs.FileSystem;
@@ -27,7 +29,6 @@ import org.apache.hadoop.hdfs.HDFSPolicy
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.junit.After;
-import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.Test;
@@ -74,9 +75,12 @@ public class TestHDFSCLI extends CLITest
@After
@Override
public void tearDown() throws Exception {
- if (null != fs)
+ if (fs != null) {
fs.close();
- dfsCluster.shutdown();
+ }
+ if (dfsCluster != null) {
+ dfsCluster.shutdown();
+ }
Thread.sleep(2000);
super.tearDown();
}
@@ -93,9 +97,7 @@ public class TestHDFSCLI extends CLITest
protected Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
}
-
- //TODO: The test is failing due to the change in HADOOP-7360.
- // HDFS-2038 is going to fix it. Disable the test for the moment.
+
@Test
@Override
public void testAll () {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java Fri Oct 19 02:25:55 2012
@@ -55,6 +55,7 @@ public class TestFcHdfsCreateMkdir exten
cluster.shutdown();
}
+ @Override
@Before
public void setUp() throws Exception {
super.setUp();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java Fri Oct 19 02:25:55 2012
@@ -55,6 +55,7 @@ public class TestFcHdfsPermission extend
cluster.shutdown();
}
+ @Override
@Before
public void setUp() throws Exception {
super.setUp();
@@ -72,6 +73,7 @@ public class TestFcHdfsPermission extend
*/
static final FsPermission FILE_MASK_IGNORE_X_BIT =
new FsPermission((short) ~0666);
+ @Override
FsPermission getFileMask() {
return FILE_MASK_IGNORE_X_BIT;
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java Fri Oct 19 02:25:55 2012
@@ -53,18 +53,22 @@ public class TestFcHdfsSymlink extends F
private static WebHdfsFileSystem webhdfs;
+ @Override
protected String getScheme() {
return "hdfs";
}
+ @Override
protected String testBaseDir1() throws IOException {
return "/test1";
}
+ @Override
protected String testBaseDir2() throws IOException {
return "/test2";
}
+ @Override
protected URI testURI() {
return cluster.getURI(0);
}
@@ -93,7 +97,7 @@ public class TestFcHdfsSymlink extends F
}
@Test
- /** Link from Hdfs to LocalFs */
+ /** Access a file using a link that spans Hdfs to LocalFs */
public void testLinkAcrossFileSystems() throws IOException {
Path localDir = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test");
Path localFile = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test/file");
@@ -108,7 +112,42 @@ public class TestFcHdfsSymlink extends F
readFile(link);
assertEquals(fileSize, fc.getFileStatus(link).getLen());
}
-
+
+ @Test
+ /** Test renaming a file across two file systems using a link */
+ public void testRenameAcrossFileSystemsViaLink() throws IOException {
+ Path localDir = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test");
+ Path hdfsFile = new Path(testBaseDir1(), "file");
+ Path link = new Path(testBaseDir1(), "link");
+ Path hdfsFileNew = new Path(testBaseDir1(), "fileNew");
+ Path hdfsFileNewViaLink = new Path(link, "fileNew");
+ FileContext localFc = FileContext.getLocalFSFileContext();
+ localFc.delete(localDir, true);
+ localFc.mkdir(localDir, FileContext.DEFAULT_PERM, true);
+ localFc.setWorkingDirectory(localDir);
+ createAndWriteFile(fc, hdfsFile);
+ fc.createSymlink(localDir, link, false);
+ // Rename hdfs://test1/file to hdfs://test1/link/fileNew
+ // which renames to file://TEST_ROOT/test/fileNew which
+ // spans AbstractFileSystems and therefore fails.
+ try {
+ fc.rename(hdfsFile, hdfsFileNewViaLink);
+ fail("Renamed across file systems");
+ } catch (InvalidPathException ipe) {
+ // Expected
+ }
+ // Now rename hdfs://test1/link/fileNew to hdfs://test1/fileNew
+ // which renames file://TEST_ROOT/test/fileNew to hdfs://test1/fileNew
+ // which spans AbstractFileSystems and therefore fails.
+ createAndWriteFile(fc, hdfsFileNewViaLink);
+ try {
+ fc.rename(hdfsFileNewViaLink, hdfsFileNew);
+ fail("Renamed across file systems");
+ } catch (InvalidPathException ipe) {
+ // Expected
+ }
+ }
+
@Test
/** Test access a symlink using AbstractFileSystem */
public void testAccessLinkFromAbstractFileSystem() throws IOException {