Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2014/08/29 04:07:30 UTC
[01/12] git commit: HADOOP-10880. Move HTTP delegation tokens out of URL querystring to a header. (tucu)
Repository: hadoop
Updated Branches:
refs/heads/HDFS-6581 bbaa7dc28 -> 7e32be876
HADOOP-10880. Move HTTP delegation tokens out of URL querystring to a header. (tucu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1ae479a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1ae479a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1ae479a
Branch: refs/heads/HDFS-6581
Commit: d1ae479aa5ae4d3e7ec80e35892e1699c378f813
Parents: c4c9a78
Author: Alejandro Abdelnur <tu...@apache.org>
Authored: Thu Aug 28 14:45:40 2014 -0700
Committer: Alejandro Abdelnur <tu...@apache.org>
Committed: Thu Aug 28 14:45:40 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../web/DelegationTokenAuthenticatedURL.java | 81 ++++++++++++++++----
.../DelegationTokenAuthenticationHandler.java | 14 +++-
.../web/DelegationTokenAuthenticator.java | 19 ++++-
...tionTokenAuthenticationHandlerWithMocks.java | 46 ++++++++++-
.../delegation/web/TestWebDelegationToken.java | 50 +++++++++++-
6 files changed, 187 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
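For context, a minimal client-side sketch of what this change means in practice, mirroring the new test in TestWebDelegationToken (the endpoint URL and renewer name are hypothetical). With the new default, the delegation token travels in the X-Hadoop-Delegation-Token request header instead of the delegation= query parameter, so it no longer shows up in URLs or access logs:

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

    public class HeaderTokenClient {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://host:14000/webhdfs/v1/?op=GETHOMEDIRECTORY"); // hypothetical
        DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
        DelegationTokenAuthenticatedURL.Token token =
            new DelegationTokenAuthenticatedURL.Token();
        // Authenticate against the endpoint and fetch a delegation token.
        aUrl.getDelegationToken(url, token, "renewer");
        // Expose the token through the current user's credentials, keyed by host:port.
        UserGroupInformation.getCurrentUser().addToken(token.getDelegationToken());
        // With a fresh (unset) auth token, openConnection() finds the delegation
        // token in the credentials and, by default, injects it as the
        // X-Hadoop-Delegation-Token header rather than the query string.
        HttpURLConnection conn =
            aUrl.openConnection(url, new DelegationTokenAuthenticatedURL.Token());
        System.out.println(conn.getResponseCode());
      }
    }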
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ae479a/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ecbaaab..641635b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -518,6 +518,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-10998. Fix bash tab completion code to work (Jim Hester via aw)
+ HADOOP-10880. Move HTTP delegation tokens out of URL querystring to
+ a header. (tucu)
+
OPTIMIZATIONS
HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ae479a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
index d955ada..5aeb177 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
@@ -125,6 +125,8 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
}
}
+ private boolean useQueryStringforDelegationToken = false;
+
/**
* Creates a <code>DelegationTokenAuthenticatedURL</code>.
* <p/>
@@ -171,6 +173,34 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
}
/**
+ * Sets if delegation token should be transmitted in the URL query string.
+ * By default it is transmitted using the
+ * {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
+ * <p/>
+ * This method is provided to enable WebHDFS backwards compatibility.
+ *
+ * @param useQueryString <code>TRUE</code> if the token is transmitted in the
+ * URL query string, <code>FALSE</code> if the delegation token is transmitted
+ * using the {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP
+ * header.
+ */
+ @Deprecated
+ protected void setUseQueryStringForDelegationToken(boolean useQueryString) {
+ useQueryStringforDelegationToken = useQueryString;
+ }
+
+ /**
+ * Returns if the delegation token is transmitted in the URL query string.
+ *
+ * @return <code>TRUE</code> if the token is transmitted in the URL query
+ * string, <code>FALSE</code> if the delegation token is transmitted using the
+ * {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
+ */
+ public boolean useQueryStringForDelegationToken() {
+ return useQueryStringforDelegationToken;
+ }
+
+ /**
* Returns an authenticated {@link HttpURLConnection}; it uses a Delegation
* Token only if the given auth token is an instance of {@link Token} and
* it contains a Delegation Token, otherwise use the configured
@@ -235,23 +265,41 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
+ @SuppressWarnings("unchecked")
public HttpURLConnection openConnection(URL url, Token token, String doAs)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Map<String, String> extraParams = new HashMap<String, String>();
-
- // delegation token
- Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
- if (!creds.getAllTokens().isEmpty()) {
- InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
- url.getPort());
- Text service = SecurityUtil.buildTokenService(serviceAddr);
- org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dt =
- creds.getToken(service);
- if (dt != null) {
- extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
- dt.encodeToUrlString());
+ org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dToken
+ = null;
+ // if we have a valid auth token, it takes precedence over a delegation token
+ // and we don't even look for one.
+ if (!token.isSet()) {
+ // delegation token
+ Credentials creds = UserGroupInformation.getCurrentUser().
+ getCredentials();
+ if (!creds.getAllTokens().isEmpty()) {
+ InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
+ url.getPort());
+ Text service = SecurityUtil.buildTokenService(serviceAddr);
+ dToken = creds.getToken(service);
+ if (dToken != null) {
+ if (useQueryStringForDelegationToken()) {
+ // delegation token will go in the query string, injecting it
+ extraParams.put(
+ KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
+ dToken.encodeToUrlString());
+ } else {
+ // delegation token will go as request header, setting it in the
+ // auth-token to ensure no authentication handshake is triggered
+ // (if we have a delegation token, we are authenticated)
+ // the delegation token header is injected in the connection request
+ // at the end of this method.
+ token.delegationToken = (org.apache.hadoop.security.token.Token
+ <AbstractDelegationTokenIdentifier>) dToken;
+ }
+ }
}
}
@@ -261,7 +309,14 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
}
url = augmentURL(url, extraParams);
- return super.openConnection(url, token);
+ HttpURLConnection conn = super.openConnection(url, token);
+ if (!token.isSet() && !useQueryStringForDelegationToken() && dToken != null) {
+ // injecting the delegation token header in the connection request
+ conn.setRequestProperty(
+ DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
+ dToken.encodeToUrlString());
+ }
+ return conn;
}
/**
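The setter above is protected and immediately deprecated, so it is not part of the general API; a client that must keep the old query-string behavior (e.g. WebHDFS talking to older servers) would opt back in through a subclass. A minimal sketch, with a hypothetical class name:

    // Hypothetical adapter: re-enables query-string transport for legacy endpoints.
    public class QueryStringTokenURL extends DelegationTokenAuthenticatedURL {
      public QueryStringTokenURL() {
        setUseQueryStringForDelegationToken(true); // deprecated, compatibility only
      }
    }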
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ae479a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index 670ec55..e4d9424 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -331,8 +331,7 @@ public abstract class DelegationTokenAuthenticationHandler
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
- String delegationParam = ServletUtils.getParameter(request,
- KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
+ String delegationParam = getDelegationToken(request);
if (delegationParam != null) {
try {
Token<DelegationTokenIdentifier> dt =
@@ -356,4 +355,15 @@ public abstract class DelegationTokenAuthenticationHandler
return token;
}
+ private String getDelegationToken(HttpServletRequest request)
+ throws IOException {
+ String dToken = request.getHeader(
+ DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER);
+ if (dToken == null) {
+ dToken = ServletUtils.getParameter(request,
+ KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
+ }
+ return dToken;
+ }
+
}
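On the wire, the handler now accepts the token in either position, checking the header first and falling back to the query string. Two illustrative, equivalent requests (hypothetical host, token abbreviated to a placeholder):

    GET /webhdfs/v1/?op=GETHOMEDIRECTORY HTTP/1.1
    Host: host:14000
    X-Hadoop-Delegation-Token: <encoded-delegation-token>

    GET /webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=<encoded-delegation-token> HTTP/1.1
    Host: host:14000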
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ae479a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index ec192da..18df56c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -56,6 +56,9 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
public static final String OP_PARAM = "op";
+ public static final String DELEGATION_TOKEN_HEADER =
+ "X-Hadoop-Delegation-Token";
+
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
@@ -101,15 +104,23 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
authenticator.setConnectionConfigurator(configurator);
}
- private boolean hasDelegationToken(URL url) {
- String queryStr = url.getQuery();
- return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
+ private boolean hasDelegationToken(URL url, AuthenticatedURL.Token token) {
+ boolean hasDt = false;
+ if (token instanceof DelegationTokenAuthenticatedURL.Token) {
+ hasDt = ((DelegationTokenAuthenticatedURL.Token) token).
+ getDelegationToken() != null;
+ }
+ if (!hasDt) {
+ String queryStr = url.getQuery();
+ hasDt = (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
+ }
+ return hasDt;
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
- if (!hasDelegationToken(url)) {
+ if (!hasDelegationToken(url, token)) {
authenticator.authenticate(url, token);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ae479a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
index c9d255d..7880fa1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
@@ -284,11 +284,13 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
@Test
public void testAuthenticate() throws Exception {
- testValidDelegationToken();
- testInvalidDelegationToken();
+ testValidDelegationTokenQueryString();
+ testValidDelegationTokenHeader();
+ testInvalidDelegationTokenQueryString();
+ testInvalidDelegationTokenHeader();
}
- private void testValidDelegationToken() throws Exception {
+ private void testValidDelegationTokenQueryString() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
@@ -307,7 +309,26 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
Assert.assertTrue(token.isExpired());
}
- private void testInvalidDelegationToken() throws Exception {
+ private void testValidDelegationTokenHeader() throws Exception {
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Token<DelegationTokenIdentifier> dToken =
+ handler.getTokenManager().createToken(
+ UserGroupInformation.getCurrentUser(), "user");
+ Mockito.when(request.getHeader(Mockito.eq(
+ DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
+ dToken.encodeToUrlString());
+
+ AuthenticationToken token = handler.authenticate(request, response);
+ Assert.assertEquals(UserGroupInformation.getCurrentUser().
+ getShortUserName(), token.getUserName());
+ Assert.assertEquals(0, token.getExpires());
+ Assert.assertEquals(handler.getType(),
+ token.getType());
+ Assert.assertTrue(token.isExpired());
+ }
+
+ private void testInvalidDelegationTokenQueryString() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
@@ -323,4 +344,21 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
}
}
+ private void testInvalidDelegationTokenHeader() throws Exception {
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(request.getHeader(Mockito.eq(
+ DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
+ "invalid");
+
+ try {
+ handler.authenticate(request, response);
+ Assert.fail();
+ } catch (AuthenticationException ex) {
+ //NOP
+ } catch (Exception ex) {
+ Assert.fail();
+ }
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1ae479a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
index 1b452f1..118abff 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
@@ -149,6 +149,15 @@ public class TestWebDelegationToken {
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write("ping");
+ if (req.getHeader(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER)
+ != null) {
+ resp.setHeader("UsingHeader", "true");
+ }
+ if (req.getQueryString() != null &&
+ req.getQueryString().contains(
+ DelegationTokenAuthenticator.DELEGATION_PARAM + "=")) {
+ resp.setHeader("UsingQueryString", "true");
+ }
}
@Override
@@ -314,7 +323,20 @@ public class TestWebDelegationToken {
}
@Test
- public void testDelegationTokenAuthenticatorCalls() throws Exception {
+ public void testDelegationTokenAuthenticatorCallsWithHeader()
+ throws Exception {
+ testDelegationTokenAuthenticatorCalls(false);
+ }
+
+ @Test
+ public void testDelegationTokenAuthenticatorCallsWithQueryString()
+ throws Exception {
+ testDelegationTokenAuthenticatorCalls(true);
+ }
+
+
+ private void testDelegationTokenAuthenticatorCalls(final boolean useQS)
+ throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
@@ -324,14 +346,15 @@ public class TestWebDelegationToken {
try {
jetty.start();
- URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
+ final URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
- DelegationTokenAuthenticatedURL aUrl =
+ final DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
+ aUrl.setUseQueryStringForDelegationToken(useQS);
try {
aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);
@@ -379,6 +402,27 @@ public class TestWebDelegationToken {
Assert.assertTrue(ex.getMessage().contains("401"));
}
+ aUrl.getDelegationToken(authURL, token, "foo");
+
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ ugi.addToken(token.getDelegationToken());
+ ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ HttpURLConnection conn = aUrl.openConnection(nonAuthURL, new DelegationTokenAuthenticatedURL.Token());
+ Assert.assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+ if (useQS) {
+ Assert.assertNull(conn.getHeaderField("UsingHeader"));
+ Assert.assertNotNull(conn.getHeaderField("UsingQueryString"));
+ } else {
+ Assert.assertNotNull(conn.getHeaderField("UsingHeader"));
+ Assert.assertNull(conn.getHeaderField("UsingQueryString"));
+ }
+ return null;
+ }
+ });
+
+
} finally {
jetty.stop();
}
[04/12] git commit: Fix typos in log messages. Contributed by Ray Chiang
Posted by ar...@apache.org.
Fix typos in log messages. Contributed by Ray Chiang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48aa3b72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48aa3b72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48aa3b72
Branch: refs/heads/HDFS-6581
Commit: 48aa3b7274b73e022835268123d3711e28e7d48e
Parents: d9a7404
Author: Chris Douglas <cd...@apache.org>
Authored: Thu Aug 28 16:29:35 2014 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Thu Aug 28 16:29:35 2014 -0700
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 8 +++++---
.../mapreduce/v2/app/commit/CommitterEventHandler.java | 2 +-
.../apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java | 2 +-
.../hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java | 8 ++++----
.../src/main/java/org/apache/hadoop/mapred/BackupStore.java | 2 +-
.../src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java | 4 ++--
6 files changed, 14 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/48aa3b72/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index c0038f6..67f8851 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -252,16 +252,18 @@ Release 2.6.0 - UNRELEASED
(Chen He via jlowe)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
+
MAPREDUCE-5890. Support for encrypting Intermediate
data and spills in local filesystem. (asuresh via tucu)
-
+
MAPREDUCE-6007. Add support to distcp to preserve raw.* namespace
extended attributes. (clamb)
-
+
MAPREDUCE-6041. Fix TestOptionsParser. (clamb)
--
+ MAPREDUCE-6051. Fix typos in log messages. (Ray Chiang via cdouglas)
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/48aa3b72/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java
index 8c3be86..d56c1e5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java
@@ -202,7 +202,7 @@ public class CommitterEventHandler extends AbstractService
private synchronized void cancelJobCommit() {
Thread threadCommitting = jobCommitThread;
if (threadCommitting != null && threadCommitting.isAlive()) {
- LOG.info("Canceling commit");
+ LOG.info("Cancelling commit");
threadCommitting.interrupt();
// wait up to configured timeout for commit thread to finish
http://git-wip-us.apache.org/repos/asf/hadoop/blob/48aa3b72/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index 4b32c04..6e9f313 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -335,7 +335,7 @@ public abstract class RMCommunicator extends AbstractService
public void setSignalled(boolean isSignalled) {
this.isSignalled = isSignalled;
- LOG.info("RMCommunicator notified that iSignalled is: "
+ LOG.info("RMCommunicator notified that isSignalled is: "
+ isSignalled);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/48aa3b72/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
index 34dcb12..392a51a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
@@ -134,16 +134,16 @@ public class DefaultSpeculator extends AbstractService implements
estimator.contextualize(conf, context);
} catch (InstantiationException ex) {
- LOG.error("Can't make a speculation runtime extimator", ex);
+ LOG.error("Can't make a speculation runtime estimator", ex);
throw new YarnRuntimeException(ex);
} catch (IllegalAccessException ex) {
- LOG.error("Can't make a speculation runtime extimator", ex);
+ LOG.error("Can't make a speculation runtime estimator", ex);
throw new YarnRuntimeException(ex);
} catch (InvocationTargetException ex) {
- LOG.error("Can't make a speculation runtime extimator", ex);
+ LOG.error("Can't make a speculation runtime estimator", ex);
throw new YarnRuntimeException(ex);
} catch (NoSuchMethodException ex) {
- LOG.error("Can't make a speculation runtime extimator", ex);
+ LOG.error("Can't make a speculation runtime estimator", ex);
throw new YarnRuntimeException(ex);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/48aa3b72/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
index be7fe18..e79ec66 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
@@ -607,7 +607,7 @@ public class BackupStore<K,V> {
int reserve(int requestedSize, int minSize) {
if (availableSize < minSize) {
- LOG.debug("No Space available. Available: " + availableSize +
+ LOG.debug("No space available. Available: " + availableSize +
" MinSize: " + minSize);
return 0;
} else {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/48aa3b72/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
index 347dd06..8b3f4c8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
@@ -1076,7 +1076,7 @@ public class JHLogAnalyzer {
long execTime,
Path resFileName
) throws IOException {
- LOG.info("Analizing results ...");
+ LOG.info("Analyzing results ...");
DataOutputStream out = null;
BufferedWriter writer = null;
try {
@@ -1118,7 +1118,7 @@ public class JHLogAnalyzer {
if(writer != null) writer.close();
if(out != null) out.close();
}
- LOG.info("Analizing results ... done.");
+ LOG.info("Analyzing results ... done.");
}
private static void cleanup(Configuration conf) throws IOException {
[07/12] git commit: HADOOP-11013. CLASSPATH handling should be consolidated, debuggable (aw)
Posted by ar...@apache.org.
HADOOP-11013. CLASSPATH handling should be consolidated, debuggable (aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8774cc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8774cc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8774cc5
Branch: refs/heads/HDFS-6581
Commit: d8774cc577198fdc3bc36c26526c95ea9a989800
Parents: 7119bd4
Author: Allen Wittenauer <aw...@apache.org>
Authored: Thu Aug 28 10:37:23 2014 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Thu Aug 28 18:09:25 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 +
.../hadoop-common/src/main/bin/hadoop | 5 +-
.../hadoop-common/src/main/bin/hadoop-config.sh | 5 ++
.../src/main/bin/hadoop-functions.sh | 54 +++++++++++++++++---
.../hadoop-common/src/main/bin/rcc | 7 +--
.../hadoop-hdfs/src/main/bin/hdfs | 25 +++++++--
hadoop-mapreduce-project/bin/mapred | 14 +++--
hadoop-yarn-project/hadoop-yarn/bin/yarn | 15 +++++-
8 files changed, 105 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
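A quick way to exercise the new plumbing is the --debug flag added to hadoop-config.sh. An illustrative session (paths are hypothetical and the exact DEBUG lines depend on the command and the local install; all debug output goes to stderr via hadoop_debug):

    $ hadoop --debug fs -ls /tmp
    DEBUG: Initial HADOOP_OPTS=-Djava.net.preferIPv4Stack=true
    DEBUG: HADOOP_CONF_DIR=/opt/hadoop/etc/hadoop
    DEBUG: Initial CLASSPATH=/opt/hadoop/etc/hadoop
    DEBUG: Append CLASSPATH: /opt/hadoop/share/hadoop/common/lib/*
    DEBUG: Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS
    DEBUG: Final CLASSPATH: /opt/hadoop/etc/hadoop:...
    DEBUG: Final HADOOP_OPTS: -Djava.net.preferIPv4Stack=true
    (normal command output follows)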
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8774cc5/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 05eb383..717bd24 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -125,6 +125,8 @@ Trunk (Unreleased)
HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
+ HADOOP-11013. CLASSPATH handling should be consolidated, debuggable (aw)
+
BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8774cc5/hadoop-common-project/hadoop-common/src/main/bin/hadoop
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 24c4d18..64c6758 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -114,6 +114,7 @@ case ${COMMAND} in
;;
archive)
CLASS=org.apache.hadoop.tools.HadoopArchives
+ hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
hadoop_add_classpath "${TOOL_PATH}"
;;
checknative)
@@ -136,10 +137,12 @@ case ${COMMAND} in
;;
distch)
CLASS=org.apache.hadoop.tools.DistCh
+ hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
hadoop_add_classpath "${TOOL_PATH}"
;;
distcp)
CLASS=org.apache.hadoop.tools.DistCp
+ hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
hadoop_add_classpath "${TOOL_PATH}"
;;
fs)
@@ -168,11 +171,11 @@ case ${COMMAND} in
esac
# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
hadoop_finalize
-export CLASSPATH
hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8774cc5/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
index 0cf8bcf..40494b3 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
@@ -129,6 +129,11 @@ while [[ -z "${_hadoop_common_done}" ]]; do
hadoop_exit_with_usage 1
fi
;;
+ --debug)
+ shift
+ # shellcheck disable=SC2034
+ HADOOP_SHELL_SCRIPT_DEBUG=true
+ ;;
--help|-help|-h|help|--h|--\?|-\?|\?)
hadoop_exit_with_usage 0
;;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8774cc5/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 800e024..dd5520c 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -21,6 +21,13 @@ function hadoop_error
echo "$*" 1>&2
}
+function hadoop_debug
+{
+ if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
+ echo "DEBUG: $*" 1>&2
+ fi
+}
+
function hadoop_bootstrap_init
{
# NOTE: This function is not user replaceable.
@@ -62,6 +69,7 @@ function hadoop_bootstrap_init
# defaults
export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+ hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
}
function hadoop_find_confdir
@@ -80,6 +88,8 @@ function hadoop_find_confdir
conf_dir="etc/hadoop"
fi
export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}"
+
+ hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
}
function hadoop_exec_hadoopenv
@@ -105,6 +115,7 @@ function hadoop_basic_init
# CLASSPATH initially contains $HADOOP_CONF_DIR
CLASSPATH="${HADOOP_CONF_DIR}"
+ hadoop_debug "Initial CLASSPATH=${HADOOP_CONF_DIR}"
if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then
@@ -116,19 +127,19 @@ function hadoop_basic_init
# define HADOOP_HDFS_HOME
if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
- [[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
+ [[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
export HADOOP_HDFS_HOME="${HADOOP_PREFIX}"
fi
# define HADOOP_YARN_HOME
if [[ -z "${HADOOP_YARN_HOME}" ]] &&
- [[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
+ [[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
export HADOOP_YARN_HOME="${HADOOP_PREFIX}"
fi
# define HADOOP_MAPRED_HOME
if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
- [[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
+ [[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
fi
@@ -274,6 +285,9 @@ function hadoop_add_param
if [[ ! ${!1} =~ $2 ]] ; then
# shellcheck disable=SC2086
eval $1="'${!1} $3'"
+ hadoop_debug "$1 accepted $3"
+ else
+ hadoop_debug "$1 declined $3"
fi
}
@@ -283,8 +297,8 @@ function hadoop_add_classpath
# $1 = directory, file, wildcard, whatever to add
# $2 = before or after, which determines where in the
# classpath this object should go. default is after
- # return 0 = success
- # return 1 = failure (duplicate, doesn't exist, whatever)
+ # return 0 = success (added or duplicate)
+ # return 1 = failure (doesn't exist, whatever)
# However, with classpath (& JLP), we can do dedupe
# along with some sanity checking (e.g., missing directories)
@@ -295,23 +309,29 @@ function hadoop_add_classpath
if [[ $1 =~ ^.*\*$ ]]; then
local mp=$(dirname "$1")
if [[ ! -d "${mp}" ]]; then
+ hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
return 1
fi
# no wildcard in the middle, so check existence
# (doesn't matter *what* it is)
elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then
+ hadoop_debug "Rejected CLASSPATH: $1 (does not exist)"
return 1
fi
-
if [[ -z "${CLASSPATH}" ]]; then
CLASSPATH=$1
+ hadoop_debug "Initial CLASSPATH=$1"
elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then
if [[ "$2" = "before" ]]; then
CLASSPATH="$1:${CLASSPATH}"
+ hadoop_debug "Prepend CLASSPATH: $1"
else
CLASSPATH+=:$1
+ hadoop_debug "Append CLASSPATH: $1"
fi
+ else
+ hadoop_debug "Dupe CLASSPATH: $1"
fi
return 0
}
@@ -331,14 +351,20 @@ function hadoop_add_colonpath
if [[ -z "${!1}" ]]; then
# shellcheck disable=SC2086
eval $1="'$2'"
+ hadoop_debug "Initial colonpath($1): $2"
elif [[ "$3" = "before" ]]; then
# shellcheck disable=SC2086
eval $1="'$2:${!1}'"
+ hadoop_debug "Prepend colonpath($1): $2"
else
# shellcheck disable=SC2086
eval $1+="'$2'"
+ hadoop_debug "Append colonpath($1): $2"
fi
+ return 0
fi
+ hadoop_debug "Rejected colonpath($1): $2"
+ return 1
}
function hadoop_add_javalibpath
@@ -397,6 +423,7 @@ function hadoop_add_to_classpath_hdfs
function hadoop_add_to_classpath_yarn
{
+ local i
#
# get all of the yarn jars+config in the path
#
@@ -459,7 +486,7 @@ function hadoop_add_to_classpath_userpath
local i
local j
let c=0
-
+
if [[ -n "${HADOOP_CLASSPATH}" ]]; then
# I wonder if Java runs on VMS.
for i in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
@@ -715,6 +742,11 @@ function hadoop_java_exec
local command=$1
local class=$2
shift 2
+
+ hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
+ hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+
+ export CLASSPATH
#shellcheck disable=SC2086
exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
}
@@ -727,6 +759,11 @@ function hadoop_start_daemon
local command=$1
local class=$2
shift 2
+
+ hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
+ hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+
+ export CLASSPATH
#shellcheck disable=SC2086
exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
}
@@ -807,6 +844,9 @@ function hadoop_start_secure_daemon
# note that shellcheck will throw a
# bogus for-our-use-case 2086 here.
# it doesn't properly support multi-line situations
+
+ hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
+ hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
exec "${jsvc}" \
"-Dproc_${daemonname}" \
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8774cc5/hadoop-common-project/hadoop-common/src/main/bin/rcc
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/rcc b/hadoop-common-project/hadoop-common/src/main/bin/rcc
index dc6158a..7425353 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/rcc
+++ b/hadoop-common-project/hadoop-common/src/main/bin/rcc
@@ -23,6 +23,7 @@ this="$bin/$script"
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
. "$HADOOP_LIBEXEC_DIR/hadoop-config.sh"
@@ -33,10 +34,10 @@ fi
CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
-HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-hadoop_add_param HADOOP_OPTS Xmx "$JAVA_HEAP_MAX"
+hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
hadoop_finalize
-export CLASSPATH
hadoop_java_exec rcc "${CLASS}" "$@"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8774cc5/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 77f1582..6872a0e 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -80,6 +80,7 @@ shift
case ${COMMAND} in
balancer)
CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+ hadoop_debug "Appending HADOOP_BALANCER_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}"
;;
cacheadmin)
@@ -105,19 +106,24 @@ case ${COMMAND} in
HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}"
HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}"
- HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS} ${HADOOP_DATANODE_OPTS}"
+ hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
+ hadoop_debug "Appending HADOOP_DN_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
+ HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS}"
CLASS="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
else
+ hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}"
CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
fi
;;
dfs)
CLASS=org.apache.hadoop.fs.FsShell
+ hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
dfsadmin)
CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+ hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
fetchdt)
@@ -125,6 +131,7 @@ case ${COMMAND} in
;;
fsck)
CLASS=org.apache.hadoop.hdfs.tools.DFSck
+ hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
getconf)
@@ -135,12 +142,15 @@ case ${COMMAND} in
;;
haadmin)
CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
- CLASSPATH="${CLASSPATH}:${TOOL_PATH}"
+ hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
+ hadoop_add_classpath "${TOOL_PATH}"
+ hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
journalnode)
daemon="true"
CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
+ hadoop_debug "Appending HADOOP_JOURNALNODE_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}"
;;
jmxget)
@@ -152,6 +162,7 @@ case ${COMMAND} in
namenode)
daemon="true"
CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
+ hadoop_debug "Appending HADOOP_NAMENODE_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}"
;;
nfs3)
@@ -164,9 +175,12 @@ case ${COMMAND} in
HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}"
HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}"
- HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS} ${HADOOP_NFS3_OPTS}"
+ hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
+ hadoop_debug "Appending HADOOP_NFS3_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
+ HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS}"
CLASS=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
else
+ hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}"
CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
fi
@@ -183,11 +197,13 @@ case ${COMMAND} in
portmap)
daemon="true"
CLASS=org.apache.hadoop.portmap.Portmap
+ hadoop_debug "Appending HADOOP_PORTMAP_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}"
;;
secondarynamenode)
daemon="true"
CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+ hadoop_debug "Appending HADOOP_SECONDARYNAMENODE_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}"
;;
snapshotDiff)
@@ -196,6 +212,7 @@ case ${COMMAND} in
zkfc)
daemon="true"
CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+ hadoop_debug "Appending HADOOP_ZKFC_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}"
;;
-*)
@@ -236,8 +253,6 @@ fi
hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
hadoop_finalize
-export CLASSPATH
-
if [[ -n "${daemon}" ]]; then
if [[ -n "${secure_service}" ]]; then
hadoop_secure_daemon_handler \
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8774cc5/hadoop-mapreduce-project/bin/mapred
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index cbfdc7e..8f30637 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -64,13 +64,15 @@ shift
case ${COMMAND} in
mradmin|jobtracker|tasktracker|groups)
- echo "Sorry, the ${COMMAND} command is no longer supported."
- echo "You may find similar functionality with the \"yarn\" shell command."
+ hadoop_error "Sorry, the ${COMMAND} command is no longer supported."
+ hadoop_error "You may find similar functionality with the \"yarn\" shell command."
hadoop_exit_with_usage 1
;;
archive)
CLASS=org.apache.hadoop.tools.HadoopArchives
+ hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
hadoop_add_classpath "${TOOL_PATH}"
+ hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
classpath)
@@ -80,12 +82,15 @@ case ${COMMAND} in
;;
distcp)
CLASS=org.apache.hadoop.tools.DistCp
+ hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
hadoop_add_classpath "${TOOL_PATH}"
+ hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
historyserver)
daemon="true"
CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
+ hadoop_debug "Appending HADOOP_JOB_HISTORYSERVER_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOB_HISTORYSERVER_OPTS}"
if [ -n "${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}" ]; then
JAVA_HEAP_MAX="-Xmx${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}m"
@@ -97,6 +102,7 @@ case ${COMMAND} in
;;
pipes)
CLASS=org.apache.hadoop.mapred.pipes.Submitter
+ hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
queue)
@@ -104,10 +110,12 @@ case ${COMMAND} in
;;
sampler)
CLASS=org.apache.hadoop.mapred.lib.InputSampler
+ hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
version)
CLASS=org.apache.hadoop.util.VersionInfo
+ hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
-*|*)
@@ -130,8 +138,6 @@ fi
hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
hadoop_finalize
-export CLASSPATH
-
if [[ -n "${daemon}" ]]; then
if [[ -n "${secure_service}" ]]; then
hadoop_secure_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}"\
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8774cc5/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index dfef811..371d23d 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -72,6 +72,7 @@ shift
case "${COMMAND}" in
application|applicationattempt|container)
CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
set -- "${COMMAND}" "$@"
;;
@@ -82,10 +83,12 @@ case "${COMMAND}" in
;;
daemonlog)
CLASS=org.apache.hadoop.log.LogLevel
+ hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
;;
jar)
CLASS=org.apache.hadoop.util.RunJar
+ hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
;;
historyserver)
@@ -97,15 +100,18 @@ case "${COMMAND}" in
;;
logs)
CLASS=org.apache.hadoop.yarn.logaggregation.LogDumper
+ hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
;;
node)
CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
+ hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
;;
nodemanager)
daemon="true"
CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
+ hadoop_debug "Append YARN_NODEMANAGER_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_NODEMANAGER_OPTS}"
if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then
JAVA_HEAP_MAX="-Xmx${YARN_NODEMANAGER_HEAPSIZE}m"
@@ -114,6 +120,7 @@ case "${COMMAND}" in
proxyserver)
daemon="true"
CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
+ hadoop_debug "Append YARN_PROXYSERVER_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_PROXYSERVER_OPTS}"
if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then
JAVA_HEAP_MAX="-Xmx${YARN_PROXYSERVER_HEAPSIZE}m"
@@ -123,17 +130,20 @@ case "${COMMAND}" in
daemon="true"
CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
YARN_OPTS="${YARN_OPTS} ${YARN_RESOURCEMANAGER_OPTS}"
+ hadoop_debug "Append YARN_RESOURCEMANAGER_OPTS onto YARN_OPTS"
if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then
JAVA_HEAP_MAX="-Xmx${YARN_RESOURCEMANAGER_HEAPSIZE}m"
fi
;;
rmadmin)
CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
+ hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
;;
timelineserver)
daemon="true"
CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+ hadoop_debug "Append YARN_TIMELINESERVER_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_TIMELINESERVER_OPTS}"
if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then
JAVA_HEAP_MAX="-Xmx${YARN_TIMELINESERVER_HEAPSIZE}m"
@@ -141,6 +151,7 @@ case "${COMMAND}" in
;;
version)
CLASS=org.apache.hadoop.util.VersionInfo
+ hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
;;
-*)
@@ -153,6 +164,8 @@ esac
# set HADOOP_OPTS to YARN_OPTS so that we can use
# finalize, etc, without doing anything funky
+hadoop_debug "Resetting HADOOP_OPTS=YARN_OPTS"
+# shellcheck disable=SC2034
HADOOP_OPTS="${YARN_OPTS}"
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
@@ -180,8 +193,6 @@ hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${YARN_ROOT_LO
hadoop_finalize
-export CLASSPATH
-
if [[ -n "${daemon}" ]]; then
if [[ -n "${secure_service}" ]]; then
hadoop_secure_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}" \
[08/12] git commit: Fix up CHANGES.txt for HDFS-6134, HADOOP-10150 and related JIRAs following merge to branch-2
Posted by ar...@apache.org.
Fix up CHANGES.txt for HDFS-6134, HADOOP-10150 and related JIRAs following merge to branch-2
Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop-mapreduce-project/CHANGES.txt
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ca93d1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ca93d1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ca93d1f
Branch: refs/heads/HDFS-6581
Commit: 2ca93d1fbf0fdcd6b4b5a151261052ac106ac9e1
Parents: bc6ce2c
Author: Alejandro Abdelnur <tu...@cloudera.com>
Authored: Tue Aug 26 12:00:37 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Thu Aug 28 19:05:57 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 101 +++++-----
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 185 +++++++++----------
hadoop-mapreduce-project/CHANGES.txt | 23 +--
3 files changed, 155 insertions(+), 154 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ca93d1f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2d794cf..9fb0cd3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -328,56 +328,6 @@ Trunk (Unreleased)
HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
- BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
- HADOOP-10734. Implement high-performance secure random number sources.
- (Yi Liu via Colin Patrick McCabe)
-
- HADOOP-10603. Crypto input and output streams implementing Hadoop stream
- interfaces. (Yi Liu and Charles Lamb)
-
- HADOOP-10628. Javadoc and few code style improvement for Crypto
- input and output streams. (Yi Liu via clamb)
-
- HADOOP-10632. Minor improvements to Crypto input and output streams.
- (Yi Liu)
-
- HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
-
- HADOOP-10653. Add a new constructor for CryptoInputStream that
- receives current position of wrapped stream. (Yi Liu)
-
- HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
- stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
-
- HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[].
- (wang via yliu)
-
- HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL.
- (Yi Liu via cmccabe)
-
- HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
- format. (Yi Liu)
-
- HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
- JCE if non native support. (Yi Liu)
-
- HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
- openssl versions (cmccabe)
-
- HADOOP-10853. Refactor get instance of CryptoCodec and support create via
- algorithm/mode/padding. (Yi Liu)
-
- HADOOP-10919. Copy command should preserve raw.* namespace
- extended attributes. (clamb)
-
- HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
-
- HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
-
- HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not
- loaded. (umamahesh)
-
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -704,6 +654,57 @@ Release 2.6.0 - UNRELEASED
HADOOP-8815. RandomDatum needs to override hashCode().
(Brandon Li via suresh)
+ BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+ HADOOP-10734. Implement high-performance secure random number sources.
+ (Yi Liu via Colin Patrick McCabe)
+
+ HADOOP-10603. Crypto input and output streams implementing Hadoop stream
+ interfaces. (Yi Liu and Charles Lamb)
+
+ HADOOP-10628. Javadoc and few code style improvement for Crypto
+ input and output streams. (Yi Liu via clamb)
+
+ HADOOP-10632. Minor improvements to Crypto input and output streams.
+ (Yi Liu)
+
+ HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
+
+ HADOOP-10653. Add a new constructor for CryptoInputStream that
+ receives current position of wrapped stream. (Yi Liu)
+
+ HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
+ stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
+
+ HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[].
+ (wang via yliu)
+
+ HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL.
+ (Yi Liu via cmccabe)
+
+ HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
+ format. (Yi Liu)
+
+ HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
+ JCE if non native support. (Yi Liu)
+
+ HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
+ openssl versions (cmccabe)
+
+ HADOOP-10853. Refactor get instance of CryptoCodec and support create via
+ algorithm/mode/padding. (Yi Liu)
+
+ HADOOP-10919. Copy command should preserve raw.* namespace
+ extended attributes. (clamb)
+
+ HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
+
+ HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
+
+ HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not
+ loaded. (umamahesh)
+ --
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ca93d1f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1bb6025..2c56407 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -255,99 +255,6 @@ Trunk (Unreleased)
HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
(Vinayakumar B via wheat 9)
- BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
- HDFS-6387. HDFS CLI admin tool for creating & deleting an
- encryption zone. (clamb)
-
- HDFS-6386. HDFS Encryption Zones (clamb)
-
- HDFS-6388. HDFS integration with KeyProvider. (clamb)
-
- HDFS-6473. Protocol and API for Encryption Zones (clamb)
-
- HDFS-6392. Wire crypto streams for encrypted files in
- DFSClient. (clamb and yliu)
-
- HDFS-6476. Print out the KeyProvider after finding KP successfully on
- startup. (Juan Yu via wang)
-
- HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
- DFSClient. (Charles Lamb and wang)
-
- HDFS-6389. Rename restrictions for encryption zones. (clamb)
-
- HDFS-6605. Client server negotiation of cipher suite. (wang)
-
- HDFS-6625. Remove the Delete Encryption Zone function (clamb)
-
- HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
-
- HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
-
- HDFS-6635. Refactor encryption zone functionality into new
- EncryptionZoneManager class. (wang)
-
- HDFS-6474. Namenode needs to get the actual keys and iv from the
- KeyProvider. (wang)
-
- HDFS-6619. Clean up encryption-related tests. (wang)
-
- HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
-
- HDFS-6490. Fix the keyid format for generated keys in
- FSNamesystem.createEncryptionZone (clamb)
-
- HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
- (wang)
-
- HDFS-6718. Remove EncryptionZoneManager lock. (wang)
-
- HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
-
- HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
- EZManager#createEncryptionZone. (clamb)
-
- HDFS-6724. Decrypt EDEK before creating
- CryptoInputStream/CryptoOutputStream. (wang)
-
- HDFS-6509. Create a special /.reserved/raw directory for raw access to
- encrypted data. (clamb via wang)
-
- HDFS-6771. Require specification of an encryption key when creating
- an encryption zone. (wang)
-
- HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
-
- HDFS-6692. Add more HDFS encryption tests. (wang)
-
- HDFS-6780. Batch the encryption zones listing API. (wang)
-
- HDFS-6394. HDFS encryption documentation. (wang)
-
- HDFS-6834. Improve the configuration guidance in DFSClient when there
- are no Codec classes found in configs. (umamahesh)
-
- HDFS-6546. Add non-superuser capability to get the encryption zone
- for a specific path. (clamb)
-
- HDFS-6733. Creating encryption zone results in NPE when
- KeyProvider is null. (clamb)
-
- HDFS-6785. Should not be able to create encryption zone using path
- to a non-directory file. (clamb)
-
- HDFS-6807. Fix TestReservedRawPaths. (clamb)
-
- HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
- as boolean. (umamahesh)
-
- HDFS-6817. Fix findbugs and other warnings. (yliu)
-
- HDFS-6839. Fix TestCLI to expect new output. (clamb)
-
- HDFS-6905. fs-encryption merge triggered release audit failures. (clamb via tucu)
-
HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail
intermittently with various symptoms - debugging patch. (Yongjun Zhang via
Arpit Agarwal)
@@ -661,6 +568,98 @@ Release 2.6.0 - UNRELEASED
HDFS-6902. FileWriter should be closed in finally block in
BlockReceiver#receiveBlock() (Tsuyoshi OZAWA via Colin Patrick McCabe)
+ BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+ HDFS-6387. HDFS CLI admin tool for creating & deleting an
+ encryption zone. (clamb)
+
+ HDFS-6386. HDFS Encryption Zones (clamb)
+
+ HDFS-6388. HDFS integration with KeyProvider. (clamb)
+
+ HDFS-6473. Protocol and API for Encryption Zones (clamb)
+
+ HDFS-6392. Wire crypto streams for encrypted files in
+ DFSClient. (clamb and yliu)
+
+ HDFS-6476. Print out the KeyProvider after finding KP successfully on
+ startup. (Juan Yu via wang)
+
+ HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
+ DFSClient. (Charles Lamb and wang)
+
+ HDFS-6389. Rename restrictions for encryption zones. (clamb)
+
+ HDFS-6605. Client server negotiation of cipher suite. (wang)
+
+ HDFS-6625. Remove the Delete Encryption Zone function (clamb)
+
+ HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
+
+ HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
+
+ HDFS-6635. Refactor encryption zone functionality into new
+ EncryptionZoneManager class. (wang)
+
+ HDFS-6474. Namenode needs to get the actual keys and iv from the
+ KeyProvider. (wang)
+
+ HDFS-6619. Clean up encryption-related tests. (wang)
+
+ HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
+
+ HDFS-6490. Fix the keyid format for generated keys in
+ FSNamesystem.createEncryptionZone (clamb)
+
+ HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
+ (wang)
+
+ HDFS-6718. Remove EncryptionZoneManager lock. (wang)
+
+ HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
+
+ HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
+ EZManager#createEncryptionZone. (clamb)
+
+ HDFS-6724. Decrypt EDEK before creating
+ CryptoInputStream/CryptoOutputStream. (wang)
+
+ HDFS-6509. Create a special /.reserved/raw directory for raw access to
+ encrypted data. (clamb via wang)
+
+ HDFS-6771. Require specification of an encryption key when creating
+ an encryption zone. (wang)
+
+ HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
+
+ HDFS-6692. Add more HDFS encryption tests. (wang)
+
+ HDFS-6780. Batch the encryption zones listing API. (wang)
+
+ HDFS-6394. HDFS encryption documentation. (wang)
+
+ HDFS-6834. Improve the configuration guidance in DFSClient when there
+ are no Codec classes found in configs. (umamahesh)
+
+ HDFS-6546. Add non-superuser capability to get the encryption zone
+ for a specific path. (clamb)
+
+ HDFS-6733. Creating encryption zone results in NPE when
+ KeyProvider is null. (clamb)
+
+ HDFS-6785. Should not be able to create encryption zone using path
+ to a non-directory file. (clamb)
+
+ HDFS-6807. Fix TestReservedRawPaths. (clamb)
+
+ HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
+ as boolean. (umamahesh)
+
+ HDFS-6817. Fix findbugs and other warnings. (yliu)
+
+ HDFS-6839. Fix TestCLI to expect new output. (clamb)
+ --
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ca93d1f/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index de0767d..c0038f6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -154,16 +154,6 @@ Trunk (Unreleased)
MAPREDUCE-5867. Fix NPE in KillAMPreemptionPolicy related to
ProportionalCapacityPreemptionPolicy (Sunil G via devaraj)
- BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
- MAPREDUCE-5890. Support for encrypting Intermediate
- data and spills in local filesystem. (asuresh via tucu)
-
- MAPREDUCE-6007. Add support to distcp to preserve raw.* namespace
- extended attributes. (clamb)
-
- MAPREDUCE-6041. Fix TestOptionsParser. (clamb)
-
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -261,6 +251,17 @@ Release 2.6.0 - UNRELEASED
MAPREDUCE-5885. build/test/test.mapred.spill causes release audit warnings
(Chen He via jlowe)
+ BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+ MAPREDUCE-5890. Support for encrypting Intermediate
+ data and spills in local filesystem. (asuresh via tucu)
+
+ MAPREDUCE-6007. Add support to distcp to preserve raw.* namespace
+ extended attributes. (clamb)
+
+ MAPREDUCE-6041. Fix TestOptionsParser. (clamb)
+ --
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -273,7 +274,7 @@ Release 2.5.1 - UNRELEASED
BUG FIXES
- MAPREDUCE-6033. Updated access check for displaying job information
+ MAPREDUCE-6033. Updated access check for displaying job information
(Yu Gao via Eric Yang)
Release 2.5.0 - 2014-08-11
[09/12] git commit: Fix typos in log messages. Contributed by Ray Chiang
Posted by ar...@apache.org.
Fix typos in log messages. Contributed by Ray Chiang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fef8554b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fef8554b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fef8554b
Branch: refs/heads/HDFS-6581
Commit: fef8554be80c01519870ad2969f6c9f3df4d6a7f
Parents: 2ca93d1
Author: Chris Douglas <cd...@apache.org>
Authored: Thu Aug 28 16:29:35 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Thu Aug 28 19:05:57 2014 -0700
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 8 +++++---
.../mapreduce/v2/app/commit/CommitterEventHandler.java | 2 +-
.../apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java | 2 +-
.../hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java | 8 ++++----
.../src/main/java/org/apache/hadoop/mapred/BackupStore.java | 2 +-
.../src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java | 4 ++--
6 files changed, 14 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fef8554b/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index c0038f6..67f8851 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -252,16 +252,18 @@ Release 2.6.0 - UNRELEASED
(Chen He via jlowe)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
+
MAPREDUCE-5890. Support for encrypting Intermediate
data and spills in local filesystem. (asuresh via tucu)
-
+
MAPREDUCE-6007. Add support to distcp to preserve raw.* namespace
extended attributes. (clamb)
-
+
MAPREDUCE-6041. Fix TestOptionsParser. (clamb)
--
+ MAPREDUCE-6051. Fix typos in log messages. (Ray Chiang via cdouglas)
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fef8554b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java
index 8c3be86..d56c1e5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java
@@ -202,7 +202,7 @@ public class CommitterEventHandler extends AbstractService
private synchronized void cancelJobCommit() {
Thread threadCommitting = jobCommitThread;
if (threadCommitting != null && threadCommitting.isAlive()) {
- LOG.info("Canceling commit");
+ LOG.info("Cancelling commit");
threadCommitting.interrupt();
// wait up to configured timeout for commit thread to finish
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fef8554b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index 4b32c04..6e9f313 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -335,7 +335,7 @@ public abstract class RMCommunicator extends AbstractService
public void setSignalled(boolean isSignalled) {
this.isSignalled = isSignalled;
- LOG.info("RMCommunicator notified that iSignalled is: "
+ LOG.info("RMCommunicator notified that isSignalled is: "
+ isSignalled);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fef8554b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
index 34dcb12..392a51a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
@@ -134,16 +134,16 @@ public class DefaultSpeculator extends AbstractService implements
estimator.contextualize(conf, context);
} catch (InstantiationException ex) {
- LOG.error("Can't make a speculation runtime extimator", ex);
+ LOG.error("Can't make a speculation runtime estimator", ex);
throw new YarnRuntimeException(ex);
} catch (IllegalAccessException ex) {
- LOG.error("Can't make a speculation runtime extimator", ex);
+ LOG.error("Can't make a speculation runtime estimator", ex);
throw new YarnRuntimeException(ex);
} catch (InvocationTargetException ex) {
- LOG.error("Can't make a speculation runtime extimator", ex);
+ LOG.error("Can't make a speculation runtime estimator", ex);
throw new YarnRuntimeException(ex);
} catch (NoSuchMethodException ex) {
- LOG.error("Can't make a speculation runtime extimator", ex);
+ LOG.error("Can't make a speculation runtime estimator", ex);
throw new YarnRuntimeException(ex);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fef8554b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
index be7fe18..e79ec66 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
@@ -607,7 +607,7 @@ public class BackupStore<K,V> {
int reserve(int requestedSize, int minSize) {
if (availableSize < minSize) {
- LOG.debug("No Space available. Available: " + availableSize +
+ LOG.debug("No space available. Available: " + availableSize +
" MinSize: " + minSize);
return 0;
} else {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fef8554b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
index 347dd06..8b3f4c8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
@@ -1076,7 +1076,7 @@ public class JHLogAnalyzer {
long execTime,
Path resFileName
) throws IOException {
- LOG.info("Analizing results ...");
+ LOG.info("Analyzing results ...");
DataOutputStream out = null;
BufferedWriter writer = null;
try {
@@ -1118,7 +1118,7 @@ public class JHLogAnalyzer {
if(writer != null) writer.close();
if(out != null) out.close();
}
- LOG.info("Analizing results ... done.");
+ LOG.info("Analyzing results ... done.");
}
private static void cleanup(Configuration conf) throws IOException {
[11/12] git commit: Fixing CHANGES.txt, moving HADOOP-8815 to 2.6.0 release
Posted by ar...@apache.org.
Fixing CHANGES.txt, moving HADOOP-8815 to 2.6.0 release
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc6ce2cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc6ce2cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc6ce2cb
Branch: refs/heads/HDFS-6581
Commit: bc6ce2cb34a638851d3530ca31979db30a8a50bd
Parents: 6bf16d1
Author: Alejandro Abdelnur <tu...@cloudera.com>
Authored: Wed Aug 27 09:03:11 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Thu Aug 28 19:05:57 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6ce2cb/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 641635b..2d794cf 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -232,9 +232,6 @@ Trunk (Unreleased)
HADOOP-8813. Add InterfaceAudience and InterfaceStability annotations
to RPC Server and Client classes. (Brandon Li via suresh)
- HADOOP-8815. RandomDatum needs to override hashCode().
- (Brandon Li via suresh)
-
HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
required context item is not configured
(Brahma Reddy Battula via harsh)
@@ -704,6 +701,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-10989. Work around buggy getgrouplist() implementations on Linux that
return 0 on failure. (cnauroth)
+ HADOOP-8815. RandomDatum needs to override hashCode().
+ (Brandon Li via suresh)
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
[02/12] git commit: Fixing CHANGES.txt, moving HADOOP-8815 to 2.6.0 release
Posted by ar...@apache.org.
Fixing CHANGES.txt, moving HADOOP-8815 to 2.6.0 release
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88c5e214
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88c5e214
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88c5e214
Branch: refs/heads/HDFS-6581
Commit: 88c5e2141c4e85c2cac9463aaf68091a0e93302e
Parents: d1ae479
Author: Alejandro Abdelnur <tu...@cloudera.com>
Authored: Wed Aug 27 09:03:11 2014 -0700
Committer: Alejandro Abdelnur <tu...@apache.org>
Committed: Thu Aug 28 15:07:57 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/88c5e214/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 641635b..2d794cf 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -232,9 +232,6 @@ Trunk (Unreleased)
HADOOP-8813. Add InterfaceAudience and InterfaceStability annotations
to RPC Server and Client classes. (Brandon Li via suresh)
- HADOOP-8815. RandomDatum needs to override hashCode().
- (Brandon Li via suresh)
-
HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
required context item is not configured
(Brahma Reddy Battula via harsh)
@@ -704,6 +701,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-10989. Work around buggy getgrouplist() implementations on Linux that
return 0 on failure. (cnauroth)
+ HADOOP-8815. RandomDatum needs to override hashCode().
+ (Brandon Li via suresh)
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
[05/12] git commit: HDFS-6865. Byte array native checksumming on client side. Contributed by James Thomas.
Posted by ar...@apache.org.
HDFS-6865. Byte array native checksumming on client side. Contributed by James Thomas.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab638e77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab638e77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab638e77
Branch: refs/heads/HDFS-6581
Commit: ab638e77b811d9592470f7d342cd11a66efbbf0d
Parents: 48aa3b7
Author: Todd Lipcon <to...@apache.org>
Authored: Thu Aug 28 16:44:09 2014 -0700
Committer: Todd Lipcon <to...@apache.org>
Committed: Thu Aug 28 16:44:09 2014 -0700
----------------------------------------------------------------------
.../apache/hadoop/fs/ChecksumFileSystem.java | 8 +-
.../java/org/apache/hadoop/fs/ChecksumFs.java | 8 +-
.../org/apache/hadoop/fs/FSOutputSummer.java | 107 ++++++++++++-------
.../org/apache/hadoop/util/DataChecksum.java | 2 +
.../org/apache/hadoop/util/NativeCrc32.java | 2 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 38 ++-----
.../org/apache/hadoop/hdfs/TestFileAppend.java | 4 +-
.../security/token/block/TestBlockToken.java | 2 +
.../namenode/TestBlockUnderConstruction.java | 3 +
.../namenode/TestDecommissioningStatus.java | 3 +
11 files changed, 108 insertions(+), 72 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 511ca7f..c8d1b69 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -381,7 +381,8 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
long blockSize,
Progressable progress)
throws IOException {
- super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
+ super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
+ fs.getBytesPerSum()));
int bytesPerSum = fs.getBytesPerSum();
this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize,
replication, blockSize, progress);
@@ -405,10 +406,11 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
}
@Override
- protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
+ protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
+ int ckoff, int cklen)
throws IOException {
datas.write(b, offset, len);
- sums.write(checksum);
+ sums.write(checksum, ckoff, cklen);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 4be3b29..ab5cd13 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -337,7 +337,8 @@ public abstract class ChecksumFs extends FilterFs {
final short replication, final long blockSize,
final Progressable progress, final ChecksumOpt checksumOpt,
final boolean createParent) throws IOException {
- super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
+ super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
+ fs.getBytesPerSum()));
// checksumOpt is passed down to the raw fs. Unless it implements
// checksum impelemts internally, checksumOpt will be ignored.
@@ -370,10 +371,11 @@ public abstract class ChecksumFs extends FilterFs {
}
@Override
- protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
+ protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
+ int ckoff, int cklen)
throws IOException {
datas.write(b, offset, len);
- sums.write(checksum);
+ sums.write(checksum, ckoff, cklen);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index 49c919a..19cbb6f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -18,13 +18,14 @@
package org.apache.hadoop.fs;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.DataChecksum;
+
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.Checksum;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
/**
* This is a generic output stream for generating checksums for
* data before it is written to the underlying stream
@@ -33,7 +34,7 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Unstable
abstract public class FSOutputSummer extends OutputStream {
// data checksum
- private Checksum sum;
+ private final DataChecksum sum;
// internal buffer for storing data before it is checksumed
private byte buf[];
// internal buffer for storing checksum
@@ -41,18 +42,24 @@ abstract public class FSOutputSummer extends OutputStream {
// The number of valid bytes in the buffer.
private int count;
- protected FSOutputSummer(Checksum sum, int maxChunkSize, int checksumSize) {
+ // We want this value to be a multiple of 3 because the native code checksums
+ // 3 chunks simultaneously. The chosen value of 9 strikes a balance between
+ // limiting the number of JNI calls and flushing to the underlying stream
+ // relatively frequently.
+ private static final int BUFFER_NUM_CHUNKS = 9;
+
+ protected FSOutputSummer(DataChecksum sum) {
this.sum = sum;
- this.buf = new byte[maxChunkSize];
- this.checksum = new byte[checksumSize];
+ this.buf = new byte[sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS];
+ this.checksum = new byte[sum.getChecksumSize() * BUFFER_NUM_CHUNKS];
this.count = 0;
}
/* write the data chunk in <code>b</code> staring at <code>offset</code> with
- * a length of <code>len</code>, and its checksum
+ * a length of <code>len > 0</code>, and its checksum
*/
- protected abstract void writeChunk(byte[] b, int offset, int len, byte[] checksum)
- throws IOException;
+ protected abstract void writeChunk(byte[] b, int bOffset, int bLen,
+ byte[] checksum, int checksumOffset, int checksumLen) throws IOException;
/**
* Check if the implementing OutputStream is closed and should no longer
@@ -66,7 +73,6 @@ abstract public class FSOutputSummer extends OutputStream {
/** Write one byte */
@Override
public synchronized void write(int b) throws IOException {
- sum.update(b);
buf[count++] = (byte)b;
if(count == buf.length) {
flushBuffer();
@@ -111,18 +117,17 @@ abstract public class FSOutputSummer extends OutputStream {
*/
private int write1(byte b[], int off, int len) throws IOException {
if(count==0 && len>=buf.length) {
- // local buffer is empty and user data has one chunk
- // checksum and output data
+ // local buffer is empty and user buffer size >= local buffer size, so
+ // simply checksum the user buffer and send it directly to the underlying
+ // stream
final int length = buf.length;
- sum.update(b, off, length);
- writeChecksumChunk(b, off, length, false);
+ writeChecksumChunks(b, off, length);
return length;
}
// copy user data to local buffer
int bytesToCopy = buf.length-count;
bytesToCopy = (len<bytesToCopy) ? len : bytesToCopy;
- sum.update(b, off, bytesToCopy);
System.arraycopy(b, off, buf, count, bytesToCopy);
count += bytesToCopy;
if (count == buf.length) {
@@ -136,22 +141,45 @@ abstract public class FSOutputSummer extends OutputStream {
* the underlying output stream.
*/
protected synchronized void flushBuffer() throws IOException {
- flushBuffer(false);
+ flushBuffer(false, true);
}
- /* Forces any buffered output bytes to be checksumed and written out to
- * the underlying output stream. If keep is true, then the state of
- * this object remains intact.
+ /* Forces buffered output bytes to be checksummed and written out to
+ * the underlying output stream. If there is a trailing partial chunk in the
+ * buffer,
+ * 1) flushPartial tells us whether to flush that chunk
+ * 2) if flushPartial is true, keep tells us whether to keep that chunk in the
+ * buffer (if flushPartial is false, it is always kept in the buffer)
+ *
+ * Returns the number of bytes that were flushed but are still left in the
+ * buffer (can only be non-zero if keep is true).
*/
- protected synchronized void flushBuffer(boolean keep) throws IOException {
- if (count != 0) {
- int chunkLen = count;
+ protected synchronized int flushBuffer(boolean keep,
+ boolean flushPartial) throws IOException {
+ int bufLen = count;
+ int partialLen = bufLen % sum.getBytesPerChecksum();
+ int lenToFlush = flushPartial ? bufLen : bufLen - partialLen;
+ if (lenToFlush != 0) {
+ writeChecksumChunks(buf, 0, lenToFlush);
+ if (!flushPartial || keep) {
+ count = partialLen;
+ System.arraycopy(buf, bufLen - count, buf, 0, count);
+ } else {
count = 0;
- writeChecksumChunk(buf, 0, chunkLen, keep);
- if (keep) {
- count = chunkLen;
}
}
+
+ // total bytes left minus unflushed bytes left
+ return count - (bufLen - lenToFlush);
+ }
+
+ /**
+ * Checksums all complete data chunks and flushes them to the underlying
+ * stream. If there is a trailing partial chunk, it is not flushed and is
+ * maintained in the buffer.
+ */
+ public void flush() throws IOException {
+ flushBuffer(false, false);
}
/**
@@ -161,18 +189,18 @@ abstract public class FSOutputSummer extends OutputStream {
return count;
}
- /** Generate checksum for the data chunk and output data chunk & checksum
- * to the underlying output stream. If keep is true then keep the
- * current checksum intact, do not reset it.
+ /** Generate checksums for the given data chunks and output chunks & checksums
+ * to the underlying output stream.
*/
- private void writeChecksumChunk(byte b[], int off, int len, boolean keep)
+ private void writeChecksumChunks(byte b[], int off, int len)
throws IOException {
- int tempChecksum = (int)sum.getValue();
- if (!keep) {
- sum.reset();
+ sum.calculateChunkedSums(b, off, len, checksum, 0);
+ for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
+ int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
+ int ckOffset = i / sum.getBytesPerChecksum() * sum.getChecksumSize();
+ writeChunk(b, off + i, chunkLen, checksum, ckOffset,
+ sum.getChecksumSize());
}
- int2byte(tempChecksum, checksum);
- writeChunk(b, off, len, checksum);
}
/**
@@ -196,9 +224,14 @@ abstract public class FSOutputSummer extends OutputStream {
/**
* Resets existing buffer with a new one of the specified size.
*/
- protected synchronized void resetChecksumChunk(int size) {
- sum.reset();
+ protected synchronized void setChecksumBufSize(int size) {
this.buf = new byte[size];
+ this.checksum = new byte[((size - 1) / sum.getBytesPerChecksum() + 1) *
+ sum.getChecksumSize()];
this.count = 0;
}
+
+ protected synchronized void resetChecksumBufSize() {
+ setChecksumBufSize(sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS);
+ }
}
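For readers tracing the new FSOutputSummer logic: writeChecksumChunks() computes one checksum per chunk and packs it into a flat array at offset i / bytesPerChecksum * checksumSize, which is what lets writeChunk() receive (checksum, ckoff, cklen) slices with no per-chunk allocation. A minimal standalone sketch of that layout, with plain java.util.zip.CRC32 standing in for Hadoop's DataChecksum (the 512-byte chunk size, 4-byte checksum width, and class name here are illustrative assumptions, not values from the patch):

    import java.util.zip.CRC32;

    public class ChunkedCrcSketch {
      static final int BYTES_PER_CHECKSUM = 512; // illustrative chunk size
      static final int CHECKSUM_SIZE = 4;        // CRC32 is 4 bytes

      // Checksums data[off..off+len) chunk by chunk into sums, mirroring
      // writeChecksumChunks(): one CRC per full or trailing partial chunk,
      // packed back to back.
      static void calculateChunkedSums(byte[] data, int off, int len, byte[] sums) {
        CRC32 crc = new CRC32();
        for (int i = 0; i < len; i += BYTES_PER_CHECKSUM) {
          int chunkLen = Math.min(BYTES_PER_CHECKSUM, len - i);
          crc.reset();
          crc.update(data, off + i, chunkLen);
          int ckOffset = i / BYTES_PER_CHECKSUM * CHECKSUM_SIZE;
          int v = (int) crc.getValue();
          sums[ckOffset]     = (byte) (v >>> 24);
          sums[ckOffset + 1] = (byte) (v >>> 16);
          sums[ckOffset + 2] = (byte) (v >>> 8);
          sums[ckOffset + 3] = (byte) v;
        }
      }

      public static void main(String[] args) {
        // nine chunks, echoing BUFFER_NUM_CHUNKS in the patch
        byte[] data = new byte[9 * BYTES_PER_CHECKSUM];
        byte[] sums = new byte[9 * CHECKSUM_SIZE];
        calculateChunkedSums(data, 0, data.length, sums);
        System.out.println("checksummed " + data.length + " bytes into "
            + sums.length + " checksum bytes");
      }
    }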
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
index 1636af6..9f0ee35 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
@@ -339,6 +339,7 @@ public class DataChecksum implements Checksum {
byte[] data, int dataOff, int dataLen,
byte[] checksums, int checksumsOff, String fileName,
long basePos) throws ChecksumException {
+ if (type.size == 0) return;
if (NativeCrc32.isAvailable()) {
NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, type.id,
@@ -421,6 +422,7 @@ public class DataChecksum implements Checksum {
public void calculateChunkedSums(
byte[] data, int dataOffset, int dataLength,
byte[] sums, int sumsOffset) {
+ if (type.size == 0) return;
if (NativeCrc32.isAvailable()) {
NativeCrc32.calculateChunkedSumsByteArray(bytesPerChecksum, type.id,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
index 2f21ae1..0807d2c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
@@ -42,7 +42,7 @@ class NativeCrc32 {
* modified.
*
* @param bytesPerSum the chunk size (eg 512 bytes)
- * @param checksumType the DataChecksum type constant
+ * @param checksumType the DataChecksum type constant (NULL is not supported)
* @param sums the DirectByteBuffer pointing at the beginning of the
* stored checksums
* @param data the DirectByteBuffer pointing at the beginning of the
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2c56407..8268b6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -434,6 +434,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6773. MiniDFSCluster should skip edit log fsync by default (Stephen
Chu via Colin Patrick McCabe)
+ HDFS-6865. Byte array native checksumming on client side
+ (James Thomas via todd)
+
BUG FIXES
HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 14977a2..0b5ecda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -398,7 +398,7 @@ public class DFSOutputStream extends FSOutputSummer
// one chunk that fills up the partial chunk.
//
computePacketChunkSize(0, freeInCksum);
- resetChecksumChunk(freeInCksum);
+ setChecksumBufSize(freeInCksum);
appendChunk = true;
} else {
// if the remaining space in the block is smaller than
@@ -1563,7 +1563,7 @@ public class DFSOutputStream extends FSOutputSummer
private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
HdfsFileStatus stat, DataChecksum checksum) throws IOException {
- super(checksum, checksum.getBytesPerChecksum(), checksum.getChecksumSize());
+ super(checksum);
this.dfsClient = dfsClient;
this.src = src;
this.fileId = stat.getFileId();
@@ -1717,22 +1717,21 @@ public class DFSOutputStream extends FSOutputSummer
// @see FSOutputSummer#writeChunk()
@Override
- protected synchronized void writeChunk(byte[] b, int offset, int len, byte[] checksum)
- throws IOException {
+ protected synchronized void writeChunk(byte[] b, int offset, int len,
+ byte[] checksum, int ckoff, int cklen) throws IOException {
dfsClient.checkOpen();
checkClosed();
- int cklen = checksum.length;
int bytesPerChecksum = this.checksum.getBytesPerChecksum();
if (len > bytesPerChecksum) {
throw new IOException("writeChunk() buffer size is " + len +
" is larger than supported bytesPerChecksum " +
bytesPerChecksum);
}
- if (checksum.length != this.checksum.getChecksumSize()) {
+ if (cklen != this.checksum.getChecksumSize()) {
throw new IOException("writeChunk() checksum size is supposed to be " +
this.checksum.getChecksumSize() +
- " but found to be " + checksum.length);
+ " but found to be " + cklen);
}
if (currentPacket == null) {
@@ -1748,7 +1747,7 @@ public class DFSOutputStream extends FSOutputSummer
}
}
- currentPacket.writeChecksum(checksum, 0, cklen);
+ currentPacket.writeChecksum(checksum, ckoff, cklen);
currentPacket.writeData(b, offset, len);
currentPacket.numChunks++;
bytesCurBlock += len;
@@ -1772,7 +1771,7 @@ public class DFSOutputStream extends FSOutputSummer
// crc chunks from now on.
if (appendChunk && bytesCurBlock%bytesPerChecksum == 0) {
appendChunk = false;
- resetChecksumChunk(bytesPerChecksum);
+ resetChecksumBufSize();
}
if (!appendChunk) {
@@ -1853,20 +1852,13 @@ public class DFSOutputStream extends FSOutputSummer
long lastBlockLength = -1L;
boolean updateLength = syncFlags.contains(SyncFlag.UPDATE_LENGTH);
synchronized (this) {
- /* Record current blockOffset. This might be changed inside
- * flushBuffer() where a partial checksum chunk might be flushed.
- * After the flush, reset the bytesCurBlock back to its previous value,
- * any partial checksum chunk will be sent now and in next packet.
- */
- long saveOffset = bytesCurBlock;
- Packet oldCurrentPacket = currentPacket;
// flush checksum buffer, but keep checksum buffer intact
- flushBuffer(true);
+ int numKept = flushBuffer(true, true);
// bytesCurBlock potentially incremented if there was buffered data
if (DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug(
- "DFSClient flush() : saveOffset " + saveOffset +
+ "DFSClient flush() :" +
" bytesCurBlock " + bytesCurBlock +
" lastFlushOffset " + lastFlushOffset);
}
@@ -1883,14 +1875,6 @@ public class DFSOutputStream extends FSOutputSummer
bytesCurBlock, currentSeqno++, this.checksum.getChecksumSize());
}
} else {
- // We already flushed up to this offset.
- // This means that we haven't written anything since the last flush
- // (or the beginning of the file). Hence, we should not have any
- // packet queued prior to this call, since the last flush set
- // currentPacket = null.
- assert oldCurrentPacket == null :
- "Empty flush should not occur with a currentPacket";
-
if (isSync && bytesCurBlock > 0) {
// Nothing to send right now,
// and the block was partially written,
@@ -1910,7 +1894,7 @@ public class DFSOutputStream extends FSOutputSummer
// Restore state of stream. Record the last flush offset
// of the last full chunk that was flushed.
//
- bytesCurBlock = saveOffset;
+ bytesCurBlock -= numKept;
toWaitFor = lastQueuedSeqno;
} // end synchronized
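Worked through with illustrative numbers (assume bytesPerChecksum = 512 and 1000 buffered bytes): flushBuffer(true, true) checksums and sends all 1000 bytes, keeps the trailing 1000 % 512 = 488 partial-chunk bytes in the buffer, and returns numKept = 488. Subtracting numKept from bytesCurBlock rolls the block offset back over those kept bytes so they are counted once more when the partial chunk is re-sent, which is what replaces the old saveOffset save-and-restore.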
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index d840077..34c701d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -261,7 +261,9 @@ public class TestFileAppend{
start += 29;
}
stm.write(fileContents, start, AppendTestUtil.FILE_SIZE -start);
-
+ // need to make sure we completely write out all full blocks before
+ // the checkFile() call (see FSOutputSummer#flush)
+ stm.flush();
// verify that full blocks are sane
checkFile(fs, file1, 1);
stm.close();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 2429345..1fe7ba8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -394,6 +394,8 @@ public class TestBlockToken {
Path filePath = new Path(fileName);
FSDataOutputStream out = fs.create(filePath, (short) 1);
out.write(new byte[1000]);
+ // ensure that the first block is written out (see FSOutputSummer#flush)
+ out.flush();
LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(
fileName, 0, 1000);
while (locatedBlocks.getLastLocatedBlock() == null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index 5448e7a..872ff9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -70,6 +70,9 @@ public class TestBlockUnderConstruction {
long blocksBefore = stm.getPos() / BLOCK_SIZE;
TestFileCreation.writeFile(stm, BLOCK_SIZE);
+ // need to make sure the full block is completely flushed to the DataNodes
+ // (see FSOutputSummer#flush)
+ stm.flush();
int blocksAfter = 0;
// wait until the block is allocated by DataStreamer
BlockLocation[] locatedBlocks;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab638e77/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index d01df75..2ee251b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -141,6 +141,9 @@ public class TestDecommissioningStatus {
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
+ // need to make sure that we actually write out both file blocks
+ // (see FSOutputSummer#flush)
+ stm.flush();
// Do not close stream, return it
// so that it is not garbage collected
return stm;
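The common thread in the test changes above: FSOutputSummer now buffers up to nine chunks, so write() no longer hands every chunk straight to the pipeline, and callers that inspect block state mid-stream must flush first. A usage sketch (fs and path are assumed to be an already initialized FileSystem and Path):

    FSDataOutputStream out = fs.create(path);
    out.write(buffer);  // may sit in the multi-chunk client buffer
    out.flush();        // checksums and ships all complete chunks
    // only now is it safe to query block locations or lengths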
[12/12] git commit: Merge remote-tracking branch 'apache-commit/trunk' into HDFS-6581
Posted by ar...@apache.org.
Merge remote-tracking branch 'apache-commit/trunk' into HDFS-6581
Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e32be87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e32be87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e32be87
Branch: refs/heads/HDFS-6581
Commit: 7e32be8768b5ad20d1745e9de17e3fd4fd9a7d99
Parents: fef8554 d8774cc
Author: arp <ar...@apache.org>
Authored: Thu Aug 28 19:06:46 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Thu Aug 28 19:06:46 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 5 +
.../hadoop-common/src/main/bin/hadoop | 5 +-
.../hadoop-common/src/main/bin/hadoop-config.sh | 5 +
.../src/main/bin/hadoop-functions.sh | 54 ++++++++--
.../hadoop-common/src/main/bin/rcc | 7 +-
.../hadoop/conf/ReconfigurationServlet.java | 2 +
.../apache/hadoop/fs/ChecksumFileSystem.java | 8 +-
.../java/org/apache/hadoop/fs/ChecksumFs.java | 8 +-
.../org/apache/hadoop/fs/FSOutputSummer.java | 107 ++++++++++++-------
.../org/apache/hadoop/util/DataChecksum.java | 2 +
.../org/apache/hadoop/util/NativeCrc32.java | 2 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hadoop-hdfs/src/main/bin/hdfs | 25 ++++-
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 38 ++-----
.../org/apache/hadoop/hdfs/TestFileAppend.java | 4 +-
.../security/token/block/TestBlockToken.java | 2 +
.../namenode/TestBlockUnderConstruction.java | 3 +
.../namenode/TestDecommissioningStatus.java | 3 +
hadoop-mapreduce-project/bin/mapred | 14 ++-
hadoop-yarn-project/hadoop-yarn/bin/yarn | 15 ++-
20 files changed, 218 insertions(+), 94 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e32be87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
[10/12] git commit: HADOOP-10880. Move HTTP delegation tokens out of URL querystring to a header. (tucu)
Posted by ar...@apache.org.
HADOOP-10880. Move HTTP delegation tokens out of URL querystring to a header. (tucu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bf16d11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bf16d11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bf16d11
Branch: refs/heads/HDFS-6581
Commit: 6bf16d115637c7761123e3b92186daa675c4769c
Parents: bbaa7dc
Author: Alejandro Abdelnur <tu...@apache.org>
Authored: Thu Aug 28 14:45:40 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Thu Aug 28 19:05:57 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../web/DelegationTokenAuthenticatedURL.java | 81 ++++++++++++++++----
.../DelegationTokenAuthenticationHandler.java | 14 +++-
.../web/DelegationTokenAuthenticator.java | 19 ++++-
...tionTokenAuthenticationHandlerWithMocks.java | 46 ++++++++++-
.../delegation/web/TestWebDelegationToken.java | 50 +++++++++++-
6 files changed, 187 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf16d11/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ecbaaab..641635b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -518,6 +518,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-10998. Fix bash tab completion code to work (Jim Hester via aw)
+ HADOOP-10880. Move HTTP delegation tokens out of URL querystring to
+ a header. (tucu)
+
OPTIMIZATIONS
HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf16d11/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
index d955ada..5aeb177 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
@@ -125,6 +125,8 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
}
}
+ private boolean useQueryStringforDelegationToken = false;
+
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
* <p/>
@@ -171,6 +173,34 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
}
/**
+ * Sets if delegation token should be transmitted in the URL query string.
+ * By default it is transmitted using the
+ * {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
+ * <p/>
+ * This method is provided to enable WebHDFS backwards compatibility.
+ *
+ * @param useQueryString <code>TRUE</code> if the token is transmitted in the
+ * URL query string, <code>FALSE</code> if the delegation token is transmitted
+ * using the {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP
+ * header.
+ */
+ @Deprecated
+ protected void setUseQueryStringForDelegationToken(boolean useQueryString) {
+ useQueryStringforDelegationToken = useQueryString;
+ }
+
+ /**
+ * Returns if delegation token is transmitted as a HTTP header.
+ *
+ * @return <code>TRUE</code> if the token is transmitted in the URL query
+ * string, <code>FALSE</code> if the delegation token is transmitted using the
+ * {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
+ */
+ public boolean useQueryStringForDelegationToken() {
+ return useQueryStringforDelegationToken;
+ }
+
+ /**
* Returns an authenticated {@link HttpURLConnection}, it uses a Delegation
* Token only if the given auth token is an instance of {@link Token} and
* it contains a Delegation Token, otherwise use the configured
@@ -235,23 +265,41 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
+ @SuppressWarnings("unchecked")
public HttpURLConnection openConnection(URL url, Token token, String doAs)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Map<String, String> extraParams = new HashMap<String, String>();
-
- // delegation token
- Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
- if (!creds.getAllTokens().isEmpty()) {
- InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
- url.getPort());
- Text service = SecurityUtil.buildTokenService(serviceAddr);
- org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dt =
- creds.getToken(service);
- if (dt != null) {
- extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
- dt.encodeToUrlString());
+ org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dToken
+ = null;
+ // if we have valid auth token, it takes precedence over a delegation token
+ // and we don't even look for one.
+ if (!token.isSet()) {
+ // delegation token
+ Credentials creds = UserGroupInformation.getCurrentUser().
+ getCredentials();
+ if (!creds.getAllTokens().isEmpty()) {
+ InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
+ url.getPort());
+ Text service = SecurityUtil.buildTokenService(serviceAddr);
+ dToken = creds.getToken(service);
+ if (dToken != null) {
+ if (useQueryStringForDelegationToken()) {
+ // delegation token will go in the query string, injecting it
+ extraParams.put(
+ KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
+ dToken.encodeToUrlString());
+ } else {
+ // delegation token will go as request header, setting it in the
+ // auth-token to ensure no authentication handshake is triggered
+ // (if we have a delegation token, we are authenticated)
+ // the delegation token header is injected in the connection request
+ // at the end of this method.
+ token.delegationToken = (org.apache.hadoop.security.token.Token
+ <AbstractDelegationTokenIdentifier>) dToken;
+ }
+ }
}
}
@@ -261,7 +309,14 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
}
url = augmentURL(url, extraParams);
- return super.openConnection(url, token);
+ HttpURLConnection conn = super.openConnection(url, token);
+ if (!token.isSet() && !useQueryStringForDelegationToken() && dToken != null) {
+ // injecting the delegation token header in the connection request
+ conn.setRequestProperty(
+ DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
+ dToken.encodeToUrlString());
+ }
+ return conn;
}
/**
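At the wire level, the client change above moves the encoded token from ?delegation=&lt;token&gt; in the query string to a request header, which also keeps it out of access logs and HTTP proxies that record request lines. A bare-bones sketch with a plain HttpURLConnection (the host/port URL is a placeholder and the token argument is assumed to be Token#encodeToUrlString() output; real callers should go through DelegationTokenAuthenticatedURL as the patch does):

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class HeaderTokenSketch {
      public static void main(String[] args) throws Exception {
        String encodedToken = args[0]; // assumed: encodeToUrlString() output
        URL url = new URL("http://host:14000/webhdfs/v1/?op=GETHOMEDIRECTORY");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        // header name from DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER
        conn.setRequestProperty("X-Hadoop-Delegation-Token", encodedToken);
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }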
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf16d11/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index 670ec55..e4d9424 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -331,8 +331,7 @@ public abstract class DelegationTokenAuthenticationHandler
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
- String delegationParam = ServletUtils.getParameter(request,
- KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
+ String delegationParam = getDelegationToken(request);
if (delegationParam != null) {
try {
Token<DelegationTokenIdentifier> dt =
@@ -356,4 +355,15 @@ public abstract class DelegationTokenAuthenticationHandler
return token;
}
+ private String getDelegationToken(HttpServletRequest request)
+ throws IOException {
+ String dToken = request.getHeader(
+ DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER);
+ if (dToken == null) {
+ dToken = ServletUtils.getParameter(request,
+ KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
+ }
+ return dToken;
+ }
+
}
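Server side, the precedence above is what preserves compatibility: the new header is consulted first, and the legacy delegation query parameter only as a fallback. Condensed to its core (request is a javax.servlet.http.HttpServletRequest; getParameter() stands in here for the patch's ServletUtils.getParameter()):

    // Sketch of the lookup order in getDelegationToken(): the header wins,
    // the query string is only honored for pre-2.6 clients.
    static String delegationToken(javax.servlet.http.HttpServletRequest request) {
      String dt = request.getHeader("X-Hadoop-Delegation-Token");
      if (dt == null) {
        dt = request.getParameter("delegation");
      }
      return dt;
    }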
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf16d11/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index ec192da..18df56c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -56,6 +56,9 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
public static final String OP_PARAM = "op";
+ public static final String DELEGATION_TOKEN_HEADER =
+ "X-Hadoop-Delegation-Token";
+
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
@@ -101,15 +104,23 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
authenticator.setConnectionConfigurator(configurator);
}
- private boolean hasDelegationToken(URL url) {
- String queryStr = url.getQuery();
- return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
+ private boolean hasDelegationToken(URL url, AuthenticatedURL.Token token) {
+ boolean hasDt = false;
+ if (token instanceof DelegationTokenAuthenticatedURL.Token) {
+ hasDt = ((DelegationTokenAuthenticatedURL.Token) token).
+ getDelegationToken() != null;
+ }
+ if (!hasDt) {
+ String queryStr = url.getQuery();
+ hasDt = (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
+ }
+ return hasDt;
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
- if (!hasDelegationToken(url)) {
+ if (!hasDelegationToken(url, token)) {
authenticator.authenticate(url, token);
}
}
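Clients that must interoperate with servers predating this change can opt back into the query-string transport. A minimal fragment using the setter exercised by the tests below:

    // Assumes the imports from the client sketch above.
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
    // Legacy mode: subsequent openConnection() calls append ?delegation=<token>
    // to the URL instead of setting the X-Hadoop-Delegation-Token header.
    aUrl.setUseQueryStringForDelegationToken(true);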
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf16d11/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
index c9d255d..7880fa1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
@@ -284,11 +284,13 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
@Test
public void testAuthenticate() throws Exception {
- testValidDelegationToken();
- testInvalidDelegationToken();
+ testValidDelegationTokenQueryString();
+ testValidDelegationTokenHeader();
+ testInvalidDelegationTokenQueryString();
+ testInvalidDelegationTokenHeader();
}
- private void testValidDelegationToken() throws Exception {
+ private void testValidDelegationTokenQueryString() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
@@ -307,7 +309,26 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
Assert.assertTrue(token.isExpired());
}
- private void testInvalidDelegationToken() throws Exception {
+ private void testValidDelegationTokenHeader() throws Exception {
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Token<DelegationTokenIdentifier> dToken =
+ handler.getTokenManager().createToken(
+ UserGroupInformation.getCurrentUser(), "user");
+ Mockito.when(request.getHeader(Mockito.eq(
+ DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
+ dToken.encodeToUrlString());
+
+ AuthenticationToken token = handler.authenticate(request, response);
+ Assert.assertEquals(UserGroupInformation.getCurrentUser().
+ getShortUserName(), token.getUserName());
+ Assert.assertEquals(0, token.getExpires());
+ Assert.assertEquals(handler.getType(),
+ token.getType());
+ Assert.assertTrue(token.isExpired());
+ }
+
+ private void testInvalidDelegationTokenQueryString() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
@@ -323,4 +344,21 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
}
}
+ private void testInvalidDelegationTokenHeader() throws Exception {
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(request.getHeader(Mockito.eq(
+ DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
+ "invalid");
+
+ try {
+ handler.authenticate(request, response);
+ Assert.fail();
+ } catch (AuthenticationException ex) {
+ //NOP
+ } catch (Exception ex) {
+ Assert.fail();
+ }
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf16d11/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
index 1b452f1..118abff 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
@@ -149,6 +149,15 @@ public class TestWebDelegationToken {
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write("ping");
+ if (req.getHeader(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER)
+ != null) {
+ resp.setHeader("UsingHeader", "true");
+ }
+ if (req.getQueryString() != null &&
+ req.getQueryString().contains(
+ DelegationTokenAuthenticator.DELEGATION_PARAM + "=")) {
+ resp.setHeader("UsingQueryString", "true");
+ }
}
@Override
@@ -314,7 +323,20 @@ public class TestWebDelegationToken {
}
@Test
- public void testDelegationTokenAuthenticatorCalls() throws Exception {
+ public void testDelegationTokenAuthenticatorCallsWithHeader()
+ throws Exception {
+ testDelegationTokenAuthenticatorCalls(false);
+ }
+
+ @Test
+ public void testDelegationTokenAuthenticatorCallsWithQueryString()
+ throws Exception {
+ testDelegationTokenAuthenticatorCalls(true);
+ }
+
+
+ private void testDelegationTokenAuthenticatorCalls(final boolean useQS)
+ throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
@@ -324,14 +346,15 @@ public class TestWebDelegationToken {
try {
jetty.start();
- URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
+ final URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
- DelegationTokenAuthenticatedURL aUrl =
+ final DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
+ aUrl.setUseQueryStringForDelegationToken(useQS);
try {
aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);
@@ -379,6 +402,27 @@ public class TestWebDelegationToken {
Assert.assertTrue(ex.getMessage().contains("401"));
}
+ aUrl.getDelegationToken(authURL, token, "foo");
+
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ ugi.addToken(token.getDelegationToken());
+ ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ HttpURLConnection conn = aUrl.openConnection(nonAuthURL,
+ new DelegationTokenAuthenticatedURL.Token());
+ Assert.assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+ if (useQS) {
+ Assert.assertNull(conn.getHeaderField("UsingHeader"));
+ Assert.assertNotNull(conn.getHeaderField("UsingQueryString"));
+ } else {
+ Assert.assertNotNull(conn.getHeaderField("UsingHeader"));
+ Assert.assertNull(conn.getHeaderField("UsingQueryString"));
+ }
+ return null;
+ }
+ });
+
+
} finally {
jetty.stop();
}
[03/12] git commit: Fix up CHANGES.txt for HDFS-6134,
HADOOP-10150 and related JIRAs following merge to branch-2
Posted by ar...@apache.org.
Fix up CHANGES.txt for HDFS-6134, HADOOP-10150 and related JIRAs following merge to branch-2
Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop-mapreduce-project/CHANGES.txt
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9a7404c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9a7404c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9a7404c
Branch: refs/heads/HDFS-6581
Commit: d9a7404c389ea1adffe9c13f7178b54678577b56
Parents: 88c5e21
Author: Alejandro Abdelnur <tu...@cloudera.com>
Authored: Tue Aug 26 12:00:37 2014 -0700
Committer: Alejandro Abdelnur <tu...@apache.org>
Committed: Thu Aug 28 15:10:59 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 101 +++++-----
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 185 +++++++++----------
hadoop-mapreduce-project/CHANGES.txt | 23 +--
3 files changed, 155 insertions(+), 154 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9a7404c/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2d794cf..9fb0cd3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -328,56 +328,6 @@ Trunk (Unreleased)
HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
- BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
- HADOOP-10734. Implement high-performance secure random number sources.
- (Yi Liu via Colin Patrick McCabe)
-
- HADOOP-10603. Crypto input and output streams implementing Hadoop stream
- interfaces. (Yi Liu and Charles Lamb)
-
- HADOOP-10628. Javadoc and few code style improvement for Crypto
- input and output streams. (Yi Liu via clamb)
-
- HADOOP-10632. Minor improvements to Crypto input and output streams.
- (Yi Liu)
-
- HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
-
- HADOOP-10653. Add a new constructor for CryptoInputStream that
- receives current position of wrapped stream. (Yi Liu)
-
- HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
- stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
-
- HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[].
- (wang via yliu)
-
- HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL.
- (Yi Liu via cmccabe)
-
- HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
- format. (Yi Liu)
-
- HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
- JCE if non native support. (Yi Liu)
-
- HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
- openssl versions (cmccabe)
-
- HADOOP-10853. Refactor get instance of CryptoCodec and support create via
- algorithm/mode/padding. (Yi Liu)
-
- HADOOP-10919. Copy command should preserve raw.* namespace
- extended attributes. (clamb)
-
- HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
-
- HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
-
- HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not
- loaded. (umamahesh)
-
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -704,6 +654,57 @@ Release 2.6.0 - UNRELEASED
HADOOP-8815. RandomDatum needs to override hashCode().
(Brandon Li via suresh)
+ BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+ HADOOP-10734. Implement high-performance secure random number sources.
+ (Yi Liu via Colin Patrick McCabe)
+
+ HADOOP-10603. Crypto input and output streams implementing Hadoop stream
+ interfaces. (Yi Liu and Charles Lamb)
+
+ HADOOP-10628. Javadoc and few code style improvement for Crypto
+ input and output streams. (Yi Liu via clamb)
+
+ HADOOP-10632. Minor improvements to Crypto input and output streams.
+ (Yi Liu)
+
+ HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
+
+ HADOOP-10653. Add a new constructor for CryptoInputStream that
+ receives current position of wrapped stream. (Yi Liu)
+
+ HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
+ stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
+
+ HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[].
+ (wang via yliu)
+
+ HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL.
+ (Yi Liu via cmccabe)
+
+ HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
+ format. (Yi Liu)
+
+ HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
+ JCE if non native support. (Yi Liu)
+
+ HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
+ openssl versions (cmccabe)
+
+ HADOOP-10853. Refactor get instance of CryptoCodec and support create via
+ algorithm/mode/padding. (Yi Liu)
+
+ HADOOP-10919. Copy command should preserve raw.* namespace
+ extended attributes. (clamb)
+
+ HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
+
+ HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
+
+ HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not
+ loaded. (umamahesh)
+ --
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9a7404c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1bb6025..2c56407 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -255,99 +255,6 @@ Trunk (Unreleased)
HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
(Vinayakumar B via wheat9)
- BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
- HDFS-6387. HDFS CLI admin tool for creating & deleting an
- encryption zone. (clamb)
-
- HDFS-6386. HDFS Encryption Zones (clamb)
-
- HDFS-6388. HDFS integration with KeyProvider. (clamb)
-
- HDFS-6473. Protocol and API for Encryption Zones (clamb)
-
- HDFS-6392. Wire crypto streams for encrypted files in
- DFSClient. (clamb and yliu)
-
- HDFS-6476. Print out the KeyProvider after finding KP successfully on
- startup. (Juan Yu via wang)
-
- HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
- DFSClient. (Charles Lamb and wang)
-
- HDFS-6389. Rename restrictions for encryption zones. (clamb)
-
- HDFS-6605. Client server negotiation of cipher suite. (wang)
-
- HDFS-6625. Remove the Delete Encryption Zone function (clamb)
-
- HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
-
- HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
-
- HDFS-6635. Refactor encryption zone functionality into new
- EncryptionZoneManager class. (wang)
-
- HDFS-6474. Namenode needs to get the actual keys and iv from the
- KeyProvider. (wang)
-
- HDFS-6619. Clean up encryption-related tests. (wang)
-
- HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
-
- HDFS-6490. Fix the keyid format for generated keys in
- FSNamesystem.createEncryptionZone (clamb)
-
- HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
- (wang)
-
- HDFS-6718. Remove EncryptionZoneManager lock. (wang)
-
- HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
-
- HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
- EZManager#createEncryptionZone. (clamb)
-
- HDFS-6724. Decrypt EDEK before creating
- CryptoInputStream/CryptoOutputStream. (wang)
-
- HDFS-6509. Create a special /.reserved/raw directory for raw access to
- encrypted data. (clamb via wang)
-
- HDFS-6771. Require specification of an encryption key when creating
- an encryption zone. (wang)
-
- HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
-
- HDFS-6692. Add more HDFS encryption tests. (wang)
-
- HDFS-6780. Batch the encryption zones listing API. (wang)
-
- HDFS-6394. HDFS encryption documentation. (wang)
-
- HDFS-6834. Improve the configuration guidance in DFSClient when there
- are no Codec classes found in configs. (umamahesh)
-
- HDFS-6546. Add non-superuser capability to get the encryption zone
- for a specific path. (clamb)
-
- HDFS-6733. Creating encryption zone results in NPE when
- KeyProvider is null. (clamb)
-
- HDFS-6785. Should not be able to create encryption zone using path
- to a non-directory file. (clamb)
-
- HDFS-6807. Fix TestReservedRawPaths. (clamb)
-
- HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
- as boolean. (umamahesh)
-
- HDFS-6817. Fix findbugs and other warnings. (yliu)
-
- HDFS-6839. Fix TestCLI to expect new output. (clamb)
-
- HDFS-6905. fs-encryption merge triggered release audit failures. (clamb via tucu)
-
HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail
intermittently with various symptoms - debugging patch. (Yongjun Zhang via
Arpit Agarwal)
@@ -661,6 +568,98 @@ Release 2.6.0 - UNRELEASED
HDFS-6902. FileWriter should be closed in finally block in
BlockReceiver#receiveBlock() (Tsuyoshi OZAWA via Colin Patrick McCabe)
+ BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+ HDFS-6387. HDFS CLI admin tool for creating & deleting an
+ encryption zone. (clamb)
+
+ HDFS-6386. HDFS Encryption Zones (clamb)
+
+ HDFS-6388. HDFS integration with KeyProvider. (clamb)
+
+ HDFS-6473. Protocol and API for Encryption Zones (clamb)
+
+ HDFS-6392. Wire crypto streams for encrypted files in
+ DFSClient. (clamb and yliu)
+
+ HDFS-6476. Print out the KeyProvider after finding KP successfully on
+ startup. (Juan Yu via wang)
+
+ HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
+ DFSClient. (Charles Lamb and wang)
+
+ HDFS-6389. Rename restrictions for encryption zones. (clamb)
+
+ HDFS-6605. Client server negotiation of cipher suite. (wang)
+
+ HDFS-6625. Remove the Delete Encryption Zone function (clamb)
+
+ HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
+
+ HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
+
+ HDFS-6635. Refactor encryption zone functionality into new
+ EncryptionZoneManager class. (wang)
+
+ HDFS-6474. Namenode needs to get the actual keys and iv from the
+ KeyProvider. (wang)
+
+ HDFS-6619. Clean up encryption-related tests. (wang)
+
+ HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
+
+ HDFS-6490. Fix the keyid format for generated keys in
+ FSNamesystem.createEncryptionZone (clamb)
+
+ HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
+ (wang)
+
+ HDFS-6718. Remove EncryptionZoneManager lock. (wang)
+
+ HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
+
+ HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
+ EZManager#createEncryptionZone. (clamb)
+
+ HDFS-6724. Decrypt EDEK before creating
+ CryptoInputStream/CryptoOutputStream. (wang)
+
+ HDFS-6509. Create a special /.reserved/raw directory for raw access to
+ encrypted data. (clamb via wang)
+
+ HDFS-6771. Require specification of an encryption key when creating
+ an encryption zone. (wang)
+
+ HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
+
+ HDFS-6692. Add more HDFS encryption tests. (wang)
+
+ HDFS-6780. Batch the encryption zones listing API. (wang)
+
+ HDFS-6394. HDFS encryption documentation. (wang)
+
+ HDFS-6834. Improve the configuration guidance in DFSClient when there
+ are no Codec classes found in configs. (umamahesh)
+
+ HDFS-6546. Add non-superuser capability to get the encryption zone
+ for a specific path. (clamb)
+
+ HDFS-6733. Creating encryption zone results in NPE when
+ KeyProvider is null. (clamb)
+
+ HDFS-6785. Should not be able to create encryption zone using path
+ to a non-directory file. (clamb)
+
+ HDFS-6807. Fix TestReservedRawPaths. (clamb)
+
+ HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
+ as boolean. (umamahesh)
+
+ HDFS-6817. Fix findbugs and other warnings. (yliu)
+
+ HDFS-6839. Fix TestCLI to expect new output. (clamb)
+ --
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9a7404c/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index de0767d..c0038f6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -154,16 +154,6 @@ Trunk (Unreleased)
MAPREDUCE-5867. Fix NPE in KillAMPreemptionPolicy related to
ProportionalCapacityPreemptionPolicy (Sunil G via devaraj)
- BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
- MAPREDUCE-5890. Support for encrypting Intermediate
- data and spills in local filesystem. (asuresh via tucu)
-
- MAPREDUCE-6007. Add support to distcp to preserve raw.* namespace
- extended attributes. (clamb)
-
- MAPREDUCE-6041. Fix TestOptionsParser. (clamb)
-
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -261,6 +251,17 @@ Release 2.6.0 - UNRELEASED
MAPREDUCE-5885. build/test/test.mapred.spill causes release audit warnings
(Chen He via jlowe)
+ BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+ MAPREDUCE-5890. Support for encrypting Intermediate
+ data and spills in local filesystem. (asuresh via tucu)
+
+ MAPREDUCE-6007. Add support to distcp to preserve raw.* namespace
+ extended attributes. (clamb)
+
+ MAPREDUCE-6041. Fix TestOptionsParser. (clamb)
+ --
+
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -273,7 +274,7 @@ Release 2.5.1 - UNRELEASED
BUG FIXES
- MAPREDUCE-6033. Updated access check for displaying job information
+ MAPREDUCE-6033. Updated access check for displaying job information
(Yu Gao via Eric Yang)
Release 2.5.0 - 2014-08-11
[06/12] git commit: HADOOP-11005. Fix HTTP content type for
ReconfigurationServlet. Contributed by Lei Xu.
Posted by ar...@apache.org.
HADOOP-11005. Fix HTTP content type for ReconfigurationServlet. Contributed by Lei Xu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7119bd49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7119bd49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7119bd49
Branch: refs/heads/HDFS-6581
Commit: 7119bd49c870cf1e6b8c091d87025b439b9468df
Parents: ab638e7
Author: Andrew Wang <an...@cloudera.com>
Authored: Thu Aug 28 17:39:50 2014 -0700
Committer: Andrew Wang <an...@cloudera.com>
Committed: Thu Aug 28 17:40:55 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../main/java/org/apache/hadoop/conf/ReconfigurationServlet.java | 2 ++
2 files changed, 5 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7119bd49/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9fb0cd3..05eb383 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -468,6 +468,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-10880. Move HTTP delegation tokens out of URL querystring to
a header. (tucu)
+ HADOOP-11005. Fix HTTP content type for ReconfigurationServlet.
+ (Lei Xu via wang)
+
OPTIMIZATIONS
HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7119bd49/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
index 3fa162b..eb1fb6b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
@@ -200,6 +200,7 @@ public class ReconfigurationServlet extends HttpServlet {
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
LOG.info("GET");
+ resp.setContentType("text/html");
PrintWriter out = resp.getWriter();
Reconfigurable reconf = getReconfigurable(req);
@@ -214,6 +215,7 @@ public class ReconfigurationServlet extends HttpServlet {
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
LOG.info("POST");
+ resp.setContentType("text/html");
PrintWriter out = resp.getWriter();
Reconfigurable reconf = getReconfigurable(req);
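The fix follows the usual servlet rule: declare the content type before obtaining the writer, since headers set after the response is committed may be silently dropped. A hedged, self-contained sketch of the pattern (the servlet below is illustrative, not part of the patch):

    import java.io.IOException;
    import java.io.PrintWriter;
    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    public class HtmlServlet extends HttpServlet {
      @Override
      protected void doGet(HttpServletRequest req, HttpServletResponse resp)
          throws IOException {
        // Must precede getWriter(); after the response commits,
        // the content type can no longer be changed.
        resp.setContentType("text/html");
        PrintWriter out = resp.getWriter();
        out.println("<html><body>ok</body></html>");
      }
    }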