Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2014/10/01 03:06:15 UTC
[01/13] git commit: HADOOP-11154. Update BUILDING.txt to state that CMake 3.0 or newer is required on Mac. Contributed by Chris Nauroth.
Repository: hadoop
Updated Branches:
refs/heads/HDFS-6581 5e8b69735 -> dde2ed13c
HADOOP-11154. Update BUILDING.txt to state that CMake 3.0 or newer is required on Mac. Contributed by Chris Nauroth.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dc4e940
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dc4e940
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dc4e940
Branch: refs/heads/HDFS-6581
Commit: 8dc4e9408f4cd9a50cd58aee574f3b03c3a33b31
Parents: 0577eb3
Author: cnauroth <cn...@apache.org>
Authored: Tue Sep 30 08:30:44 2014 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Tue Sep 30 08:30:44 2014 -0700
----------------------------------------------------------------------
BUILDING.txt | 2 +-
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dc4e940/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index bbad5ef..621a221 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -8,7 +8,7 @@ Requirements:
* Maven 3.0 or later
* Findbugs 1.3.9 (if running findbugs)
* ProtocolBuffer 2.5.0
-* CMake 2.6 or newer (if compiling native code)
+* CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
* Zlib devel (if compiling native code)
* openssl devel ( if compiling native hadoop-pipes )
* Internet connection for first build (to fetch all Maven and Hadoop dependencies)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dc4e940/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9d0816f..f90a988 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -895,6 +895,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-11049. javax package system class default is too broad (Sangjin Lee
via jlowe)
+ HADOOP-11154. Update BUILDING.txt to state that CMake 3.0 or newer is
+ required on Mac. (cnauroth)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
[11/13] git commit: HADOOP-11113. Namenode not able to reconnect to KMS after KMS restart. (Arun Suresh via wang)
Posted by ar...@apache.org.
HADOOP-11113. Namenode not able to reconnect to KMS after KMS restart. (Arun Suresh via wang)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4c9b80a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4c9b80a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4c9b80a
Branch: refs/heads/HDFS-6581
Commit: a4c9b80a7c2b30404840f39f2f46646479914345
Parents: bbff96b
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Sep 30 16:46:58 2014 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Sep 30 16:48:24 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../crypto/key/kms/KMSClientProvider.java | 10 +-
.../hadoop/crypto/key/kms/server/MiniKMS.java | 19 ++-
.../hadoop/crypto/key/kms/server/TestKMS.java | 118 +++++++++++++++++--
4 files changed, 132 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4c9b80a/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b0ae0f8..95ea5b6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -780,6 +780,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-11130. NFS updateMaps OS check is reversed (brandonli)
+ HADOOP-11113. Namenode not able to reconnect to KMS after KMS restart.
+ (Arun Suresh via wang)
+
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HADOOP-10734. Implement high-performance secure random number sources.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4c9b80a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index a97463a..83ae067 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -415,7 +415,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
return conn;
}
- private static <T> T call(HttpURLConnection conn, Map jsonOutput,
+ private <T> T call(HttpURLConnection conn, Map jsonOutput,
int expectedResponse, Class<T> klass)
throws IOException {
T ret = null;
@@ -427,6 +427,14 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
conn.getInputStream().close();
throw ex;
}
+ if (conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN) {
+ // Ideally, this should happen only when there is an Authentication
+ // failure. Unfortunately, the AuthenticationFilter returns 403 when it
+ // cannot authenticate (Since a 401 requires Server to send
+ // WWW-Authenticate header as well)..
+ KMSClientProvider.this.authToken =
+ new DelegationTokenAuthenticatedURL.Token();
+ }
HttpExceptionUtils.validateResponse(conn, expectedResponse);
if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
&& klass != null) {
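The client-side effect of this hunk: after a KMS restart, the first request made with a stale authentication cookie fails with HTTP 403 while the provider discards its cached token, so a single retry re-authenticates and succeeds. A minimal sketch of that recovery, mirroring the assertion flow in testKMSRestart below ('provider' is a KMSClientProvider created before the restart; the key name and options are stand-ins):

    static void createKeyWithOneRetry(KeyProvider provider, Configuration conf)
        throws IOException {
      try {
        provider.createKey("k2", new byte[16], new KeyProvider.Options(conf));
      } catch (IOException e) {
        // The 403 handler above has already replaced the cached
        // DelegationTokenAuthenticatedURL.Token, so one retry is enough.
        if (e.getMessage() != null && e.getMessage().contains("403")) {
          provider.createKey("k2", new byte[16], new KeyProvider.Options(conf));
        } else {
          throw e;
        }
      }
    }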
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4c9b80a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
index 697d7ec..51cc026 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
@@ -43,12 +43,12 @@ import java.util.UUID;
public class MiniKMS {
- private static Server createJettyServer(String keyStore, String password) {
+ private static Server createJettyServer(String keyStore, String password, int inPort) {
try {
boolean ssl = keyStore != null;
InetAddress localhost = InetAddress.getByName("localhost");
String host = "localhost";
- ServerSocket ss = new ServerSocket(0, 50, localhost);
+ ServerSocket ss = new ServerSocket((inPort < 0) ? 0 : inPort, 50, localhost);
int port = ss.getLocalPort();
ss.close();
Server server = new Server(0);
@@ -91,6 +91,7 @@ public class MiniKMS {
private String log4jConfFile;
private File keyStoreFile;
private String keyStorePassword;
+ private int inPort = -1;
public Builder() {
kmsConfDir = new File("target/test-classes").getAbsoluteFile();
@@ -111,6 +112,12 @@ public class MiniKMS {
return this;
}
+ public Builder setPort(int port) {
+ Preconditions.checkArgument(port > 0, "input port must be greater than 0");
+ this.inPort = port;
+ return this;
+ }
+
public Builder setSslConf(File keyStoreFile, String keyStorePassword) {
Preconditions.checkNotNull(keyStoreFile, "keystore file is NULL");
Preconditions.checkNotNull(keyStorePassword, "keystore password is NULL");
@@ -126,7 +133,7 @@ public class MiniKMS {
"KMS conf dir does not exist");
return new MiniKMS(kmsConfDir.getAbsolutePath(), log4jConfFile,
(keyStoreFile != null) ? keyStoreFile.getAbsolutePath() : null,
- keyStorePassword);
+ keyStorePassword, inPort);
}
}
@@ -135,14 +142,16 @@ public class MiniKMS {
private String keyStore;
private String keyStorePassword;
private Server jetty;
+ private int inPort;
private URL kmsURL;
public MiniKMS(String kmsConfDir, String log4ConfFile, String keyStore,
- String password) {
+ String password, int inPort) {
this.kmsConfDir = kmsConfDir;
this.log4jConfFile = log4ConfFile;
this.keyStore = keyStore;
this.keyStorePassword = password;
+ this.inPort = inPort;
}
public void start() throws Exception {
@@ -174,7 +183,7 @@ public class MiniKMS {
writer.close();
}
System.setProperty("log4j.configuration", log4jConfFile);
- jetty = createJettyServer(keyStore, keyStorePassword);
+ jetty = createJettyServer(keyStore, keyStorePassword, inPort);
// we need to do a special handling for MiniKMS to work when in a dir and
// when in a JAR in the classpath thanks to Jetty way of handling of webapps
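Usage of the new builder option, as the restart test below exercises it; a hedged sketch in which the conf dir and the port value are illustrative (setPort rejects values <= 0, per the Preconditions check above):

    // Pinning MiniKMS to a fixed port lets a restarted instance come back at
    // the same URL, which the KMS-restart scenario requires.
    MiniKMS kms = new MiniKMS.Builder()
        .setKmsConfDir(confDir)
        .setLog4jConfFile("log4j.properties")
        .setPort(16000)                        // hypothetical free port
        .build();
    kms.start();
    try {
      System.out.println("KMS running at " + kms.getKMSUrl());
    } finally {
      kms.stop();
    }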
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4c9b80a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 9211417..4f106e6 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -89,7 +89,7 @@ public class TestKMS {
return file;
}
- public static abstract class KMSCallable implements Callable<Void> {
+ public static abstract class KMSCallable<T> implements Callable<T> {
private URL kmsUrl;
protected URL getKMSUrl() {
@@ -97,19 +97,27 @@ public class TestKMS {
}
}
- protected void runServer(String keystore, String password, File confDir,
- KMSCallable callable) throws Exception {
+ protected <T> T runServer(String keystore, String password, File confDir,
+ KMSCallable<T> callable) throws Exception {
+ return runServer(-1, keystore, password, confDir, callable);
+ }
+
+ protected <T> T runServer(int port, String keystore, String password, File confDir,
+ KMSCallable<T> callable) throws Exception {
MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder().setKmsConfDir(confDir)
.setLog4jConfFile("log4j.properties");
if (keystore != null) {
miniKMSBuilder.setSslConf(new File(keystore), password);
}
+ if (port > 0) {
+ miniKMSBuilder.setPort(port);
+ }
MiniKMS miniKMS = miniKMSBuilder.build();
miniKMS.start();
try {
System.out.println("Test KMS running at: " + miniKMS.getKMSUrl());
callable.kmsUrl = miniKMS.getKMSUrl();
- callable.call();
+ return callable.call();
} finally {
miniKMS.stop();
}
@@ -284,7 +292,7 @@ public class TestKMS {
writeConf(testDir, conf);
- runServer(keystore, password, testDir, new KMSCallable() {
+ runServer(keystore, password, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
@@ -351,7 +359,7 @@ public class TestKMS {
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k6.ALL", "*");
writeConf(confDir, conf);
- runServer(null, null, confDir, new KMSCallable() {
+ runServer(null, null, confDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
Date started = new Date();
@@ -616,7 +624,7 @@ public class TestKMS {
writeConf(testDir, conf);
- runServer(null, null, testDir, new KMSCallable() {
+ runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
@@ -783,6 +791,92 @@ public class TestKMS {
}
@Test
+ public void testKMSRestart() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set("hadoop.security.authentication", "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+ final File testDir = getTestDir();
+ conf = createBaseKMSConf(testDir);
+ conf.set("hadoop.kms.authentication.kerberos.keytab",
+ keytab.getAbsolutePath());
+ conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
+ conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+
+ for (KMSACLs.Type type : KMSACLs.Type.values()) {
+ conf.set(type.getAclConfigKey(), type.toString());
+ }
+ conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
+ KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
+
+ conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
+ KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
+
+ conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
+ conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
+ conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.ALL", "*");
+ conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
+
+ writeConf(testDir, conf);
+
+ KMSCallable<KeyProvider> c =
+ new KMSCallable<KeyProvider>() {
+ @Override
+ public KeyProvider call() throws Exception {
+ final Configuration conf = new Configuration();
+ conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
+ final URI uri = createKMSUri(getKMSUrl());
+
+ final KeyProvider kp =
+ doAs("SET_KEY_MATERIAL",
+ new PrivilegedExceptionAction<KeyProvider>() {
+ @Override
+ public KeyProvider run() throws Exception {
+ KMSClientProvider kp = new KMSClientProvider(uri, conf);
+ kp.createKey("k1", new byte[16],
+ new KeyProvider.Options(conf));
+ return kp;
+ }
+ });
+ return kp;
+ }
+ };
+
+ final KeyProvider retKp =
+ runServer(null, null, testDir, c);
+
+ // Restart server (using the same port)
+ runServer(c.getKMSUrl().getPort(), null, null, testDir,
+ new KMSCallable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ final Configuration conf = new Configuration();
+ conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
+ doAs("SET_KEY_MATERIAL",
+ new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ try {
+ retKp.createKey("k2", new byte[16],
+ new KeyProvider.Options(conf));
+ Assert.fail("Should fail first time !!");
+ } catch (IOException e) {
+ String message = e.getMessage();
+ Assert.assertTrue("Should be a 403 error : " + message,
+ message.contains("403"));
+ }
+ retKp.createKey("k2", new byte[16],
+ new KeyProvider.Options(conf));
+ retKp.createKey("k3", new byte[16],
+ new KeyProvider.Options(conf));
+ return null;
+ }
+ });
+ return null;
+ }
+ });
+ }
+
+ @Test
public void testACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
@@ -809,7 +903,7 @@ public class TestKMS {
writeConf(testDir, conf);
- runServer(null, null, testDir, new KMSCallable() {
+ runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
@@ -1117,7 +1211,7 @@ public class TestKMS {
writeConf(testDir, conf);
- runServer(null, null, testDir, new KMSCallable() {
+ runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
@@ -1201,7 +1295,7 @@ public class TestKMS {
writeConf(testDir, conf);
- runServer(null, null, testDir, new KMSCallable() {
+ runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
@@ -1326,7 +1420,7 @@ public class TestKMS {
writeConf(testDir, conf);
- runServer(null, null, testDir, new KMSCallable() {
+ runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
@@ -1398,7 +1492,7 @@ public class TestKMS {
writeConf(testDir, conf);
- runServer(null, null, testDir, new KMSCallable() {
+ runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
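What the KMSCallable<T> generics buy, reduced to one call: a server-scoped block can now hand a value back to the caller, which is exactly how testKMSRestart above captures a KeyProvider from the first server run and reuses it against the second. A sketch under the same assumptions as that test:

    // runServer starts MiniKMS, invokes the callable against it, and returns
    // the callable's result after stopping the server.
    KeyProvider kp = runServer(null, null, testDir, new KMSCallable<KeyProvider>() {
      @Override
      public KeyProvider call() throws Exception {
        return new KMSClientProvider(createKMSUri(getKMSUrl()), new Configuration());
      }
    });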
[06/13] git commit: YARN-2627. Added the info logs of attemptFailuresValidityInterval and number of previous failed attempts. Contributed by Xuan Gong.
Posted by ar...@apache.org.
YARN-2627. Added the info logs of attemptFailuresValidityInterval and number of previous failed attempts. Contributed by Xuan Gong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9582a501
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9582a501
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9582a501
Branch: refs/heads/HDFS-6581
Commit: 9582a50176800433ad3fa8829a50c28b859812a3
Parents: f7743dd
Author: Zhijie Shen <zj...@apache.org>
Authored: Tue Sep 30 12:57:45 2014 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue Sep 30 12:57:45 2014 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../yarn/server/resourcemanager/rmapp/RMAppImpl.java | 11 +++++++++++
2 files changed, 14 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9582a501/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c1172de..6771839 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -280,6 +280,9 @@ Release 2.6.0 - UNRELEASED
YARN-1769. CapacityScheduler: Improve reservations (Thomas Graves via
jlowe)
+ YARN-2627. Added the info logs of attemptFailuresValidityInterval and number
+ of previous failed attempts. (Xuan Gong via zjshen)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9582a501/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 0b8f321..eaef7d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -376,6 +376,11 @@ public class RMAppImpl implements RMApp, Recoverable {
this.attemptFailuresValidityInterval =
submissionContext.getAttemptFailuresValidityInterval();
+ if (this.attemptFailuresValidityInterval > 0) {
+ LOG.info("The attemptFailuresValidityInterval for the application: "
+ + this.applicationId + " is " + this.attemptFailuresValidityInterval
+ + ".");
+ }
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
this.readLock = lock.readLock();
@@ -1179,6 +1184,11 @@ public class RMAppImpl implements RMApp, Recoverable {
@Override
public RMAppState transition(RMAppImpl app, RMAppEvent event) {
int numberOfFailure = app.getNumFailedAppAttempts();
+ LOG.info("The number of failed attempts"
+ + (app.attemptFailuresValidityInterval > 0 ? " in previous "
+ + app.attemptFailuresValidityInterval + " milliseconds " : " ")
+ + "is " + numberOfFailure + ". The max attempts is "
+ + app.maxAppAttempts);
if (!app.submissionContext.getUnmanagedAM()
&& numberOfFailure < app.maxAppAttempts) {
boolean transferStateFromPreviousAttempt;
@@ -1293,4 +1303,5 @@ public class RMAppImpl implements RMApp, Recoverable {
public void setSystemClock(Clock clock) {
this.systemClock = clock;
}
+
}
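For context, the interval being logged originates in the application's submission context; a client opts into the sliding failure window roughly like this (a hedged sketch using the standard Records factory; the 60-second value is illustrative):

    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.util.Records;

    ApplicationSubmissionContext ctx =
        Records.newRecord(ApplicationSubmissionContext.class);
    // Attempt failures older than this window no longer count toward
    // maxAppAttempts, so long-running apps survive old, unrelated failures.
    ctx.setAttemptFailuresValidityInterval(60 * 1000L);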
[07/13] git commit: YARN-2387. Resource Manager crashes with NPE due to lack of synchronization. Contributed by Mit Desai
Posted by ar...@apache.org.
YARN-2387. Resource Manager crashes with NPE due to lack of synchronization. Contributed by Mit Desai
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/feaf139b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/feaf139b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/feaf139b
Branch: refs/heads/HDFS-6581
Commit: feaf139b4f327d33011e5a4424c06fb44c630955
Parents: 9582a50
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Sep 30 22:37:28 2014 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Sep 30 22:37:28 2014 +0000
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../records/impl/pb/ContainerStatusPBImpl.java | 22 ++++++++++----------
2 files changed, 14 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/feaf139b/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6771839..be61dfe 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -485,6 +485,9 @@ Release 2.6.0 - UNRELEASED
YARN-2610. Hamlet should close table tags. (Ray Chiang via kasha)
+ YARN-2387. Resource Manager crashes with NPE due to lack of
+ synchronization (Mit Desai via jlowe)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/feaf139b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
index 8e3e2cc..86f2af9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
@@ -50,7 +50,7 @@ public class ContainerStatusPBImpl extends ContainerStatus {
viaProto = true;
}
- public ContainerStatusProto getProto() {
+ public synchronized ContainerStatusProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
@@ -90,7 +90,7 @@ public class ContainerStatusPBImpl extends ContainerStatus {
}
}
- private void mergeLocalToProto() {
+ private synchronized void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
@@ -98,7 +98,7 @@ public class ContainerStatusPBImpl extends ContainerStatus {
viaProto = true;
}
- private void maybeInitBuilder() {
+ private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ContainerStatusProto.newBuilder(proto);
}
@@ -107,7 +107,7 @@ public class ContainerStatusPBImpl extends ContainerStatus {
@Override
- public ContainerState getState() {
+ public synchronized ContainerState getState() {
ContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasState()) {
return null;
@@ -116,7 +116,7 @@ public class ContainerStatusPBImpl extends ContainerStatus {
}
@Override
- public void setState(ContainerState state) {
+ public synchronized void setState(ContainerState state) {
maybeInitBuilder();
if (state == null) {
builder.clearState();
@@ -125,7 +125,7 @@ public class ContainerStatusPBImpl extends ContainerStatus {
builder.setState(convertToProtoFormat(state));
}
@Override
- public ContainerId getContainerId() {
+ public synchronized ContainerId getContainerId() {
ContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
if (this.containerId != null) {
return this.containerId;
@@ -138,32 +138,32 @@ public class ContainerStatusPBImpl extends ContainerStatus {
}
@Override
- public void setContainerId(ContainerId containerId) {
+ public synchronized void setContainerId(ContainerId containerId) {
maybeInitBuilder();
if (containerId == null)
builder.clearContainerId();
this.containerId = containerId;
}
@Override
- public int getExitStatus() {
+ public synchronized int getExitStatus() {
ContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
return p.getExitStatus();
}
@Override
- public void setExitStatus(int exitStatus) {
+ public synchronized void setExitStatus(int exitStatus) {
maybeInitBuilder();
builder.setExitStatus(exitStatus);
}
@Override
- public String getDiagnostics() {
+ public synchronized String getDiagnostics() {
ContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
return (p.getDiagnostics());
}
@Override
- public void setDiagnostics(String diagnostics) {
+ public synchronized void setDiagnostics(String diagnostics) {
maybeInitBuilder();
builder.setDiagnostics(diagnostics);
}
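The shape of the fix, reduced to its essence: getters, setters, and the proto-merging helpers all read and write the same builder/viaProto pair, so every one of them must hold the same monitor; synchronizing only some leaves the check-then-act race open, and the RM dies on a null builder. A simplified, self-contained sketch in which a StringBuilder stands in for the protobuf builder:

    class StatusRecord {
      private StringBuilder builder = new StringBuilder();
      private boolean viaProto = false;

      // All methods share the instance monitor because they touch the same
      // two fields; an unsynchronized reader could observe builder mid-swap.
      synchronized void setDiagnostics(String s) {
        if (viaProto) {                        // maybeInitBuilder analogue
          builder = new StringBuilder();
          viaProto = false;
        }
        builder.append(s);
      }

      synchronized String getProto() {         // getProto analogue
        String proto = builder.toString();
        viaProto = true;
        return proto;
      }
    }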
[05/13] git commit: YARN-2610. Hamlet should close table tags. (Ray Chiang via kasha)
Posted by ar...@apache.org.
YARN-2610. Hamlet should close table tags. (Ray Chiang via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7743dd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7743dd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7743dd0
Branch: refs/heads/HDFS-6581
Commit: f7743dd07dfbe0dde9be71acfaba16ded52adba7
Parents: cdf1af0
Author: Karthik Kambatla <ka...@apache.org>
Authored: Mon Sep 29 11:33:44 2014 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Tue Sep 30 11:46:07 2014 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 2 ++
.../org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java | 10 +++++-----
.../apache/hadoop/yarn/webapp/hamlet/TestHamlet.java | 4 ++--
.../hadoop/yarn/webapp/view/TestInfoBlock.java | 15 ++++++++-------
4 files changed, 17 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7743dd0/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a15a98e..c1172de 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -480,6 +480,8 @@ Release 2.6.0 - UNRELEASED
YARN-2606. Application History Server tries to access hdfs before doing
secure login (Mit Desai via jeagles)
+ YARN-2610. Hamlet should close table tags. (Ray Chiang via kasha)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7743dd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
index 7076c9a..83fa5da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
@@ -2423,10 +2423,10 @@ public class Hamlet extends HamletImpl implements HamletSpec._Html {
}
private <T extends _> TH<T> th_(T e, boolean inline) {
- return new TH<T>("th", e, opt(false, inline, false)); }
+ return new TH<T>("th", e, opt(true, inline, false)); }
private <T extends _> TD<T> td_(T e, boolean inline) {
- return new TD<T>("td", e, opt(false, inline, false)); }
+ return new TD<T>("td", e, opt(true, inline, false)); }
public class COL<T extends _> extends EImp<T> implements HamletSpec.COL {
public COL(String name, T parent, EnumSet<EOpt> opts) {
@@ -3719,10 +3719,10 @@ public class Hamlet extends HamletImpl implements HamletSpec._Html {
return new COLGROUP<T>("colgroup", e, opt(false, inline, false)); }
private <T extends _> THEAD<T> thead_(T e, boolean inline) {
- return new THEAD<T>("thead", e, opt(false, inline, false)); }
+ return new THEAD<T>("thead", e, opt(true, inline, false)); }
private <T extends _> TFOOT<T> tfoot_(T e, boolean inline) {
- return new TFOOT<T>("tfoot", e, opt(false, inline, false)); }
+ return new TFOOT<T>("tfoot", e, opt(true, inline, false)); }
private <T extends _> TBODY<T> tbody_(T e, boolean inline) {
return new TBODY<T>("tbody", e, opt(true, inline, false)); }
@@ -3731,7 +3731,7 @@ public class Hamlet extends HamletImpl implements HamletSpec._Html {
return new COL<T>("col", e, opt(false, inline, false)); }
private <T extends _> TR<T> tr_(T e, boolean inline) {
- return new TR<T>("tr", e, opt(false, inline, false)); }
+ return new TR<T>("tr", e, opt(true, inline, false)); }
public class BUTTON<T extends _> extends EImp<T> implements HamletSpec.BUTTON {
public BUTTON(String name, T parent, EnumSet<EOpt> opts) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7743dd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java
index 1ea8bcf..d9eaa37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java
@@ -88,8 +88,8 @@ public class TestHamlet {
assertEquals(0, h.nestLevel);
verify(out).print("<table");
verify(out).print("</table>");
- verify(out, never()).print("</td>");
- verify(out, never()).print("</tr>");
+ verify(out, atLeast(1)).print("</td>");
+ verify(out, atLeast(1)).print("</tr>");
}
@Test public void testEnumAttrs() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7743dd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java
index 4ec1434..da5efbb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java
@@ -68,7 +68,7 @@ public class TestInfoBlock {
static {
resInfo = new ResponseInfo();
- resInfo._("Single_line_value", "This is one line.");
+ resInfo._("Multiple_line_value", "This is one line.");
resInfo._("Multiple_line_value", "This is first line.\nThis is second line.");
}
@@ -98,13 +98,14 @@ public class TestInfoBlock {
WebAppTests.testBlock(MultilineInfoBlock.class);
TestInfoBlock.pw.flush();
String output = TestInfoBlock.sw.toString().replaceAll(" +", " ");
- String expectedSinglelineData = String.format("<tr class=\"odd\">%n"
- + " <th>%n Single_line_value%n <td>%n This is one line.%n");
- String expectedMultilineData = String.format("<tr class=\"even\">%n"
- + " <th>%n Multiple_line_value%n <td>%n <div>%n"
+ String expectedMultilineData1 = String.format("<tr class=\"odd\">%n"
+ + " <th>%n Multiple_line_value%n </th>%n"
+ + " <td>%n This is one line.%n </td>%n");
+ String expectedMultilineData2 = String.format("<tr class=\"even\">%n"
+ + " <th>%n Multiple_line_value%n </th>%n <td>%n <div>%n"
+ " This is first line.%n </div>%n <div>%n"
+ " This is second line.%n </div>%n");
- assertTrue(output.contains(expectedSinglelineData) && output.contains(expectedMultilineData));
+ assertTrue(output.contains(expectedMultilineData1) && output.contains(expectedMultilineData2));
}
@Test(timeout=60000L)
@@ -115,4 +116,4 @@ public class TestInfoBlock {
assertFalse(output.contains("<script>"));
assertTrue(output.contains(JAVASCRIPT_ESCAPED));
}
-}
\ No newline at end of file
+}
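A hedged reading of the one-line change repeated across th/td/tr/thead/tfoot: the first argument to opt(...) controls whether Hamlet emits the element's end tag, which the flipped test expectations above confirm. The rendered rows change shape roughly like this:

    // Before (end tags elided, leaning on HTML's optional-close rules):
    //   <tr class="odd"> <th>Key <td>Value
    // After (well-formed, matching the updated TestInfoBlock expectations):
    //   <tr class="odd"> <th>Key</th> <td>Value</td> </tr>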
[08/13] git commit: YARN-2594. Potential deadlock in RM when querying ApplicationResourceUsageReport. (Wangda Tan via kasha)
Posted by ar...@apache.org.
YARN-2594. Potential deadlock in RM when querying ApplicationResourceUsageReport. (Wangda Tan via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14d60dad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14d60dad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14d60dad
Branch: refs/heads/HDFS-6581
Commit: 14d60dadc25b044a2887bf912ba5872367f2dffb
Parents: feaf139
Author: Karthik Kambatla <ka...@apache.org>
Authored: Tue Sep 30 16:12:27 2014 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Tue Sep 30 16:12:27 2014 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 ++
.../server/resourcemanager/rmapp/RMAppImpl.java | 50 ++++++--------------
2 files changed, 18 insertions(+), 35 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d60dad/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index be61dfe..95fba23 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -488,6 +488,9 @@ Release 2.6.0 - UNRELEASED
YARN-2387. Resource Manager crashes with NPE due to lack of
synchronization (Mit Desai via jlowe)
+ YARN-2594. Potential deadlock in RM when querying
+ ApplicationResourceUsageReport. (Wangda Tan via kasha)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d60dad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index eaef7d2..4899434 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -128,7 +128,8 @@ public class RMAppImpl implements RMApp, Recoverable {
private long startTime;
private long finishTime = 0;
private long storedFinishTime = 0;
- private RMAppAttempt currentAttempt;
+ // This field isn't protected by readlock now.
+ private volatile RMAppAttempt currentAttempt;
private String queue;
private EventHandler handler;
private static final AppFinishedTransition FINISHED_TRANSITION =
@@ -438,16 +439,11 @@ public class RMAppImpl implements RMApp, Recoverable {
@Override
public float getProgress() {
- this.readLock.lock();
-
- try {
- if (this.currentAttempt != null) {
- return this.currentAttempt.getProgress();
- }
- return 0;
- } finally {
- this.readLock.unlock();
+ RMAppAttempt attempt = this.currentAttempt;
+ if (attempt != null) {
+ return attempt.getProgress();
}
+ return 0;
}
@Override
@@ -478,13 +474,7 @@ public class RMAppImpl implements RMApp, Recoverable {
@Override
public RMAppAttempt getCurrentAppAttempt() {
- this.readLock.lock();
-
- try {
- return this.currentAttempt;
- } finally {
- this.readLock.unlock();
- }
+ return this.currentAttempt;
}
@Override
@@ -655,30 +645,20 @@ public class RMAppImpl implements RMApp, Recoverable {
@Override
public String getTrackingUrl() {
- this.readLock.lock();
-
- try {
- if (this.currentAttempt != null) {
- return this.currentAttempt.getTrackingUrl();
- }
- return null;
- } finally {
- this.readLock.unlock();
+ RMAppAttempt attempt = this.currentAttempt;
+ if (attempt != null) {
+ return attempt.getTrackingUrl();
}
+ return null;
}
@Override
public String getOriginalTrackingUrl() {
- this.readLock.lock();
-
- try {
- if (this.currentAttempt != null) {
- return this.currentAttempt.getOriginalTrackingUrl();
- }
- return null;
- } finally {
- this.readLock.unlock();
+ RMAppAttempt attempt = this.currentAttempt;
+ if (attempt != null) {
+ return attempt.getOriginalTrackingUrl();
}
+ return null;
}
@Override
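The pattern adopted above, in miniature: a volatile reference plus a single local copy replaces the read lock. The local copy matters because it makes the null check and the dereference act on one snapshot, even if the state machine swaps the field in between. A minimal self-contained sketch:

    class AppView {
      // Written by the app state machine; volatile so readers need no lock.
      private volatile Attempt currentAttempt;

      float getProgress() {
        Attempt attempt = currentAttempt;      // exactly one volatile read
        return (attempt != null) ? attempt.getProgress() : 0f;
      }

      interface Attempt { float getProgress(); }
    }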
[13/13] git commit: Merge branch 'trunk' into HDFS-6581
Posted by ar...@apache.org.
Merge branch 'trunk' into HDFS-6581
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dde2ed13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dde2ed13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dde2ed13
Branch: refs/heads/HDFS-6581
Commit: dde2ed13c24e8906ee135719342acf9faea59ade
Parents: 5e8b697 a469833
Author: arp <ar...@apache.org>
Authored: Tue Sep 30 18:05:54 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Tue Sep 30 18:05:54 2014 -0700
----------------------------------------------------------------------
BUILDING.txt | 2 +-
hadoop-common-project/hadoop-common/CHANGES.txt | 26 ++++
.../crypto/key/kms/KMSClientProvider.java | 10 +-
.../apache/hadoop/fs/DelegateToFileSystem.java | 5 +
.../java/org/apache/hadoop/security/User.java | 3 +-
.../hadoop/security/UserGroupInformation.java | 18 ++-
.../apache/hadoop/ipc/TestFairCallQueue.java | 23 ++-
.../security/TestUserGroupInformation.java | 3 +-
.../hadoop/crypto/key/kms/server/MiniKMS.java | 19 ++-
.../hadoop/crypto/key/kms/server/TestKMS.java | 118 +++++++++++++--
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 22 ++-
.../apache/hadoop/hdfs/server/mover/Mover.java | 22 +--
.../hdfs/server/mover/TestStorageMover.java | 40 ++++-
hadoop-mapreduce-project/CHANGES.txt | 12 ++
hadoop-yarn-project/CHANGES.txt | 26 ++++
.../records/impl/pb/ContainerStatusPBImpl.java | 22 +--
.../hadoop/yarn/webapp/hamlet/Hamlet.java | 10 +-
.../hadoop/yarn/webapp/hamlet/TestHamlet.java | 4 +-
.../hadoop/yarn/webapp/view/TestInfoBlock.java | 15 +-
...pplicationHistoryManagerOnTimelineStore.java | 4 +-
...pplicationHistoryManagerOnTimelineStore.java | 110 ++++++++------
.../metrics/SystemMetricsPublisher.java | 2 +-
.../server/resourcemanager/rmapp/RMAppImpl.java | 61 ++++----
.../metrics/TestSystemMetricsPublisher.java | 146 ++++++++++---------
24 files changed, 496 insertions(+), 227 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dde2ed13/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dde2ed13/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index d982824,a6edd80..0f8a9f6
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@@ -32,10 -30,8 +30,9 @@@ import org.apache.commons.logging.Log
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.DFSConfigKeys;
[09/13] git commit: HADOOP-11156. DelegateToFileSystem should implement getFsStatus(final Path f). Contributed by Zhihai Xu.
Posted by ar...@apache.org.
HADOOP-11156. DelegateToFileSystem should implement getFsStatus(final Path f). Contributed by Zhihai Xu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7075ada
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7075ada
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7075ada
Branch: refs/heads/HDFS-6581
Commit: d7075ada5d3019a8c520d34bfddb0cd73a449343
Parents: 14d60da
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Sep 30 16:39:44 2014 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Sep 30 16:39:44 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../main/java/org/apache/hadoop/fs/DelegateToFileSystem.java | 5 +++++
2 files changed, 8 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7075ada/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 65a5592..b0ae0f8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -347,6 +347,9 @@ Release 2.7.0 - UNRELEASED
IMPROVEMENTS
+ HADOOP-11156. DelegateToFileSystem should implement
+ getFsStatus(final Path f). (Zhihai Xu via wang)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7075ada/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
index 708ca4a..1cdcb27 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
@@ -129,6 +129,11 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
}
@Override
+ public FsStatus getFsStatus(final Path f) throws IOException {
+ return fsImpl.getStatus(f);
+ }
+
+ @Override
public FsServerDefaults getServerDefaults() throws IOException {
return fsImpl.getServerDefaults();
}
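The practical effect: FileContext calls routed through a DelegateToFileSystem subclass now report real filesystem status instead of the AbstractFileSystem default. A hedged usage sketch (the class name, configuration, and path are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FsStatus;
    import org.apache.hadoop.fs.Path;

    public class FsStatusDemo {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        // Now delegates to the wrapped FileSystem#getStatus(Path).
        FsStatus status = fc.getFsStatus(new Path("/"));
        System.out.println("capacity=" + status.getCapacity()
            + " remaining=" + status.getRemaining());
      }
    }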
[12/13] git commit: HADOOP-11117 UGI HadoopLoginModule doesn't catch & wrap all kerberos-related exceptions (stevel)
Posted by ar...@apache.org.
HADOOP-11117 UGI HadoopLoginModule doesn't catch & wrap all kerberos-related exceptions (stevel)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4698336
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4698336
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4698336
Branch: refs/heads/HDFS-6581
Commit: a469833639c7a5ef525a108a1ac70213881e627d
Parents: a4c9b80
Author: Steve Loughran <st...@apache.org>
Authored: Tue Sep 30 17:30:06 2014 -0700
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Sep 30 17:30:06 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/security/User.java | 3 ++-
.../hadoop/security/UserGroupInformation.java | 18 ++++++++++++++++--
.../hadoop/security/TestUserGroupInformation.java | 3 ++-
4 files changed, 23 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4698336/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 95ea5b6..da51fd0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -918,6 +918,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-11145. TestFairCallQueue fails. (Akira AJISAKA via cnauroth)
+ HADOOP-11117 UGI HadoopLoginModule doesn't catch & wrap all
+ kerberos-related exceptions (stevel)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4698336/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/User.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/User.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/User.java
index 8d9b28b..236e962 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/User.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/User.java
@@ -47,7 +47,8 @@ class User implements Principal {
try {
shortName = new HadoopKerberosName(name).getShortName();
} catch (IOException ioe) {
- throw new IllegalArgumentException("Illegal principal name " + name, ioe);
+ throw new IllegalArgumentException("Illegal principal name " + name
+ +": " + ioe.toString(), ioe);
}
fullName = name;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4698336/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 1b024eb..45328c7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -178,7 +178,21 @@ public class UserGroupInformation {
}
// if we found the user, add our principal
if (user != null) {
- subject.getPrincipals().add(new User(user.getName()));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Using user: \"" + user + "\" with name " + user.getName());
+ }
+
+ User userEntry = null;
+ try {
+ userEntry = new User(user.getName());
+ } catch (Exception e) {
+ throw (LoginException)(new LoginException(e.toString()).initCause(e));
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("User entry: \"" + userEntry.toString() + "\"" );
+ }
+
+ subject.getPrincipals().add(userEntry);
return true;
}
LOG.error("Can't find user in " + subject);
@@ -931,7 +945,7 @@ public class UserGroupInformation {
metrics.loginFailure.add(Time.now() - start);
}
throw new IOException("Login failure for " + user + " from keytab " +
- path, le);
+ path+ ": " + le, le);
}
LOG.info("Login successful for user " + keytabPrincipal
+ " using keytab file " + keytabFile);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4698336/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
index d676782..48b9b99 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
@@ -340,7 +340,8 @@ public class TestUserGroupInformation {
} catch (IllegalArgumentException e) {
String expect = (userName == null || userName.isEmpty())
? "Null user" : "Illegal principal name "+userName;
- assertEquals(expect, e.getMessage());
+ assertTrue("Did not find "+ expect + " in " + e,
+ e.toString().contains(expect));
}
}
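The wrapping idiom used in the UGI change above, isolated: LoginException predates chained-exception constructors, so the cause must be attached via initCause, and folding the cause's own toString() into the new message keeps the root cause visible even where only getMessage() is printed. A minimal sketch (the failing step is a hypothetical stand-in):

    import javax.security.auth.login.LoginException;

    public class LoginWrapDemo {
      // Hypothetical stand-in for the Kerberos name parsing that can fail.
      static void riskyKerberosStep() throws Exception {
        throw new IllegalArgumentException("Illegal principal name foo@BAR");
      }

      static void login() throws LoginException {
        try {
          riskyKerberosStep();
        } catch (Exception e) {
          // LoginException has no (String, Throwable) constructor, so the
          // cause is attached via initCause; the cast is safe because
          // initCause returns the same LoginException as a Throwable.
          throw (LoginException) new LoginException(e.toString()).initCause(e);
        }
      }

      public static void main(String[] args) {
        try { login(); } catch (LoginException le) { le.printStackTrace(); }
      }
    }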
[10/13] git commit: YARN-2602. Fixed possible NPE in ApplicationHistoryManagerOnTimelineStore. Contributed by Zhijie Shen
Posted by ar...@apache.org.
YARN-2602. Fixed possible NPE in ApplicationHistoryManagerOnTimelineStore. Contributed by Zhijie Shen
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbff96be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbff96be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbff96be
Branch: refs/heads/HDFS-6581
Commit: bbff96be48119774688981d04baf444639135977
Parents: d7075ad
Author: Jian He <ji...@apache.org>
Authored: Tue Sep 30 16:39:25 2014 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Sep 30 16:44:17 2014 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
...pplicationHistoryManagerOnTimelineStore.java | 4 +-
...pplicationHistoryManagerOnTimelineStore.java | 110 ++++++++------
.../metrics/SystemMetricsPublisher.java | 2 +-
.../metrics/TestSystemMetricsPublisher.java | 146 ++++++++++---------
5 files changed, 149 insertions(+), 116 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbff96be/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 95fba23..bfaaa90 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -491,6 +491,9 @@ Release 2.6.0 - UNRELEASED
YARN-2594. Potential deadlock in RM when querying
ApplicationResourceUsageReport. (Wangda Tan via kasha)
+ YARN-2602. Fixed possible NPE in ApplicationHistoryManagerOnTimelineStore.
+ (Zhijie Shen via jianhe)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbff96be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index f00ec9c..5381bd6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -227,7 +227,9 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
if (entityInfo.containsKey(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO)) {
String appViewACLsStr = entityInfo.get(
ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO).toString();
- appViewACLs.put(ApplicationAccessType.VIEW_APP, appViewACLsStr);
+ if (appViewACLsStr.length() > 0) {
+ appViewACLs.put(ApplicationAccessType.VIEW_APP, appViewACLsStr);
+ }
}
if (field == ApplicationReportField.USER_AND_ACLS) {
return new ApplicationReportExt(ApplicationReport.newInstance(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbff96be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
index e6bfcd9..49386c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
@@ -122,7 +122,11 @@ public class TestApplicationHistoryManagerOnTimelineStore {
for (int i = 1; i <= SCALE; ++i) {
TimelineEntities entities = new TimelineEntities();
ApplicationId appId = ApplicationId.newInstance(0, i);
- entities.addEntity(createApplicationTimelineEntity(appId));
+ if (i == 2) {
+ entities.addEntity(createApplicationTimelineEntity(appId, true));
+ } else {
+ entities.addEntity(createApplicationTimelineEntity(appId, false));
+ }
store.put(entities);
for (int j = 1; j <= SCALE; ++j) {
entities = new TimelineEntities();
@@ -142,50 +146,58 @@ public class TestApplicationHistoryManagerOnTimelineStore {
@Test
public void testGetApplicationReport() throws Exception {
- final ApplicationId appId = ApplicationId.newInstance(0, 1);
- ApplicationReport app;
- if (callerUGI == null) {
- app = historyManager.getApplication(appId);
- } else {
- app =
- callerUGI.doAs(new PrivilegedExceptionAction<ApplicationReport> () {
- @Override
- public ApplicationReport run() throws Exception {
- return historyManager.getApplication(appId);
- }
- });
- }
- Assert.assertNotNull(app);
- Assert.assertEquals(appId, app.getApplicationId());
- Assert.assertEquals("test app", app.getName());
- Assert.assertEquals("test app type", app.getApplicationType());
- Assert.assertEquals("user1", app.getUser());
- Assert.assertEquals("test queue", app.getQueue());
- Assert.assertEquals(Integer.MAX_VALUE + 2L, app.getStartTime());
- Assert.assertEquals(Integer.MAX_VALUE + 3L, app.getFinishTime());
- Assert.assertTrue(Math.abs(app.getProgress() - 1.0F) < 0.0001);
- if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
- Assert.assertEquals(ApplicationAttemptId.newInstance(appId, -1),
- app.getCurrentApplicationAttemptId());
- Assert.assertEquals(null, app.getHost());
- Assert.assertEquals(-1, app.getRpcPort());
- Assert.assertEquals(null, app.getTrackingUrl());
- Assert.assertEquals(null, app.getOriginalTrackingUrl());
- Assert.assertEquals(null, app.getDiagnostics());
- } else {
- Assert.assertEquals(ApplicationAttemptId.newInstance(appId, 1),
- app.getCurrentApplicationAttemptId());
- Assert.assertEquals("test host", app.getHost());
- Assert.assertEquals(-100, app.getRpcPort());
- Assert.assertEquals("test tracking url", app.getTrackingUrl());
- Assert.assertEquals("test original tracking url",
- app.getOriginalTrackingUrl());
- Assert.assertEquals("test diagnostics info", app.getDiagnostics());
+ for (int i = 1; i <= 2; ++i) {
+ final ApplicationId appId = ApplicationId.newInstance(0, i);
+ ApplicationReport app;
+ if (callerUGI == null) {
+ app = historyManager.getApplication(appId);
+ } else {
+ app =
+ callerUGI.doAs(new PrivilegedExceptionAction<ApplicationReport> () {
+ @Override
+ public ApplicationReport run() throws Exception {
+ return historyManager.getApplication(appId);
+ }
+ });
+ }
+ Assert.assertNotNull(app);
+ Assert.assertEquals(appId, app.getApplicationId());
+ Assert.assertEquals("test app", app.getName());
+ Assert.assertEquals("test app type", app.getApplicationType());
+ Assert.assertEquals("user1", app.getUser());
+ Assert.assertEquals("test queue", app.getQueue());
+ Assert.assertEquals(Integer.MAX_VALUE + 2L, app.getStartTime());
+ Assert.assertEquals(Integer.MAX_VALUE + 3L, app.getFinishTime());
+ Assert.assertTrue(Math.abs(app.getProgress() - 1.0F) < 0.0001);
+      // App 2 doesn't have ACLs, so the default ACL " " will be used.
+      // Nobody except the admin and the owner has access to the app's details.
+ if ((i == 1 && callerUGI != null &&
+ callerUGI.getShortUserName().equals("user3")) ||
+ (i == 2 && callerUGI != null &&
+ (callerUGI.getShortUserName().equals("user2") ||
+ callerUGI.getShortUserName().equals("user3")))) {
+ Assert.assertEquals(ApplicationAttemptId.newInstance(appId, -1),
+ app.getCurrentApplicationAttemptId());
+ Assert.assertEquals(null, app.getHost());
+ Assert.assertEquals(-1, app.getRpcPort());
+ Assert.assertEquals(null, app.getTrackingUrl());
+ Assert.assertEquals(null, app.getOriginalTrackingUrl());
+ Assert.assertEquals(null, app.getDiagnostics());
+ } else {
+ Assert.assertEquals(ApplicationAttemptId.newInstance(appId, 1),
+ app.getCurrentApplicationAttemptId());
+ Assert.assertEquals("test host", app.getHost());
+ Assert.assertEquals(-100, app.getRpcPort());
+ Assert.assertEquals("test tracking url", app.getTrackingUrl());
+ Assert.assertEquals("test original tracking url",
+ app.getOriginalTrackingUrl());
+ Assert.assertEquals("test diagnostics info", app.getDiagnostics());
+ }
+ Assert.assertEquals(FinalApplicationStatus.UNDEFINED,
+ app.getFinalApplicationStatus());
+ Assert.assertEquals(YarnApplicationState.FINISHED,
+ app.getYarnApplicationState());
}
- Assert.assertEquals(FinalApplicationStatus.UNDEFINED,
- app.getFinalApplicationStatus());
- Assert.assertEquals(YarnApplicationState.FINISHED,
- app.getYarnApplicationState());
}
@Test
@@ -396,7 +408,7 @@ public class TestApplicationHistoryManagerOnTimelineStore {
}
private static TimelineEntity createApplicationTimelineEntity(
- ApplicationId appId) {
+ ApplicationId appId, boolean emptyACLs) {
TimelineEntity entity = new TimelineEntity();
entity.setEntityType(ApplicationMetricsConstants.ENTITY_TYPE);
entity.setEntityId(appId.toString());
@@ -410,8 +422,12 @@ public class TestApplicationHistoryManagerOnTimelineStore {
entityInfo.put(ApplicationMetricsConstants.QUEUE_ENTITY_INFO, "test queue");
entityInfo.put(ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO,
Integer.MAX_VALUE + 1L);
- entityInfo.put(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO,
- "user2");
+ if (emptyACLs) {
+ entityInfo.put(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO, "");
+ } else {
+ entityInfo.put(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO,
+ "user2");
+ }
entity.setOtherInfo(entityInfo);
TimelineEvent tEvent = new TimelineEvent();
tEvent.setEventType(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
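The callerUGI.doAs(...) wrapper in the test above is what lets one test body exercise per-user ACL checks. A minimal, self-contained sketch of that pattern (UserGroupInformation.createRemoteUser, doAs, and getCurrentUser are real Hadoop APIs; the harness class and printed output are illustrative, not part of the patch):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        // Fabricate a caller identity without Kerberos; inside run(),
        // UserGroupInformation.getCurrentUser() reports this user, so
        // ACL checks see "user3" rather than the JVM's login user.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user3");
        String who = ugi.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            return UserGroupInformation.getCurrentUser().getShortUserName();
          }
        });
        System.out.println(who); // prints: user3
      }
    }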
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbff96be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
index 5da006c..e2ecf9a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
@@ -137,7 +137,7 @@ public class SystemMetricsPublisher extends CompositeService {
dispatcher.getEventHandler().handle(
new ApplicationACLsUpdatedEvent(
app.getApplicationId(),
- appViewACLs,
+ appViewACLs == null ? "" : appViewACLs,
updatedTime));
}
}
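The ternary above defaults a missing view ACL to the empty string, so downstream consumers parse "no extra viewers" instead of tripping on a null. A minimal sketch of why a blank ACL string locks a report down to the owner and admins (AccessControlList and UserGroupInformation are real Hadoop classes; the standalone harness is an assumption for illustration, and the history server's actual check also consults the admin ACLs):

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    public class ViewAclSketch {
      public static void main(String[] args) {
        // "user2" grants view access to user2; " " parses to an empty
        // user/group list, so nobody is granted view access through it.
        AccessControlList explicit = new AccessControlList("user2");
        AccessControlList blank = new AccessControlList(" ");
        UserGroupInformation user2 = UserGroupInformation.createRemoteUser("user2");
        System.out.println(explicit.isUserAllowed(user2)); // true
        System.out.println(blank.isUserAllowed(user2));    // false
      }
    }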
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbff96be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index 63343e9..52faf12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -95,77 +95,89 @@ public class TestSystemMetricsPublisher {
@Test(timeout = 10000)
public void testPublishApplicationMetrics() throws Exception {
- ApplicationId appId = ApplicationId.newInstance(0, 1);
- RMApp app = createRMApp(appId);
- metricsPublisher.appCreated(app, app.getStartTime());
- metricsPublisher.appFinished(app, RMAppState.FINISHED, app.getFinishTime());
- metricsPublisher.appACLsUpdated(app, "uers1,user2", 4L);
- TimelineEntity entity = null;
- do {
- entity =
- store.getEntity(appId.toString(),
- ApplicationMetricsConstants.ENTITY_TYPE,
- EnumSet.allOf(Field.class));
- // ensure all three events are published before leaving the loop
- } while (entity == null || entity.getEvents().size() < 3);
- // verify all the fields
- Assert.assertEquals(ApplicationMetricsConstants.ENTITY_TYPE,
- entity.getEntityType());
- Assert
- .assertEquals(app.getApplicationId().toString(), entity.getEntityId());
- Assert
- .assertEquals(
- app.getName(),
- entity.getOtherInfo().get(
- ApplicationMetricsConstants.NAME_ENTITY_INFO));
- Assert.assertEquals(app.getQueue(),
- entity.getOtherInfo()
- .get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO));
- Assert
- .assertEquals(
- app.getUser(),
- entity.getOtherInfo().get(
- ApplicationMetricsConstants.USER_ENTITY_INFO));
- Assert
- .assertEquals(
- app.getApplicationType(),
+ for (int i = 1; i <= 2; ++i) {
+ ApplicationId appId = ApplicationId.newInstance(0, i);
+ RMApp app = createRMApp(appId);
+ metricsPublisher.appCreated(app, app.getStartTime());
+ metricsPublisher.appFinished(app, RMAppState.FINISHED, app.getFinishTime());
+ if (i == 1) {
+ metricsPublisher.appACLsUpdated(app, "uers1,user2", 4L);
+ } else {
+ // in case the user doesn't specify ACLs
+ metricsPublisher.appACLsUpdated(app, null, 4L);
+ }
+ TimelineEntity entity = null;
+ do {
+ entity =
+ store.getEntity(appId.toString(),
+ ApplicationMetricsConstants.ENTITY_TYPE,
+ EnumSet.allOf(Field.class));
+ // ensure all three events are published before leaving the loop
+ } while (entity == null || entity.getEvents().size() < 3);
+ // verify all the fields
+ Assert.assertEquals(ApplicationMetricsConstants.ENTITY_TYPE,
+ entity.getEntityType());
+ Assert
+ .assertEquals(app.getApplicationId().toString(), entity.getEntityId());
+ Assert
+ .assertEquals(
+ app.getName(),
+ entity.getOtherInfo().get(
+ ApplicationMetricsConstants.NAME_ENTITY_INFO));
+ Assert.assertEquals(app.getQueue(),
+ entity.getOtherInfo()
+ .get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO));
+ Assert
+ .assertEquals(
+ app.getUser(),
+ entity.getOtherInfo().get(
+ ApplicationMetricsConstants.USER_ENTITY_INFO));
+ Assert
+ .assertEquals(
+ app.getApplicationType(),
+ entity.getOtherInfo().get(
+ ApplicationMetricsConstants.TYPE_ENTITY_INFO));
+ Assert.assertEquals(app.getSubmitTime(),
+ entity.getOtherInfo().get(
+ ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO));
+ if (i == 1) {
+ Assert.assertEquals("uers1,user2",
entity.getOtherInfo().get(
- ApplicationMetricsConstants.TYPE_ENTITY_INFO));
- Assert.assertEquals(app.getSubmitTime(),
- entity.getOtherInfo().get(
- ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO));
- Assert.assertEquals("uers1,user2",
- entity.getOtherInfo().get(
+ ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO));
+ } else {
+ Assert.assertEquals("", entity.getOtherInfo().get(
ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO));
- boolean hasCreatedEvent = false;
- boolean hasFinishedEvent = false;
- boolean hasACLsUpdatedEvent = false;
- for (TimelineEvent event : entity.getEvents()) {
- if (event.getEventType().equals(
- ApplicationMetricsConstants.CREATED_EVENT_TYPE)) {
- hasCreatedEvent = true;
- Assert.assertEquals(app.getStartTime(), event.getTimestamp());
- } else if (event.getEventType().equals(
- ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
- hasFinishedEvent = true;
- Assert.assertEquals(app.getFinishTime(), event.getTimestamp());
- Assert.assertEquals(
- app.getDiagnostics().toString(),
- event.getEventInfo().get(
- ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO));
- Assert.assertEquals(
- app.getFinalApplicationStatus().toString(),
- event.getEventInfo().get(
- ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO));
- Assert.assertEquals(YarnApplicationState.FINISHED.toString(), event
- .getEventInfo().get(ApplicationMetricsConstants.STATE_EVENT_INFO));
- } else if (event.getEventType().equals(
- ApplicationMetricsConstants.ACLS_UPDATED_EVENT_TYPE)) {
- hasACLsUpdatedEvent = true;
- Assert.assertEquals(4L, event.getTimestamp());
}
+ boolean hasCreatedEvent = false;
+ boolean hasFinishedEvent = false;
+ boolean hasACLsUpdatedEvent = false;
+ for (TimelineEvent event : entity.getEvents()) {
+ if (event.getEventType().equals(
+ ApplicationMetricsConstants.CREATED_EVENT_TYPE)) {
+ hasCreatedEvent = true;
+ Assert.assertEquals(app.getStartTime(), event.getTimestamp());
+ } else if (event.getEventType().equals(
+ ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
+ hasFinishedEvent = true;
+ Assert.assertEquals(app.getFinishTime(), event.getTimestamp());
+ Assert.assertEquals(
+ app.getDiagnostics().toString(),
+ event.getEventInfo().get(
+ ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO));
+ Assert.assertEquals(
+ app.getFinalApplicationStatus().toString(),
+ event.getEventInfo().get(
+ ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO));
+ Assert.assertEquals(YarnApplicationState.FINISHED.toString(), event
+ .getEventInfo().get(ApplicationMetricsConstants.STATE_EVENT_INFO));
+ } else if (event.getEventType().equals(
+ ApplicationMetricsConstants.ACLS_UPDATED_EVENT_TYPE)) {
+ hasACLsUpdatedEvent = true;
+ Assert.assertEquals(4L, event.getTimestamp());
+ }
+ }
+ Assert.assertTrue(hasCreatedEvent && hasFinishedEvent && hasACLsUpdatedEvent);
}
- Assert.assertTrue(hasCreatedEvent && hasFinishedEvent && hasACLsUpdatedEvent);
}
@Test(timeout = 10000)
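Because SystemMetricsPublisher hands events to a dispatcher asynchronously, the test above cannot assert right after publishing; it polls the store until all three events are visible, with the @Test timeout bounding the wait. A minimal sketch of that publish-then-poll shape (a plain concurrent queue stands in for the timeline store; the class and event names are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ConcurrentLinkedQueue;

    public class PollSketch {
      static final ConcurrentLinkedQueue<String> store =
          new ConcurrentLinkedQueue<String>();

      public static void main(String[] args) throws InterruptedException {
        new Thread(new Runnable() {
          @Override
          public void run() { // asynchronous "publisher"
            store.add("CREATED");
            store.add("FINISHED");
            store.add("ACLS_UPDATED");
          }
        }).start();
        List<String> events;
        do {
          Thread.sleep(10); // back off briefly between polls
          events = new ArrayList<String>(store);
        } while (events.size() < 3); // wait until all three events land
        System.out.println(events);
      }
    }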
[03/13] git commit: Preparing to branch hadoop-2.6
Posted by ar...@apache.org.
Preparing to branch hadoop-2.6
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea32a66f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea32a66f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea32a66f
Branch: refs/heads/HDFS-6581
Commit: ea32a66f7d37d4d6a121e34c95d93ae8992be571
Parents: b915869
Author: Arun C. Murthy <ac...@apache.org>
Authored: Tue Sep 30 10:26:42 2014 -0700
Committer: Arun C. Murthy <ac...@apache.org>
Committed: Tue Sep 30 10:26:42 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 12 ++++++++++++
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 ++++++++++++
hadoop-mapreduce-project/CHANGES.txt | 12 ++++++++++++
hadoop-yarn-project/CHANGES.txt | 12 ++++++++++++
4 files changed, 48 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea32a66f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e11e9a4..65a5592 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -339,6 +339,18 @@ Trunk (Unreleased)
HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
+Release 2.7.0 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea32a66f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8a753d3..1a97659 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -258,6 +258,18 @@ Trunk (Unreleased)
HDFS-6981. Fix DN upgrade with layout version change. (Arpit Agarwal)
+Release 2.7.0 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea32a66f/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 2dc7b6d..8bc1e53 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -224,6 +224,18 @@ Trunk (Unreleased)
MAPREDUCE-6078. native-task: fix gtest build on macosx (Binglin Chang)
+Release 2.7.0 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea32a66f/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6f23eee..a15a98e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -27,6 +27,18 @@ Trunk - Unreleased
YARN-2525. yarn logs command gives error on trunk (Akira AJISAKA via aw)
+Release 2.7.0 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
[04/13] git commit: HDFS-7167. NPE while running Mover if the given
path is for a file. Contributed by Jing Zhao.
Posted by ar...@apache.org.
HDFS-7167. NPE while running Mover if the given path is for a file. Contributed by Jing Zhao.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdf1af0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdf1af0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdf1af0e
Branch: refs/heads/HDFS-6581
Commit: cdf1af0e5a21361924f3f7c6cea5170767d2b6bc
Parents: ea32a66
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Sep 30 11:13:35 2014 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Sep 30 11:13:35 2014 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 10 +++--
.../apache/hadoop/hdfs/server/mover/Mover.java | 22 ++++-------
.../hdfs/server/mover/TestStorageMover.java | 40 ++++++++++++++++++--
3 files changed, 50 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdf1af0e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1a97659..40891bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -772,6 +772,11 @@ Release 2.6.0 - UNRELEASED
HDFS-7157. Using Time.now() for recording start/end time of reconfiguration
tasks (Lei Xu via cmccabe)
+ HDFS-6664. HDFS permissions guide documentation states incorrect default
+ group mapping class. (Ray Chiang via aw)
+
+ HDFS-4227. Document dfs.namenode.resource.* (Daisuke Kobayashi via aw)
+
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
@@ -1003,10 +1008,7 @@ Release 2.6.0 - UNRELEASED
HDFS-7140. Add a tool to list all the existing block storage policies.
(jing9)
- HDFS-6664. HDFS permissions guide documentation states incorrect default
- group mapping class. (Ray Chiang via aw)
-
- HDFS-4227. Document dfs.namenode.resource.* (Daisuke Kobayashi via aw)
+ HDFS-7167. NPE while running Mover if the given path is for a file. (jing9)
Release 2.5.1 - 2014-09-05
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdf1af0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 4db0df6..04133bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -252,14 +252,9 @@ public class Mover {
*/
private boolean processNamespace() {
getSnapshottableDirs();
- boolean hasRemaining = true;
- try {
- for (Path target : targetPaths) {
- hasRemaining = processDirRecursively("", dfs.getFileInfo(target
- .toUri().getPath()));
- }
- } catch (IOException e) {
- LOG.warn("Failed to get root directory status. Ignore and continue.", e);
+ boolean hasRemaining = false;
+ for (Path target : targetPaths) {
+ hasRemaining |= processPath(target.toUri().getPath());
}
// wait for pending move to finish and retry the failed migration
hasRemaining |= Dispatcher.waitForMoveCompletion(storages.targets.values());
@@ -270,7 +265,7 @@ public class Mover {
* @return whether there is still remaining migration work for the next
* round
*/
- private boolean processChildrenList(String fullPath) {
+ private boolean processPath(String fullPath) {
boolean hasRemaining = false;
for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) {
final DirectoryListing children;
@@ -285,7 +280,7 @@ public class Mover {
return hasRemaining;
}
for (HdfsFileStatus child : children.getPartialListing()) {
- hasRemaining |= processDirRecursively(fullPath, child);
+ hasRemaining |= processRecursively(fullPath, child);
}
if (children.hasMore()) {
lastReturnedName = children.getLastName();
@@ -296,8 +291,7 @@ public class Mover {
}
/** @return whether the migration requires another round */
- private boolean processDirRecursively(String parent,
- HdfsFileStatus status) {
+ private boolean processRecursively(String parent, HdfsFileStatus status) {
String fullPath = status.getFullName(parent);
boolean hasRemaining = false;
if (status.isDir()) {
@@ -305,11 +299,11 @@ public class Mover {
fullPath = fullPath + Path.SEPARATOR;
}
- hasRemaining = processChildrenList(fullPath);
+ hasRemaining = processPath(fullPath);
// process snapshots if this is a snapshottable directory
if (snapshottableDirs.contains(fullPath)) {
final String dirSnapshot = fullPath + HdfsConstants.DOT_SNAPSHOT_DIR;
- hasRemaining |= processChildrenList(dirSnapshot);
+ hasRemaining |= processPath(dirSnapshot);
}
} else if (!status.isSymlink()) { // file
try {
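The NPE came from feeding a plain file path into logic that assumed a directory listing; the patch splits the work so the file-vs-directory decision happens first. A minimal sketch of the resulting control flow using plain java.io (the real code walks HdfsFileStatus objects and schedules block moves; these names and the println are illustrative):

    import java.io.File;

    public class RecurseSketch {
      // List a directory and dispatch each child.
      static boolean processPath(File dir) {
        boolean hasRemaining = false;
        File[] children = dir.listFiles();
        if (children == null) {
          return false; // listing failed or not a directory
        }
        for (File child : children) {
          hasRemaining |= processRecursively(child);
        }
        return hasRemaining;
      }

      // Decide file-vs-directory up front, so a file argument is
      // handled directly instead of reaching directory-only code.
      static boolean processRecursively(File f) {
        if (f.isDirectory()) {
          return processPath(f);
        }
        return scheduleMove(f);
      }

      static boolean scheduleMove(File f) {
        System.out.println("would schedule a move for " + f);
        return false; // nothing left to retry in this sketch
      }

      public static void main(String[] args) {
        processRecursively(new File(args.length > 0 ? args[0] : "."));
      }
    }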
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdf1af0e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 0001e3b..a6edd80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.mover;
-import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
@@ -27,12 +26,10 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
-import com.google.common.base.Joiner;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
@@ -46,7 +43,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -515,6 +511,42 @@ public class TestStorageMover {
}
/**
+ * Run Mover with arguments specifying files and directories
+ */
+ @Test
+ public void testMoveSpecificPaths() throws Exception {
+ LOG.info("testMoveSpecificPaths");
+ final Path foo = new Path("/foo");
+ final Path barFile = new Path(foo, "bar");
+ final Path foo2 = new Path("/foo2");
+ final Path bar2File = new Path(foo2, "bar2");
+ Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
+ policyMap.put(foo, COLD);
+ policyMap.put(foo2, WARM);
+ NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(foo, foo2),
+ Arrays.asList(barFile, bar2File), BLOCK_SIZE, null, policyMap);
+ ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
+ NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
+ MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
+ test.setupCluster();
+
+ try {
+ test.prepareNamespace();
+ test.setStoragePolicy();
+
+ Map<URI, List<Path>> map = Mover.Cli.getNameNodePathsToMove(test.conf,
+ "-p", "/foo/bar", "/foo2");
+ int result = Mover.run(map, test.conf);
+ Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
+
+ Thread.sleep(5000);
+ test.verify(true);
+ } finally {
+ test.shutdownCluster();
+ }
+ }
+
+ /**
* Move an open file into archival storage
*/
@Test
[02/13] git commit: HADOOP-11145. TestFairCallQueue fails.
Contributed by Akira AJISAKA.
Posted by ar...@apache.org.
HADOOP-11145. TestFairCallQueue fails. Contributed by Akira AJISAKA.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9158697
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9158697
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9158697
Branch: refs/heads/HDFS-6581
Commit: b9158697a4f2d345b681a9b6ed982dae558338bc
Parents: 8dc4e94
Author: cnauroth <cn...@apache.org>
Authored: Tue Sep 30 08:57:05 2014 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Tue Sep 30 08:57:05 2014 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../apache/hadoop/ipc/TestFairCallQueue.java | 23 +++++++++++++++-----
2 files changed, 19 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9158697/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f90a988..e11e9a4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -898,6 +898,8 @@ Release 2.6.0 - UNRELEASED
HADOOP-11154. Update BUILDING.txt to state that CMake 3.0 or newer is
required on Mac. (cnauroth)
+ HADOOP-11145. TestFairCallQueue fails. (Akira AJISAKA via cnauroth)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9158697/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
index acbedc5..2694ba3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
@@ -29,6 +29,7 @@ import static org.mockito.Mockito.when;
import junit.framework.TestCase;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.BlockingQueue;
@@ -243,11 +244,14 @@ public class TestFairCallQueue extends TestCase {
public final String tag;
public volatile int callsAdded = 0; // How many calls we added, accurate unless interrupted
private final int maxCalls;
+ private final CountDownLatch latch;
- public Putter(BlockingQueue<Schedulable> aCq, int maxCalls, String tag) {
+ public Putter(BlockingQueue<Schedulable> aCq, int maxCalls, String tag,
+ CountDownLatch latch) {
this.maxCalls = maxCalls;
this.cq = aCq;
this.tag = tag;
+ this.latch = latch;
}
private String getTag() {
@@ -262,6 +266,7 @@ public class TestFairCallQueue extends TestCase {
while (callsAdded < maxCalls || maxCalls < 0) {
cq.put(mockCall(getTag()));
callsAdded++;
+ latch.countDown();
}
} catch (InterruptedException e) {
return;
@@ -280,14 +285,17 @@ public class TestFairCallQueue extends TestCase {
public volatile int callsTaken = 0; // total calls taken, accurate if we aren't interrupted
public volatile Schedulable lastResult = null; // the last thing we took
private final int maxCalls; // maximum calls to take
+ private final CountDownLatch latch;
private IdentityProvider uip;
- public Taker(BlockingQueue<Schedulable> aCq, int maxCalls, String tag) {
+ public Taker(BlockingQueue<Schedulable> aCq, int maxCalls, String tag,
+ CountDownLatch latch) {
this.maxCalls = maxCalls;
this.cq = aCq;
this.tag = tag;
this.uip = new UserIdentityProvider();
+ this.latch = latch;
}
@Override
@@ -303,6 +311,7 @@ public class TestFairCallQueue extends TestCase {
cq.put(res);
} else {
callsTaken++;
+ latch.countDown();
lastResult = res;
}
}
@@ -316,10 +325,11 @@ public class TestFairCallQueue extends TestCase {
public void assertCanTake(BlockingQueue<Schedulable> cq, int numberOfTakes,
int takeAttempts) throws InterruptedException {
- Taker taker = new Taker(cq, takeAttempts, "default");
+ CountDownLatch latch = new CountDownLatch(numberOfTakes);
+ Taker taker = new Taker(cq, takeAttempts, "default", latch);
Thread t = new Thread(taker);
t.start();
- t.join(100);
+ latch.await();
assertEquals(numberOfTakes, taker.callsTaken);
t.interrupt();
@@ -329,10 +339,11 @@ public class TestFairCallQueue extends TestCase {
public void assertCanPut(BlockingQueue<Schedulable> cq, int numberOfPuts,
int putAttempts) throws InterruptedException {
- Putter putter = new Putter(cq, putAttempts, null);
+ CountDownLatch latch = new CountDownLatch(numberOfPuts);
+ Putter putter = new Putter(cq, putAttempts, null, latch);
Thread t = new Thread(putter);
t.start();
- t.join(100);
+ latch.await();
assertEquals(numberOfPuts, putter.callsAdded);
t.interrupt();
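The fix above replaces t.join(100), a fixed 100 ms grace period that a loaded machine can miss, with a CountDownLatch counted down once per completed call, so the assertion runs exactly when the expected work has happened. A minimal, self-contained sketch of the pattern (the worker body and counts are illustrative):

    import java.util.concurrent.CountDownLatch;

    public class LatchSketch {
      public static void main(String[] args) throws InterruptedException {
        final int expected = 5;
        final CountDownLatch latch = new CountDownLatch(expected);
        Thread worker = new Thread(new Runnable() {
          @Override
          public void run() {
            for (int i = 0; i < expected; i++) {
              // ... one unit of work per iteration ...
              latch.countDown();
            }
          }
        });
        worker.start();
        latch.await(); // returns only after all five countDown() calls
        System.out.println("all " + expected + " units observed");
        worker.join();
      }
    }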