You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by aa...@apache.org on 2022/10/29 19:34:07 UTC
[hadoop] branch trunk updated: YARN-11264. Upgrade JUnit from 4 to 5 in hadoop-yarn-server-tests (#4776)
This is an automated email from the ASF dual-hosted git repository.
aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new cbe02c2e77e YARN-11264. Upgrade JUnit from 4 to 5 in hadoop-yarn-server-tests (#4776)
cbe02c2e77e is described below
commit cbe02c2e77e15c20213fa5e322fbacc1a0bb0fa0
Author: Ashutosh Gupta <as...@st.niituniversity.in>
AuthorDate: Sat Oct 29 20:33:57 2022 +0100
YARN-11264. Upgrade JUnit from 4 to 5 in hadoop-yarn-server-tests (#4776)
Co-authored-by: Ashutosh Gupta <as...@amazon.com>
Signed-off-by: Akira Ajisaka <aa...@apache.org>
---
.../hadoop-yarn-server-tests/pom.xml | 20 ++
.../yarn/server/TestContainerManagerSecurity.java | 315 +++++++++++----------
.../hadoop/yarn/server/TestDiskFailures.java | 39 ++-
.../yarn/server/TestMiniYARNClusterForHA.java | 18 +-
.../hadoop/yarn/server/TestMiniYarnCluster.java | 53 ++--
.../server/TestMiniYarnClusterNodeUtilization.java | 57 ++--
.../hadoop/yarn/server/TestRMNMSecretKeys.java | 120 ++++----
.../server/timeline/TimelineVersionWatcher.java | 21 +-
.../TestTimelineServiceClientIntegration.java | 16 +-
.../security/TestTimelineAuthFilterForV2.java | 96 +++----
10 files changed, 384 insertions(+), 371 deletions(-)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
index 2de2c13f16b..07838688d70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -126,6 +126,26 @@
<artifactId>bcprov-jdk15on</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-api</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-engine</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-params</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.platform</groupId>
+ <artifactId>junit-platform-launcher</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index de944cbc89c..702c5ea7dfb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.yarn.server;
-import static org.junit.Assert.fail;
-
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -31,6 +29,11 @@ import java.util.List;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
@@ -75,18 +78,15 @@ import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@RunWith(Parameterized.class)
public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
static Logger LOG = LoggerFactory.getLogger(TestContainerManagerSecurity.class);
@@ -94,29 +94,24 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
.getRecordFactory(null);
private static MiniYARNCluster yarnCluster;
private static final File testRootDir = new File("target",
- TestContainerManagerSecurity.class.getName() + "-root");
+ TestContainerManagerSecurity.class.getName() + "-root");
private static File httpSpnegoKeytabFile = new File(testRootDir,
- "httpSpnegoKeytabFile.keytab");
+ "httpSpnegoKeytabFile.keytab");
private static String httpSpnegoPrincipal = "HTTP/localhost@EXAMPLE.COM";
private Configuration conf;
- @Before
- public void setUp() throws Exception {
+ @BeforeEach
+ public void setup() throws Exception {
testRootDir.mkdirs();
httpSpnegoKeytabFile.deleteOnExit();
+ startMiniKdc();
getKdc().createPrincipal(httpSpnegoKeytabFile, httpSpnegoPrincipal);
- UserGroupInformation.setConfiguration(conf);
-
- yarnCluster =
- new MiniYARNCluster(TestContainerManagerSecurity.class.getName(), 1, 1,
- 1);
- yarnCluster.init(conf);
- yarnCluster.start();
}
-
- @After
+
+ @AfterEach
public void tearDown() {
+ stopMiniKdc();
if (yarnCluster != null) {
yarnCluster.stop();
yarnCluster = null;
@@ -130,48 +125,56 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
* and to give details in say an IDE. The second is the configuration
* object to use.
*/
- @Parameters(name = "{0}")
public static Collection<Object[]> configs() {
Configuration configurationWithoutSecurity = new Configuration();
configurationWithoutSecurity.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
-
+
Configuration configurationWithSecurity = new Configuration();
configurationWithSecurity.set(
- CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
configurationWithSecurity.set(
- YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY, httpSpnegoPrincipal);
+ YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY, httpSpnegoPrincipal);
configurationWithSecurity.set(
- YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
- httpSpnegoKeytabFile.getAbsolutePath());
+ YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
+ httpSpnegoKeytabFile.getAbsolutePath());
configurationWithSecurity.set(
- YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY, httpSpnegoPrincipal);
+ YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY, httpSpnegoPrincipal);
configurationWithSecurity.set(
- YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
- httpSpnegoKeytabFile.getAbsolutePath());
+ YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
+ httpSpnegoKeytabFile.getAbsolutePath());
- return Arrays.asList(new Object[][] {
+ return Arrays.asList(new Object[][]{
{"Simple", configurationWithoutSecurity},
{"Secure", configurationWithSecurity}});
}
-
- public TestContainerManagerSecurity(String name, Configuration conf) {
+
+ public void initTestContainerManagerSecurity(String name, Configuration conf) {
LOG.info("RUNNING TEST " + name);
+ UserGroupInformation.setConfiguration(conf);
+ yarnCluster =
+ new MiniYARNCluster(TestContainerManagerSecurity.class.getName(), 1, 1,
+ 1);
+ yarnCluster.init(conf);
+ yarnCluster.start();
conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 100000L);
this.conf = conf;
}
-
- @Test
- public void testContainerManager() throws Exception {
-
- // TestNMTokens.
- testNMTokens(conf);
-
- // Testing for container token tampering
- testContainerToken(conf);
-
- // Testing for container token tampering with epoch
- testContainerTokenWithEpoch(conf);
+
+ @MethodSource("configs")
+ @ParameterizedTest(name = "{0}")
+ void testContainerManager(String name, Configuration conf) throws Exception {
+
+ initTestContainerManagerSecurity(name, conf);
+
+ // TestNMTokens.
+ testNMTokens(conf);
+
+ // Testing for container token tampering
+ testContainerToken(conf);
+
+ // Testing for container token tampering with epoch
+ testContainerTokenWithEpoch(conf);
}
@@ -182,21 +185,21 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
private void testNMTokens(Configuration testConf) throws Exception {
NMTokenSecretManagerInRM nmTokenSecretManagerRM =
yarnCluster.getResourceManager().getRMContext()
- .getNMTokenSecretManager();
+ .getNMTokenSecretManager();
NMTokenSecretManagerInNM nmTokenSecretManagerNM =
yarnCluster.getNodeManager(0).getNMContext().getNMTokenSecretManager();
RMContainerTokenSecretManager containerTokenSecretManager =
yarnCluster.getResourceManager().getRMContext().
getContainerTokenSecretManager();
-
+
NodeManager nm = yarnCluster.getNodeManager(0);
-
+
waitForNMToReceiveNMTokenKey(nmTokenSecretManagerNM);
-
+
// Both id should be equal.
- Assert.assertEquals(nmTokenSecretManagerNM.getCurrentKey().getKeyId(),
+ assertEquals(nmTokenSecretManagerNM.getCurrentKey().getKeyId(),
nmTokenSecretManagerRM.getCurrentKey().getKeyId());
-
+
/*
* Below cases should be tested.
* 1) If Invalid NMToken is used then it should be rejected.
@@ -225,25 +228,25 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
yarnCluster.getResourceManager().getRMContext().getRMApps().put(appId, m);
ApplicationAttemptId validAppAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
-
+
ContainerId validContainerId =
ContainerId.newContainerId(validAppAttemptId, 0);
-
+
NodeId validNode = yarnCluster.getNodeManager(0).getNMContext().getNodeId();
NodeId invalidNode = NodeId.newInstance("InvalidHost", 1234);
-
+
org.apache.hadoop.yarn.api.records.Token validNMToken =
nmTokenSecretManagerRM.createNMToken(validAppAttemptId, validNode, user);
-
+
org.apache.hadoop.yarn.api.records.Token validContainerToken =
containerTokenSecretManager.createContainerToken(validContainerId,
0, validNode, user, r, Priority.newInstance(10), 1234);
ContainerTokenIdentifier identifier =
BuilderUtils.newContainerTokenIdentifier(validContainerToken);
- Assert.assertEquals(Priority.newInstance(10), identifier.getPriority());
- Assert.assertEquals(1234, identifier.getCreationTime());
-
+ assertEquals(Priority.newInstance(10), identifier.getPriority());
+ assertEquals(1234, identifier.getCreationTime());
+
StringBuilder sb;
// testInvalidNMToken ... creating NMToken using different secret manager.
@@ -255,7 +258,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
// Making sure key id is different.
} while (tempManager.getCurrentKey().getKeyId() == nmTokenSecretManagerRM
.getCurrentKey().getKeyId());
-
+
// Testing that NM rejects the requests when we don't send any token.
if (UserGroupInformation.isSecurityEnabled()) {
sb = new StringBuilder("Client cannot authenticate via:[TOKEN]");
@@ -266,55 +269,55 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
}
String errorMsg = testStartContainer(rpc, validAppAttemptId, validNode,
validContainerToken, null, true);
- Assert.assertTrue("In calling " + validNode + " exception was '"
+ assertTrue(errorMsg.contains(sb.toString()), "In calling " + validNode + " exception was '"
+ errorMsg + "' but doesn't contain '"
- + sb.toString() + "'", errorMsg.contains(sb.toString()));
-
+ + sb.toString() + "'");
+
org.apache.hadoop.yarn.api.records.Token invalidNMToken =
tempManager.createNMToken(validAppAttemptId, validNode, user);
sb = new StringBuilder("Given NMToken for application : ");
sb.append(validAppAttemptId.toString())
- .append(" seems to have been generated illegally.");
- Assert.assertTrue(sb.toString().contains(
+ .append(" seems to have been generated illegally.");
+ assertTrue(sb.toString().contains(
testStartContainer(rpc, validAppAttemptId, validNode,
validContainerToken, invalidNMToken, true)));
-
+
// valid NMToken but belonging to other node
invalidNMToken =
nmTokenSecretManagerRM.createNMToken(validAppAttemptId, invalidNode,
user);
sb = new StringBuilder("Given NMToken for application : ");
sb.append(validAppAttemptId)
- .append(" is not valid for current node manager.expected : ")
- .append(validNode.toString())
- .append(" found : ").append(invalidNode.toString());
- Assert.assertTrue(sb.toString().contains(
+ .append(" is not valid for current node manager.expected : ")
+ .append(validNode.toString())
+ .append(" found : ").append(invalidNode.toString());
+ assertTrue(sb.toString().contains(
testStartContainer(rpc, validAppAttemptId, validNode,
validContainerToken, invalidNMToken, true)));
-
+
// using correct tokens. nmtoken for app attempt should get saved.
testConf.setInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
4 * 60 * 1000);
validContainerToken =
containerTokenSecretManager.createContainerToken(validContainerId,
0, validNode, user, r, Priority.newInstance(0), 0);
- Assert.assertTrue(testStartContainer(rpc, validAppAttemptId, validNode,
- validContainerToken, validNMToken, false).isEmpty());
- Assert.assertTrue(nmTokenSecretManagerNM
+ assertTrue(testStartContainer(rpc, validAppAttemptId, validNode,
+ validContainerToken, validNMToken, false).isEmpty());
+ assertTrue(nmTokenSecretManagerNM
.isAppAttemptNMTokenKeyPresent(validAppAttemptId));
-
+
// using a new compatible version nmtoken, expect container can be started
// successfully.
ApplicationAttemptId validAppAttemptId2 =
ApplicationAttemptId.newInstance(appId, 2);
-
+
ContainerId validContainerId2 =
ContainerId.newContainerId(validAppAttemptId2, 0);
org.apache.hadoop.yarn.api.records.Token validContainerToken2 =
containerTokenSecretManager.createContainerToken(validContainerId2,
0, validNode, user, r, Priority.newInstance(0), 0);
-
+
org.apache.hadoop.yarn.api.records.Token validNMToken2 =
nmTokenSecretManagerRM.createNMToken(validAppAttemptId2, validNode, user);
// First, get a new NMTokenIdentifier.
@@ -323,43 +326,42 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
newIdentifier.readFields(dib);
-
+
// Then, generate a new version NMTokenIdentifier (NMTokenIdentifierNewForTest)
// with additional field of message.
- NMTokenIdentifierNewForTest newVersionIdentifier =
+ NMTokenIdentifierNewForTest newVersionIdentifier =
new NMTokenIdentifierNewForTest(newIdentifier, "message");
-
+
// check new version NMTokenIdentifier has correct info.
- Assert.assertEquals("The ApplicationAttemptId is changed after set to " +
- "newVersionIdentifier", validAppAttemptId2.getAttemptId(),
+ assertEquals(validAppAttemptId2.getAttemptId(),
newVersionIdentifier.getApplicationAttemptId().getAttemptId()
- );
-
- Assert.assertEquals("The message is changed after set to newVersionIdentifier",
- "message", newVersionIdentifier.getMessage());
-
- Assert.assertEquals("The NodeId is changed after set to newVersionIdentifier",
- validNode, newVersionIdentifier.getNodeId());
-
+ ,
+ "The ApplicationAttemptId is changed after set to " +
+ "newVersionIdentifier");
+
+ assertEquals("message", newVersionIdentifier.getMessage(), "The message is changed after set to newVersionIdentifier");
+
+ assertEquals(validNode, newVersionIdentifier.getNodeId(), "The NodeId is changed after set to newVersionIdentifier");
+
// create new Token based on new version NMTokenIdentifier.
org.apache.hadoop.yarn.api.records.Token newVersionedNMToken =
BaseNMTokenSecretManager.newInstance(
- nmTokenSecretManagerRM.retrievePassword(newVersionIdentifier),
+ nmTokenSecretManagerRM.retrievePassword(newVersionIdentifier),
newVersionIdentifier);
-
+
// Verify startContainer is successful and no exception is thrown.
- Assert.assertTrue(testStartContainer(rpc, validAppAttemptId2, validNode,
+ assertTrue(testStartContainer(rpc, validAppAttemptId2, validNode,
validContainerToken2, newVersionedNMToken, false).isEmpty());
- Assert.assertTrue(nmTokenSecretManagerNM
+ assertTrue(nmTokenSecretManagerNM
.isAppAttemptNMTokenKeyPresent(validAppAttemptId2));
-
+
//Now lets wait till container finishes and is removed from node manager.
waitForContainerToFinishOnNM(validContainerId);
sb = new StringBuilder("Attempt to relaunch the same container with id ");
sb.append(validContainerId);
- Assert.assertTrue(testStartContainer(rpc, validAppAttemptId, validNode,
+ assertTrue(testStartContainer(rpc, validAppAttemptId, validNode,
validContainerToken, validNMToken, true).contains(sb.toString()));
-
+
// Container is removed from node manager's memory by this time.
// trying to stop the container. It should not throw any exception.
testStopContainer(rpc, validAppAttemptId, validNode, validContainerId,
@@ -370,25 +372,25 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
// Key rolled over once.. rolling over again
rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
-
+
// trying to get container status. Now saved nmToken should be used for
// authentication... It should complain saying container was recently
// stopped.
sb = new StringBuilder("Container ");
sb.append(validContainerId)
.append(" was recently stopped on node manager");
- Assert.assertTrue(testGetContainer(rpc, validAppAttemptId, validNode,
+ assertTrue(testGetContainer(rpc, validAppAttemptId, validNode,
validContainerId, validNMToken, true).contains(sb.toString()));
// Now lets remove the container from nm-memory
nm.getNodeStatusUpdater().clearFinishedContainersFromCache();
-
+
// This should fail as container is removed from recently tracked finished
// containers.
sb = new StringBuilder("Container ")
.append(validContainerId.toString())
.append(" is not handled by this NodeManager");
- Assert.assertTrue(testGetContainer(rpc, validAppAttemptId, validNode,
+ assertTrue(testGetContainer(rpc, validAppAttemptId, validNode,
validContainerId, validNMToken, false).contains(sb.toString()));
// using appAttempt-1 NMtoken for launching container for appAttempt-2
@@ -396,13 +398,13 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(appId, 2);
Token attempt1NMToken =
nmTokenSecretManagerRM
- .createNMToken(validAppAttemptId, validNode, user);
+ .createNMToken(validAppAttemptId, validNode, user);
org.apache.hadoop.yarn.api.records.Token newContainerToken =
containerTokenSecretManager.createContainerToken(
- ContainerId.newContainerId(attempt2, 1), 0, validNode, user, r,
+ ContainerId.newContainerId(attempt2, 1), 0, validNode, user, r,
Priority.newInstance(0), 0);
- Assert.assertTrue(testStartContainer(rpc, attempt2, validNode,
- newContainerToken, attempt1NMToken, false).isEmpty());
+ assertTrue(testStartContainer(rpc, attempt2, validNode,
+ newContainerToken, attempt1NMToken, false).isEmpty());
}
private void waitForContainerToFinishOnNM(ContainerId containerId)
@@ -419,7 +421,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
LOG.info("Waiting for " + containerId + " to get to state " +
ContainerState.COMPLETE);
GenericTestUtils.waitFor(() -> ContainerState.COMPLETE.equals(
- waitContainer.cloneAndGetContainerStatus().getState()),
+ waitContainer.cloneAndGetContainerStatus().getState()),
500, timeout);
} catch (TimeoutException te) {
LOG.error("TimeoutException", te);
@@ -433,7 +435,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
// Normally, Containers will be removed from NM context after they are
// explicitly acked by RM. Now, manually remove it for testing.
yarnCluster.getNodeManager(0).getNodeStatusUpdater()
- .addCompletedContainer(containerId);
+ .addCompletedContainer(containerId);
LOG.info("Removing container from NMContext, containerID = " + containerId);
nmContext.getContainers().remove(containerId);
}
@@ -458,16 +460,16 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
Thread.sleep(1000);
}
nmTokenSecretManagerRM.activateNextMasterKey();
- Assert.assertTrue((nmTokenSecretManagerNM.getCurrentKey().getKeyId()
+ assertTrue((nmTokenSecretManagerNM.getCurrentKey().getKeyId()
== nmTokenSecretManagerRM.getCurrentKey().getKeyId()));
}
-
+
private String testStopContainer(YarnRPC rpc,
ApplicationAttemptId appAttemptId, NodeId nodeId,
ContainerId containerId, Token nmToken, boolean isExceptionExpected) {
try {
stopContainer(rpc, nmToken,
- Arrays.asList(new ContainerId[] {containerId}), appAttemptId,
+ Arrays.asList(new ContainerId[]{containerId}), appAttemptId,
nodeId);
if (isExceptionExpected) {
fail("Exception was expected!!");
@@ -505,8 +507,8 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
try {
startContainer(rpc, nmToken, containerToken, nodeId,
appAttemptId.toString());
- if (isExceptionExpected){
- fail("Exception was expected!!");
+ if (isExceptionExpected) {
+ fail("Exception was expected!!");
}
return "";
} catch (Exception e) {
@@ -514,7 +516,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
return e.getMessage();
}
}
-
+
private void stopContainer(YarnRPC rpc, Token nmToken,
List<ContainerId> containerId, ApplicationAttemptId appAttemptId,
NodeId nodeId) throws Exception {
@@ -537,13 +539,12 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
}
}
}
-
- private void
- getContainerStatus(YarnRPC rpc,
- org.apache.hadoop.yarn.api.records.Token nmToken,
- ContainerId containerId,
- ApplicationAttemptId appAttemptId, NodeId nodeId,
- boolean isExceptionExpected) throws Exception {
+
+ private void getContainerStatus(YarnRPC rpc,
+ org.apache.hadoop.yarn.api.records.Token nmToken,
+ ContainerId containerId,
+ ApplicationAttemptId appAttemptId, NodeId nodeId,
+ boolean isExceptionExpected) throws Exception {
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(containerId);
GetContainerStatusesRequest request =
@@ -558,7 +559,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
if (statuses.getFailedRequests() != null
&& statuses.getFailedRequests().containsKey(containerId)) {
parseAndThrowException(statuses.getFailedRequests().get(containerId)
- .deSerialize());
+ .deSerialize());
}
} finally {
if (proxy != null) {
@@ -566,7 +567,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
}
}
}
-
+
private void startContainer(final YarnRPC rpc,
org.apache.hadoop.yarn.api.records.Token nmToken,
org.apache.hadoop.yarn.api.records.Token containerToken,
@@ -584,7 +585,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
try {
proxy = getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
StartContainersResponse response = proxy.startContainers(allRequests);
- for(SerializedException ex : response.getFailedRequests().values()){
+ for (SerializedException ex : response.getFailedRequests().values()) {
parseAndThrowException(ex.deSerialize());
}
} finally {
@@ -613,11 +614,11 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
final InetSocketAddress addr =
new InetSocketAddress(nodeId.getHost(), nodeId.getPort());
if (nmToken != null) {
- ugi.addToken(ConverterUtils.convertFromYarn(nmToken, addr));
+ ugi.addToken(ConverterUtils.convertFromYarn(nmToken, addr));
}
proxy =
NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, ugi,
- rpc, addr);
+ rpc, addr);
return proxy;
}
@@ -642,7 +643,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
*/
NMTokenSecretManagerInRM nmTokenSecretManagerInRM =
yarnCluster.getResourceManager().getRMContext()
- .getNMTokenSecretManager();
+ .getNMTokenSecretManager();
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
@@ -651,46 +652,46 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
NMTokenSecretManagerInNM nmTokenSecretManagerInNM =
nm.getNMContext().getNMTokenSecretManager();
String user = "test";
-
+
waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM);
NodeId nodeId = nm.getNMContext().getNodeId();
-
+
// Both id should be equal.
- Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(),
+ assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(),
nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
-
-
+
+
RMContainerTokenSecretManager containerTokenSecretManager =
yarnCluster.getResourceManager().getRMContext().
getContainerTokenSecretManager();
-
+
Resource r = Resource.newInstance(1230, 2);
-
- Token containerToken =
+
+ Token containerToken =
containerTokenSecretManager.createContainerToken(
cId, 0, nodeId, user, r, Priority.newInstance(0), 0);
-
- ContainerTokenIdentifier containerTokenIdentifier =
+
+ ContainerTokenIdentifier containerTokenIdentifier =
getContainerTokenIdentifierFromToken(containerToken);
-
+
// Verify new compatible version ContainerTokenIdentifier
// can work successfully.
- ContainerTokenIdentifierForTest newVersionTokenIdentifier =
+ ContainerTokenIdentifierForTest newVersionTokenIdentifier =
new ContainerTokenIdentifierForTest(containerTokenIdentifier,
"message");
- byte[] password =
+ byte[] password =
containerTokenSecretManager.createPassword(newVersionTokenIdentifier);
-
+
Token newContainerToken = BuilderUtils.newContainerToken(
nodeId, password, newVersionTokenIdentifier);
-
+
Token nmToken =
- nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
+ nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
YarnRPC rpc = YarnRPC.create(conf);
- Assert.assertTrue(testStartContainer(rpc, appAttemptId, nodeId,
+ assertTrue(testStartContainer(rpc, appAttemptId, nodeId,
newContainerToken, nmToken, false).isEmpty());
-
+
// Creating a tampered Container Token
RMContainerTokenSecretManager tamperedContainerTokenSecretManager =
new RMContainerTokenSecretManager(conf);
@@ -700,17 +701,17 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
tamperedContainerTokenSecretManager.activateNextMasterKey();
} while (containerTokenSecretManager.getCurrentKey().getKeyId()
== tamperedContainerTokenSecretManager.getCurrentKey().getKeyId());
-
+
ContainerId cId2 = ContainerId.newContainerId(appAttemptId, 1);
// Creating modified containerToken
Token containerToken2 =
tamperedContainerTokenSecretManager.createContainerToken(cId2, 0,
nodeId, user, r, Priority.newInstance(0), 0);
-
+
StringBuilder sb = new StringBuilder("Given Container ");
sb.append(cId2)
.append(" seems to have an illegally generated token.");
- Assert.assertTrue(testStartContainer(rpc, appAttemptId, nodeId,
+ assertTrue(testStartContainer(rpc, appAttemptId, nodeId,
containerToken2, nmToken, true).contains(sb.toString()));
}
@@ -754,7 +755,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
NodeId nodeId = nm.getNMContext().getNodeId();
// Both id should be equal.
- Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(),
+ assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(),
nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
// Creating a normal Container Token
@@ -765,17 +766,17 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
Token containerToken =
containerTokenSecretManager.createContainerToken(cId, 0, nodeId, user,
r, Priority.newInstance(0), 0);
-
+
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier();
byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
containerTokenIdentifier.readFields(dib);
-
-
- Assert.assertEquals(cId, containerTokenIdentifier.getContainerID());
- Assert.assertEquals(
+
+
+ assertEquals(cId, containerTokenIdentifier.getContainerID());
+ assertEquals(
cId.toString(), containerTokenIdentifier.getContainerID().toString());
Token nmToken =
@@ -791,10 +792,10 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
= getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
GetContainerStatusesResponse res = proxy.getContainerStatuses(
GetContainerStatusesRequest.newInstance(containerIds));
- Assert.assertNotNull(res.getContainerStatuses().get(0));
- Assert.assertEquals(
+ assertNotNull(res.getContainerStatuses().get(0));
+ assertEquals(
cId, res.getContainerStatuses().get(0).getContainerId());
- Assert.assertEquals(cId.toString(),
+ assertEquals(cId.toString(),
res.getContainerStatuses().get(0).getContainerId().toString());
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
index 23bb0399930..daeb21db0cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.yarn.server;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
@@ -37,11 +39,10 @@ import java.io.IOException;
import java.util.Iterator;
import java.util.List;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
-import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -73,7 +74,7 @@ public class TestDiskFailures {
private static MiniYARNCluster yarnCluster;
LocalDirsHandlerService dirsHandler;
- @BeforeClass
+ @BeforeAll
public static void setup() throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
localFS = FileContext.getLocalFSFileContext();
@@ -82,7 +83,7 @@ public class TestDiskFailures {
// Do not start cluster here
}
- @AfterClass
+ @AfterAll
public static void teardown() {
if (yarnCluster != null) {
yarnCluster.stop();
@@ -99,7 +100,7 @@ public class TestDiskFailures {
* @throws IOException
*/
@Test
- public void testLocalDirsFailures() throws IOException {
+ void testLocalDirsFailures() throws IOException {
testDirsFailures(true);
}
@@ -111,7 +112,7 @@ public class TestDiskFailures {
* @throws IOException
*/
@Test
- public void testLogDirsFailures() throws IOException {
+ void testLogDirsFailures() throws IOException {
testDirsFailures(false);
}
@@ -122,7 +123,7 @@ public class TestDiskFailures {
* @throws IOException
*/
@Test
- public void testDirFailuresOnStartup() throws IOException {
+ void testDirFailuresOnStartup() throws IOException {
Configuration conf = new YarnConfiguration();
String localDir1 = new File(testDir, "localDir1").getPath();
String localDir2 = new File(testDir, "localDir2").getPath();
@@ -137,11 +138,11 @@ public class TestDiskFailures {
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
dirSvc.init(conf);
List<String> localDirs = dirSvc.getLocalDirs();
- Assert.assertEquals(1, localDirs.size());
- Assert.assertEquals(new Path(localDir2).toString(), localDirs.get(0));
+ assertEquals(1, localDirs.size());
+ assertEquals(new Path(localDir2).toString(), localDirs.get(0));
List<String> logDirs = dirSvc.getLogDirs();
- Assert.assertEquals(1, logDirs.size());
- Assert.assertEquals(new Path(logDir1).toString(), logDirs.get(0));
+ assertEquals(1, logDirs.size());
+ assertEquals(new Path(logDir1).toString(), logDirs.get(0));
}
private void testDirsFailures(boolean localORLogDirs) throws IOException {
@@ -177,8 +178,7 @@ public class TestDiskFailures {
List<String> list = localORLogDirs ? dirsHandler.getLocalDirs()
: dirsHandler.getLogDirs();
String[] dirs = list.toArray(new String[list.size()]);
- Assert.assertEquals("Number of nm-" + dirType + "-dirs is wrong.",
- numLocalDirs, dirs.length);
+ assertEquals(numLocalDirs, dirs.length, "Number of nm-" + dirType + "-dirs is wrong.");
String expectedDirs = StringUtils.join(",", list);
// validate the health of disks initially
verifyDisksHealth(localORLogDirs, expectedDirs, true);
@@ -225,11 +225,9 @@ public class TestDiskFailures {
String seenDirs = StringUtils.join(",", list);
LOG.info("ExpectedDirs=" + expectedDirs);
LOG.info("SeenDirs=" + seenDirs);
- Assert.assertTrue("NodeManager could not identify disk failure.",
- expectedDirs.equals(seenDirs));
+ assertEquals(expectedDirs, seenDirs, "NodeManager could not identify disk failure.");
- Assert.assertEquals("Node's health in terms of disks is wrong",
- isHealthy, dirsHandler.areDisksHealthy());
+ assertEquals(isHealthy, dirsHandler.areDisksHealthy(), "Node's health in terms of disks is wrong");
for (int i = 0; i < 10; i++) {
Iterator<RMNode> iter = yarnCluster.getResourceManager().getRMContext()
.getRMNodes().values().iterator();
@@ -247,8 +245,7 @@ public class TestDiskFailures {
}
Iterator<RMNode> iter = yarnCluster.getResourceManager().getRMContext()
.getRMNodes().values().iterator();
- Assert.assertEquals("RM is not updated with the health status of a node",
- isHealthy, iter.next().getState() != NodeState.UNHEALTHY);
+ assertEquals(isHealthy, iter.next().getState() != NodeState.UNHEALTHY, "RM is not updated with the health status of a node");
}
/**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYARNClusterForHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYARNClusterForHA.java
index ed596340d7c..8bd7e59eb3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYARNClusterForHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYARNClusterForHA.java
@@ -18,21 +18,21 @@
package org.apache.hadoop.yarn.server;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestMiniYARNClusterForHA {
MiniYARNCluster cluster;
- @Before
+ @BeforeEach
public void setup() throws IOException, InterruptedException {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
@@ -43,12 +43,12 @@ public class TestMiniYARNClusterForHA {
cluster.init(conf);
cluster.start();
- assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
+ assertFalse(-1 == cluster.getActiveRMIndex(), "RM never turned active");
}
@Test
- public void testClusterWorks() throws YarnException, InterruptedException {
- assertTrue("NMs fail to connect to the RM",
- cluster.waitForNodeManagersToConnect(5000));
+ void testClusterWorks() throws YarnException, InterruptedException {
+ assertTrue(cluster.waitForNodeManagersToConnect(5000),
+ "NMs fail to connect to the RM");
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
index ff7fafc2001..c5c2e3e0f5e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
@@ -18,18 +18,23 @@
package org.apache.hadoop.yarn.server;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
import java.io.IOException;
public class TestMiniYarnCluster {
@Test
- public void testTimelineServiceStartInMiniCluster() throws Exception {
+ void testTimelineServiceStartInMiniCluster() throws Exception {
Configuration conf = new YarnConfiguration();
int numNodeManagers = 1;
int numLocalDirs = 1;
@@ -45,14 +50,14 @@ public class TestMiniYarnCluster {
try (MiniYARNCluster cluster =
new MiniYARNCluster(TestMiniYarnCluster.class.getSimpleName(),
numNodeManagers, numLocalDirs, numLogDirs, numLogDirs,
- enableAHS)) {
+ enableAHS)) {
cluster.init(conf);
cluster.start();
//verify that the timeline service is not started.
- Assert.assertNull("Timeline Service should not have been started",
- cluster.getApplicationHistoryServer());
+ assertNull(cluster.getApplicationHistoryServer(),
+ "Timeline Service should not have been started");
}
/*
@@ -64,25 +69,25 @@ public class TestMiniYarnCluster {
try (MiniYARNCluster cluster =
new MiniYARNCluster(TestMiniYarnCluster.class.getSimpleName(),
numNodeManagers, numLocalDirs, numLogDirs, numLogDirs,
- enableAHS)) {
+ enableAHS)) {
cluster.init(conf);
// Verify that the timeline-service starts on ephemeral ports by default
String hostname = MiniYARNCluster.getHostname();
- Assert.assertEquals(hostname + ":0",
- conf.get(YarnConfiguration.TIMELINE_SERVICE_ADDRESS));
+ assertEquals(hostname + ":0",
+ conf.get(YarnConfiguration.TIMELINE_SERVICE_ADDRESS));
cluster.start();
//Timeline service may sometime take a while to get started
int wait = 0;
- while(cluster.getApplicationHistoryServer() == null && wait < 20) {
+ while (cluster.getApplicationHistoryServer() == null && wait < 20) {
Thread.sleep(500);
wait++;
}
//verify that the timeline service is started.
- Assert.assertNotNull("Timeline Service should have been started",
- cluster.getApplicationHistoryServer());
+ assertNotNull(cluster.getApplicationHistoryServer(),
+ "Timeline Service should have been started");
}
/*
* Timeline service should start if TIMELINE_SERVICE_ENABLED == false
@@ -93,24 +98,24 @@ public class TestMiniYarnCluster {
try (MiniYARNCluster cluster =
new MiniYARNCluster(TestMiniYarnCluster.class.getSimpleName(),
numNodeManagers, numLocalDirs, numLogDirs, numLogDirs,
- enableAHS)) {
+ enableAHS)) {
cluster.init(conf);
cluster.start();
//Timeline service may sometime take a while to get started
int wait = 0;
- while(cluster.getApplicationHistoryServer() == null && wait < 20) {
+ while (cluster.getApplicationHistoryServer() == null && wait < 20) {
Thread.sleep(500);
wait++;
}
//verify that the timeline service is started.
- Assert.assertNotNull("Timeline Service should have been started",
- cluster.getApplicationHistoryServer());
+ assertNotNull(cluster.getApplicationHistoryServer(),
+ "Timeline Service should have been started");
}
}
@Test
- public void testMultiRMConf() throws IOException {
+ void testMultiRMConf() throws IOException {
String RM1_NODE_ID = "rm1", RM2_NODE_ID = "rm2";
int RM1_PORT_BASE = 10000, RM2_PORT_BASE = 20000;
Configuration conf = new YarnConfiguration();
@@ -130,22 +135,22 @@ public class TestMiniYarnCluster {
cluster.init(conf);
Configuration conf1 = cluster.getResourceManager(0).getConfig(),
conf2 = cluster.getResourceManager(1).getConfig();
- Assert.assertFalse(conf1 == conf2);
- Assert.assertEquals("0.0.0.0:18032",
+ assertFalse(conf1 == conf2);
+ assertEquals("0.0.0.0:18032",
conf1.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS,
RM1_NODE_ID)));
- Assert.assertEquals("0.0.0.0:28032",
+ assertEquals("0.0.0.0:28032",
conf1.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS,
RM2_NODE_ID)));
- Assert.assertEquals("rm1", conf1.get(YarnConfiguration.RM_HA_ID));
+ assertEquals("rm1", conf1.get(YarnConfiguration.RM_HA_ID));
- Assert.assertEquals("0.0.0.0:18032",
+ assertEquals("0.0.0.0:18032",
conf2.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS,
RM1_NODE_ID)));
- Assert.assertEquals("0.0.0.0:28032",
+ assertEquals("0.0.0.0:28032",
conf2.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS,
RM2_NODE_ID)));
- Assert.assertEquals("rm2", conf2.get(YarnConfiguration.RM_HA_ID));
+ assertEquals("rm2", conf2.get(YarnConfiguration.RM_HA_ID));
}
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
index d1ccc9a6fec..9c7f4447c6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.yarn.server;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
@@ -39,8 +39,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
public class TestMiniYarnClusterNodeUtilization {
// Mini YARN cluster setup
@@ -72,7 +73,7 @@ public class TestMiniYarnClusterNodeUtilization {
private NodeStatus nodeStatus;
- @Before
+ @BeforeEach
public void setup() {
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0");
@@ -81,7 +82,7 @@ public class TestMiniYarnClusterNodeUtilization {
cluster = new MiniYARNCluster(name, NUM_RM, NUM_NM, 1, 1);
cluster.init(conf);
cluster.start();
- assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
+ assertFalse(-1 == cluster.getActiveRMIndex(), "RM never turned active");
nm = (CustomNodeManager)cluster.getNodeManager(0);
nodeStatus = createNodeStatus(nm.getNMContext().getNodeId(), 0,
@@ -95,11 +96,12 @@ public class TestMiniYarnClusterNodeUtilization {
* both the RMNode and SchedulerNode have been updated with the new
* utilization.
*/
- @Test(timeout=60000)
- public void testUpdateNodeUtilization()
+ @Test
+ @Timeout(60)
+ void testUpdateNodeUtilization()
throws InterruptedException, IOException, YarnException {
- assertTrue("NMs fail to connect to the RM",
- cluster.waitForNodeManagersToConnect(10000));
+ assertTrue(cluster.waitForNodeManagersToConnect(10000),
+ "NMs fail to connect to the RM");
// Give the heartbeat time to propagate to the RM
verifySimulatedUtilization();
@@ -119,11 +121,12 @@ public class TestMiniYarnClusterNodeUtilization {
* Verify both the RMNode and SchedulerNode have been updated with the new
* utilization.
*/
- @Test(timeout=60000)
- public void testMockNodeStatusHeartbeat()
+ @Test
+ @Timeout(60)
+ void testMockNodeStatusHeartbeat()
throws InterruptedException, YarnException {
- assertTrue("NMs fail to connect to the RM",
- cluster.waitForNodeManagersToConnect(10000));
+ assertTrue(cluster.waitForNodeManagersToConnect(10000),
+ "NMs fail to connect to the RM");
NodeStatusUpdater updater = nm.getNodeStatusUpdater();
updater.sendOutofBandHeartBeat();
@@ -196,12 +199,12 @@ public class TestMiniYarnClusterNodeUtilization {
// Give the heartbeat time to propagate to the RM (max 10 seconds)
// We check if the nodeUtilization is up to date
- for (int i=0; i<100; i++) {
+ for (int i = 0; i < 100; i++) {
for (RMNode ni : rmContext.getRMNodes().values()) {
if (ni.getNodeUtilization() != null) {
- if (ni.getNodeUtilization().equals(nodeUtilization)) {
- break;
- }
+ if (ni.getNodeUtilization().equals(nodeUtilization)) {
+ break;
+ }
}
}
Thread.sleep(100);
@@ -210,22 +213,18 @@ public class TestMiniYarnClusterNodeUtilization {
// Verify the data is readable from the RM and scheduler nodes
for (RMNode ni : rmContext.getRMNodes().values()) {
ResourceUtilization cu = ni.getAggregatedContainersUtilization();
- assertEquals("Containers Utillization not propagated to RMNode",
- containersUtilization, cu);
+ assertEquals(containersUtilization, cu, "Containers Utillization not propagated to RMNode");
ResourceUtilization nu = ni.getNodeUtilization();
- assertEquals("Node Utillization not propagated to RMNode",
- nodeUtilization, nu);
+ assertEquals(nodeUtilization, nu, "Node Utillization not propagated to RMNode");
- SchedulerNode scheduler =
- rmContext.getScheduler().getSchedulerNode(ni.getNodeID());
+ SchedulerNode scheduler = rmContext.getScheduler().getSchedulerNode(ni.getNodeID());
cu = scheduler.getAggregatedContainersUtilization();
- assertEquals("Containers Utillization not propagated to SchedulerNode",
- containersUtilization, cu);
+ assertEquals(containersUtilization, cu,
+ "Containers Utillization not propagated to SchedulerNode");
nu = scheduler.getNodeUtilization();
- assertEquals("Node Utillization not propagated to SchedulerNode",
- nodeUtilization, nu);
+ assertEquals(nodeUtilization, nu, "Node Utillization not propagated to SchedulerNode");
}
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
index ba14491bf51..ae12658a299 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
@@ -22,10 +22,12 @@ import java.io.File;
import java.io.IOException;
import java.util.UUID;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
@@ -36,7 +38,10 @@ import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.kerby.util.IOUtil;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
public class TestRMNMSecretKeys {
private static final String KRB5_CONF = "java.security.krb5.conf";
@@ -44,7 +49,7 @@ public class TestRMNMSecretKeys {
System.getProperty("test.build.dir", "target/test-dir"),
UUID.randomUUID().toString());
- @BeforeClass
+ @BeforeAll
public static void setup() throws IOException {
KRB5_CONF_ROOT_DIR.mkdir();
File krb5ConfFile = new File(KRB5_CONF_ROOT_DIR, "krb5.conf");
@@ -63,17 +68,18 @@ public class TestRMNMSecretKeys {
System.setProperty(KRB5_CONF, krb5ConfFile.getAbsolutePath());
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws IOException {
KRB5_CONF_ROOT_DIR.delete();
}
- @Test(timeout = 1000000)
- public void testNMUpdation() throws Exception {
+ @Test
+ @Timeout(1000)
+ void testNMUpdation() throws Exception {
YarnConfiguration conf = new YarnConfiguration();
// validating RM NM keys for Unsecured environment
validateRMNMKeyExchange(conf);
-
+
// validating RM NM keys for secured environment
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
@@ -113,30 +119,30 @@ public class TestRMNMSecretKeys {
MasterKey containerTokenMasterKey =
registrationResponse.getContainerTokenMasterKey();
- Assert.assertNotNull(containerToken
- + "Registration should cause a key-update!", containerTokenMasterKey);
+ assertNotNull(containerTokenMasterKey, containerToken
+ + "Registration should cause a key-update!");
MasterKey nmTokenMasterKey = registrationResponse.getNMTokenMasterKey();
- Assert.assertNotNull(nmToken
- + "Registration should cause a key-update!", nmTokenMasterKey);
+ assertNotNull(nmTokenMasterKey, nmToken
+ + "Registration should cause a key-update!");
dispatcher.await();
NodeHeartbeatResponse response = nm.nodeHeartbeat(true);
- Assert.assertNull(containerToken +
- "First heartbeat after registration shouldn't get any key updates!",
- response.getContainerTokenMasterKey());
- Assert.assertNull(nmToken +
- "First heartbeat after registration shouldn't get any key updates!",
- response.getNMTokenMasterKey());
+ assertNull(response.getContainerTokenMasterKey(),
+ containerToken +
+ "First heartbeat after registration shouldn't get any key updates!");
+ assertNull(response.getNMTokenMasterKey(),
+ nmToken +
+ "First heartbeat after registration shouldn't get any key updates!");
dispatcher.await();
response = nm.nodeHeartbeat(true);
- Assert.assertNull(containerToken +
- "Even second heartbeat after registration shouldn't get any key updates!",
- response.getContainerTokenMasterKey());
- Assert.assertNull(nmToken +
- "Even second heartbeat after registration shouldn't get any key updates!",
- response.getContainerTokenMasterKey());
+ assertNull(response.getContainerTokenMasterKey(),
+ containerToken +
+ "Even second heartbeat after registration shouldn't get any key updates!");
+ assertNull(response.getNMTokenMasterKey(),
+ nmToken +
+ "Even second heartbeat after registration shouldn't get any key updates!");
dispatcher.await();
@@ -146,30 +152,30 @@ public class TestRMNMSecretKeys {
// Heartbeats after roll-over and before activation should be fine.
response = nm.nodeHeartbeat(true);
- Assert.assertNotNull(containerToken +
- "Heartbeats after roll-over and before activation should not err out.",
- response.getContainerTokenMasterKey());
- Assert.assertNotNull(nmToken +
- "Heartbeats after roll-over and before activation should not err out.",
- response.getNMTokenMasterKey());
+ assertNotNull(response.getContainerTokenMasterKey(),
+ containerToken +
+ "Heartbeats after roll-over and before activation should not err out.");
+ assertNotNull(response.getNMTokenMasterKey(),
+ nmToken +
+ "Heartbeats after roll-over and before activation should not err out.");
- Assert.assertEquals(containerToken +
- "Roll-over should have incremented the key-id only by one!",
- containerTokenMasterKey.getKeyId() + 1,
- response.getContainerTokenMasterKey().getKeyId());
- Assert.assertEquals(nmToken +
- "Roll-over should have incremented the key-id only by one!",
- nmTokenMasterKey.getKeyId() + 1,
- response.getNMTokenMasterKey().getKeyId());
+ assertEquals(containerTokenMasterKey.getKeyId() + 1,
+ response.getContainerTokenMasterKey().getKeyId(),
+ containerToken +
+ "Roll-over should have incremented the key-id only by one!");
+ assertEquals(nmTokenMasterKey.getKeyId() + 1,
+ response.getNMTokenMasterKey().getKeyId(),
+ nmToken +
+ "Roll-over should have incremented the key-id only by one!");
dispatcher.await();
response = nm.nodeHeartbeat(true);
- Assert.assertNull(containerToken +
- "Second heartbeat after roll-over shouldn't get any key updates!",
- response.getContainerTokenMasterKey());
- Assert.assertNull(nmToken +
- "Second heartbeat after roll-over shouldn't get any key updates!",
- response.getNMTokenMasterKey());
+ assertNull(response.getContainerTokenMasterKey(),
+ containerToken +
+ "Second heartbeat after roll-over shouldn't get any key updates!");
+ assertNull(response.getNMTokenMasterKey(),
+ nmToken +
+ "Second heartbeat after roll-over shouldn't get any key updates!");
dispatcher.await();
// Let's force activation
@@ -177,21 +183,21 @@ public class TestRMNMSecretKeys {
rm.getRMContext().getNMTokenSecretManager().activateNextMasterKey();
response = nm.nodeHeartbeat(true);
- Assert.assertNull(containerToken
- + "Activation shouldn't cause any key updates!",
- response.getContainerTokenMasterKey());
- Assert.assertNull(nmToken
- + "Activation shouldn't cause any key updates!",
- response.getNMTokenMasterKey());
+ assertNull(response.getContainerTokenMasterKey(),
+ containerToken
+ + "Activation shouldn't cause any key updates!");
+ assertNull(response.getNMTokenMasterKey(),
+ nmToken
+ + "Activation shouldn't cause any key updates!");
dispatcher.await();
response = nm.nodeHeartbeat(true);
- Assert.assertNull(containerToken +
- "Even second heartbeat after activation shouldn't get any key updates!",
- response.getContainerTokenMasterKey());
- Assert.assertNull(nmToken +
- "Even second heartbeat after activation shouldn't get any key updates!",
- response.getNMTokenMasterKey());
+ assertNull(response.getContainerTokenMasterKey(),
+ containerToken +
+ "Even second heartbeat after activation shouldn't get any key updates!");
+ assertNull(response.getNMTokenMasterKey(),
+ nmToken +
+ "Even second heartbeat after activation shouldn't get any key updates!");
dispatcher.await();
rm.stop();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
index b00f13a0aba..e06281ce33d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
@@ -20,28 +20,13 @@ package org.apache.hadoop.yarn.server.timeline;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.junit.rules.TestWatcher;
-import org.junit.runner.Description;
+
+import org.junit.jupiter.api.extension.TestWatcher;
@InterfaceAudience.Private
@InterfaceStability.Unstable
-public class TimelineVersionWatcher extends TestWatcher {
+public class TimelineVersionWatcher implements TestWatcher {
static final float DEFAULT_TIMELINE_VERSION = 1.0f;
private TimelineVersion version;
- @Override
- protected void starting(Description description) {
- version = description.getAnnotation(TimelineVersion.class);
- }
-
- /**
- * @return the version number of timeline server for the current test (using
- * timeline server v1.0 by default)
- */
- public float getTimelineVersion() {
- if(version == null) {
- return DEFAULT_TIMELINE_VERSION;
- }
- return version.value();
- }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
index 21a77d89e2c..871cbb6c036 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.yarn.server.timelineservice;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -54,9 +54,9 @@ import org.apache.hadoop.yarn.server.timelineservice.collector.NodeTimelineColle
import org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService;
import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
public class TestTimelineServiceClientIntegration {
private static final String ROOT_DIR = new File("target",
@@ -66,7 +66,7 @@ public class TestTimelineServiceClientIntegration {
private static PerNodeTimelineCollectorsAuxService auxService;
private static Configuration conf;
- @BeforeClass
+ @BeforeAll
public static void setupClass() throws Exception {
try {
collectorManager = new MockNodeTimelineCollectorManager();
@@ -88,7 +88,7 @@ public class TestTimelineServiceClientIntegration {
}
}
- @AfterClass
+ @AfterAll
public static void tearDownClass() throws Exception {
if (auxService != null) {
auxService.stop();
@@ -97,7 +97,7 @@ public class TestTimelineServiceClientIntegration {
}
@Test
- public void testPutEntities() throws Exception {
+ void testPutEntities() throws Exception {
TimelineV2Client client =
TimelineV2Client.createTimelineClient(ApplicationId.newInstance(0, 1));
try {
@@ -123,7 +123,7 @@ public class TestTimelineServiceClientIntegration {
}
@Test
- public void testPutExtendedEntities() throws Exception {
+ void testPutExtendedEntities() throws Exception {
ApplicationId appId = ApplicationId.newInstance(0, 1);
TimelineV2Client client =
TimelineV2Client.createTimelineClient(appId);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index f773807f05d..4b041d70155 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -18,11 +18,11 @@
package org.apache.hadoop.yarn.server.timelineservice.security;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.atLeastOnce;
@@ -75,18 +75,16 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineR
import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_HTTP_AUTH_PREFIX;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
/**
* Tests timeline authentication filter based security for timeline service v2.
*/
-@RunWith(Parameterized.class)
public class TestTimelineAuthFilterForV2 {
private static final String FOO_USER = "foo";
@@ -106,9 +104,8 @@ public class TestTimelineAuthFilterForV2 {
// First param indicates whether HTTPS access or HTTP access and second param
// indicates whether it is kerberos access or token based access.
- @Parameterized.Parameters
public static Collection<Object[]> params() {
- return Arrays.asList(new Object[][] {{false, true}, {false, false},
+ return Arrays.asList(new Object[][]{{false, true}, {false, false},
{true, false}, {true, true}});
}
@@ -117,11 +114,14 @@ public class TestTimelineAuthFilterForV2 {
private static String sslConfDir;
private static Configuration conf;
private static UserGroupInformation nonKerberosUser;
+
static {
try {
nonKerberosUser = UserGroupInformation.getCurrentUser();
- } catch (IOException e) {}
+ } catch (IOException e) {
+ }
}
+
// Indicates whether HTTPS or HTTP access.
private boolean withSsl;
// Indicates whether Kerberos based login is used or token based access is
@@ -129,13 +129,14 @@ public class TestTimelineAuthFilterForV2 {
private boolean withKerberosLogin;
private NodeTimelineCollectorManager collectorManager;
private PerNodeTimelineCollectorsAuxService auxService;
- public TestTimelineAuthFilterForV2(boolean withSsl,
+
+ public void initTestTimelineAuthFilterForV2(boolean withSsl,
boolean withKerberosLogin) {
this.withSsl = withSsl;
this.withKerberosLogin = withKerberosLogin;
}
- @BeforeClass
+ @BeforeAll
public static void setup() {
try {
testMiniKDC = new MiniKdc(MiniKdc.createConf(), TEST_ROOT_DIR);
@@ -181,7 +182,7 @@ public class TestTimelineAuthFilterForV2 {
}
}
- @Before
+ @BeforeEach
public void initialize() throws Exception {
if (withSsl) {
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
@@ -221,7 +222,7 @@ public class TestTimelineAuthFilterForV2 {
appId, UserGroupInformation.getCurrentUser().getUserName());
if (!withKerberosLogin) {
AppLevelTimelineCollector collector =
- (AppLevelTimelineCollector)collectorManager.get(appId);
+ (AppLevelTimelineCollector) collectorManager.get(appId);
Token<TimelineDelegationTokenIdentifier> token =
collector.getDelegationTokenForApp();
token.setService(new Text("localhost" + token.getService().toString().
@@ -243,7 +244,7 @@ public class TestTimelineAuthFilterForV2 {
return client;
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws Exception {
if (testMiniKDC != null) {
testMiniKDC.stop();
@@ -251,7 +252,7 @@ public class TestTimelineAuthFilterForV2 {
FileUtil.fullyDelete(TEST_ROOT_DIR);
}
- @After
+ @AfterEach
public void destroy() throws Exception {
if (auxService != null) {
auxService.stop();
@@ -318,7 +319,7 @@ public class TestTimelineAuthFilterForV2 {
String entityType, int numEntities) throws Exception {
TimelineV2Client client = createTimelineClientForUGI(appId);
try {
- // Sync call. Results available immediately.
+ // Sync call. Results available immediately.
client.putEntities(createEntity("entity1", entityType));
assertEquals(numEntities, entityTypeDir.listFiles().length);
verifyEntity(entityTypeDir, "entity1", entityType);
@@ -343,8 +344,10 @@ public class TestTimelineAuthFilterForV2 {
return false;
}
- @Test
- public void testPutTimelineEntities() throws Exception {
+ @MethodSource("params")
+ @ParameterizedTest
+ void testPutTimelineEntities(boolean withSsl, boolean withKerberosLogin) throws Exception {
+ initTestTimelineAuthFilterForV2(withSsl, withKerberosLogin);
final String entityType = ENTITY_TYPE +
ENTITY_TYPE_SUFFIX.getAndIncrement();
ApplicationId appId = ApplicationId.newInstance(0, 1);
@@ -364,8 +367,8 @@ public class TestTimelineAuthFilterForV2 {
}
});
} else {
- assertTrue("Entities should have been published successfully.",
- publishWithRetries(appId, entityTypeDir, entityType, 1));
+ assertTrue(publishWithRetries(appId, entityTypeDir, entityType, 1),
+ "Entities should have been published successfully.");
AppLevelTimelineCollector collector =
(AppLevelTimelineCollector) collectorManager.get(appId);
@@ -377,12 +380,12 @@ public class TestTimelineAuthFilterForV2 {
// published.
Thread.sleep(1000);
// Entities should publish successfully after renewal.
- assertTrue("Entities should have been published successfully.",
- publishWithRetries(appId, entityTypeDir, entityType, 2));
+ assertTrue(publishWithRetries(appId, entityTypeDir, entityType, 2),
+ "Entities should have been published successfully.");
assertNotNull(collector);
verify(collectorManager.getTokenManagerService(), atLeastOnce()).
renewToken(eq(collector.getDelegationTokenForApp()),
- any(String.class));
+ any(String.class));
// Wait to ensure lifetime of token expires and ensure its regenerated
// automatically.
@@ -393,8 +396,9 @@ public class TestTimelineAuthFilterForV2 {
}
Thread.sleep(50);
}
- assertNotEquals("Token should have been regenerated.", token,
- collector.getDelegationTokenForApp());
+ assertNotEquals(token,
+ collector.getDelegationTokenForApp(),
+ "Token should have been regenerated.");
Thread.sleep(1000);
// Try publishing with the old token in UGI. Publishing should fail due
// to invalid token.
@@ -402,8 +406,8 @@ public class TestTimelineAuthFilterForV2 {
publishAndVerifyEntity(appId, entityTypeDir, entityType, 2);
fail("Exception should have been thrown due to Invalid Token.");
} catch (YarnException e) {
- assertTrue("Exception thrown should have been due to Invalid Token.",
- e.getCause().getMessage().contains("InvalidToken"));
+ assertTrue(e.getCause().getMessage().contains("InvalidToken"),
+ "Exception thrown should have been due to Invalid Token.");
}
// Update the regenerated token in UGI and retry publishing entities.
@@ -411,10 +415,10 @@ public class TestTimelineAuthFilterForV2 {
collector.getDelegationTokenForApp();
regeneratedToken.setService(new Text("localhost" +
regeneratedToken.getService().toString().substring(
- regeneratedToken.getService().toString().indexOf(":"))));
+ regeneratedToken.getService().toString().indexOf(":"))));
UserGroupInformation.getCurrentUser().addToken(regeneratedToken);
- assertTrue("Entities should have been published successfully.",
- publishWithRetries(appId, entityTypeDir, entityType, 2));
+ assertTrue(publishWithRetries(appId, entityTypeDir, entityType, 2),
+ "Entities should have been published successfully.");
// Token was generated twice, once when app collector was created and
// later after token lifetime expiry.
verify(collectorManager.getTokenManagerService(), times(2)).
@@ -432,11 +436,11 @@ public class TestTimelineAuthFilterForV2 {
}
Thread.sleep(50);
}
- assertNotNull("Error reading entityTypeDir", entities);
+ assertNotNull(entities, "Error reading entityTypeDir");
assertEquals(2, entities.length);
verifyEntity(entityTypeDir, "entity2", entityType);
AppLevelTimelineCollector collector =
- (AppLevelTimelineCollector)collectorManager.get(appId);
+ (AppLevelTimelineCollector) collectorManager.get(appId);
assertNotNull(collector);
auxService.removeApplication(appId);
verify(collectorManager.getTokenManagerService()).cancelToken(
@@ -446,24 +450,20 @@ public class TestTimelineAuthFilterForV2 {
private static class DummyNodeTimelineCollectorManager extends
NodeTimelineCollectorManager {
private volatile int tokenExpiredCnt = 0;
- DummyNodeTimelineCollectorManager() {
- super();
- }
private int getTokenExpiredCnt() {
return tokenExpiredCnt;
}
@Override
- protected TimelineV2DelegationTokenSecretManagerService
- createTokenManagerService() {
+ protected TimelineV2DelegationTokenSecretManagerService createTokenManagerService() {
return spy(new TimelineV2DelegationTokenSecretManagerService() {
@Override
protected AbstractDelegationTokenSecretManager
- <TimelineDelegationTokenIdentifier>
- createTimelineDelegationTokenSecretManager(long secretKeyInterval,
- long tokenMaxLifetime, long tokenRenewInterval,
- long tokenRemovalScanInterval) {
+ <TimelineDelegationTokenIdentifier> createTimelineDelegationTokenSecretManager(long
+ secretKeyInterval,
+ long tokenMaxLifetime, long tokenRenewInterval,
+ long tokenRemovalScanInterval) {
return spy(new TimelineV2DelegationTokenSecretManager(
secretKeyInterval, tokenMaxLifetime, tokenRenewInterval, 2000L) {
@Override
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org