You are viewing a plain text version of this content. The canonical link for it is here.
Posted to yarn-commits@hadoop.apache.org by su...@apache.org on 2013/04/20 18:57:46 UTC
svn commit: r1470194 - in
/hadoop/common/branches/HDFS-2802/hadoop-yarn-project: ./
hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/
hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecor...
Author: suresh
Date: Sat Apr 20 16:57:44 2013
New Revision: 1470194
URL: http://svn.apache.org/r1470194
Log:
HDFS-4434. Reverting change r1470089 that merges trunk to HDFS-2802.
Modified:
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/CHANGES.txt
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/CHANGES.txt?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/CHANGES.txt Sat Apr 20 16:57:44 2013
@@ -34,13 +34,6 @@ Trunk - Unreleased
YARN-487. Modify path manipulation in LocalDirsHandlerService to let
TestDiskFailures pass on Windows. (Chris Nauroth via vinodkv)
- YARN-493. Fixed some shell related flaws in YARN on Windows. (Chris Nauroth
- via vinodkv)
-
- YARN-593. container launch on Windows does not correctly populate
- classpath with new process's environment variables and localized resources
- (Chris Nauroth via bikas)
-
BREAKDOWN OF HADOOP-8562 SUBTASKS
YARN-158. Yarn creating package-info.java must not depend on sh.
@@ -92,9 +85,6 @@ Release 2.0.5-beta - UNRELEASED
YARN-444. Moved special container exit codes from YarnConfiguration to API
where they belong. (Sandy Ryza via vinodkv)
- YARN-441. Removed unused utility methods for collections from two API
- records. (Xuan Gong via vinodkv)
-
NEW FEATURES
YARN-482. FS: Extend SchedulingMode to intermediate queues.
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java Sat Apr 20 16:57:44 2013
@@ -20,8 +20,10 @@ package org.apache.hadoop.yarn.api.proto
import java.util.List;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.AMRMProtocol;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
@@ -118,16 +120,36 @@ public interface AllocateRequest {
@Stable
List<ResourceRequest> getAskList();
+ @Private
+ @Unstable
+ ResourceRequest getAsk(int index);
+
+ @Private
+ @Unstable
+ int getAskCount();
+
/**
- * Set list of <code>ResourceRequest</code> to update the
+ * Add list of <code>ResourceRequest</code> to update the
* <code>ResourceManager</code> about the application's resource requirements.
- * @param resourceRequests list of <code>ResourceRequest</code> to update the
+ * @param resourceRequest list of <code>ResourceRequest</code> to update the
* <code>ResourceManager</code> about the application's
* resource requirements
*/
@Public
@Stable
- void setAskList(List<ResourceRequest> resourceRequests);
+ void addAllAsks(List<ResourceRequest> resourceRequest);
+
+ @Private
+ @Unstable
+ void addAsk(ResourceRequest request);
+
+ @Private
+ @Unstable
+ void removeAsk(int index);
+
+ @Private
+ @Unstable
+ void clearAsks();
/**
* Get the list of <code>ContainerId</code> of containers being
@@ -138,9 +160,17 @@ public interface AllocateRequest {
@Public
@Stable
List<ContainerId> getReleaseList();
+
+ @Private
+ @Unstable
+ ContainerId getRelease(int index);
+
+ @Private
+ @Unstable
+ int getReleaseCount();
/**
- * Set the list of <code>ContainerId</code> of containers being
+ * Add the list of <code>ContainerId</code> of containers being
* released by the <code>ApplicationMaster</code>
* @param releaseContainers list of <code>ContainerId</code> of
containers being released by the <code>ApplicationMaster</code>
@@ -148,5 +178,17 @@ public interface AllocateRequest {
*/
@Public
@Stable
- void setReleaseList(List<ContainerId> releaseContainers);
+ void addAllReleases(List<ContainerId> releaseContainers);
+
+ @Private
+ @Unstable
+ void addRelease(ContainerId container);
+
+ @Private
+ @Unstable
+ void removeRelease(int index);
+
+ @Private
+ @Unstable
+ void clearReleases();
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java Sat Apr 20 16:57:44 2013
@@ -45,11 +45,43 @@ public interface StartContainerResponse
Map<String, ByteBuffer> getAllServiceResponse();
/**
- * Set to the list of auxiliary services which have been started on the
+ * Get the response from a single auxiliary service running on the
+ * <code>NodeManager</code>
+ *
+ * @param key The auxiliary service name whose response is desired.
+ * @return The opaque blob <code>ByteBuffer</code> returned by the auxiliary
+ * service.
+ */
+ ByteBuffer getServiceResponse(String key);
+
+ /**
+ * Add to the list of auxiliary services which have been started on the
* <code>NodeManager</code>. This is done only once when the
* <code>NodeManager</code> starts up
- * @param serviceResponses A map from auxiliary service names to the opaque
+ * @param serviceResponse A map from auxiliary service names to the opaque
* blob <code>ByteBuffer</code>s for that auxiliary service
*/
- void setAllServiceResponse(Map<String, ByteBuffer> serviceResponses);
+ void addAllServiceResponse(Map<String, ByteBuffer> serviceResponse);
+
+ /**
+ * Add to the list of auxiliary services which have been started on the
+ * <code>NodeManager</code>. This is done only once when the
+ * <code>NodeManager</code> starts up
+ *
+ * @param key The auxiliary service name
+ * @param value The opaque blob <code>ByteBuffer</code> managed by the
+ * auxiliary service
+ */
+ void setServiceResponse(String key, ByteBuffer value);
+
+ /**
+ * Remove a single auxiliary service from the StartContainerResponse object
+ * @param key The auxiliary service to remove
+ */
+ void removeServiceResponse(String key);
+
+ /**
+ * Remove all the auxiliary services from the StartContainerResponse object
+ */
+ void clearServiceResponse();
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java Sat Apr 20 16:57:44 2013
@@ -25,6 +25,7 @@ import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ProtoBase;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -143,13 +144,14 @@ public class AllocateRequestPBImpl exten
return this.ask;
}
@Override
- public void setAskList(final List<ResourceRequest> resourceRequests) {
- if(resourceRequests == null) {
- return;
- }
+ public ResourceRequest getAsk(int index) {
initAsks();
- this.ask.clear();
- this.ask.addAll(resourceRequests);
+ return this.ask.get(index);
+ }
+ @Override
+ public int getAskCount() {
+ initAsks();
+ return this.ask.size();
}
private void initAsks() {
@@ -165,6 +167,14 @@ public class AllocateRequestPBImpl exten
}
}
+ @Override
+ public void addAllAsks(final List<ResourceRequest> ask) {
+ if (ask == null)
+ return;
+ initAsks();
+ this.ask.addAll(ask);
+ }
+
private void addAsksToProto() {
maybeInitBuilder();
builder.clearAsk();
@@ -199,18 +209,34 @@ public class AllocateRequestPBImpl exten
builder.addAllAsk(iterable);
}
@Override
+ public void addAsk(ResourceRequest ask) {
+ initAsks();
+ this.ask.add(ask);
+ }
+ @Override
+ public void removeAsk(int index) {
+ initAsks();
+ this.ask.remove(index);
+ }
+ @Override
+ public void clearAsks() {
+ initAsks();
+ this.ask.clear();
+ }
+ @Override
public List<ContainerId> getReleaseList() {
initReleases();
return this.release;
}
@Override
- public void setReleaseList(List<ContainerId> releaseContainers) {
- if(releaseContainers == null) {
- return;
- }
+ public ContainerId getRelease(int index) {
initReleases();
- this.release.clear();
- this.release.addAll(releaseContainers);
+ return this.release.get(index);
+ }
+ @Override
+ public int getReleaseCount() {
+ initReleases();
+ return this.release.size();
}
private void initReleases() {
@@ -226,6 +252,14 @@ public class AllocateRequestPBImpl exten
}
}
+ @Override
+ public void addAllReleases(final List<ContainerId> release) {
+ if (release == null)
+ return;
+ initReleases();
+ this.release.addAll(release);
+ }
+
private void addReleasesToProto() {
maybeInitBuilder();
builder.clearRelease();
@@ -259,6 +293,21 @@ public class AllocateRequestPBImpl exten
};
builder.addAllRelease(iterable);
}
+ @Override
+ public void addRelease(ContainerId release) {
+ initReleases();
+ this.release.add(release);
+ }
+ @Override
+ public void removeRelease(int index) {
+ initReleases();
+ this.release.remove(index);
+ }
+ @Override
+ public void clearReleases() {
+ initReleases();
+ this.release.clear();
+ }
private ApplicationAttemptIdPBImpl convertFromProtoFormat(ApplicationAttemptIdProto p) {
return new ApplicationAttemptIdPBImpl(p);
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java Sat Apr 20 16:57:44 2013
@@ -84,14 +84,9 @@ public class StartContainerResponsePBImp
return this.serviceResponse;
}
@Override
- public synchronized void setAllServiceResponse(
- Map<String, ByteBuffer> serviceResponses) {
- if(serviceResponses == null) {
- return;
- }
+ public synchronized ByteBuffer getServiceResponse(String key) {
initServiceResponse();
- this.serviceResponse.clear();
- this.serviceResponse.putAll(serviceResponses);
+ return this.serviceResponse.get(key);
}
private synchronized void initServiceResponse() {
@@ -107,6 +102,14 @@ public class StartContainerResponsePBImp
}
}
+ @Override
+ public synchronized void addAllServiceResponse(final Map<String, ByteBuffer> serviceResponse) {
+ if (serviceResponse == null)
+ return;
+ initServiceResponse();
+ this.serviceResponse.putAll(serviceResponse);
+ }
+
private synchronized void addServiceResponseToProto() {
maybeInitBuilder();
builder.clearServiceResponse();
@@ -140,4 +143,19 @@ public class StartContainerResponsePBImp
};
builder.addAllServiceResponse(iterable);
}
+ @Override
+ public synchronized void setServiceResponse(String key, ByteBuffer val) {
+ initServiceResponse();
+ this.serviceResponse.put(key, val);
+ }
+ @Override
+ public synchronized void removeServiceResponse(String key) {
+ initServiceResponse();
+ this.serviceResponse.remove(key);
+ }
+ @Override
+ public synchronized void clearServiceResponse() {
+ initServiceResponse();
+ this.serviceResponse.clear();
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java Sat Apr 20 16:57:44 2013
@@ -393,8 +393,8 @@ public class BuilderUtils {
allocateRequest.setApplicationAttemptId(applicationAttemptId);
allocateRequest.setResponseId(responseID);
allocateRequest.setProgress(appProgress);
- allocateRequest.setAskList(resourceAsk);
- allocateRequest.setReleaseList(containersToBeReleased);
+ allocateRequest.addAllAsks(resourceAsk);
+ allocateRequest.addAllReleases(containersToBeReleased);
return allocateRequest;
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java Sat Apr 20 16:57:44 2013
@@ -222,6 +222,19 @@ public abstract class ContainerExecutor
}
+ /** Return a command for determining if process with specified pid is alive. */
+ protected static String[] getCheckProcessIsAliveCommand(String pid) {
+ return Shell.WINDOWS ?
+ new String[] { Shell.WINUTILS, "task", "isAlive", pid } :
+ new String[] { "kill", "-0", pid };
+ }
+
+ /** Return a command to send a signal to a given pid */
+ protected static String[] getSignalKillCommand(int code, String pid) {
+ return Shell.WINDOWS ? new String[] { Shell.WINUTILS, "task", "kill", pid } :
+ new String[] { "kill", "-" + code, pid };
+ }
+
/**
* Is the container still active?
* @param containerId
@@ -290,6 +303,26 @@ public abstract class ContainerExecutor
return pid;
}
+ public static final boolean isSetsidAvailable = isSetsidSupported();
+ private static boolean isSetsidSupported() {
+ if (Shell.WINDOWS) {
+ return true;
+ }
+ ShellCommandExecutor shexec = null;
+ boolean setsidSupported = true;
+ try {
+ String[] args = {"setsid", "bash", "-c", "echo $$"};
+ shexec = new ShellCommandExecutor(args);
+ shexec.execute();
+ } catch (IOException ioe) {
+ LOG.warn("setsid is not available on this machine. So not using it.");
+ setsidSupported = false;
+ } finally { // handle the exit code
+ LOG.info("setsid exited with exit code " + shexec.getExitCode());
+ }
+ return setsidSupported;
+ }
+
public static class DelayedProcessKiller extends Thread {
private final String user;
private final String pid;
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java Sat Apr 20 16:57:44 2013
@@ -50,8 +50,6 @@ import org.apache.hadoop.yarn.server.nod
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.util.ConverterUtils;
-import com.google.common.annotations.VisibleForTesting;
-
public class DefaultContainerExecutor extends ContainerExecutor {
private static final Log LOG = LogFactory
@@ -239,9 +237,8 @@ public class DefaultContainerExecutor ex
protected abstract void writeLocalWrapperScript(Path launchDst, Path pidFile,
PrintStream pout);
- protected LocalWrapperScriptBuilder(Path containerWorkDir) {
- this.wrapperScriptPath = new Path(containerWorkDir,
- Shell.appendScriptExtension("default_container_executor"));
+ protected LocalWrapperScriptBuilder(Path wrapperScriptPath) {
+ this.wrapperScriptPath = wrapperScriptPath;
}
}
@@ -249,7 +246,7 @@ public class DefaultContainerExecutor ex
extends LocalWrapperScriptBuilder {
public UnixLocalWrapperScriptBuilder(Path containerWorkDir) {
- super(containerWorkDir);
+ super(new Path(containerWorkDir, "default_container_executor.sh"));
}
@Override
@@ -263,7 +260,7 @@ public class DefaultContainerExecutor ex
pout.println();
pout.println("echo $$ > " + pidFile.toString() + ".tmp");
pout.println("/bin/mv -f " + pidFile.toString() + ".tmp " + pidFile);
- String exec = Shell.isSetsidAvailable? "exec setsid" : "exec";
+ String exec = ContainerExecutor.isSetsidAvailable? "exec setsid" : "exec";
pout.println(exec + " /bin/bash -c \"" +
launchDst.toUri().getPath().toString() + "\"");
}
@@ -277,7 +274,7 @@ public class DefaultContainerExecutor ex
public WindowsLocalWrapperScriptBuilder(String containerIdStr,
Path containerWorkDir) {
- super(containerWorkDir);
+ super(new Path(containerWorkDir, "default_container_executor.cmd"));
this.containerIdStr = containerIdStr;
}
@@ -300,15 +297,18 @@ public class DefaultContainerExecutor ex
@Override
public boolean signalContainer(String user, String pid, Signal signal)
throws IOException {
- LOG.debug("Sending signal " + signal.getValue() + " to pid " + pid
+ final String sigpid = ContainerExecutor.isSetsidAvailable
+ ? "-" + pid
+ : pid;
+ LOG.debug("Sending signal " + signal.getValue() + " to pid " + sigpid
+ " as user " + user);
- if (!containerIsAlive(pid)) {
+ if (!containerIsAlive(sigpid)) {
return false;
}
try {
- killContainer(pid, signal);
+ killContainer(sigpid, signal);
} catch (IOException e) {
- if (!containerIsAlive(pid)) {
+ if (!containerIsAlive(sigpid)) {
return false;
}
throw e;
@@ -322,11 +322,9 @@ public class DefaultContainerExecutor ex
* @param pid String pid
* @return boolean true if the process is alive
*/
- @VisibleForTesting
- public static boolean containerIsAlive(String pid) throws IOException {
+ private boolean containerIsAlive(String pid) throws IOException {
try {
- new ShellCommandExecutor(Shell.getCheckProcessIsAliveCommand(pid))
- .execute();
+ new ShellCommandExecutor(getCheckProcessIsAliveCommand(pid)).execute();
// successful execution means process is alive
return true;
}
@@ -344,7 +342,7 @@ public class DefaultContainerExecutor ex
* (for logging).
*/
private void killContainer(String pid, Signal signal) throws IOException {
- new ShellCommandExecutor(Shell.getSignalKillCommand(signal.getValue(), pid))
+ new ShellCommandExecutor(getSignalKillCommand(signal.getValue(), pid))
.execute();
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java Sat Apr 20 16:57:44 2013
@@ -468,7 +468,7 @@ public class ContainerManagerImpl extend
StartContainerResponse response =
recordFactory.newRecordInstance(StartContainerResponse.class);
- response.setAllServiceResponse(auxiliaryServices.getMeta());
+ response.addAllServiceResponse(auxiliaryServices.getMeta());
// TODO launchedContainer misplaced -> doesn't necessarily mean a container
// launch. A finished Application will not launch containers.
metrics.launchedContainer();
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java Sat Apr 20 16:57:44 2013
@@ -28,7 +28,6 @@ import java.io.OutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
@@ -73,8 +72,8 @@ public class ContainerLaunch implements
private static final Log LOG = LogFactory.getLog(ContainerLaunch.class);
- public static final String CONTAINER_SCRIPT =
- Shell.appendScriptExtension("launch_container");
+ public static final String CONTAINER_SCRIPT = Shell.WINDOWS ?
+ "launch_container.cmd" : "launch_container.sh";
public static final String FINAL_CONTAINER_TOKENS_FILE = "container_tokens";
private static final String PID_FILE_NAME_FMT = "%s.pid";
@@ -212,7 +211,7 @@ public class ContainerLaunch implements
FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
// Sanitize the container's environment
- sanitizeEnv(environment, containerWorkDir, appDirs, localResources);
+ sanitizeEnv(environment, containerWorkDir, appDirs);
// Write out the environment
writeLaunchEnv(containerScriptOutStream, environment, localResources,
@@ -507,17 +506,9 @@ public class ContainerLaunch implements
@Override
protected void link(Path src, Path dst) throws IOException {
- File srcFile = new File(src.toUri().getPath());
- String srcFileStr = srcFile.getPath();
- String dstFileStr = new File(dst.toString()).getPath();
- // If not on Java7+ on Windows, then copy file instead of symlinking.
- // See also FileUtil#symLink for full explanation.
- if (!Shell.isJava7OrAbove() && srcFile.isFile()) {
- line(String.format("@copy \"%s\" \"%s\"", srcFileStr, dstFileStr));
- } else {
- line(String.format("@%s symlink \"%s\" \"%s\"", Shell.WINUTILS,
- dstFileStr, srcFileStr));
- }
+ line(String.format("@%s symlink \"%s\" \"%s\"", Shell.WINUTILS,
+ new File(dst.toString()).getPath(),
+ new File(src.toUri().getPath()).getPath()));
}
@Override
@@ -541,8 +532,7 @@ public class ContainerLaunch implements
}
public void sanitizeEnv(Map<String, String> environment,
- Path pwd, List<Path> appDirs, Map<Path, List<String>> resources)
- throws IOException {
+ Path pwd, List<Path> appDirs) throws IOException {
/**
* Non-modifiable environment variables
*/
@@ -576,6 +566,16 @@ public class ContainerLaunch implements
environment.put("JVM_PID", "$$");
}
+ // TODO: Remove Windows check and use this approach on all platforms after
+ // additional testing. See YARN-358.
+ if (Shell.WINDOWS) {
+ String inputClassPath = environment.get(Environment.CLASSPATH.name());
+ if (inputClassPath != null && !inputClassPath.isEmpty()) {
+ environment.put(Environment.CLASSPATH.name(),
+ FileUtil.createJarWithClassPath(inputClassPath, pwd));
+ }
+ }
+
/**
* Modifiable environment variables
*/
@@ -594,57 +594,6 @@ public class ContainerLaunch implements
YarnConfiguration.NM_ADMIN_USER_ENV,
YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV)
);
-
- // TODO: Remove Windows check and use this approach on all platforms after
- // additional testing. See YARN-358.
- if (Shell.WINDOWS) {
- String inputClassPath = environment.get(Environment.CLASSPATH.name());
- if (inputClassPath != null && !inputClassPath.isEmpty()) {
- StringBuilder newClassPath = new StringBuilder(inputClassPath);
-
- // Localized resources do not exist at the desired paths yet, because the
- // container launch script has not run to create symlinks yet. This
- // means that FileUtil.createJarWithClassPath can't automatically expand
- // wildcards to separate classpath entries for each file in the manifest.
- // To resolve this, append classpath entries explicitly for each
- // resource.
- for (Map.Entry<Path,List<String>> entry : resources.entrySet()) {
- boolean targetIsDirectory = new File(entry.getKey().toUri().getPath())
- .isDirectory();
-
- for (String linkName : entry.getValue()) {
- // Append resource.
- newClassPath.append(File.pathSeparator).append(pwd.toString())
- .append(Path.SEPARATOR).append(linkName);
-
- // FileUtil.createJarWithClassPath must use File.toURI to convert
- // each file to a URI to write into the manifest's classpath. For
- // directories, the classpath must have a trailing '/', but
- // File.toURI only appends the trailing '/' if it is a directory that
- // already exists. To resolve this, add the classpath entries with
- // explicit trailing '/' here for any localized resource that targets
- // a directory. Then, FileUtil.createJarWithClassPath will guarantee
- // that the resulting entry in the manifest's classpath will have a
- // trailing '/', and thus refer to a directory instead of a file.
- if (targetIsDirectory) {
- newClassPath.append(Path.SEPARATOR);
- }
- }
- }
-
- // When the container launches, it takes the parent process's environment
- // and then adds/overwrites with the entries from the container launch
- // context. Do the same thing here for correct substitution of
- // environment variables in the classpath jar manifest.
- Map<String, String> mergedEnv = new HashMap<String, String>(
- System.getenv());
- mergedEnv.putAll(environment);
-
- String classPathJar = FileUtil.createJarWithClassPath(
- newClassPath.toString(), pwd, mergedEnv);
- environment.put(Environment.CLASSPATH.name(), classPathJar);
- }
- }
}
static void writeLaunchEnv(OutputStream out,
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java Sat Apr 20 16:57:44 2013
@@ -22,13 +22,12 @@ import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.BufferedReader;
+import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
-import java.io.PrintWriter;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -41,7 +40,6 @@ import junit.framework.Assert;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -61,7 +59,6 @@ import org.apache.hadoop.yarn.event.Disp
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.util.BuilderUtils;
@@ -84,7 +81,6 @@ public class TestNodeManagerShutdown {
.getRecordFactory(null);
static final String user = "nobody";
private FileContext localFS;
- private ContainerId cId;
private CyclicBarrier syncBarrier = new CyclicBarrier(2);
@Before
@@ -94,9 +90,6 @@ public class TestNodeManagerShutdown {
logsDir.mkdirs();
remoteLogsDir.mkdirs();
nmLocalDir.mkdirs();
-
- // Construct the Container-id
- cId = createContainerId();
}
@After
@@ -122,32 +115,25 @@ public class TestNodeManagerShutdown {
nm.stop();
- // Now verify the contents of the file. Script generates a message when it
- // receives a sigterm so we look for that. We cannot perform this check on
- // Windows, because the process is not notified when killed by winutils.
- // There is no way for the process to trap and respond. Instead, we can
- // verify that the job object with ID matching container ID no longer exists.
- if (Shell.WINDOWS) {
- Assert.assertFalse("Process is still alive!",
- DefaultContainerExecutor.containerIsAlive(cId.toString()));
- } else {
- BufferedReader reader =
- new BufferedReader(new FileReader(processStartFile));
-
- boolean foundSigTermMessage = false;
- while (true) {
- String line = reader.readLine();
- if (line == null) {
- break;
- }
- if (line.contains("SIGTERM")) {
- foundSigTermMessage = true;
- break;
- }
+ // Now verify the contents of the file
+ // Script generates a message when it receives a sigterm
+ // so we look for that
+ BufferedReader reader =
+ new BufferedReader(new FileReader(processStartFile));
+
+ boolean foundSigTermMessage = false;
+ while (true) {
+ String line = reader.readLine();
+ if (line == null) {
+ break;
+ }
+ if (line.contains("SIGTERM")) {
+ foundSigTermMessage = true;
+ break;
}
- Assert.assertTrue("Did not find sigterm message", foundSigTermMessage);
- reader.close();
}
+ Assert.assertTrue("Did not find sigterm message", foundSigTermMessage);
+ reader.close();
}
@SuppressWarnings("unchecked")
@@ -176,6 +162,8 @@ public class TestNodeManagerShutdown {
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
Container mockContainer = mock(Container.class);
+ // Construct the Container-id
+ ContainerId cId = createContainerId();
when(mockContainer.getId()).thenReturn(cId);
containerLaunchContext.setUser(user);
@@ -196,7 +184,9 @@ public class TestNodeManagerShutdown {
localResources.put(destinationFile, localResource);
containerLaunchContext.setLocalResources(localResources);
containerLaunchContext.setUser(containerLaunchContext.getUser());
- List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
+ List<String> commands = new ArrayList<String>();
+ commands.add("/bin/bash");
+ commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
Resource resource = BuilderUtils.newResource(1024, 1);
when(mockContainer.getResource()).thenReturn(resource);
@@ -244,24 +234,16 @@ public class TestNodeManagerShutdown {
* stopped by external means.
*/
private File createUnhaltingScriptFile() throws IOException {
- File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
- PrintWriter fileWriter = new PrintWriter(scriptFile);
- if (Shell.WINDOWS) {
- fileWriter.println("@echo \"Running testscript for delayed kill\"");
- fileWriter.println("@echo \"Writing pid to start file\"");
- fileWriter.println("@echo " + cId + ">> " + processStartFile);
- fileWriter.println("@pause");
- } else {
- fileWriter.write("#!/bin/bash\n\n");
- fileWriter.write("echo \"Running testscript for delayed kill\"\n");
- fileWriter.write("hello=\"Got SIGTERM\"\n");
- fileWriter.write("umask 0\n");
- fileWriter.write("trap \"echo $hello >> " + processStartFile +
- "\" SIGTERM\n");
- fileWriter.write("echo \"Writing pid to start file\"\n");
- fileWriter.write("echo $$ >> " + processStartFile + "\n");
- fileWriter.write("while true; do\ndate >> /dev/null;\n done\n");
- }
+ File scriptFile = new File(tmpDir, "scriptFile.sh");
+ BufferedWriter fileWriter = new BufferedWriter(new FileWriter(scriptFile));
+ fileWriter.write("#!/bin/bash\n\n");
+ fileWriter.write("echo \"Running testscript for delayed kill\"\n");
+ fileWriter.write("hello=\"Got SIGTERM\"\n");
+ fileWriter.write("umask 0\n");
+ fileWriter.write("trap \"echo $hello >> " + processStartFile + "\" SIGTERM\n");
+ fileWriter.write("echo \"Writing pid to start file\"\n");
+ fileWriter.write("echo $$ >> " + processStartFile + "\n");
+ fileWriter.write("while true; do\ndate >> /dev/null;\n done\n");
fileWriter.close();
return scriptFile;
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java Sat Apr 20 16:57:44 2013
@@ -76,15 +76,15 @@ public abstract class BaseContainerManag
public BaseContainerManagerTest() throws UnsupportedFileSystemException {
localFS = FileContext.getLocalFSFileContext();
localDir =
- new File("target", this.getClass().getSimpleName() + "-localDir")
+ new File("target", this.getClass().getName() + "-localDir")
.getAbsoluteFile();
localLogDir =
- new File("target", this.getClass().getSimpleName() + "-localLogDir")
+ new File("target", this.getClass().getName() + "-localLogDir")
.getAbsoluteFile();
remoteLogDir =
- new File("target", this.getClass().getSimpleName() + "-remoteLogDir")
+ new File("target", this.getClass().getName() + "-remoteLogDir")
.getAbsoluteFile();
- tmpDir = new File("target", this.getClass().getSimpleName() + "-tmpDir");
+ tmpDir = new File("target", this.getClass().getName() + "-tmpDir");
}
protected static Log LOG = LogFactory
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java Sat Apr 20 16:57:44 2013
@@ -35,7 +35,6 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
@@ -54,7 +53,6 @@ import org.apache.hadoop.yarn.exceptions
import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
-import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
@@ -198,29 +196,22 @@ public class TestContainerManager extend
InterruptedException {
containerManager.start();
- File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
+ File scriptFile = new File(tmpDir, "scriptFile.sh");
PrintWriter fileWriter = new PrintWriter(scriptFile);
File processStartFile =
new File(tmpDir, "start_file.txt").getAbsoluteFile();
-
- // ////// Construct the Container-id
- ContainerId cId = createContainerId();
-
- if (Shell.WINDOWS) {
- fileWriter.println("@echo Hello World!> " + processStartFile);
- fileWriter.println("@echo " + cId + ">> " + processStartFile);
- fileWriter.println("@ping -n 100 127.0.0.1 >nul");
- } else {
- fileWriter.write("\numask 0"); // So that start file is readable by the test
- fileWriter.write("\necho Hello World! > " + processStartFile);
- fileWriter.write("\necho $$ >> " + processStartFile);
- fileWriter.write("\nexec sleep 100");
- }
+ fileWriter.write("\numask 0"); // So that start file is readable by the test
+ fileWriter.write("\necho Hello World! > " + processStartFile);
+ fileWriter.write("\necho $$ >> " + processStartFile);
+ fileWriter.write("\nexec sleep 100");
fileWriter.close();
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ // ////// Construct the Container-id
+ ContainerId cId = createContainerId();
+
containerLaunchContext.setUser(user);
URL resource_alpha =
@@ -239,12 +230,14 @@ public class TestContainerManager extend
localResources.put(destinationFile, rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
containerLaunchContext.setUser(containerLaunchContext.getUser());
- List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
+ List<String> commands = new ArrayList<String>();
+ commands.add("/bin/bash");
+ commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
Container mockContainer = mock(Container.class);
when(mockContainer.getId()).thenReturn(cId);
when(mockContainer.getResource()).thenReturn(
- BuilderUtils.newResource(100, 1)); // MB
+ BuilderUtils.newResource(100 * 1024 * 1024, 1));
StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
startRequest.setContainerLaunchContext(containerLaunchContext);
startRequest.setContainer(mockContainer);
@@ -271,10 +264,12 @@ public class TestContainerManager extend
// Assert that the process is alive
Assert.assertTrue("Process is not alive!",
- DefaultContainerExecutor.containerIsAlive(pid));
+ exec.signalContainer(user,
+ pid, Signal.NULL));
// Once more
Assert.assertTrue("Process is not alive!",
- DefaultContainerExecutor.containerIsAlive(pid));
+ exec.signalContainer(user,
+ pid, Signal.NULL));
StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
stopRequest.setContainerId(cId);
@@ -288,39 +283,28 @@ public class TestContainerManager extend
gcsRequest.setContainerId(cId);
ContainerStatus containerStatus =
containerManager.getContainerStatus(gcsRequest).getStatus();
- int expectedExitCode = Shell.WINDOWS ? ExitCode.FORCE_KILLED.getExitCode() :
- ExitCode.TERMINATED.getExitCode();
- Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
+ Assert.assertEquals(ExitCode.TERMINATED.getExitCode(),
+ containerStatus.getExitStatus());
// Assert that the process is not alive anymore
Assert.assertFalse("Process is still alive!",
- DefaultContainerExecutor.containerIsAlive(pid));
+ exec.signalContainer(user,
+ pid, Signal.NULL));
}
private void testContainerLaunchAndExit(int exitCode) throws IOException, InterruptedException {
- File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
+ File scriptFile = new File(tmpDir, "scriptFile.sh");
PrintWriter fileWriter = new PrintWriter(scriptFile);
File processStartFile =
new File(tmpDir, "start_file.txt").getAbsoluteFile();
-
- // ////// Construct the Container-id
- ContainerId cId = createContainerId();
-
- if (Shell.WINDOWS) {
- fileWriter.println("@echo Hello World!> " + processStartFile);
- fileWriter.println("@echo " + cId + ">> " + processStartFile);
- if (exitCode != 0) {
- fileWriter.println("@exit " + exitCode);
- }
- } else {
- fileWriter.write("\numask 0"); // So that start file is readable by the test
- fileWriter.write("\necho Hello World! > " + processStartFile);
- fileWriter.write("\necho $$ >> " + processStartFile);
- // Have script throw an exit code at the end
- if (exitCode != 0) {
- fileWriter.write("\nexit "+exitCode);
- }
+ fileWriter.write("\numask 0"); // So that start file is readable by the test
+ fileWriter.write("\necho Hello World! > " + processStartFile);
+ fileWriter.write("\necho $$ >> " + processStartFile);
+
+ // Have script throw an exit code at the end
+ if (exitCode != 0) {
+ fileWriter.write("\nexit "+exitCode);
}
fileWriter.close();
@@ -328,6 +312,9 @@ public class TestContainerManager extend
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
+ // ////// Construct the Container-id
+ ContainerId cId = createContainerId();
+
containerLaunchContext.setUser(user);
URL resource_alpha =
@@ -346,12 +333,14 @@ public class TestContainerManager extend
localResources.put(destinationFile, rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
containerLaunchContext.setUser(containerLaunchContext.getUser());
- List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
+ List<String> commands = new ArrayList<String>();
+ commands.add("/bin/bash");
+ commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
Container mockContainer = mock(Container.class);
when(mockContainer.getId()).thenReturn(cId);
when(mockContainer.getResource()).thenReturn(
- BuilderUtils.newResource(100, 1)); // MB
+ BuilderUtils.newResource(100 * 1024 * 1024, 1));
StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
startRequest.setContainerLaunchContext(containerLaunchContext);
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java Sat Apr 20 16:57:44 2013
@@ -56,7 +56,6 @@ import org.apache.hadoop.yarn.api.record
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
-import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
import org.apache.hadoop.yarn.util.BuilderUtils;
@@ -89,15 +88,13 @@ public class TestContainerLaunch extends
File shellFile = null;
File tempFile = null;
- String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" :
- "foo@zz%_#*&!-+= bar()";
+ String badSymlink = "foo@zz%_#*&!-+= bar()";
File symLinkFile = null;
try {
- shellFile = Shell.appendScriptExtension(tmpDir, "hello");
- tempFile = Shell.appendScriptExtension(tmpDir, "temp");
- String timeoutCommand = Shell.WINDOWS ? "@echo \"hello\"" :
- "echo \"hello\"";
+ shellFile = new File(tmpDir, "hello.sh");
+ tempFile = new File(tmpDir, "temp.sh");
+ String timeoutCommand = "echo \"hello\"";
PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile));
shellFile.setExecutable(true);
writer.println(timeoutCommand);
@@ -112,13 +109,7 @@ public class TestContainerLaunch extends
Map<String, String> env = new HashMap<String, String>();
List<String> commands = new ArrayList<String>();
- if (Shell.WINDOWS) {
- commands.add("cmd");
- commands.add("/c");
- commands.add("\"" + badSymlink + "\"");
- } else {
- commands.add("/bin/sh ./\\\"" + badSymlink + "\\\"");
- }
+ commands.add("/bin/sh ./\\\"" + badSymlink + "\\\"");
ContainerLaunch.writeLaunchEnv(fos, env, resources, commands);
fos.flush();
@@ -154,30 +145,16 @@ public class TestContainerLaunch extends
// this is a dirty hack - but should be ok for a unittest.
@SuppressWarnings({ "rawtypes", "unchecked" })
public static void setNewEnvironmentHack(Map<String, String> newenv) throws Exception {
- try {
- Class<?> cl = Class.forName("java.lang.ProcessEnvironment");
- Field field = cl.getDeclaredField("theEnvironment");
- field.setAccessible(true);
- Map<String, String> env = (Map<String, String>)field.get(null);
- env.clear();
- env.putAll(newenv);
- Field ciField = cl.getDeclaredField("theCaseInsensitiveEnvironment");
- ciField.setAccessible(true);
- Map<String, String> cienv = (Map<String, String>)ciField.get(null);
- cienv.clear();
- cienv.putAll(newenv);
- } catch (NoSuchFieldException e) {
- Class[] classes = Collections.class.getDeclaredClasses();
- Map<String, String> env = System.getenv();
- for (Class cl : classes) {
- if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
- Field field = cl.getDeclaredField("m");
- field.setAccessible(true);
- Object obj = field.get(env);
- Map<String, String> map = (Map<String, String>) obj;
- map.clear();
- map.putAll(newenv);
- }
+ Class[] classes = Collections.class.getDeclaredClasses();
+ Map<String, String> env = System.getenv();
+ for (Class cl : classes) {
+ if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
+ Field field = cl.getDeclaredField("m");
+ field.setAccessible(true);
+ Object obj = field.get(env);
+ Map<String, String> map = (Map<String, String>) obj;
+ map.clear();
+ map.putAll(newenv);
}
}
}
@@ -195,6 +172,22 @@ public class TestContainerLaunch extends
envWithDummy.put(Environment.MALLOC_ARENA_MAX.name(), "99");
setNewEnvironmentHack(envWithDummy);
+ String malloc = System.getenv(Environment.MALLOC_ARENA_MAX.name());
+ File scriptFile = new File(tmpDir, "scriptFile.sh");
+ PrintWriter fileWriter = new PrintWriter(scriptFile);
+ File processStartFile =
+ new File(tmpDir, "env_vars.txt").getAbsoluteFile();
+ fileWriter.write("\numask 0"); // So that start file is readable by the test
+ fileWriter.write("\necho $" + Environment.MALLOC_ARENA_MAX.name() + " > " + processStartFile);
+ fileWriter.write("\necho $$ >> " + processStartFile);
+ fileWriter.write("\nexec sleep 100");
+ fileWriter.close();
+
+ assert(malloc != null && !"".equals(malloc));
+
+ ContainerLaunchContext containerLaunchContext =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
Container mockContainer = mock(Container.class);
// ////// Construct the Container-id
ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
@@ -207,30 +200,6 @@ public class TestContainerLaunch extends
ContainerId cId =
recordFactory.newRecordInstance(ContainerId.class);
cId.setApplicationAttemptId(appAttemptId);
- String malloc = System.getenv(Environment.MALLOC_ARENA_MAX.name());
- File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
- PrintWriter fileWriter = new PrintWriter(scriptFile);
- File processStartFile =
- new File(tmpDir, "env_vars.txt").getAbsoluteFile();
- if (Shell.WINDOWS) {
- fileWriter.println("@echo " + Environment.MALLOC_ARENA_MAX.$() + "> " +
- processStartFile);
- fileWriter.println("@echo " + cId + ">> " + processStartFile);
- fileWriter.println("@ping -n 100 127.0.0.1 >nul");
- } else {
- fileWriter.write("\numask 0"); // So that start file is readable by the test
- fileWriter.write("\necho " + Environment.MALLOC_ARENA_MAX.$() + " > " +
- processStartFile);
- fileWriter.write("\necho $$ >> " + processStartFile);
- fileWriter.write("\nexec sleep 100");
- }
- fileWriter.close();
-
- assert(malloc != null && !"".equals(malloc));
-
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
when(mockContainer.getId()).thenReturn(cId);
containerLaunchContext.setUser(user);
@@ -254,7 +223,9 @@ public class TestContainerLaunch extends
// set up the rest of the container
containerLaunchContext.setUser(containerLaunchContext.getUser());
- List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
+ List<String> commands = new ArrayList<String>();
+ commands.add("/bin/bash");
+ commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
when(mockContainer.getResource()).thenReturn(
BuilderUtils.newResource(1024, 1));
@@ -284,10 +255,12 @@ public class TestContainerLaunch extends
// Assert that the process is alive
Assert.assertTrue("Process is not alive!",
- DefaultContainerExecutor.containerIsAlive(pid));
+ exec.signalContainer(user,
+ pid, Signal.NULL));
// Once more
Assert.assertTrue("Process is not alive!",
- DefaultContainerExecutor.containerIsAlive(pid));
+ exec.signalContainer(user,
+ pid, Signal.NULL));
StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
stopRequest.setContainerId(cId);
@@ -301,19 +274,38 @@ public class TestContainerLaunch extends
gcsRequest.setContainerId(cId);
ContainerStatus containerStatus =
containerManager.getContainerStatus(gcsRequest).getStatus();
- int expectedExitCode = Shell.WINDOWS ? ExitCode.FORCE_KILLED.getExitCode() :
- ExitCode.TERMINATED.getExitCode();
- Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
+ Assert.assertEquals(ExitCode.TERMINATED.getExitCode(),
+ containerStatus.getExitStatus());
// Assert that the process is not alive anymore
Assert.assertFalse("Process is still alive!",
- DefaultContainerExecutor.containerIsAlive(pid));
+ exec.signalContainer(user,
+ pid, Signal.NULL));
}
@Test
public void testDelayedKill() throws Exception {
containerManager.start();
+ File processStartFile =
+ new File(tmpDir, "pid.txt").getAbsoluteFile();
+
+ // setup a script that can handle sigterm gracefully
+ File scriptFile = new File(tmpDir, "testscript.sh");
+ PrintWriter writer = new PrintWriter(new FileOutputStream(scriptFile));
+ writer.println("#!/bin/bash\n\n");
+ writer.println("echo \"Running testscript for delayed kill\"");
+ writer.println("hello=\"Got SIGTERM\"");
+ writer.println("umask 0");
+ writer.println("trap \"echo $hello >> " + processStartFile + "\" SIGTERM");
+ writer.println("echo \"Writing pid to start file\"");
+ writer.println("echo $$ >> " + processStartFile);
+ writer.println("while true; do\nsleep 1s;\ndone");
+ writer.close();
+ scriptFile.setExecutable(true);
+
+ ContainerLaunchContext containerLaunchContext =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
Container mockContainer = mock(Container.class);
// ////// Construct the Container-id
ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
@@ -326,33 +318,6 @@ public class TestContainerLaunch extends
ContainerId cId =
recordFactory.newRecordInstance(ContainerId.class);
cId.setApplicationAttemptId(appAttemptId);
-
- File processStartFile =
- new File(tmpDir, "pid.txt").getAbsoluteFile();
-
- // setup a script that can handle sigterm gracefully
- File scriptFile = Shell.appendScriptExtension(tmpDir, "testscript");
- PrintWriter writer = new PrintWriter(new FileOutputStream(scriptFile));
- if (Shell.WINDOWS) {
- writer.println("@echo \"Running testscript for delayed kill\"");
- writer.println("@echo \"Writing pid to start file\"");
- writer.println("@echo " + cId + "> " + processStartFile);
- writer.println("@ping -n 100 127.0.0.1 >nul");
- } else {
- writer.println("#!/bin/bash\n\n");
- writer.println("echo \"Running testscript for delayed kill\"");
- writer.println("hello=\"Got SIGTERM\"");
- writer.println("umask 0");
- writer.println("trap \"echo $hello >> " + processStartFile + "\" SIGTERM");
- writer.println("echo \"Writing pid to start file\"");
- writer.println("echo $$ >> " + processStartFile);
- writer.println("while true; do\nsleep 1s;\ndone");
- }
- writer.close();
- scriptFile.setExecutable(true);
-
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
when(mockContainer.getId()).thenReturn(cId);
containerLaunchContext.setUser(user);
@@ -376,7 +341,8 @@ public class TestContainerLaunch extends
// set up the rest of the container
containerLaunchContext.setUser(containerLaunchContext.getUser());
- List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
+ List<String> commands = new ArrayList<String>();
+ commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
when(mockContainer.getResource()).thenReturn(
BuilderUtils.newResource(1024, 1));
@@ -410,32 +376,25 @@ public class TestContainerLaunch extends
Assert.assertEquals(ExitCode.FORCE_KILLED.getExitCode(),
containerStatus.getExitStatus());
- // Now verify the contents of the file. Script generates a message when it
- // receives a sigterm so we look for that. We cannot perform this check on
- // Windows, because the process is not notified when killed by winutils.
- // There is no way for the process to trap and respond. Instead, we can
- // verify that the job object with ID matching container ID no longer exists.
- if (Shell.WINDOWS) {
- Assert.assertFalse("Process is still alive!",
- DefaultContainerExecutor.containerIsAlive(cId.toString()));
- } else {
- BufferedReader reader =
- new BufferedReader(new FileReader(processStartFile));
-
- boolean foundSigTermMessage = false;
- while (true) {
- String line = reader.readLine();
- if (line == null) {
- break;
- }
- if (line.contains("SIGTERM")) {
- foundSigTermMessage = true;
- break;
- }
+ // Now verify the contents of the file
+ // Script generates a message when it receives a sigterm
+ // so we look for that
+ BufferedReader reader =
+ new BufferedReader(new FileReader(processStartFile));
+
+ boolean foundSigTermMessage = false;
+ while (true) {
+ String line = reader.readLine();
+ if (line == null) {
+ break;
+ }
+ if (line.contains("SIGTERM")) {
+ foundSigTermMessage = true;
+ break;
}
- Assert.assertTrue("Did not find sigterm message", foundSigTermMessage);
- reader.close();
}
+ Assert.assertTrue("Did not find sigterm message", foundSigTermMessage);
+ reader.close();
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java?rev=1470194&r1=1470193&r2=1470194&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java Sat Apr 20 16:57:44 2013
@@ -497,7 +497,7 @@ public class TestContainerManagerSecurit
.getAllocatedContainers();
// Modify ask to request no more.
- allocateRequest.setAskList(new ArrayList<ResourceRequest>());
+ allocateRequest.clearAsks();
int waitCounter = 0;
while ((allocatedContainers == null || allocatedContainers.size() == 0)