You are viewing a plain text version of this content. The canonical link for it is here.
Posted to mapreduce-commits@hadoop.apache.org by at...@apache.org on 2011/11/02 06:35:03 UTC
svn commit: r1196458 [16/19] - in
/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ assembly/
bin/ conf/ dev-support/ hadoop-mapreduce-client/
hadoop-mapreduce-client/hadoop-mapreduce-client-app/
hadoop-mapreduce-client/hadoop-mapreduce-cl...
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java Wed Nov 2 05:34:31 2011
@@ -20,11 +20,14 @@ package org.apache.hadoop.yarn.server.re
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService.BlockingInterface;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*;
import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsResponse;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
@@ -33,6 +36,8 @@ import org.apache.hadoop.yarn.server.res
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
@@ -119,4 +124,19 @@ public class RMAdminProtocolPBServiceImp
}
}
+ @Override
+ public RefreshServiceAclsResponseProto refreshServiceAcls(
+ RpcController controller, RefreshServiceAclsRequestProto proto)
+ throws ServiceException {
+ RefreshServiceAclsRequestPBImpl request =
+ new RefreshServiceAclsRequestPBImpl(proto);
+ try {
+ RefreshServiceAclsResponse response =
+ real.refreshServiceAcls(request);
+ return ((RefreshServiceAclsResponsePBImpl)response).getProto();
+ } catch (YarnRemoteException e) {
+ throw new ServiceException(e);
+ }
+ }
+
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java Wed Nov 2 05:34:31 2011
@@ -116,6 +116,12 @@ public interface RMApp extends EventHand
long getStartTime();
/**
+ * the submit time of the application.
+ * @return the submit time of the application.
+ */
+ long getSubmitTime();
+
+ /**
* The tracking url for the application master.
* @return the tracking url for the application master.
*/
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java Wed Nov 2 05:34:31 2011
@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -83,6 +84,7 @@ public class RMAppImpl implements RMApp
private final WriteLock writeLock;
private final Map<ApplicationAttemptId, RMAppAttempt> attempts
= new LinkedHashMap<ApplicationAttemptId, RMAppAttempt>();
+ private final long submitTime;
// Mutable fields
private long startTime;
@@ -150,8 +152,10 @@ public class RMAppImpl implements RMApp
.addTransition(
RMAppState.KILLED,
RMAppState.KILLED,
- EnumSet.of(RMAppEventType.KILL, RMAppEventType.ATTEMPT_FINISHED,
- RMAppEventType.ATTEMPT_FAILED, RMAppEventType.ATTEMPT_KILLED))
+ EnumSet.of(RMAppEventType.APP_ACCEPTED,
+ RMAppEventType.APP_REJECTED, RMAppEventType.KILL,
+ RMAppEventType.ATTEMPT_FINISHED, RMAppEventType.ATTEMPT_FAILED,
+ RMAppEventType.ATTEMPT_KILLED))
.installTopology();
@@ -162,7 +166,8 @@ public class RMAppImpl implements RMApp
Configuration config, String name, String user, String queue,
ApplicationSubmissionContext submissionContext, String clientTokenStr,
ApplicationStore appStore,
- YarnScheduler scheduler, ApplicationMasterService masterService) {
+ YarnScheduler scheduler, ApplicationMasterService masterService,
+ long submitTime) {
this.applicationId = applicationId;
this.name = name;
@@ -177,6 +182,7 @@ public class RMAppImpl implements RMApp
this.appStore = appStore;
this.scheduler = scheduler;
this.masterService = masterService;
+ this.submitTime = submitTime;
this.startTime = System.currentTimeMillis();
this.maxRetries = conf.getInt(YarnConfiguration.RM_AM_MAX_RETRIES,
@@ -206,7 +212,8 @@ public class RMAppImpl implements RMApp
&& currentAttempt.getFinalApplicationStatus() != null) {
return currentAttempt.getFinalApplicationStatus();
}
- return createFinalApplicationStatus(this.stateMachine.getCurrentState());
+ return
+ createFinalApplicationStatus(this.stateMachine.getCurrentState());
} finally {
this.readLock.unlock();
}
@@ -324,19 +331,24 @@ public class RMAppImpl implements RMApp
String clientToken = "N/A";
String trackingUrl = "N/A";
String host = "N/A";
+ String origTrackingUrl = "N/A";
int rpcPort = -1;
+ ApplicationResourceUsageReport appUsageReport = null;
FinalApplicationStatus finishState = getFinalApplicationStatus();
if (this.currentAttempt != null) {
trackingUrl = this.currentAttempt.getTrackingUrl();
+ origTrackingUrl = this.currentAttempt.getOriginalTrackingUrl();
clientToken = this.currentAttempt.getClientToken();
host = this.currentAttempt.getHost();
rpcPort = this.currentAttempt.getRpcPort();
+ appUsageReport = currentAttempt.getApplicationResourceUsageReport();
}
return BuilderUtils.newApplicationReport(this.applicationId, this.user,
this.queue, this.name, host, rpcPort, clientToken,
createApplicationState(this.stateMachine.getCurrentState()),
this.diagnostics.toString(), trackingUrl,
- this.startTime, this.finishTime, finishState);
+ this.startTime, this.finishTime, finishState, appUsageReport,
+ origTrackingUrl);
} finally {
this.readLock.unlock();
}
@@ -365,9 +377,14 @@ public class RMAppImpl implements RMApp
}
@Override
+ public long getSubmitTime() {
+ return this.submitTime;
+ }
+
+ @Override
public String getTrackingUrl() {
this.readLock.lock();
-
+
try {
if (this.currentAttempt != null) {
return this.currentAttempt.getTrackingUrl();
@@ -425,7 +442,7 @@ public class RMAppImpl implements RMApp
RMAppAttempt attempt = new RMAppAttemptImpl(appAttemptId,
clientTokenStr, rmContext, scheduler, masterService,
- submissionContext);
+ submissionContext, YarnConfiguration.getProxyHostAndPort(conf));
attempts.put(appAttemptId, attempt);
currentAttempt = attempt;
handler.handle(
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AMLivelinessMonitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AMLivelinessMonitor.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AMLivelinessMonitor.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AMLivelinessMonitor.java Wed Nov 2 05:34:31 2011
@@ -37,10 +37,10 @@ public class AMLivelinessMonitor extends
public void init(Configuration conf) {
super.init(conf);
- setExpireInterval(conf.getInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,
- YarnConfiguration.DEFAULT_RM_AM_EXPIRY_INTERVAL_MS));
- setMonitorInterval(conf.getInt(YarnConfiguration.RM_AM_LIVENESS_MONITOR_INTERVAL_MS,
- YarnConfiguration.DEFAULT_RM_AM_LIVENESS_MONITOR_INTERVAL_MS));
+ int expireIntvl = conf.getInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,
+ YarnConfiguration.DEFAULT_RM_AM_EXPIRY_INTERVAL_MS);
+ setExpireInterval(expireIntvl);
+ setMonitorInterval(expireIntvl/3);
}
@Override
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java Wed Nov 2 05:34:31 2011
@@ -22,6 +22,7 @@ import java.util.List;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Container;
@@ -71,6 +72,22 @@ public interface RMAppAttempt extends Ev
String getTrackingUrl();
/**
+ * The original url at which the status of the application attempt can be
+ * accessed. This url is not fronted by a proxy. This is only intended to be
+ * used by the proxy.
+ * @return the url at which the status of the attempt can be accessed and is
+ * not fronted by a proxy.
+ */
+ String getOriginalTrackingUrl();
+
+ /**
+ * The base to be prepended to web URLs that are not relative, and the user
+ * has been checked.
+ * @return the base URL to be prepended to web URLs that are not relative.
+ */
+ String getWebProxyBase();
+
+ /**
* The token required by the clients to talk to the application attempt
* @return the token required by the clients to talk to the application attempt
*/
@@ -127,4 +144,10 @@ public interface RMAppAttempt extends Ev
* @return the application submission context for this Application.
*/
ApplicationSubmissionContext getSubmissionContext();
+
+ /*
+ * Get application container and resource usage information.
+ * @return an ApplicationResourceUsageReport object.
+ */
+ ApplicationResourceUsageReport getApplicationResourceUsageReport();
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java Wed Nov 2 05:34:31 2011
@@ -18,7 +18,10 @@
package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+import java.net.URI;
+import java.net.URISyntaxException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
@@ -31,6 +34,7 @@ import java.util.concurrent.locks.Reentr
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
@@ -47,6 +51,7 @@ import org.apache.hadoop.yarn.server.res
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFailedAttemptEvent;
@@ -58,10 +63,13 @@ import org.apache.hadoop.yarn.server.res
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRejectedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStatusupdateEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
import org.apache.hadoop.yarn.state.SingleArcTransition;
@@ -109,12 +117,16 @@ public class RMAppAttemptImpl implements
private float progress = 0;
private String host = "N/A";
private int rpcPort;
- private String trackingUrl = "N/A";
+ private String origTrackingUrl = "N/A";
+ private String proxiedTrackingUrl = "N/A";
+
// Set to null initially. Will eventually get set
// if an RMAppAttemptUnregistrationEvent occurs
private FinalApplicationStatus finalStatus = null;
private final StringBuilder diagnostics = new StringBuilder();
+ private final String proxy;
+
private static final StateMachineFactory<RMAppAttemptImpl,
RMAppAttemptState,
RMAppAttemptEventType,
@@ -227,7 +239,9 @@ public class RMAppAttemptImpl implements
.addTransition(
RMAppAttemptState.KILLED,
RMAppAttemptState.KILLED,
- EnumSet.of(RMAppAttemptEventType.EXPIRE,
+ EnumSet.of(RMAppAttemptEventType.APP_ACCEPTED,
+ RMAppAttemptEventType.APP_REJECTED,
+ RMAppAttemptEventType.EXPIRE,
RMAppAttemptEventType.LAUNCHED,
RMAppAttemptEventType.LAUNCH_FAILED,
RMAppAttemptEventType.EXPIRE,
@@ -243,8 +257,10 @@ public class RMAppAttemptImpl implements
public RMAppAttemptImpl(ApplicationAttemptId appAttemptId,
String clientToken, RMContext rmContext, YarnScheduler scheduler,
ApplicationMasterService masterService,
- ApplicationSubmissionContext submissionContext) {
+ ApplicationSubmissionContext submissionContext,
+ String proxy) {
+ this.proxy = proxy;
this.applicationAttemptId = appAttemptId;
this.rmContext = rmContext;
this.eventHandler = rmContext.getDispatcher().getEventHandler();
@@ -315,9 +331,46 @@ public class RMAppAttemptImpl implements
@Override
public String getTrackingUrl() {
this.readLock.lock();
-
try {
- return this.trackingUrl;
+ return this.proxiedTrackingUrl;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getOriginalTrackingUrl() {
+ this.readLock.lock();
+ try {
+ return this.origTrackingUrl;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
+ public String getWebProxyBase() {
+ this.readLock.lock();
+ try {
+ return ProxyUriUtils.getPath(applicationAttemptId.getApplicationId());
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ private String generateProxyUriWithoutScheme(
+ final String trackingUriWithoutScheme) {
+ this.readLock.lock();
+ try {
+ URI trackingUri = ProxyUriUtils.getUriFromAMUrl(trackingUriWithoutScheme);
+ URI proxyUri = ProxyUriUtils.getUriFromAMUrl(proxy);
+ URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri,
+ applicationAttemptId.getApplicationId());
+ //We need to strip off the scheme to have it match what was there before
+ return result.toASCIIString().substring(7);
+ } catch (URISyntaxException e) {
+ LOG.warn("Could not proxify "+trackingUriWithoutScheme,e);
+ return trackingUriWithoutScheme;
} finally {
this.readLock.unlock();
}
@@ -428,6 +481,52 @@ public class RMAppAttemptImpl implements
}
}
+ @Override
+ public ApplicationResourceUsageReport getApplicationResourceUsageReport() {
+ this.readLock.lock();
+
+ try {
+ int numUsedContainers = 0;
+ int numReservedContainers = 0;
+ int reservedResources = 0;
+ int currentConsumption = 0;
+ SchedulerAppReport schedApp =
+ scheduler.getSchedulerAppInfo(this.getAppAttemptId());
+ Collection<RMContainer> liveContainers;
+ Collection<RMContainer> reservedContainers;
+ if (schedApp != null) {
+ liveContainers = schedApp.getLiveContainers();
+ reservedContainers = schedApp.getReservedContainers();
+ if (liveContainers != null) {
+ numUsedContainers = liveContainers.size();
+ for (RMContainer lc : liveContainers) {
+ currentConsumption += lc.getContainer().getResource().getMemory();
+ }
+ }
+ if (reservedContainers != null) {
+ numReservedContainers = reservedContainers.size();
+ for (RMContainer rc : reservedContainers) {
+ reservedResources += rc.getContainer().getResource().getMemory();
+ }
+ }
+ }
+
+ ApplicationResourceUsageReport appResources =
+ recordFactory.newRecordInstance(ApplicationResourceUsageReport.class);
+ appResources.setNumUsedContainers(numUsedContainers);
+ appResources.setNumReservedContainers(numReservedContainers);
+ appResources.setUsedResources(
+ Resources.createResource(currentConsumption));
+ appResources.setReservedResources(
+ Resources.createResource(reservedResources));
+ appResources.setNeededResources(
+ Resources.createResource(currentConsumption + reservedResources));
+ return appResources;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
private static class BaseTransition implements
SingleArcTransition<RMAppAttemptImpl, RMAppAttemptEvent> {
@@ -638,7 +737,9 @@ public class RMAppAttemptImpl implements
= (RMAppAttemptRegistrationEvent) event;
appAttempt.host = registrationEvent.getHost();
appAttempt.rpcPort = registrationEvent.getRpcport();
- appAttempt.trackingUrl = registrationEvent.getTrackingurl();
+ appAttempt.origTrackingUrl = registrationEvent.getTrackingurl();
+ appAttempt.proxiedTrackingUrl =
+ appAttempt.generateProxyUriWithoutScheme(appAttempt.origTrackingUrl);
// Let the app know
appAttempt.eventHandler.handle(new RMAppEvent(appAttempt
@@ -734,7 +835,9 @@ public class RMAppAttemptImpl implements
RMAppAttemptUnregistrationEvent unregisterEvent
= (RMAppAttemptUnregistrationEvent) event;
appAttempt.diagnostics.append(unregisterEvent.getDiagnostics());
- appAttempt.trackingUrl = unregisterEvent.getTrackingUrl();
+ appAttempt.origTrackingUrl = unregisterEvent.getTrackingUrl();
+ appAttempt.proxiedTrackingUrl =
+ appAttempt.generateProxyUriWithoutScheme(appAttempt.origTrackingUrl);
appAttempt.finalStatus = unregisterEvent.getFinalApplicationStatus();
// Tell the app and the scheduler
@@ -777,6 +880,16 @@ public class RMAppAttemptImpl implements
" due to: " + containerStatus.getDiagnostics() + "." +
"Failing this attempt.");
+ /*
+ * In the case when the AM dies, the trackingUrl is left pointing to the AM's
+ * URL, which shows up in the scheduler UI as a broken link. Setting it here
+ * to empty string will prevent any link from being displayed.
+ * NOTE: don't set trackingUrl to 'null'. That will cause null-pointer exceptions
+ * in the generated proto code.
+ */
+ appAttempt.origTrackingUrl = "";
+ appAttempt.proxiedTrackingUrl = "";
+
new FinalTransition(RMAppAttemptState.FAILED).transition(
appAttempt, containerFinishedEvent);
return RMAppAttemptState.FAILED;
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/ContainerAllocationExpirer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/ContainerAllocationExpirer.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/ContainerAllocationExpirer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/ContainerAllocationExpirer.java Wed Nov 2 05:34:31 2011
@@ -39,11 +39,11 @@ public class ContainerAllocationExpirer
public void init(Configuration conf) {
super.init(conf);
- setExpireInterval(conf.getInt(
- YarnConfiguration.RM_CONTAINER_LIVENESS_MONITOR_INTERVAL_MS,
- YarnConfiguration.DEFAULT_RM_CONTAINER_LIVENESS_MONITOR_INTERVAL_MS));
- setMonitorInterval(conf.getInt(YarnConfiguration.RM_AM_LIVENESS_MONITOR_INTERVAL_MS,
- YarnConfiguration.DEFAULT_RM_AM_LIVENESS_MONITOR_INTERVAL_MS));
+ int expireIntvl = conf.getInt(
+ YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
+ YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS);
+ setExpireInterval(expireIntvl);
+ setMonitorInterval(expireIntvl/3);
}
@Override
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java Wed Nov 2 05:34:31 2011
@@ -19,6 +19,9 @@
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
public enum RMNodeEventType {
+
+ STARTED,
+
// Source: AdminService
DECOMMISSION,
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java Wed Nov 2 05:34:31 2011
@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.event.Even
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
@@ -107,9 +108,11 @@ public class RMNodeImpl implements RMNod
= new StateMachineFactory<RMNodeImpl,
RMNodeState,
RMNodeEventType,
- RMNodeEvent>(RMNodeState.RUNNING)
+ RMNodeEvent>(RMNodeState.NEW)
//Transitions from RUNNING state
+ .addTransition(RMNodeState.NEW, RMNodeState.RUNNING,
+ RMNodeEventType.STARTED, new AddNodeTransition())
.addTransition(RMNodeState.RUNNING,
EnumSet.of(RMNodeState.RUNNING, RMNodeState.UNHEALTHY),
RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenHealthyTransition())
@@ -158,8 +161,6 @@ public class RMNodeImpl implements RMNod
this.stateMachine = stateMachineFactory.make(this);
- context.getDispatcher().getEventHandler().handle(
- new NodeAddedSchedulerEvent(this));
}
@Override
@@ -311,6 +312,21 @@ public class RMNodeImpl implements RMNod
}
}
+ public static class AddNodeTransition implements
+ SingleArcTransition<RMNodeImpl, RMNodeEvent> {
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
+ // Inform the scheduler
+
+ rmNode.context.getDispatcher().getEventHandler().handle(
+ new NodeAddedSchedulerEvent(rmNode));
+
+ ClusterMetrics.getMetrics().addNode();
+ }
+ }
+
public static class CleanUpAppTransition
implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
@@ -335,6 +351,7 @@ public class RMNodeImpl implements RMNod
public static class RemoveNodeTransition
implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
+ @SuppressWarnings("unchecked")
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
// Inform the scheduler
@@ -345,11 +362,14 @@ public class RMNodeImpl implements RMNod
rmNode.context.getRMNodes().remove(rmNode.nodeId);
LOG.info("Removed Node " + rmNode.nodeId);
+ //Update the metrics
+ ClusterMetrics.getMetrics().removeNode(event.getType());
}
}
public static class StatusUpdateWhenHealthyTransition implements
MultipleArcTransition<RMNodeImpl, RMNodeEvent, RMNodeState> {
+ @SuppressWarnings("unchecked")
@Override
public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
@@ -365,6 +385,7 @@ public class RMNodeImpl implements RMNod
// Inform the scheduler
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeRemovedSchedulerEvent(rmNode));
+ ClusterMetrics.getMetrics().incrNumUnhealthyNMs();
return RMNodeState.UNHEALTHY;
}
@@ -402,6 +423,7 @@ public class RMNodeImpl implements RMNod
implements
MultipleArcTransition<RMNodeImpl, RMNodeEvent, RMNodeState> {
+ @SuppressWarnings("unchecked")
@Override
public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
@@ -413,6 +435,7 @@ public class RMNodeImpl implements RMNod
if (remoteNodeHealthStatus.getIsNodeHealthy()) {
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeAddedSchedulerEvent(rmNode));
+ ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
return RMNodeState.RUNNING;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeState.java Wed Nov 2 05:34:31 2011
@@ -19,5 +19,5 @@
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
public enum RMNodeState {
- RUNNING, UNHEALTHY, DECOMMISSIONED, LOST
+ NEW, RUNNING, UNHEALTHY, DECOMMISSIONED, LOST, REBOOTED
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java Wed Nov 2 05:34:31 2011
@@ -18,9 +18,11 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
-import com.google.common.base.Splitter;
-import java.util.Map;
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+import static org.apache.hadoop.yarn.server.resourcemanager.resource.Resources.multiply;
+
import java.util.HashMap;
+import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
@@ -28,16 +30,16 @@ import org.apache.hadoop.metrics2.Metric
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import static org.apache.hadoop.metrics2.lib.Interns.info;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
-import static org.apache.hadoop.yarn.server.resourcemanager.resource.Resources.*;
-
-import org.slf4j.LoggerFactory;
import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Splitter;
@InterfaceAudience.Private
@Metrics(context="yarn")
@@ -51,6 +53,8 @@ public class QueueMetrics {
@Metric("Allocated memory in GiB") MutableGaugeInt allocatedGB;
@Metric("# of allocated containers") MutableGaugeInt allocatedContainers;
+ @Metric("Aggregate # of allocated containers") MutableCounterLong aggregateContainersAllocated;
+ @Metric("Aggregate # of released containers") MutableCounterLong aggregateContainersReleased;
@Metric("Available memory in GiB") MutableGaugeInt availableGB;
@Metric("Pending memory allocation in GiB") MutableGaugeInt pendingGB;
@Metric("# of pending containers") MutableGaugeInt pendingContainers;
@@ -234,6 +238,7 @@ public class QueueMetrics {
public void allocateResources(String user, int containers, Resource res) {
allocatedContainers.incr(containers);
+ aggregateContainersAllocated.incr(containers);
allocatedGB.incr(res.getMemory()/GB * containers);
_decrPendingResources(containers, multiply(res, containers));
QueueMetrics userMetrics = getUserMetrics(user);
@@ -247,6 +252,7 @@ public class QueueMetrics {
public void releaseResources(String user, int containers, Resource res) {
allocatedContainers.decr(containers);
+ aggregateContainersReleased.incr(containers);
allocatedGB.decr(res.getMemory()/GB * containers);
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java Wed Nov 2 05:34:31 2011
@@ -17,11 +17,14 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+import java.util.List;
+
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -72,5 +75,35 @@ public class SchedulerUtils {
return containerStatus;
}
+ /**
+ * Utility method to normalize a list of resource requests, by ensuring that
+ * the memory for each request is a multiple of minMemory and is not zero.
+ *
+ * @param asks
+ * a list of resource requests.
+ * @param minMemory
+ * the configured minimum memory allocation.
+ */
+ public static void normalizeRequests(List<ResourceRequest> asks,
+ int minMemory) {
+ for (ResourceRequest ask : asks) {
+ normalizeRequest(ask, minMemory);
+ }
+ }
+
+ /**
+ * Utility method to normalize a resource request, by ensuring that the
+ * requested memory is a multiple of minMemory and is not zero.
+ *
+ * @param ask
+ * the resource request.
+ * @param minMemory
+ * the configured minimum memory allocation.
+ */
+ public static void normalizeRequest(ResourceRequest ask, int minMemory) {
+ int memory = Math.max(ask.getCapability().getMemory(), minMemory);
+ ask.getCapability().setMemory(
+ minMemory * ((memory / minMemory) + (memory % minMemory > 0 ? 1 : 0)));
+ }
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java Wed Nov 2 05:34:31 2011
@@ -441,7 +441,7 @@ implements ResourceScheduler, CapacitySc
}
// Sanity check
- normalizeRequests(ask);
+ SchedulerUtils.normalizeRequests(ask, minimumAllocation.getMemory());
// Release containers
for (ContainerId releasedContainerId : release) {
@@ -521,21 +521,6 @@ implements ResourceScheduler, CapacitySc
return root.getQueueUserAclInfo(user);
}
- @Lock(Lock.NoLock.class)
- private void normalizeRequests(List<ResourceRequest> asks) {
- for (ResourceRequest ask : asks) {
- normalizeRequest(ask);
- }
- }
-
- @Lock(Lock.NoLock.class)
- private void normalizeRequest(ResourceRequest ask) {
- int minMemory = minimumAllocation.getMemory();
- int memory = Math.max(ask.getCapability().getMemory(), minMemory);
- ask.getCapability().setMemory (
- minMemory * ((memory/minMemory) + (memory%minMemory > 0 ? 1 : 0)));
- }
-
private synchronized void nodeUpdate(RMNode nm,
List<ContainerStatus> newlyLaunchedContainers,
List<ContainerStatus> completedContainers) {
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java Wed Nov 2 05:34:31 2011
@@ -165,6 +165,12 @@ public class CapacitySchedulerConfigurat
getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT);
return userLimit;
}
+
+ public void setUserLimit(String queue, int userLimit) {
+ setInt(getQueuePrefix(queue) + USER_LIMIT, userLimit);
+ LOG.info("setUserLimit: queuePrefix=" + getQueuePrefix(queue) +
+ ", userLimit=" + getUserLimit(queue));
+ }
public float getUserLimitFactor(String queue) {
float userLimitFactor =
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java Wed Nov 2 05:34:31 2011
@@ -41,6 +41,7 @@ import org.apache.hadoop.security.UserGr
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.ContainerToken;
import org.apache.hadoop.yarn.api.records.NodeId;
@@ -533,9 +534,9 @@ public class LeafQueue implements CSQueu
} catch (IOException ioe) {
throw new AccessControlException(ioe);
}
- if (!hasAccess(QueueACL.SUBMIT_JOB, userUgi)) {
+ if (!hasAccess(QueueACL.SUBMIT_APPLICATIONS, userUgi)) {
throw new AccessControlException("User " + userName + " cannot submit" +
- " jobs to queue " + getQueuePath());
+ " applications to queue " + getQueuePath());
}
User user = null;
@@ -698,9 +699,7 @@ public class LeafQueue implements CSQueu
application.showRequests();
synchronized (application) {
- Resource userLimit =
- computeUserLimit(application, clusterResource, Resources.none());
- setUserResourceLimit(application, userLimit);
+ computeAndSetUserResourceLimit(application, clusterResource);
for (Priority priority : application.getPriorities()) {
// Required resource
@@ -719,7 +718,7 @@ public class LeafQueue implements CSQueu
}
// User limits
- userLimit =
+ Resource userLimit =
computeUserLimit(application, clusterResource, required);
if (!assignToUser(application.getUser(), userLimit)) {
break;
@@ -740,7 +739,7 @@ public class LeafQueue implements CSQueu
// Book-keeping
allocateResource(clusterResource,
- application.getUser(), assignedResource);
+ application, assignedResource);
// Reset scheduling opportunities
application.resetSchedulingOpportunities(priority);
@@ -807,10 +806,13 @@ public class LeafQueue implements CSQueu
return true;
}
- private void setUserResourceLimit(SchedulerApp application,
- Resource resourceLimit) {
- application.setAvailableResourceLimit(resourceLimit);
- metrics.setAvailableResourcesToUser(application.getUser(), resourceLimit);
+ private void computeAndSetUserResourceLimit(SchedulerApp application,
+ Resource clusterResource) {
+ Resource userLimit =
+ computeUserLimit(application, clusterResource, Resources.none());
+ application.setAvailableResourceLimit(userLimit);
+ metrics.setAvailableResourcesToUser(application.getUser(),
+ application.getHeadroom());
}
private int roundUp(int memory) {
@@ -1064,35 +1066,26 @@ public class LeafQueue implements CSQueu
public Container createContainer(SchedulerApp application, SchedulerNode node,
Resource capability, Priority priority) {
- Container container =
- BuilderUtils.newContainer(this.recordFactory,
- application.getApplicationAttemptId(),
- application.getNewContainerId(),
- node.getNodeID(), node.getHttpAddress(),
- capability, priority);
+
+ NodeId nodeId = node.getRMNode().getNodeID();
+ ContainerId containerId = BuilderUtils.newContainerId(application
+ .getApplicationAttemptId(), application.getNewContainerId());
+ ContainerToken containerToken = null;
// If security is enabled, send the container-tokens too.
if (UserGroupInformation.isSecurityEnabled()) {
- ContainerToken containerToken =
- this.recordFactory.newRecordInstance(ContainerToken.class);
- NodeId nodeId = container.getNodeId();
- ContainerTokenIdentifier tokenidentifier = new ContainerTokenIdentifier(
- container.getId(), nodeId.toString(), container.getResource());
- containerToken.setIdentifier(
- ByteBuffer.wrap(tokenidentifier.getBytes()));
- containerToken.setKind(ContainerTokenIdentifier.KIND.toString());
- containerToken.setPassword(
- ByteBuffer.wrap(
- containerTokenSecretManager.createPassword(tokenidentifier))
- );
- // RPC layer client expects ip:port as service for tokens
- InetSocketAddress addr = NetUtils.createSocketAddr(nodeId.getHost(),
- nodeId.getPort());
- containerToken.setService(addr.getAddress().getHostAddress() + ":"
- + addr.getPort());
- container.setContainerToken(containerToken);
+ ContainerTokenIdentifier tokenIdentifier = new ContainerTokenIdentifier(
+ containerId, nodeId.toString(), capability);
+ containerToken = BuilderUtils.newContainerToken(nodeId, ByteBuffer
+ .wrap(containerTokenSecretManager
+ .createPassword(tokenIdentifier)), tokenIdentifier);
}
+ // Create the container
+ Container container = BuilderUtils.newContainer(containerId, nodeId,
+ node.getRMNode().getHttpAddress(), capability, priority,
+ containerToken);
+
return container;
}
@@ -1216,7 +1209,7 @@ public class LeafQueue implements CSQueu
// Book-keeping
releaseResource(clusterResource,
- application.getUser(), container.getResource());
+ application, container.getResource());
LOG.info("completedContainer" +
" container=" + container +
@@ -1234,32 +1227,35 @@ public class LeafQueue implements CSQueu
}
synchronized void allocateResource(Resource clusterResource,
- String userName, Resource resource) {
+ SchedulerApp application, Resource resource) {
// Update queue metrics
Resources.addTo(usedResources, resource);
updateResource(clusterResource);
++numContainers;
// Update user metrics
+ String userName = application.getUser();
User user = getUser(userName);
user.assignContainer(resource);
-
+ metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
LOG.info(getQueueName() +
" used=" + usedResources + " numContainers=" + numContainers +
" user=" + userName + " resources=" + user.getConsumedResources());
}
synchronized void releaseResource(Resource clusterResource,
- String userName, Resource resource) {
+ SchedulerApp application, Resource resource) {
// Update queue metrics
Resources.subtractFrom(usedResources, resource);
updateResource(clusterResource);
--numContainers;
// Update user metrics
+ String userName = application.getUser();
User user = getUser(userName);
user.releaseContainer(resource);
-
+ metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
+
LOG.info(getQueueName() +
" used=" + usedResources + " numContainers=" + numContainers +
" user=" + userName + " resources=" + user.getConsumedResources());
@@ -1267,12 +1263,18 @@ public class LeafQueue implements CSQueu
@Override
public synchronized void updateClusterResource(Resource clusterResource) {
+ // Update queue properties
maxActiveApplications =
computeMaxActiveApplications(clusterResource, maxAMResourcePercent,
absoluteCapacity);
maxActiveApplicationsPerUser =
computeMaxActiveApplicationsPerUser(maxActiveApplications, userLimit,
userLimitFactor);
+
+ // Update application properties
+ for (SchedulerApp application : activeApplications) {
+ computeAndSetUserResourceLimit(application, clusterResource);
+ }
}
private synchronized void updateResource(Resource clusterResource) {
@@ -1282,9 +1284,9 @@ public class LeafQueue implements CSQueu
usedResources.getMemory() / (clusterResource.getMemory() * capacity));
Resource resourceLimit =
- Resources.createResource((int)queueLimit);
+ Resources.createResource(roundUp((int)queueLimit));
metrics.setAvailableResourcesToQueue(
- Resources.subtractFrom(resourceLimit, usedResources));
+ Resources.subtractFrom(resourceLimit, usedResources));
}
@Override
@@ -1340,7 +1342,7 @@ public class LeafQueue implements CSQueu
SchedulerApp application, Container container) {
// Careful! Locking order is important!
synchronized (this) {
- allocateResource(clusterResource, application.getUser(), container.getResource());
+ allocateResource(clusterResource, application, container.getResource());
}
parent.recoverContainer(clusterResource, application, container);
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java Wed Nov 2 05:34:31 2011
@@ -217,7 +217,7 @@ public class FifoScheduler implements Re
}
// Sanity check
- normalizeRequests(ask);
+ SchedulerUtils.normalizeRequests(ask, MINIMUM_MEMORY);
// Release containers
for (ContainerId releasedContainer : release) {
@@ -260,21 +260,6 @@ public class FifoScheduler implements Re
application.getHeadroom());
}
- private void normalizeRequests(List<ResourceRequest> asks) {
- for (ResourceRequest ask : asks) {
- normalizeRequest(ask);
- }
- }
-
- private void normalizeRequest(ResourceRequest ask) {
- int memory = ask.getCapability().getMemory();
- // FIXME: TestApplicationCleanup is relying on unnormalized behavior.
- memory =
- MINIMUM_MEMORY *
- ((memory/MINIMUM_MEMORY) + (memory%MINIMUM_MEMORY > 0 ? 1 : 0));
- ask.setCapability(Resources.createResource(memory));
- }
-
private SchedulerApp getApplication(
ApplicationAttemptId applicationAttemptId) {
return applications.get(applicationAttemptId);
@@ -524,36 +509,25 @@ public class FifoScheduler implements Re
if (assignedContainers > 0) {
for (int i=0; i < assignedContainers; ++i) {
- // Create the container
- Container container =
- BuilderUtils.newContainer(recordFactory,
- application.getApplicationAttemptId(),
- application.getNewContainerId(),
- node.getRMNode().getNodeID(),
- node.getRMNode().getHttpAddress(),
- capability, priority);
-
+
+ NodeId nodeId = node.getRMNode().getNodeID();
+ ContainerId containerId = BuilderUtils.newContainerId(application
+ .getApplicationAttemptId(), application.getNewContainerId());
+ ContainerToken containerToken = null;
+
// If security is enabled, send the container-tokens too.
if (UserGroupInformation.isSecurityEnabled()) {
- ContainerToken containerToken =
- recordFactory.newRecordInstance(ContainerToken.class);
- NodeId nodeId = container.getNodeId();
- ContainerTokenIdentifier tokenidentifier =
- new ContainerTokenIdentifier(container.getId(),
- nodeId.toString(), container.getResource());
- containerToken.setIdentifier(
- ByteBuffer.wrap(tokenidentifier.getBytes()));
- containerToken.setKind(ContainerTokenIdentifier.KIND.toString());
- containerToken.setPassword(
- ByteBuffer.wrap(containerTokenSecretManager
- .createPassword(tokenidentifier)));
- // RPC layer client expects ip:port as service for tokens
- InetSocketAddress addr = NetUtils.createSocketAddr(
- nodeId.getHost(), nodeId.getPort());
- containerToken.setService(addr.getAddress().getHostAddress() + ":"
- + addr.getPort());
- container.setContainerToken(containerToken);
+ ContainerTokenIdentifier tokenIdentifier = new ContainerTokenIdentifier(
+ containerId, nodeId.toString(), capability);
+ containerToken = BuilderUtils.newContainerToken(nodeId, ByteBuffer
+ .wrap(containerTokenSecretManager
+ .createPassword(tokenIdentifier)), tokenIdentifier);
}
+
+ // Create the container
+ Container container = BuilderUtils.newContainer(containerId, nodeId,
+ node.getRMNode().getHttpAddress(), capability, priority,
+ containerToken);
// Allocate!
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java Wed Nov 2 05:34:31 2011
@@ -19,13 +19,13 @@
package org.apache.hadoop.yarn.server.resourcemanager.tools;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -33,11 +33,11 @@ import org.apache.hadoop.yarn.conf.YarnC
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.admin.AdminSecurityInfo;
import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsRequest;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
@@ -63,6 +63,7 @@ public class RMAdmin extends Configured
" [-refreshSuperUserGroupsConfiguration]" +
" [-refreshUserToGroupsMappings]" +
" [-refreshAdminAcls]" +
+ " [-refreshServiceAcl]" +
" [-help [cmd]]\n";
String refreshQueues =
@@ -84,6 +85,10 @@ public class RMAdmin extends Configured
String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
"\t\tis specified.\n";
+ String refreshServiceAcl =
+ "-refreshServiceAcl: Reload the service-level authorization policy file\n" +
+ "\t\tResourceManager will reload the authorization policy file.\n";
+
if ("refreshQueues".equals(cmd)) {
System.out.println(refreshQueues);
} else if ("refreshNodes".equals(cmd)) {
@@ -94,11 +99,18 @@ public class RMAdmin extends Configured
System.out.println(refreshSuperUserGroupsConfiguration);
} else if ("refreshAdminAcls".equals(cmd)) {
System.out.println(refreshAdminAcls);
+ } else if ("refreshServiceAcl".equals(cmd)) {
+ System.out.println(refreshServiceAcl);
} else if ("help".equals(cmd)) {
System.out.println(help);
} else {
System.out.println(summary);
System.out.println(refreshQueues);
+ System.out.println(refreshNodes);
+ System.out.println(refreshUserToGroupsMappings);
+ System.out.println(refreshSuperUserGroupsConfiguration);
+ System.out.println(refreshAdminAcls);
+ System.out.println(refreshServiceAcl);
System.out.println(help);
System.out.println();
ToolRunner.printGenericCommandUsage(System.out);
@@ -120,6 +132,8 @@ public class RMAdmin extends Configured
System.err.println("Usage: java RMAdmin" + " [-refreshSuperUserGroupsConfiguration]");
} else if ("-refreshAdminAcls".equals(cmd)){
System.err.println("Usage: java RMAdmin" + " [-refreshAdminAcls]");
+ } else if ("-refreshServiceAcl".equals(cmd)){
+ System.err.println("Usage: java RMAdmin" + " [-refreshServiceAcl]");
} else {
System.err.println("Usage: java RMAdmin");
System.err.println(" [-refreshQueues]");
@@ -127,6 +141,7 @@ public class RMAdmin extends Configured
System.err.println(" [-refreshUserToGroupsMappings]");
System.err.println(" [-refreshSuperUserGroupsConfiguration]");
System.err.println(" [-refreshAdminAcls]");
+ System.err.println(" [-refreshServiceAcl]");
System.err.println(" [-help [cmd]]");
System.err.println();
ToolRunner.printGenericCommandUsage(System.err);
@@ -145,7 +160,11 @@ public class RMAdmin extends Configured
// Create the client
final String adminAddress =
conf.get(YarnConfiguration.RM_ADMIN_ADDRESS,
- YarnConfiguration.RM_ADMIN_ADDRESS);
+ YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS);
+ final InetSocketAddress addr =
+ NetUtils.createSocketAddr(adminAddress,
+ YarnConfiguration.DEFAULT_RM_ADMIN_PORT,
+ YarnConfiguration.RM_ADMIN_ADDRESS);
final YarnRPC rpc = YarnRPC.create(conf);
RMAdminProtocol adminProtocol =
@@ -153,7 +172,7 @@ public class RMAdmin extends Configured
@Override
public RMAdminProtocol run() {
return (RMAdminProtocol) rpc.getProxy(RMAdminProtocol.class,
- NetUtils.createSocketAddr(adminAddress), conf);
+ addr, conf);
}
});
@@ -205,6 +224,15 @@ public class RMAdmin extends Configured
return 0;
}
+ private int refreshServiceAcls() throws IOException {
+ // Refresh the service acls
+ RMAdminProtocol adminProtocol = createAdminProtocol();
+ RefreshServiceAclsRequest request =
+ recordFactory.newRecordInstance(RefreshServiceAclsRequest.class);
+ adminProtocol.refreshServiceAcls(request);
+ return 0;
+ }
+
@Override
public int run(String[] args) throws Exception {
if (args.length < 1) {
@@ -219,7 +247,7 @@ public class RMAdmin extends Configured
// verify that we have enough command line parameters
//
if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) ||
- "-refreshNodes".equals(cmd) ||
+ "-refreshNodes".equals(cmd) || "-refreshServiceAcl".equals(cmd) ||
"-refreshUserToGroupsMappings".equals(cmd) ||
"-refreshSuperUserGroupsConfiguration".equals(cmd)) {
if (args.length != 1) {
@@ -240,6 +268,8 @@ public class RMAdmin extends Configured
exitCode = refreshSuperUserGroupsConfiguration();
} else if ("-refreshAdminAcls".equals(cmd)) {
exitCode = refreshAdminAcls();
+ } else if ("-refreshServiceAcl".equals(cmd)) {
+ exitCode = refreshServiceAcls();
} else if ("-help".equals(cmd)) {
if (i < args.length) {
printUsage(args[i]);
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java Wed Nov 2 05:34:31 2011
@@ -23,7 +23,6 @@ import static org.apache.hadoop.yarn.web
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
@@ -59,7 +58,8 @@ class AppsBlock extends HtmlBlock {
for (RMApp app : list.apps.values()) {
String appId = app.getApplicationId().toString();
String trackingUrl = app.getTrackingUrl();
- String ui = trackingUrl == null || trackingUrl.isEmpty() ? "UNASSIGNED" :
+ boolean trackingUrlIsNotReady = trackingUrl == null || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl);
+ String ui = trackingUrlIsNotReady ? "UNASSIGNED" :
(app.getFinishTime() == 0 ?
"ApplicationMaster" : "History");
String percent = String.format("%.1f", app.getProgress() * 100);
@@ -80,7 +80,7 @@ class AppsBlock extends HtmlBlock {
div(_PROGRESSBAR_VALUE).
$style(join("width:", percent, '%'))._()._()._().
td().
- a(trackingUrl == null || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl) ?
+ a(trackingUrlIsNotReady ?
"#" : join("http://", trackingUrl), ui)._().
td(app.getDiagnostics().toString())._();
if (list.rendering != Render.HTML && ++i >= 20) break;
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java Wed Nov 2 05:34:31 2011
@@ -31,7 +31,6 @@ import java.util.concurrent.ConcurrentMa
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
import org.apache.hadoop.yarn.webapp.ToJSON;
import org.apache.hadoop.yarn.webapp.view.JQueryUI.Render;
@@ -62,18 +61,21 @@ class AppsList implements ToJSON {
}
String appID = app.getApplicationId().toString();
String trackingUrl = app.getTrackingUrl();
- String ui = trackingUrl == null || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl) ?
- "UNASSIGNED" : (app.getFinishTime() == 0 ? "ApplicationMaster" : "JobHistory");
+ boolean trackingUrlIsNotReady = trackingUrl == null
+ || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl);
+ String ui = trackingUrlIsNotReady ? "UNASSIGNED"
+ : (app.getFinishTime() == 0 ? "ApplicationMaster" : "History");
out.append("[\"");
appendSortable(out, app.getApplicationId().getId());
appendLink(out, appID, rc.prefix(), "app", appID).append(_SEP).
append(escapeHtml(app.getUser().toString())).append(_SEP).
append(escapeHtml(app.getName().toString())).append(_SEP).
append(escapeHtml(app.getQueue())).append(_SEP).
- append(app.getState().toString()).append(_SEP);
+ append(app.getState().toString()).append(_SEP).
+ append(app.getFinalApplicationStatus().toString()).append(_SEP);
appendProgressBar(out, app.getProgress()).append(_SEP);
appendLink(out, ui, rc.prefix(),
- trackingUrl == null || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl) ?
+ trackingUrlIsNotReady ?
"#" : "http://", trackingUrl).
append(_SEP).append(escapeJavaScript(escapeHtml(
app.getDiagnostics().toString()))).
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java Wed Nov 2 05:34:31 2011
@@ -22,6 +22,7 @@ import java.util.concurrent.ConcurrentMa
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
@@ -60,6 +61,7 @@ public class MetricsOverviewTable extend
ResourceScheduler rs = rm.getResourceScheduler();
QueueMetrics metrics = rs.getRootQueueMetrics();
+ ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
int appsSubmitted = metrics.getAppsSubmitted();
int reservedGB = metrics.getReservedGB();
@@ -67,30 +69,13 @@ public class MetricsOverviewTable extend
int allocatedGB = metrics.getAllocatedGB();
int containersAllocated = metrics.getAllocatedContainers();
int totalGB = availableGB + reservedGB + allocatedGB;
-
- ConcurrentMap<NodeId,RMNode> nodes = rmContext.getRMNodes();
- int totalNodes = nodes.size();
- int lostNodes = 0;
- int unhealthyNodes = 0;
- int decommissionedNodes = 0;
- for(RMNode node: nodes.values()) {
- if(node == null || node.getState() == null) {
- lostNodes++;
- continue;
- }
- switch(node.getState()) {
- case DECOMMISSIONED:
- decommissionedNodes++;
- break;
- case LOST:
- lostNodes++;
- break;
- case UNHEALTHY:
- unhealthyNodes++;
- break;
- //RUNNING noop
- }
- }
+
+ int totalNodes = clusterMetrics.getNumNMs();
+ int lostNodes = clusterMetrics.getNumLostNMs();
+ int unhealthyNodes = clusterMetrics.getUnhealthyNMs();
+ int decommissionedNodes = clusterMetrics.getNumDecommisionedNMs();
+ int rebootedNodes = clusterMetrics.getNumRebootedNMs();
+
DIV<Hamlet> div = html.div().$class("metrics");
@@ -106,6 +91,7 @@ public class MetricsOverviewTable extend
th().$class("ui-state-default")._("Decommissioned Nodes")._().
th().$class("ui-state-default")._("Lost Nodes")._().
th().$class("ui-state-default")._("Unhealthy Nodes")._().
+ th().$class("ui-state-default")._("Rebooted Nodes")._().
_().
_().
tbody().$class("ui-widget-content").
@@ -116,9 +102,10 @@ public class MetricsOverviewTable extend
td(StringUtils.byteDesc(totalGB * BYTES_IN_GB)).
td(StringUtils.byteDesc(reservedGB * BYTES_IN_GB)).
td().a(url("nodes"),String.valueOf(totalNodes))._().
- td().a(url("nodes/DECOMMISSIONED"),String.valueOf(decommissionedNodes))._().
- td().a(url("nodes/LOST"),String.valueOf(lostNodes))._().
- td().a(url("nodes/UNHEALTHY"),String.valueOf(unhealthyNodes))._().
+ td().a(url("nodes/decommissioned"),String.valueOf(decommissionedNodes))._().
+ td().a(url("nodes/lost"),String.valueOf(lostNodes))._().
+ td().a(url("nodes/unhealthy"),String.valueOf(unhealthyNodes))._().
+ td().a(url("nodes/rebooted"),String.valueOf(rebootedNodes))._().
_().
_()._();
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java Wed Nov 2 05:34:31 2011
@@ -22,6 +22,7 @@ import static org.apache.hadoop.yarn.uti
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.webapp.WebApp;
/**
@@ -43,6 +44,8 @@ public class RMWebApp extends WebApp {
if (rm != null) {
bind(ResourceManager.class).toInstance(rm);
bind(RMContext.class).toInstance(rm.getRMContext());
+ bind(ApplicationACLsManager.class).toInstance(
+ rm.getApplicationACLsManager());
}
route("/", RmController.class);
route(pajoin("/nodes", NODE_STATE), RmController.class, "nodes");
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java Wed Nov 2 05:34:31 2011
@@ -24,15 +24,17 @@ import static org.apache.hadoop.yarn.uti
import javax.servlet.http.HttpServletResponse;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Times;
@@ -44,7 +46,14 @@ import com.google.inject.Inject;
// Do NOT rename/refactor this to RMView as it will wreak havoc
// on Mac OS HFS as its case-insensitive!
public class RmController extends Controller {
- @Inject RmController(RequestContext ctx) { super(ctx); }
+
+ private ApplicationACLsManager aclsManager;
+
+ @Inject
+ RmController(RequestContext ctx, ApplicationACLsManager aclsManager) {
+ super(ctx);
+ this.aclsManager = aclsManager;
+ }
@Override public void index() {
setTitle("Applications");
@@ -71,10 +80,29 @@ public class RmController extends Contro
setTitle("Application not found: "+ aid);
return;
}
+
+ // Verify that the remote caller is authorized to view this application.
+ String remoteUser = request().getRemoteUser();
+ UserGroupInformation callerUGI = null;
+ if (remoteUser != null) {
+ callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
+ }
+ if (callerUGI != null
+ && !this.aclsManager.checkAccess(callerUGI,
+ ApplicationAccessType.VIEW_APP, app.getUser(), appID)) {
+ setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+ setTitle("Unauthorized request for viewing application " + appID);
+ renderText("You (User " + remoteUser
+ + ") are not authorized to view the logs for application " + appID);
+ return;
+ }
+
setTitle(join("Application ", aid));
String trackingUrl = app.getTrackingUrl();
- String ui = trackingUrl == null ? "UNASSIGNED" :
- (app.getFinishTime() == 0 ? "ApplicationMaster" : "JobHistory");
+ boolean trackingUrlIsNotReady = trackingUrl == null
+ || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl);
+ String ui = trackingUrlIsNotReady ? "UNASSIGNED" :
+ (app.getFinishTime() == 0 ? "ApplicationMaster" : "History");
ResponseInfo info = info("Application Overview").
_("User:", app.getUser()).
@@ -84,8 +112,8 @@ public class RmController extends Contro
_("Started:", Times.format(app.getStartTime())).
_("Elapsed:", StringUtils.formatTime(
Times.elapsed(app.getStartTime(), app.getFinishTime()))).
- _("Tracking URL:", trackingUrl == null ? "#" :
- join("http://", trackingUrl), ui).
+ _("Tracking URL:", trackingUrlIsNotReady ?
+ "#" : join("http://", trackingUrl), ui).
_("Diagnostics:", app.getDiagnostics());
Container masterContainer = app.getCurrentAppAttempt()
.getMasterContainer();
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/RMAdminProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/RMAdminProtocol.proto?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/RMAdminProtocol.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/RMAdminProtocol.proto Wed Nov 2 05:34:31 2011
@@ -29,4 +29,5 @@ service RMAdminProtocolService {
rpc refreshSuperUserGroupsConfiguration(RefreshSuperUserGroupsConfigurationRequestProto) returns (RefreshSuperUserGroupsConfigurationResponseProto);
rpc refreshUserToGroupsMappings(RefreshUserToGroupsMappingsRequestProto) returns (RefreshUserToGroupsMappingsResponseProto);
rpc refreshAdminAcls(RefreshAdminAclsRequestProto) returns (RefreshAdminAclsResponseProto);
+ rpc refreshServiceAcls(RefreshServiceAclsRequestProto) returns (RefreshServiceAclsResponseProto);
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_service_protos.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_service_protos.proto?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_service_protos.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_service_protos.proto Wed Nov 2 05:34:31 2011
@@ -46,3 +46,9 @@ message RefreshAdminAclsRequestProto {
}
message RefreshAdminAclsResponseProto {
}
+
+message RefreshServiceAclsRequestProto {
+}
+message RefreshServiceAclsResponseProto {
+}
+
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo Wed Nov 2 05:34:31 2011
@@ -1 +1 @@
-org.apache.hadoop.yarn.security.admin.AdminSecurityInfo
+org.apache.hadoop.yarn.server.resourcemanager.security.admin.AdminSecurityInfo