You are viewing a plain text version of this content. The canonical link for it is here.
Posted to mapreduce-commits@hadoop.apache.org by to...@apache.org on 2011/09/08 03:39:23 UTC
svn commit: r1166495 [5/6] - in
/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ conf/
hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/
hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/mai...
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java Thu Sep 8 01:39:07 2011
@@ -70,6 +70,7 @@ import org.apache.hadoop.yarn.factory.pr
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
@@ -195,9 +196,11 @@ public class ClientRMService extends Abs
SubmitApplicationRequest request) throws YarnRemoteException {
ApplicationSubmissionContext submissionContext = request
.getApplicationSubmissionContext();
+ ApplicationId applicationId = null;
+ String user = null;
try {
- String user = UserGroupInformation.getCurrentUser().getShortUserName();
- ApplicationId applicationId = submissionContext.getApplicationId();
+ user = UserGroupInformation.getCurrentUser().getShortUserName();
+ applicationId = submissionContext.getApplicationId();
if (rmContext.getRMApps().get(applicationId) != null) {
throw new IOException("Application with id " + applicationId
+ " is already present! Cannot add a duplicate!");
@@ -207,8 +210,13 @@ public class ClientRMService extends Abs
LOG.info("Application with id " + applicationId.getId() +
" submitted by user " + user + " with " + submissionContext);
+ RMAuditLogger.logSuccess(user, AuditConstants.SUBMIT_APP_REQUEST,
+ "ClientRMService", applicationId);
} catch (IOException ie) {
LOG.info("Exception in submitting application", ie);
+ RMAuditLogger.logFailure(user, AuditConstants.SUBMIT_APP_REQUEST,
+ ie.getMessage(), "ClientRMService",
+ "Exception in submitting application", applicationId);
throw RPCUtil.getRemoteException(ie);
}
@@ -228,6 +236,9 @@ public class ClientRMService extends Abs
callerUGI = UserGroupInformation.getCurrentUser();
} catch (IOException ie) {
LOG.info("Error getting UGI ", ie);
+ RMAuditLogger.logFailure("UNKNOWN", AuditConstants.KILL_APP_REQUEST,
+ "UNKNOWN", "ClientRMService" , "Error getting UGI",
+ applicationId);
throw RPCUtil.getRemoteException(ie);
}
@@ -235,6 +246,10 @@ public class ClientRMService extends Abs
// TODO: What if null
if (!checkAccess(callerUGI, application.getUser(),
ApplicationACL.MODIFY_APP)) {
+ RMAuditLogger.logFailure(callerUGI.getShortUserName(),
+ AuditConstants.KILL_APP_REQUEST,
+ "User doesn't have MODIFY_APP permissions", "ClientRMService",
+ AuditConstants.UNAUTHORIZED_USER, applicationId);
throw RPCUtil.getRemoteException(new AccessControlException("User "
+ callerUGI.getShortUserName() + " cannot perform operation "
+ ApplicationACL.MODIFY_APP.name() + " on " + applicationId));
@@ -243,6 +258,8 @@ public class ClientRMService extends Abs
this.rmContext.getDispatcher().getEventHandler().handle(
new RMAppEvent(applicationId, RMAppEventType.KILL));
+ RMAuditLogger.logSuccess(callerUGI.getShortUserName(),
+ AuditConstants.KILL_APP_REQUEST, "ClientRMService" , applicationId);
FinishApplicationResponse response = recordFactory
.newRecordInstance(FinishApplicationResponse.class);
return response;
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java Thu Sep 8 01:39:07 2011
@@ -32,13 +32,18 @@ import org.apache.hadoop.yarn.event.Even
import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.util.StringUtils;
/**
* This class manages the list of applications for the resource manager.
@@ -69,6 +74,86 @@ public class RMAppManager implements Eve
RMConfig.DEFAULT_EXPIRE_APPLICATIONS_COMPLETED_MAX));
}
+ /**
+ * This class is for logging the application summary.
+ */
+ static class ApplicationSummary {
+ static final Log LOG = LogFactory.getLog(ApplicationSummary.class);
+
+ // Escape sequences
+ static final char EQUALS = '=';
+ static final char[] charsToEscape =
+ {StringUtils.COMMA, EQUALS, StringUtils.ESCAPE_CHAR};
+
+ static class SummaryBuilder {
+ final StringBuilder buffer = new StringBuilder();
+
+ // A little optimization for a very common case
+ SummaryBuilder add(String key, long value) {
+ return _add(key, Long.toString(value));
+ }
+
+ <T> SummaryBuilder add(String key, T value) {
+ return _add(key, StringUtils.escapeString(String.valueOf(value),
+ StringUtils.ESCAPE_CHAR, charsToEscape));
+ }
+
+ SummaryBuilder add(SummaryBuilder summary) {
+ if (buffer.length() > 0) buffer.append(StringUtils.COMMA);
+ buffer.append(summary.buffer);
+ return this;
+ }
+
+ SummaryBuilder _add(String key, String value) {
+ if (buffer.length() > 0) buffer.append(StringUtils.COMMA);
+ buffer.append(key).append(EQUALS).append(value);
+ return this;
+ }
+
+ @Override public String toString() {
+ return buffer.toString();
+ }
+ }
+
+ /**
+ * Create a summary of the application's runtime.
+ *
+ * @param app {@link RMApp} whose summary is to be created, cannot
+ * be <code>null</code>.
+ */
+ public static SummaryBuilder createAppSummary(RMApp app) {
+ String trackingUrl = "N/A";
+ String host = "N/A";
+ RMAppAttempt attempt = app.getCurrentAppAttempt();
+ if (attempt != null) {
+ trackingUrl = attempt.getTrackingUrl();
+ host = attempt.getHost();
+ }
+ SummaryBuilder summary = new SummaryBuilder()
+ .add("appId", app.getApplicationId())
+ .add("name", app.getName())
+ .add("user", app.getUser())
+ .add("queue", app.getQueue())
+ .add("state", app.getState())
+ .add("trackingUrl", trackingUrl)
+ .add("appMasterHost", host)
+ .add("startTime", app.getStartTime())
+ .add("finishTime", app.getFinishTime());
+ return summary;
+ }
+
+ /**
+ * Log a summary of the application's runtime.
+ *
+ * @param app {@link RMApp} whose summary is to be logged
+ */
+ public static void logAppSummary(RMApp app) {
+ if (app != null) {
+ LOG.info(createAppSummary(app));
+ }
+ }
+ }
+
protected void setCompletedAppsMax(int max) {
this.completedAppsMax = max;
}
@@ -82,8 +167,39 @@ public class RMAppManager implements Eve
LOG.error("RMAppManager received completed appId of null, skipping");
} else {
completedApps.add(appId);
+ writeAuditLog(appId);
}
- };
+ }
+
+ protected void writeAuditLog(ApplicationId appId) {
+ RMApp app = rmContext.getRMApps().get(appId);
+ String operation = "UNKNOWN";
+ boolean success = false;
+ switch (app.getState()) {
+ case FAILED:
+ operation = AuditConstants.FINISH_FAILED_APP;
+ break;
+ case FINISHED:
+ operation = AuditConstants.FINISH_SUCCESS_APP;
+ success = true;
+ break;
+ case KILLED:
+ operation = AuditConstants.FINISH_KILLED_APP;
+ success = true;
+ break;
+ default:
+ }
+
+ if (success) {
+ RMAuditLogger.logSuccess(app.getUser(), operation,
+ "RMAppManager", app.getApplicationId());
+ } else {
+ StringBuilder diag = app.getDiagnostics();
+ String msg = diag == null ? null : diag.toString();
+ RMAuditLogger.logFailure(app.getUser(), operation, msg, "RMAppManager",
+ "App failed with state: " + app.getState(), appId);
+ }
+ }
/*
* check to see if hit the limit for max # completed apps kept
@@ -154,6 +270,7 @@ public class RMAppManager implements Eve
case APP_COMPLETED:
{
addCompletedApp(appID);
+ ApplicationSummary.logAppSummary(rmContext.getRMApps().get(appID));
checkAppNumCompletedLimit();
}
break;
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java Thu Sep 8 01:39:07 2011
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -460,7 +461,7 @@ public class ResourceManager extends Com
/**
* return the scheduler.
- * @return
+ * @return the scheduler for the Resource Manager.
*/
@Private
public ResourceScheduler getResourceScheduler() {
@@ -469,7 +470,7 @@ public class ResourceManager extends Com
/**
* return the resource tracking component.
- * @return
+ * @return the resource tracking component.
*/
@Private
public ResourceTrackerService getResourceTrackerService() {
@@ -488,6 +489,7 @@ public class ResourceManager extends Com
}
public static void main(String argv[]) {
+ StringUtils.startupShutdownMessage(ResourceManager.class, argv, LOG);
ResourceManager resourceManager = null;
try {
Configuration conf = new YarnConfiguration();
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java Thu Sep 8 01:39:07 2011
@@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.server.sec
/**
* This interface is the one implemented by the schedulers. It mainly extends
- * {@link ResourceListener} and {@link YarnScheduler}.
+ * {@link YarnScheduler}.
*
*/
@LimitedPrivate("yarn")
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java Thu Sep 8 01:39:07 2011
@@ -39,6 +39,8 @@ import org.apache.hadoop.yarn.api.record
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
@@ -73,7 +75,11 @@ public class SchedulerApp {
final Map<Priority, Map<NodeId, RMContainer>> reservedContainers =
new HashMap<Priority, Map<NodeId, RMContainer>>();
- Map<Priority, Integer> schedulingOpportunities = new HashMap<Priority, Integer>();
+ Map<Priority, Integer> schedulingOpportunities =
+ new HashMap<Priority, Integer>();
+
+ Map<Priority, Integer> reReservations =
+ new HashMap<Priority, Integer>();
Resource currentReservation = recordFactory
.newRecordInstance(Resource.class);
@@ -178,6 +184,10 @@ public class SchedulerApp {
// Remove from the list of containers
liveContainers.remove(rmContainer.getContainerId());
+
+ RMAuditLogger.logSuccess(getUser(),
+ AuditConstants.RELEASE_CONTAINER, "SchedulerApp",
+ getApplicationId(), containerId);
// Update usage metrics
Resource containerResource = rmContainer.getContainer().getResource();
@@ -213,6 +223,9 @@ public class SchedulerApp {
+ " container=" + container.getId() + " host="
+ container.getNodeId().getHost() + " type=" + type);
}
+ RMAuditLogger.logSuccess(getUser(),
+ AuditConstants.ALLOC_CONTAINER, "SchedulerApp",
+ getApplicationId(), container.getId());
// Add it to allContainers list.
newlyAllocatedContainers.add(rmContainer);
@@ -265,15 +278,15 @@ public class SchedulerApp {
}
synchronized public void resetSchedulingOpportunities(Priority priority) {
- Integer schedulingOpportunities = this.schedulingOpportunities
- .get(priority);
+ Integer schedulingOpportunities =
+ this.schedulingOpportunities.get(priority);
schedulingOpportunities = 0;
this.schedulingOpportunities.put(priority, schedulingOpportunities);
}
synchronized public void addSchedulingOpportunity(Priority priority) {
- Integer schedulingOpportunities = this.schedulingOpportunities
- .get(priority);
+ Integer schedulingOpportunities =
+ this.schedulingOpportunities.get(priority);
if (schedulingOpportunities == null) {
schedulingOpportunities = 0;
}
@@ -282,8 +295,8 @@ public class SchedulerApp {
}
synchronized public int getSchedulingOpportunities(Priority priority) {
- Integer schedulingOpportunities = this.schedulingOpportunities
- .get(priority);
+ Integer schedulingOpportunities =
+ this.schedulingOpportunities.get(priority);
if (schedulingOpportunities == null) {
schedulingOpportunities = 0;
this.schedulingOpportunities.put(priority, schedulingOpportunities);
@@ -291,6 +304,30 @@ public class SchedulerApp {
return schedulingOpportunities;
}
+ synchronized void resetReReservations(Priority priority) {
+ Integer reReservations = this.reReservations.get(priority);
+ reReservations = 0;
+ this.reReservations.put(priority, reReservations);
+ }
+
+ synchronized void addReReservation(Priority priority) {
+ Integer reReservations = this.reReservations.get(priority);
+ if (reReservations == null) {
+ reReservations = 0;
+ }
+ ++reReservations;
+ this.reReservations.put(priority, reReservations);
+ }
+
+ synchronized public int getReReservations(Priority priority) {
+ Integer reReservations = this.reReservations.get(priority);
+ if (reReservations == null) {
+ reReservations = 0;
+ this.reReservations.put(priority, reReservations);
+ }
+ return reReservations;
+ }
+
public synchronized int getNumReservedContainers(Priority priority) {
Map<NodeId, RMContainer> reservedContainers =
this.reservedContainers.get(priority);
@@ -318,6 +355,12 @@ public class SchedulerApp {
rmContext.getContainerAllocationExpirer());
Resources.addTo(currentReservation, container.getResource());
+
+ // Reset the re-reservation count
+ resetReReservations(priority);
+ } else {
+ // Note down the re-reservation
+ addReReservation(priority);
}
rmContainer.handle(new RMContainerReservedEvent(container.getId(),
container.getResource(), node.getNodeID(), priority));
@@ -347,6 +390,9 @@ public class SchedulerApp {
this.reservedContainers.remove(priority);
}
+ // Reset the re-reservation count
+ resetReReservations(priority);
+
Resource resource = reservedContainer.getContainer().getResource();
Resources.subtractFrom(currentReservation, resource);
@@ -360,7 +406,7 @@ public class SchedulerApp {
* given <code>priority</code>?
* @param node node to be checked
* @param priority priority of reserved container
- * @return
+ * @return true if reserved, false if not
*/
public synchronized boolean isReserved(SchedulerNode node, Priority priority) {
Map<NodeId, RMContainer> reservedContainers =
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java Thu Sep 8 01:39:07 2011
@@ -90,7 +90,7 @@ public class SchedulerNode {
* given application.
*
* @param applicationId application
- * @param containers allocated containers
+ * @param rmContainer allocated container
*/
public synchronized void allocateContainer(ApplicationId applicationId,
RMContainer rmContainer) {
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java Thu Sep 8 01:39:07 2011
@@ -46,6 +46,8 @@ import org.apache.hadoop.yarn.api.record
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
@@ -167,6 +169,11 @@ implements ResourceScheduler, CapacitySc
}
@Override
+ public Resource getClusterResources() {
+ return clusterResource;
+ }
+
+ @Override
public synchronized void reinitialize(Configuration conf,
ContainerTokenSecretManager containerTokenSecretManager, RMContext rmContext)
throws IOException {
@@ -348,6 +355,8 @@ implements ResourceScheduler, CapacitySc
try {
queue.submitApplication(SchedulerApp, user, queueName);
} catch (AccessControlException ace) {
+ LOG.info("Failed to submit application " + applicationAttemptId +
+ " to queue " + queueName + " from user " + user, ace);
this.rmContext.getDispatcher().getEventHandler().handle(
new RMAppAttemptRejectedEvent(applicationAttemptId,
ace.toString()));
@@ -428,8 +437,15 @@ implements ResourceScheduler, CapacitySc
// Release containers
for (ContainerId releasedContainerId : release) {
- completedContainer(getRMContainer(releasedContainerId),
- RMContainerEventType.RELEASED);
+ RMContainer rmContainer = getRMContainer(releasedContainerId);
+ if (rmContainer == null) {
+ RMAuditLogger.logFailure(application.getUser(),
+ AuditConstants.RELEASE_CONTAINER,
+ "Unauthorized access or invalid container", "CapacityScheduler",
+ "Trying to release container not owned by app or with invalid id",
+ application.getApplicationId(), releasedContainerId);
+ }
+ completedContainer(rmContainer, RMContainerEventType.RELEASED);
}
synchronized (application) {
@@ -621,6 +637,7 @@ implements ResourceScheduler, CapacitySc
private synchronized void addNode(RMNode nodeManager) {
this.nodes.put(nodeManager.getNodeID(), new SchedulerNode(nodeManager));
Resources.addTo(clusterResource, nodeManager.getTotalCapability());
+ root.updateClusterResource(clusterResource);
++numNodeManagers;
LOG.info("Added node " + nodeManager.getNodeAddress() +
" clusterResource: " + clusterResource);
@@ -629,6 +646,7 @@ implements ResourceScheduler, CapacitySc
private synchronized void removeNode(RMNode nodeInfo) {
SchedulerNode node = this.nodes.get(nodeInfo.getNodeID());
Resources.subtractFrom(clusterResource, nodeInfo.getTotalCapability());
+ root.updateClusterResource(clusterResource);
--numNodeManagers;
// Remove running containers
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java Thu Sep 8 01:39:07 2011
@@ -50,6 +50,10 @@ public class CapacitySchedulerConfigurat
PREFIX + "maximum-applications";
@Private
+ public static final String MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT =
+ PREFIX + "maximum-am-resource-percent";
+
+ @Private
public static final String QUEUES = "queues";
@Private
@@ -83,6 +87,10 @@ public class CapacitySchedulerConfigurat
public static final int DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS = 10000;
@Private
+ public static final float
+ DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT = 0.1f;
+
+ @Private
public static final int UNDEFINED = -1;
@Private
@@ -124,6 +132,11 @@ public class CapacitySchedulerConfigurat
return maxApplications;
}
+ public float getMaximumApplicationMasterResourcePercent() {
+ return getFloat(MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT,
+ DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT);
+ }
+
public int getCapacity(String queue) {
int capacity = getInt(getQueuePrefix(queue) + CAPACITY, UNDEFINED);
if (capacity < MINIMUM_CAPACITY_VALUE || capacity > MAXIMUM_CAPACITY_VALUE) {
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java Thu Sep 8 01:39:07 2011
@@ -19,7 +19,6 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
@@ -38,4 +37,6 @@ public interface CapacitySchedulerContex
int getNumClusterNodes();
RMContext getRMContext();
+
+ Resource getClusterResources();
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java Thu Sep 8 01:39:07 2011
@@ -24,6 +24,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -77,16 +78,25 @@ public class LeafQueue implements Queue
private int maxApplications;
private int maxApplicationsPerUser;
+
+ private float maxAMResourcePercent;
+ private int maxActiveApplications;
+ private int maxActiveApplicationsPerUser;
+
private Resource usedResources = Resources.createResource(0);
private float utilization = 0.0f;
private float usedCapacity = 0.0f;
private volatile int numContainers;
- Set<SchedulerApp> applications;
+ Set<SchedulerApp> activeApplications;
Map<ApplicationAttemptId, SchedulerApp> applicationsMap =
new HashMap<ApplicationAttemptId, SchedulerApp>();
- public final Resource minimumAllocation;
+ Set<SchedulerApp> pendingApplications;
+
+ private final Resource minimumAllocation;
+ private final Resource maximumAllocation;
+ private final float minimumAllocationFactor;
private ContainerTokenSecretManager containerTokenSecretManager;
@@ -106,6 +116,8 @@ public class LeafQueue implements Queue
private CapacitySchedulerContext scheduler;
+ final static int DEFAULT_AM_RESOURCE = 2 * 1024;
+
public LeafQueue(CapacitySchedulerContext cs,
String queueName, Queue parent,
Comparator<SchedulerApp> applicationComparator, Queue old) {
@@ -118,6 +130,10 @@ public class LeafQueue implements Queue
cs.getConfiguration().getEnableUserMetrics());
this.minimumAllocation = cs.getMinimumResourceCapability();
+ this.maximumAllocation = cs.getMaximumResourceCapability();
+ this.minimumAllocationFactor =
+ (float)(maximumAllocation.getMemory() - minimumAllocation.getMemory()) /
+ maximumAllocation.getMemory();
this.containerTokenSecretManager = cs.getContainerTokenSecretManager();
float capacity =
@@ -138,6 +154,15 @@ public class LeafQueue implements Queue
int maxApplicationsPerUser =
(int)(maxApplications * (userLimit / 100.0f) * userLimitFactor);
+ this.maxAMResourcePercent =
+ cs.getConfiguration().getMaximumApplicationMasterResourcePercent();
+ int maxActiveApplications =
+ computeMaxActiveApplications(cs.getClusterResources(),
+ maxAMResourcePercent, absoluteCapacity);
+ int maxActiveApplicationsPerUser =
+ computeMaxActiveApplicationsPerUser(maxActiveApplications, userLimit,
+ userLimitFactor);
+
this.queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
this.queueInfo.setQueueName(queueName);
this.queueInfo.setChildQueues(new ArrayList<QueueInfo>());
@@ -151,20 +176,38 @@ public class LeafQueue implements Queue
maximumCapacity, absoluteMaxCapacity,
userLimit, userLimitFactor,
maxApplications, maxApplicationsPerUser,
+ maxActiveApplications, maxActiveApplicationsPerUser,
state, acls);
LOG.info("DEBUG --- LeafQueue:" +
" name=" + queueName +
", fullname=" + getQueuePath());
- this.applications = new TreeSet<SchedulerApp>(applicationComparator);
+ this.pendingApplications =
+ new TreeSet<SchedulerApp>(applicationComparator);
+ this.activeApplications = new TreeSet<SchedulerApp>(applicationComparator);
}
+ private int computeMaxActiveApplications(Resource clusterResource,
+ float maxAMResourcePercent, float absoluteCapacity) {
+ return
+ Math.max(
+ (int)((clusterResource.getMemory() / DEFAULT_AM_RESOURCE) *
+ maxAMResourcePercent * absoluteCapacity),
+ 1);
+ }
+
+ private int computeMaxActiveApplicationsPerUser(int maxActiveApplications,
+ int userLimit, float userLimitFactor) {
+ return (int)(maxActiveApplications * (userLimit / 100.0f) * userLimitFactor);
+ }
+
private synchronized void setupQueueConfigs(
float capacity, float absoluteCapacity,
float maxCapacity, float absoluteMaxCapacity,
int userLimit, float userLimitFactor,
int maxApplications, int maxApplicationsPerUser,
+ int maxActiveApplications, int maxActiveApplicationsPerUser,
QueueState state, Map<QueueACL, AccessControlList> acls)
{
this.capacity = capacity;
@@ -179,6 +222,9 @@ public class LeafQueue implements Queue
this.maxApplications = maxApplications;
this.maxApplicationsPerUser = maxApplicationsPerUser;
+ this.maxActiveApplications = maxActiveApplications;
+ this.maxActiveApplicationsPerUser = maxActiveApplicationsPerUser;
+
this.state = state;
this.acls = acls;
@@ -239,6 +285,46 @@ public class LeafQueue implements Queue
return parent.getQueuePath() + "." + getQueueName();
}
+ /**
+ * Used only by tests.
+ */
+ @Private
+ public Resource getMinimumAllocation() {
+ return minimumAllocation;
+ }
+
+ /**
+ * Used only by tests.
+ */
+ @Private
+ public Resource getMaximumAllocation() {
+ return maximumAllocation;
+ }
+
+ /**
+ * Used only by tests.
+ */
+ @Private
+ public float getMinimumAllocationFactor() {
+ return minimumAllocationFactor;
+ }
+
+ public int getMaxApplications() {
+ return maxApplications;
+ }
+
+ public int getMaxApplicationsPerUser() {
+ return maxApplicationsPerUser;
+ }
+
+ public int getMaximumActiveApplications() {
+ return maxActiveApplications;
+ }
+
+ public int getMaximumActiveApplicationsPerUser() {
+ return maxActiveApplicationsPerUser;
+ }
+
@Override
public synchronized float getUsedCapacity() {
return usedCapacity;
@@ -299,10 +385,34 @@ public class LeafQueue implements Queue
this.parent = parent;
}
+ @Override
public synchronized int getNumApplications() {
- return applications.size();
+ return getNumPendingApplications() + getNumActiveApplications();
+ }
+
+ public synchronized int getNumPendingApplications() {
+ return pendingApplications.size();
+ }
+
+ public synchronized int getNumActiveApplications() {
+ return activeApplications.size();
+ }
+
+ @Private
+ public synchronized int getNumApplications(String user) {
+ return getUser(user).getTotalApplications();
+ }
+
+ @Private
+ public synchronized int getNumPendingApplications(String user) {
+ return getUser(user).getPendingApplications();
}
+ @Private
+ public synchronized int getNumActiveApplications(String user) {
+ return getUser(user).getActiveApplications();
+ }
+
public synchronized int getNumContainers() {
return numContainers;
}
@@ -312,6 +422,16 @@ public class LeafQueue implements Queue
return state;
}
+ @Private
+ public int getUserLimit() {
+ return userLimit;
+ }
+
+ @Private
+ public float getUserLimitFactor() {
+ return userLimitFactor;
+ }
+
@Override
public synchronized Map<QueueACL, AccessControlList> getQueueAcls() {
return new HashMap<QueueACL, AccessControlList>(acls);
@@ -374,6 +494,8 @@ public class LeafQueue implements Queue
leafQueue.maximumCapacity, leafQueue.absoluteMaxCapacity,
leafQueue.userLimit, leafQueue.userLimitFactor,
leafQueue.maxApplications, leafQueue.maxApplicationsPerUser,
+ leafQueue.maxActiveApplications,
+ leafQueue.maxActiveApplicationsPerUser,
leafQueue.state, leafQueue.acls);
updateResource(clusterResource);
@@ -413,7 +535,7 @@ public class LeafQueue implements Queue
synchronized (this) {
// Check if the queue is accepting jobs
- if (state != QueueState.RUNNING) {
+ if (getState() != QueueState.RUNNING) {
String msg = "Queue " + getQueuePath() +
" is STOPPED. Cannot accept submission of application: " +
application.getApplicationId();
@@ -422,7 +544,7 @@ public class LeafQueue implements Queue
}
// Check submission limits for queues
- if (getNumApplications() >= maxApplications) {
+ if (getNumApplications() >= getMaxApplications()) {
String msg = "Queue " + getQueuePath() +
" already has " + getNumApplications() + " applications," +
" cannot accept submission of application: " +
@@ -433,9 +555,9 @@ public class LeafQueue implements Queue
// Check submission limits for the user on this queue
user = getUser(userName);
- if (user.getApplications() >= maxApplicationsPerUser) {
+ if (user.getTotalApplications() >= getMaxApplicationsPerUser()) {
String msg = "Queue " + getQueuePath() +
- " already has " + user.getApplications() +
+ " already has " + user.getTotalApplications() +
" applications from user " + userName +
" cannot accept submission of application: " +
application.getApplicationId();
@@ -460,17 +582,46 @@ public class LeafQueue implements Queue
}
}
+ private synchronized void activateApplications() {
+ for (Iterator<SchedulerApp> i=pendingApplications.iterator();
+ i.hasNext(); ) {
+ SchedulerApp application = i.next();
+
+ // Check queue limit
+ if (getNumActiveApplications() >= getMaximumActiveApplications()) {
+ break;
+ }
+
+ // Check user limit
+ User user = getUser(application.getUser());
+ if (user.getActiveApplications() < getMaximumActiveApplicationsPerUser()) {
+ user.activateApplication();
+ activeApplications.add(application);
+ i.remove();
+ LOG.info("Application " + application.getApplicationId().getId() +
+ " from user: " + application.getUser() +
+ " activated in queue: " + getQueueName());
+ }
+ }
+ }
+
private synchronized void addApplication(SchedulerApp application, User user) {
// Accept
user.submitApplication();
- applications.add(application);
+ pendingApplications.add(application);
applicationsMap.put(application.getApplicationAttemptId(), application);
+ // Activate applications
+ activateApplications();
+
LOG.info("Application added -" +
" appId: " + application.getApplicationId() +
" user: " + user + "," + " leaf-queue: " + getQueueName() +
- " #user-applications: " + user.getApplications() +
- " #queue-applications: " + getNumApplications());
+ " #user-pending-applications: " + user.getPendingApplications() +
+ " #user-active-applications: " + user.getActiveApplications() +
+ " #queue-pending-applications: " + getNumPendingApplications() +
+ " #queue-active-applications: " + getNumActiveApplications()
+ );
}
@Override
@@ -485,20 +636,26 @@ public class LeafQueue implements Queue
}
public synchronized void removeApplication(SchedulerApp application, User user) {
- applications.remove(application);
+ activeApplications.remove(application);
applicationsMap.remove(application.getApplicationAttemptId());
user.finishApplication();
- if (user.getApplications() == 0) {
+ if (user.getTotalApplications() == 0) {
users.remove(application.getUser());
}
+ // Check if we can activate more applications
+ activateApplications();
+
LOG.info("Application removed -" +
" appId: " + application.getApplicationId() +
" user: " + application.getUser() +
" queue: " + getQueueName() +
- " #user-applications: " + user.getApplications() +
- " #queue-applications: " + getNumApplications());
+ " #user-pending-applications: " + user.getPendingApplications() +
+ " #user-active-applications: " + user.getActiveApplications() +
+ " #queue-pending-applications: " + getNumPendingApplications() +
+ " #queue-active-applications: " + getNumActiveApplications()
+ );
}
private synchronized SchedulerApp getApplication(
@@ -512,7 +669,7 @@ public class LeafQueue implements Queue
LOG.info("DEBUG --- assignContainers:" +
" node=" + node.getHostName() +
- " #applications=" + applications.size());
+ " #applications=" + activeApplications.size());
// Check for reserved resources
RMContainer reservedContainer = node.getReservedContainer();
@@ -524,7 +681,7 @@ public class LeafQueue implements Queue
}
// Try to assign containers to applications in order
- for (SchedulerApp application : applications) {
+ for (SchedulerApp application : activeApplications) {
LOG.info("DEBUG --- pre-assignContainers for application "
+ application.getApplicationId());
@@ -536,25 +693,24 @@ public class LeafQueue implements Queue
setUserResourceLimit(application, userLimit);
for (Priority priority : application.getPriorities()) {
+ // Required resource
+ Resource required =
+ application.getResourceRequest(priority, RMNode.ANY).getCapability();
// Do we need containers at this 'priority'?
- if (!needContainers(application, priority)) {
+ if (!needContainers(application, priority, required)) {
continue;
}
// Are we going over limits by allocating to this application?
- ResourceRequest required =
- application.getResourceRequest(priority, RMNode.ANY);
-
// Maximum Capacity of the queue
- if (!assignToQueue(clusterResource, required.getCapability())) {
+ if (!assignToQueue(clusterResource, required)) {
return Resources.none();
}
// User limits
userLimit =
- computeUserLimit(application, clusterResource,
- required.getCapability());
+ computeUserLimit(application, clusterResource, required);
if (!assignToUser(application.getUser(), userLimit)) {
break;
}
@@ -732,10 +888,32 @@ public class LeafQueue implements Queue
return (a + (b - 1)) / b;
}
- boolean needContainers(SchedulerApp application, Priority priority) {
+ boolean needContainers(SchedulerApp application, Priority priority, Resource required) {
int requiredContainers = application.getTotalRequiredResources(priority);
int reservedContainers = application.getNumReservedContainers(priority);
- return ((requiredContainers - reservedContainers) > 0);
+ int starvation = 0;
+ if (reservedContainers > 0) {
+ float nodeFactor =
+ ((float)required.getMemory() / getMaximumAllocation().getMemory());
+
+ // Use percentage of node required to bias against large containers...
+ // Protect against corner case where you need the whole node with
+ // Math.min(nodeFactor, minimumAllocationFactor)
+ starvation =
+ (int)((application.getReReservations(priority) / reservedContainers) *
+ (1.0f - (Math.min(nodeFactor, getMinimumAllocationFactor())))
+ );
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("needsContainers:" +
+ " app.#re-reserve=" + application.getReReservations(priority) +
+ " reserved=" + reservedContainers +
+ " nodeFactor=" + nodeFactor +
+ " minAllocFactor=" + minimumAllocationFactor +
+ " starvation=" + starvation);
+ }
+ }
+ return (((starvation + requiredContainers) - reservedContainers) > 0);
}
private Resource assignContainersOnNode(Resource clusterResource,
@@ -1068,7 +1246,16 @@ public class LeafQueue implements Queue
}
@Override
- public synchronized void updateResource(Resource clusterResource) {
+ public synchronized void updateClusterResource(Resource clusterResource) {
+ maxActiveApplications =
+ computeMaxActiveApplications(clusterResource, maxAMResourcePercent,
+ absoluteCapacity);
+ maxActiveApplicationsPerUser =
+ computeMaxActiveApplicationsPerUser(maxActiveApplications, userLimit,
+ userLimitFactor);
+ }
+
+ private synchronized void updateResource(Resource clusterResource) {
float queueLimit = clusterResource.getMemory() * absoluteCapacity;
setUtilization(usedResources.getMemory() / queueLimit);
setUsedCapacity(
@@ -1087,22 +1274,36 @@ public class LeafQueue implements Queue
static class User {
Resource consumed = Resources.createResource(0);
- int applications = 0;
+ int pendingApplications = 0;
+ int activeApplications = 0;
public Resource getConsumedResources() {
return consumed;
}
- public int getApplications() {
- return applications;
+ public int getPendingApplications() {
+ return pendingApplications;
}
+ public int getActiveApplications() {
+ return activeApplications;
+ }
+
+ public int getTotalApplications() {
+ return getPendingApplications() + getActiveApplications();
+ }
+
public synchronized void submitApplication() {
- ++applications;
+ ++pendingApplications;
+ }
+
+ public synchronized void activateApplication() {
+ --pendingApplications;
+ ++activeApplications;
}
public synchronized void finishApplication() {
- --applications;
+ --activeApplications;
}
public synchronized void assignContainer(Resource resource) {
@@ -1124,4 +1325,5 @@ public class LeafQueue implements Queue
parent.recoverContainer(clusterResource, application, container);
}
+
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java Thu Sep 8 01:39:07 2011
@@ -646,7 +646,14 @@ public class ParentQueue implements Queu
}
@Override
- public synchronized void updateResource(Resource clusterResource) {
+ public synchronized void updateClusterResource(Resource clusterResource) {
+ // Update all children
+ for (Queue childQueue : childQueues) {
+ childQueue.updateClusterResource(clusterResource);
+ }
+ }
+
+ private synchronized void updateResource(Resource clusterResource) {
float queueLimit = clusterResource.getMemory() * absoluteCapacity;
setUtilization(usedResources.getMemory() / queueLimit);
setUsedCapacity(
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java Thu Sep 8 01:39:07 2011
@@ -29,7 +29,6 @@ import org.apache.hadoop.yarn.api.record
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
@@ -155,7 +154,7 @@ extends org.apache.hadoop.yarn.server.re
* Assign containers to applications in the queue or it's children (if any).
* @param clusterResource the resource of the cluster.
* @param node node on which resources are available
- * @return
+ * @return the resource that is being assigned.
*/
public Resource assignContainers(Resource clusterResource, SchedulerNode node);
@@ -191,7 +190,7 @@ extends org.apache.hadoop.yarn.server.re
* Update the cluster resource for queues as we add/remove nodes
* @param clusterResource the current cluster resource
*/
- public void updateResource(Resource clusterResource);
+ public void updateClusterResource(Resource clusterResource);
/**
* Recover the state of the queue
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java Thu Sep 8 01:39:07 2011
@@ -55,6 +55,8 @@ import org.apache.hadoop.yarn.conf.YarnC
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
@@ -225,8 +227,15 @@ public class FifoScheduler implements Re
// Release containers
for (ContainerId releasedContainer : release) {
- containerCompleted(getRMContainer(releasedContainer),
- RMContainerEventType.RELEASED);
+ RMContainer rmContainer = getRMContainer(releasedContainer);
+ if (rmContainer == null) {
+ RMAuditLogger.logFailure(application.getUser(),
+ AuditConstants.RELEASE_CONTAINER,
+ "Unauthorized access or invalid container", "FifoScheduler",
+ "Trying to release container not owned by app or with invalid id",
+ application.getApplicationId(), releasedContainer);
+ }
+ containerCompleted(rmContainer, RMContainerEventType.RELEASED);
}
if (!ask.isEmpty()) {
@@ -642,6 +651,11 @@ public class FifoScheduler implements Re
@Lock(FifoScheduler.class)
private synchronized void containerCompleted(RMContainer rmContainer,
RMContainerEventType event) {
+ if (rmContainer == null) {
+ LOG.info("Null container completed...");
+ return;
+ }
+
// Get the application for the finished container
Container container = rmContainer.getContainer();
ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId();
@@ -725,7 +739,7 @@ public class FifoScheduler implements Re
private RMContainer getRMContainer(ContainerId containerId) {
SchedulerApp application =
getApplication(containerId.getAppAttemptId());
- return application.getRMContainer(containerId);
+ return (application == null) ? null : application.getRMContainer(containerId);
}
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml Thu Sep 8 01:39:07 2011
@@ -6,6 +6,11 @@
</property>
<property>
+ <name>yarn.capacity-scheduler.maximum-am-resource-percent</name>
+ <value>0.1</value>
+ </property>
+
+ <property>
<name>yarn.capacity-scheduler.root.queues</name>
<value>default</value>
</property>
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java Thu Sep 8 01:39:07 2011
@@ -36,6 +36,9 @@ public class MockRMApp implements RMApp
RMAppState state = RMAppState.NEW;
int failCount = 0;
ApplicationId id;
+ String url = null;
+ StringBuilder diagnostics = new StringBuilder();
+ RMAppAttempt attempt;
public MockRMApp(int newid, long time, RMAppState newState) {
finish = time;
@@ -48,6 +51,11 @@ public class MockRMApp implements RMApp
user = userName;
}
+ public MockRMApp(int newid, long time, RMAppState newState, String userName, String diag) {
+ this(newid, time, newState, userName);
+ this.diagnostics = new StringBuilder(diag);
+ }
+
@Override
public ApplicationId getApplicationId() {
return id;
@@ -58,11 +66,19 @@ public class MockRMApp implements RMApp
return state;
}
+ public void setState(RMAppState state) {
+ this.state = state;
+ }
+
@Override
public String getUser() {
return user;
}
+ public void setUser(String user) {
+ this.user = user;
+ }
+
@Override
public float getProgress() {
return (float) 0.0;
@@ -78,14 +94,26 @@ public class MockRMApp implements RMApp
return queue;
}
+ public void setQueue(String queue) {
+ this.queue = queue;
+ }
+
@Override
public String getName() {
return name;
}
+ public void setName(String name) {
+ this.name = name;
+ }
+
@Override
public RMAppAttempt getCurrentAppAttempt() {
- throw new UnsupportedOperationException("Not supported yet.");
+ return attempt;
+ }
+
+ public void setCurrentAppAttempt(RMAppAttempt attempt) {
+ this.attempt = attempt;
}
@Override
@@ -103,19 +131,35 @@ public class MockRMApp implements RMApp
return finish;
}
+ public void setFinishTime(long time) {
+ this.finish = time;
+ }
+
@Override
public long getStartTime() {
return start;
}
+ public void setStartTime(long time) {
+ this.start = time;
+ }
+
@Override
public String getTrackingUrl() {
- throw new UnsupportedOperationException("Not supported yet.");
+ return url;
+ }
+
+ public void setTrackingUrl(String url) {
+ this.url = url;
}
@Override
public StringBuilder getDiagnostics() {
- throw new UnsupportedOperationException("Not supported yet.");
+ return diagnostics;
+ }
+
+ public void setDiagnostics(String diag) {
+ this.diagnostics = new StringBuilder(diag);
}
public void handle(RMAppEvent event) {
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java Thu Sep 8 01:39:07 2011
@@ -83,8 +83,12 @@ public class TestLeafQueue {
csContext = mock(CapacitySchedulerContext.class);
when(csContext.getConfiguration()).thenReturn(csConf);
- when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
- when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16*GB));
+ when(csContext.getMinimumResourceCapability()).
+ thenReturn(Resources.createResource(GB));
+ when(csContext.getMaximumResourceCapability()).
+ thenReturn(Resources.createResource(16*GB));
+ when(csContext.getClusterResources()).
+ thenReturn(Resources.createResource(100 * 16 * GB));
root =
CapacityScheduler.parseQueue(csContext, csConf, null, "root",
queues, queues,
@@ -447,7 +451,7 @@ public class TestLeafQueue {
SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB);
final int numNodes = 1;
- Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+ Resource clusterResource = Resources.createResource(numNodes * (4*GB));
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Setup resource-requests
@@ -504,6 +508,121 @@ public class TestLeafQueue {
assertEquals(4*GB, node_0.getUsedResource().getMemory());
}
+ @Test
+ public void testReservationExchange() throws Exception {
+
+ // Manipulate queue 'a'
+ LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
+ a.setUserLimitFactor(10);
+
+ // Users
+ final String user_0 = "user_0";
+ final String user_1 = "user_1";
+
+ // Submit applications
+ final ApplicationAttemptId appAttemptId_0 =
+ TestUtils.getMockApplicationAttemptId(0, 0);
+ SchedulerApp app_0 =
+ new SchedulerApp(appAttemptId_0, user_0, a, rmContext, null);
+ a.submitApplication(app_0, user_0, A);
+
+ final ApplicationAttemptId appAttemptId_1 =
+ TestUtils.getMockApplicationAttemptId(1, 0);
+ SchedulerApp app_1 =
+ new SchedulerApp(appAttemptId_1, user_1, a, rmContext, null);
+ a.submitApplication(app_1, user_1, A);
+
+ // Setup some nodes
+ String host_0 = "host_0";
+ SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB);
+
+ String host_1 = "host_1";
+ SchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4*GB);
+
+ final int numNodes = 2;
+ Resource clusterResource = Resources.createResource(numNodes * (4*GB));
+ when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+ when(csContext.getMaximumResourceCapability()).thenReturn(
+ Resources.createResource(4*GB));
+ when(a.getMaximumAllocation()).thenReturn(Resources.createResource(4*GB));
+ when(a.getMinimumAllocationFactor()).thenReturn(0.25f); // 1G / 4G
+
+ // Setup resource-requests
+ Priority priority = TestUtils.createMockPriority(1);
+ app_0.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 2, priority,
+ recordFactory)));
+
+ app_1.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 4*GB, 1, priority,
+ recordFactory)));
+
+ // Start testing...
+
+ // Only 1 container
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(1*GB, a.getUsedResources().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
+ // you can get one container more than user-limit
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(2*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+
+ // Now, reservation should kick in for app_1
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(6*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
+ assertEquals(2*GB, node_0.getUsedResource().getMemory());
+
+ // Now free 1 container from app_0 i.e. 1G, and re-reserve it
+ a.completedContainer(clusterResource, app_0, node_0,
+ app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(5*GB, a.getUsedResources().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
+ assertEquals(1*GB, node_0.getUsedResource().getMemory());
+ assertEquals(1, app_1.getReReservations(priority));
+
+ // Re-reserve
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(5*GB, a.getUsedResources().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
+ assertEquals(1*GB, node_0.getUsedResource().getMemory());
+ assertEquals(2, app_1.getReReservations(priority));
+
+ // Try to schedule on node_1 now, should *move* the reservation
+ a.assignContainers(clusterResource, node_1);
+ assertEquals(9*GB, a.getUsedResources().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
+ assertEquals(4*GB, node_1.getUsedResource().getMemory());
+ // Doesn't change yet... only when reservation is cancelled or a different
+ // container is reserved
+ assertEquals(2, app_1.getReReservations(priority));
+
+ // Now finish another container from app_0 and see the reservation cancelled
+ a.completedContainer(clusterResource, app_0, node_0,
+ app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL);
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(4*GB, a.getUsedResources().getMemory());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentReservation().getMemory());
+ assertEquals(0*GB, node_0.getUsedResource().getMemory());
+ }
+
+
@Test
public void testLocalityScheduling() throws Exception {
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java Thu Sep 8 01:39:07 2011
@@ -60,6 +60,8 @@ public class TestParentQueue {
Resources.createResource(GB));
when(csContext.getMaximumResourceCapability()).thenReturn(
Resources.createResource(16*GB));
+ when(csContext.getClusterResources()).
+ thenReturn(Resources.createResource(100 * 16 * GB));
}
private static final String A = "a";
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java Thu Sep 8 01:39:07 2011
@@ -116,6 +116,13 @@ public class TestUtils {
return request;
}
+ public static ApplicationId getMockApplicationId(int appId) {
+ ApplicationId applicationId = mock(ApplicationId.class);
+ when(applicationId.getClusterTimestamp()).thenReturn(0L);
+ when(applicationId.getId()).thenReturn(appId);
+ return applicationId;
+ }
+
public static ApplicationAttemptId
getMockApplicationAttemptId(int appId, int attemptId) {
ApplicationId applicationId = mock(ApplicationId.class);
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/pom.xml?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/pom.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/pom.xml Thu Sep 8 01:39:07 2011
@@ -56,7 +56,7 @@
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
- <version>1.5.2</version>
+ <version>1.5.3</version>
<exclusions>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/ivy/ivysettings.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/ivy/ivysettings.xml?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/ivy/ivysettings.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/ivy/ivysettings.xml Thu Sep 8 01:39:07 2011
@@ -39,11 +39,11 @@
<settings defaultResolver="${resolvers}"/>
<resolvers>
- <ibiblio name="maven2" root="${repo.maven.org}" pattern="${maven2.pattern.ext}" m2compatible="true"/>
+ <ibiblio name="maven2" root="${repo.maven.org}" pattern="${maven2.pattern.ext}" m2compatible="true" checkconsistency="false"/>
<ibiblio name="apache-snapshot" root="${snapshot.apache.org}" m2compatible="true"
- checkmodified="true" changingPattern=".*SNAPSHOT"/>
+ checkmodified="true" changingPattern=".*SNAPSHOT" checkconsistency="false"/>
- <filesystem name="fs" m2compatible="true" force="${force-resolve}">
+ <filesystem name="fs" m2compatible="true" checkconsistency="false" force="${force-resolve}">
<artifact pattern="${repo.dir}/${maven2.pattern.ext}"/>
<ivy pattern="${repo.dir}/[organisation]/[module]/[revision]/[module]-[revision].pom"/>
</filesystem>
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/pom.xml?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/pom.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/pom.xml Thu Sep 8 01:39:07 2011
@@ -64,7 +64,7 @@
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
- <version>1.5.2</version>
+ <version>1.5.3</version>
<exclusions>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/c++/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/c++:1159757-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/c++:1159757-1166484
/hadoop/core/branches/branch-0.19/mapred/src/c++:713112
/hadoop/core/trunk/src/c++:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:1152502-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:1152502-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib:713112
/hadoop/core/trunk/src/contrib:784664-785643
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/block_forensics/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:1152502-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:1152502-1166484
/hadoop/core/branches/branch-0.19/hdfs/src/contrib/block_forensics:713112
/hadoop/core/branches/branch-0.19/mapred/src/contrib/block_forensics:713112
/hadoop/core/trunk/src/contrib/block_forensics:784664-785643
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/build-contrib.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,2 +1,3 @@
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:1161333-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib/build-contrib.xml:713112
/hadoop/core/trunk/src/contrib/build-contrib.xml:776175-786373
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,2 +1,3 @@
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:1161333-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib/build.xml:713112
/hadoop/core/trunk/src/contrib/build.xml:776175-786373
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/capacity-scheduler/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/capacity-scheduler:1159757-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/capacity-scheduler:1159757-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib/capacity-scheduler:713112
/hadoop/core/trunk/src/contrib/capacity-scheduler:776175-786373
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/data_join/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:1159757-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:1159757-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib/data_join:713112
/hadoop/core/trunk/src/contrib/data_join:776175-786373
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/dynamic-scheduler/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/dynamic-scheduler:1159757-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/dynamic-scheduler:1159757-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib/dynamic-scheduler:713112
/hadoop/core/branches/branch-0.19/src/contrib/dynamic-scheduler:713112
/hadoop/core/trunk/src/contrib/dynamic-scheduler:784664-786373
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/eclipse-plugin/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:1159757-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:1159757-1166484
/hadoop/core/branches/branch-0.19/core/src/contrib/eclipse-plugin:713112
/hadoop/core/branches/branch-0.19/mapred/src/contrib/eclipse-plugin:713112
/hadoop/core/trunk/src/contrib/eclipse-plugin:776175-785643
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/fairscheduler/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/fairscheduler:1159757-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/fairscheduler:1159757-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib/fairscheduler:713112
/hadoop/core/trunk/src/contrib/fairscheduler:776175-786373
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/index/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:1159757-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:1159757-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib/index:713112
/hadoop/core/trunk/src/contrib/index:776175-786373
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java Thu Sep 8 01:39:07 2011
@@ -31,7 +31,7 @@ import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.SocketOutputStream;
@@ -389,7 +389,7 @@ public class RaidBlockSender implements
streamForSendChunks = baseStream;
// assure a mininum buffer size.
- maxChunksPerPacket = (Math.max(FSConstants.IO_FILE_BUFFER_SIZE,
+ maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE,
MIN_BUFFER_WITH_TRANSFERTO)
+ bytesPerChecksum - 1)/bytesPerChecksum;
@@ -397,7 +397,7 @@ public class RaidBlockSender implements
pktSize += checksumSize * maxChunksPerPacket;
} else {
maxChunksPerPacket = Math.max(1,
- (FSConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum);
+ (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum);
pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java Thu Sep 8 01:39:07 2011
@@ -46,11 +46,11 @@ import org.apache.hadoop.hdfs.Distribute
import org.apache.hadoop.hdfs.protocol.datatransfer.*;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.FSDataset;
import org.apache.hadoop.hdfs.server.datanode.RaidBlockSender;
import org.apache.commons.logging.Log;
@@ -741,17 +741,17 @@ public abstract class BlockFixer extends
int readTimeout =
getConf().getInt(BLOCKFIX_READ_TIMEOUT,
- HdfsConstants.READ_TIMEOUT);
+ HdfsServerConstants.READ_TIMEOUT);
NetUtils.connect(sock, target, readTimeout);
sock.setSoTimeout(readTimeout);
int writeTimeout = getConf().getInt(BLOCKFIX_WRITE_TIMEOUT,
- HdfsConstants.WRITE_TIMEOUT);
+ HdfsServerConstants.WRITE_TIMEOUT);
OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
DataOutputStream out =
new DataOutputStream(new BufferedOutputStream(baseStream,
- FSConstants.
+ HdfsConstants.
SMALL_BUFFER_SIZE));
boolean corruptChecksumOk = false;
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/streaming/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/streaming:1159757-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/streaming:1159757-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib/streaming:713112
/hadoop/core/trunk/src/contrib/streaming:776175-786373
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/vaidya/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:1159757-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:1159757-1166484
/hadoop/core/branches/branch-0.19/mapred/src/contrib/vaidya:713112
/hadoop/core/trunk/src/contrib/vaidya:776175-786373
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/examples/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/examples:1152502-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/examples:1152502-1166484
/hadoop/core/branches/branch-0.19/mapred/src/examples:713112
/hadoop/core/trunk/src/examples:776175-784663
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 8 01:39:07 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/java:1152502-1162221
+/hadoop/common/trunk/hadoop-mapreduce-project/src/java:1152502-1166484
/hadoop/core/branches/branch-0.19/mapred/src/java:713112
/hadoop/core/trunk/src/mapred:776175-785643
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTracker.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTracker.java Thu Sep 8 01:39:07 2011
@@ -1483,7 +1483,7 @@ public class JobTracker implements MRCon
taskScheduler = (TaskScheduler) ReflectionUtils.newInstance(schedulerClass, conf);
int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10);
- this.interTrackerServer = RPC.getServer(ClientProtocol.class,
+ this.interTrackerServer = RPC.getServer(JobTracker.class, // All protocols in JobTracker
this,
addr.getHostName(),
addr.getPort(), handlerCount,