Posted to mapreduce-commits@hadoop.apache.org by ac...@apache.org on 2011/04/01 00:23:34 UTC

svn commit: r1087462 [18/20] - in /hadoop/mapreduce/branches/MR-279: ./ mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ mr-client/...

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationMasterInfo.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationMasterInfo.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationMasterInfo.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationMasterInfo.java Thu Mar 31 22:23:22 2011
@@ -25,14 +25,16 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.ApplicationID;
-import org.apache.hadoop.yarn.ApplicationMaster;
-import org.apache.hadoop.yarn.ApplicationState;
-import org.apache.hadoop.yarn.ApplicationStatus;
-import org.apache.hadoop.yarn.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.Container;
-import org.apache.hadoop.yarn.Resource;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.events.ASMEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.events.ApplicationMasterEvents.AMLauncherEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.events.ApplicationMasterEvents.ApplicationEventType;
@@ -66,6 +68,7 @@ public class ApplicationMasterInfo imple
    */
   private final  KillTransition killTransition =  new KillTransition();
   private final StatusUpdateTransition statusUpdatetransition = new StatusUpdateTransition();
+  private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
   private final ExpireTransition expireTransition = new ExpireTransition();
   private final FailedTransition failedTransition = new FailedTransition();
   private final AllocateTransition allocateTransition = new AllocateTransition();
@@ -146,14 +149,14 @@ public class ApplicationMasterInfo imple
     this.user = user;
     this.handler = handler;
     this.submissionContext = submissionContext;
-    master = new ApplicationMaster();
-    master.applicationId = submissionContext.applicationId;
-    master.status = new ApplicationStatus();
-    master.status.applicationId = submissionContext.applicationId;
-    master.status.progress = -1.0f;
+    master = recordFactory.newRecordInstance(ApplicationMaster.class);
+    master.setApplicationId(submissionContext.getApplicationId());
+    master.setStatus(recordFactory.newRecordInstance(ApplicationStatus.class));
+    master.getStatus().setApplicationId(submissionContext.getApplicationId());
+    master.getStatus().setProgress(-1.0f);
     stateMachine = stateMachineFactory.make(this);
-    master.state = ApplicationState.PENDING;
-    master.clientToken = clientToken;
+    master.setState(ApplicationState.PENDING);
+    master.setClientToken(clientToken);
   }
 
   @Override
@@ -163,21 +166,21 @@ public class ApplicationMasterInfo imple
 
   @Override
   public Resource getResource() {
-    return submissionContext.masterCapability;
+    return submissionContext.getMasterCapability();
   }
 
   @Override
-  public synchronized ApplicationID getApplicationID() {
-    return this.master.applicationId;
+  public synchronized ApplicationId getApplicationID() {
+    return this.master.getApplicationId();
   }
 
   @Override
   public synchronized ApplicationStatus getStatus() {
-    return master.status;
+    return master.getStatus();
   }
 
   public synchronized void updateStatus(ApplicationStatus status) {
-    this.master.status = status;
+    this.master.setStatus(status);
   }
 
   @Override
@@ -187,7 +190,7 @@ public class ApplicationMasterInfo imple
 
   /* make sure the master state is in sync with statemachine state */
   public synchronized ApplicationState getState() {
-    return master.state;
+    return master.getState();
   }
 
   @Override
@@ -203,7 +206,7 @@ public class ApplicationMasterInfo imple
 
   @Override
   public synchronized long getLastSeen() {
-    return this.master.status.lastSeen;
+    return this.master.getStatus().getLastSeen();
   }
 
   @Override
@@ -213,12 +216,12 @@ public class ApplicationMasterInfo imple
   
   @Override
   public String getName() {
-    return submissionContext.applicationName.toString();
+    return submissionContext.getApplicationName();
   }
 
   @Override
   public String getQueue() {
-    return submissionContext.queue.toString();
+    return submissionContext.getQueue();
   }
   
   /* the application master completed successfully */
@@ -265,7 +268,7 @@ public class ApplicationMasterInfo imple
     public void transition(ApplicationMasterInfo masterInfo,
     ASMEvent<ApplicationEventType> event) {
       /* make sure the time stamp is updated, else the expiry thread will expire this */
-      masterInfo.master.status.lastSeen = System.currentTimeMillis();
+      masterInfo.master.getStatus().setLastSeen(System.currentTimeMillis());
     }
   }
 
@@ -320,12 +323,12 @@ public class ApplicationMasterInfo imple
     public void transition(ApplicationMasterInfo masterInfo,
     ASMEvent<ApplicationEventType> event) {
       ApplicationMaster registeredMaster = event.getAppContext().getMaster();
-      masterInfo.master.host = registeredMaster.host;
-      masterInfo.master.httpPort = registeredMaster.httpPort;
-      masterInfo.master.rpcPort = registeredMaster.rpcPort;
-      masterInfo.master.status = registeredMaster.status;
-      masterInfo.master.status.progress = 0.0f;
-      masterInfo.master.status.lastSeen = System.currentTimeMillis();
+      masterInfo.master.setHost(registeredMaster.getHost());
+      masterInfo.master.setHttpPort(registeredMaster.getHttpPort());
+      masterInfo.master.setRpcPort(registeredMaster.getRpcPort());
+      masterInfo.master.setStatus(registeredMaster.getStatus());
+      masterInfo.master.getStatus().setProgress(0.0f);
+      masterInfo.master.getStatus().setLastSeen(System.currentTimeMillis());
     }
   }
 
@@ -349,20 +352,20 @@ public class ApplicationMasterInfo imple
     @Override
     public void transition(ApplicationMasterInfo masterInfo,
     ASMEvent<ApplicationEventType> event) {
-      masterInfo.master.status = event.getAppContext().getStatus();
-      masterInfo.master.status.lastSeen = System.currentTimeMillis();
+      masterInfo.master.setStatus(event.getAppContext().getStatus());
+      masterInfo.master.getStatus().setLastSeen(System.currentTimeMillis());
     }
   }
 
   @Override
   public synchronized void handle(ASMEvent<ApplicationEventType> event) {
-    ApplicationID appID =  event.getAppContext().getApplicationID();
+    ApplicationId appID =  event.getAppContext().getApplicationID();
     LOG.info("Processing event for " + appID +  " of type " + event.getType());
     final ApplicationState oldState = getState();
     try {
       /* keep the master in sync with the state machine */
       stateMachine.doTransition(event.getType(), event);
-      master.state = stateMachine.getCurrentState();
+      master.setState(stateMachine.getCurrentState());
       LOG.info("State is " +  stateMachine.getCurrentState());
     } catch (InvalidStateTransitonException e) {
       LOG.error("Can't handle this event at current state", e);
@@ -374,4 +377,4 @@ public class ApplicationMasterInfo imple
       + getState());
     }
   }
-}
\ No newline at end of file
+}
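
The constructor change above illustrates the core pattern of this commit: the old avro-style records with public fields are replaced by org.apache.hadoop.yarn.api.records interfaces obtained from a RecordFactory and manipulated through getters and setters. A minimal sketch of that pattern, using only the types and calls visible in this diff (the AMRecordSketch class and newMaster method are illustrative names, not part of the commit):

    import org.apache.hadoop.yarn.api.records.ApplicationMaster;
    import org.apache.hadoop.yarn.api.records.ApplicationState;
    import org.apache.hadoop.yarn.api.records.ApplicationStatus;
    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.factories.RecordFactory;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

    class AMRecordSketch {
      private final RecordFactory recordFactory =
          RecordFactoryProvider.getRecordFactory(null);

      // Builds an ApplicationMaster record the way the ApplicationMasterInfo
      // constructor does after this change: setters instead of field writes.
      ApplicationMaster newMaster(ApplicationSubmissionContext submissionContext,
                                  String clientToken) {
        ApplicationMaster master =
            recordFactory.newRecordInstance(ApplicationMaster.class);
        master.setApplicationId(submissionContext.getApplicationId());
        master.setStatus(recordFactory.newRecordInstance(ApplicationStatus.class));
        master.getStatus().setApplicationId(submissionContext.getApplicationId());
        master.getStatus().setProgress(-1.0f);  // no progress reported yet
        master.setState(ApplicationState.PENDING);
        master.setClientToken(clientToken);
        return master;
      }
    }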

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationsManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationsManager.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationsManager.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationsManager.java Thu Mar 31 22:23:22 2011
@@ -24,9 +24,9 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.yarn.Application;
-import org.apache.hadoop.yarn.ApplicationID;
-import org.apache.hadoop.yarn.ApplicationMaster;
-import org.apache.hadoop.yarn.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 
 /**
  * This interface defines the interface for ApplicationsManager.
@@ -34,11 +34,11 @@ import org.apache.hadoop.yarn.Applicatio
 @Private
 @Evolving
 public interface ApplicationsManager {
-   ApplicationID getNewApplicationID();
-   ApplicationMaster getApplicationMaster(ApplicationID applicationId);
-   Application getApplication(ApplicationID applicationID);
+   ApplicationId getNewApplicationID();
+   ApplicationMaster getApplicationMaster(ApplicationId applicationId);
+   Application getApplication(ApplicationId applicationID);
    void submitApplication(ApplicationSubmissionContext context) throws IOException;
-   void finishApplication(ApplicationID applicationId) throws IOException;
+   void finishApplication(ApplicationId applicationId) throws IOException;
    List<AppContext> getAllApplications();
    List<Application> getApplications();
 }
\ No newline at end of file

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationsManagerImpl.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationsManagerImpl.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationsManagerImpl.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/ApplicationsManagerImpl.java Thu Mar 31 22:23:22 2011
@@ -29,12 +29,11 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.Application;
-import org.apache.hadoop.yarn.ApplicationID;
-import org.apache.hadoop.yarn.ApplicationMaster;
-import org.apache.hadoop.yarn.ApplicationState;
-import org.apache.hadoop.yarn.ApplicationStatus;
-import org.apache.hadoop.yarn.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationState;
+import org.apache.hadoop.yarn.api.records.ApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
 import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
@@ -134,18 +133,18 @@ public class ApplicationsManagerImpl ext
   }
 
   @Override
-  public synchronized ApplicationMaster getApplicationMaster(ApplicationID applicationId) {
+  public synchronized ApplicationMaster getApplicationMaster(ApplicationId applicationId) {
     ApplicationMaster appMaster =
       amTracker.get(applicationId).getMaster();
     return appMaster;
   }
   
   @Override
-  public ApplicationID getNewApplicationID() {
-    ApplicationID applicationId =
+  public ApplicationId getNewApplicationID() {
+    ApplicationId applicationId =
       org.apache.hadoop.yarn.server.resourcemanager.resource.ApplicationID.create(
           ResourceManager.clusterTimeStamp, applicationCounter.incrementAndGet());
-    LOG.info("Allocated new applicationId: " + applicationId.id);
+    LOG.info("Allocated new applicationId: " + applicationId.getId());
     return applicationId;
   }
 
@@ -153,7 +152,7 @@ public class ApplicationsManagerImpl ext
   public synchronized void submitApplication(ApplicationSubmissionContext context)
   throws IOException {
     String user;
-    ApplicationID applicationId = context.applicationId;
+    ApplicationId applicationId = context.getApplicationId();
     String clientTokenStr = null;
     try {
       user = UserGroupInformation.getCurrentUser().getShortUserName();
@@ -170,27 +169,24 @@ public class ApplicationsManagerImpl ext
       throw e;
     } 
 
-    context.queue =
-        (context.queue == null ? "default" : context.queue.toString());
-    context.applicationName =
-        (context.applicationName == null ? "N/A" : context.applicationName);
+    context.setQueue(context.getQueue() == null ? "default" : context.getQueue());
+    context.setApplicationName(context.getApplicationName() == null ? "N/A" : context.getApplicationName());
 
     amTracker.addMaster(user, context, clientTokenStr);
     // TODO this should happen via dispatcher. should move it out to scheduler
     // negotiator.
-    /* schedule */    
-    LOG.info("Application with id " + applicationId.id + " submitted by user " + 
+    LOG.info("Application with id " + applicationId.getId() + " submitted by user " + 
         user + " with " + context);
   }
 
   @Override
   public synchronized void finishApplicationMaster(ApplicationMaster applicationMaster)
   throws IOException {
-    amTracker.finish(applicationMaster.applicationId);
+    amTracker.finish(applicationMaster.getApplicationId());
   }
 
   @Override
-  public synchronized void finishApplication(ApplicationID applicationId) 
+  public synchronized void finishApplication(ApplicationId applicationId) 
   throws IOException {
     /* remove the application from the scheduler for now. Later the scheduler should
      * be an event handler for adding and cleaning up applications */
@@ -214,7 +210,7 @@ public class ApplicationsManagerImpl ext
     return amTracker.getAllApplications();
   }
 
-  public synchronized ApplicationMasterInfo getApplicationMasterInfo(ApplicationID
+  public synchronized ApplicationMasterInfo getApplicationMasterInfo(ApplicationId
       applicationId) {
     return amTracker.get(applicationId);
   }
@@ -232,16 +228,16 @@ public class ApplicationsManagerImpl ext
       this.name = name;
     }
 
-    @Override public ApplicationID id() { return am.applicationId; }
-    @Override public CharSequence user() { return user; }
-    @Override public CharSequence name() { return name; }
-    @Override public CharSequence queue() { return queue; }
-    @Override public ApplicationStatus status() { return am.status; }
-    @Override public CharSequence master() { return am.host; }
-    @Override public int httpPort() { return am.httpPort; }
-    @Override public ApplicationState state() { return am.state; }
+    @Override public ApplicationId id() { return am.getApplicationId(); }
+    @Override public String user() { return user; }
+    @Override public String name() { return name; }
+    @Override public String queue() { return queue; }
+    @Override public ApplicationStatus status() { return am.getStatus(); }
+    @Override public String master() { return am.getHost(); }
+    @Override public int httpPort() { return am.getHttpPort(); }
+    @Override public ApplicationState state() { return am.getState(); }
     @Override public boolean isFinished() {
-      switch (am.state) {
+      switch (am.getState()) {
         case COMPLETED:
         case FAILED:
         case KILLED: return true;
@@ -261,7 +257,7 @@ public class ApplicationsManagerImpl ext
   }
 
   @Override
-  public Application getApplication(ApplicationID appID) {
+  public Application getApplication(ApplicationId appID) {
     ApplicationMasterInfo master = amTracker.get(appID);
     return (master == null) ? null : 
       new AppImpl(master.getMaster(), 

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/SchedulerNegotiator.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/SchedulerNegotiator.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/SchedulerNegotiator.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/SchedulerNegotiator.java Thu Mar 31 22:23:22 2011
@@ -29,15 +29,17 @@ import java.util.List;
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.yarn.ApplicationID;
-import org.apache.hadoop.yarn.ApplicationMaster;
-import org.apache.hadoop.yarn.ApplicationStatus;
-import org.apache.hadoop.yarn.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.Container;
-import org.apache.hadoop.yarn.Priority;
-import org.apache.hadoop.yarn.Resource;
-import org.apache.hadoop.yarn.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.ASMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.events.ASMEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.events.ApplicationMasterEvents.ApplicationEventType;
@@ -52,10 +54,11 @@ import org.apache.hadoop.yarn.service.Ab
 class SchedulerNegotiator extends AbstractService implements EventHandler<ASMEvent<SNEventType>> {
 
   private static final Log LOG = LogFactory.getLog(SchedulerNegotiator.class);
+  private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
 
-  final static Priority AM_CONTAINER_PRIORITY = new Priority();
+  final static Priority AM_CONTAINER_PRIORITY = recordFactory.newRecordInstance(Priority.class);
   static {
-    AM_CONTAINER_PRIORITY.priority = 0;
+    AM_CONTAINER_PRIORITY.setPriority(0);
   }
   static final List<ResourceRequest> EMPTY_ASK =
     new ArrayList<ResourceRequest>();
@@ -129,7 +132,7 @@ class SchedulerNegotiator extends Abstra
               // in the first call to allocate
               LOG.debug("About to request resources for AM of " + 
                   masterInfo.getMaster() + " required " + request);
-              scheduler.allocate(masterInfo.getMaster().applicationId, 
+              scheduler.allocate(masterInfo.getMaster().getApplicationId(), 
                   Collections.singletonList(request), 
                   EMPTY_RELEASE);
             }
@@ -141,7 +144,7 @@ class SchedulerNegotiator extends Abstra
           for (Iterator<AppContext> it=submittedApplications.iterator(); 
           it.hasNext();) {
             AppContext masterInfo = it.next();
-            ApplicationID appId = masterInfo.getMaster().applicationId;
+            ApplicationId appId = masterInfo.getMaster().getApplicationId();
             containers = scheduler.allocate(appId, 
                 EMPTY_ASK, EMPTY_RELEASE);
             if (!containers.isEmpty()) {
@@ -218,16 +221,16 @@ class SchedulerNegotiator extends Abstra
     //TODO we should release the container but looks like we just 
     // wait for update from NodeManager
     Container[] containers = new Container[] {masterInfo.getMasterContainer()};
-    scheduler.allocate(masterInfo.getMaster().applicationId, 
+    scheduler.allocate(masterInfo.getMaster().getApplicationId(), 
         EMPTY_ASK, Arrays.asList(containers));
   }
   
   private static class SNAppContext implements AppContext {
-    private final ApplicationID appID;
+    private final ApplicationId appID;
     private final Container container;
     private final UnsupportedOperationException notImplementedException;
     
-    public SNAppContext(ApplicationID appID, Container container) {
+    public SNAppContext(ApplicationId appID, Container container) {
       this.appID = appID;
       this.container = container;
       this.notImplementedException = new UnsupportedOperationException("Not Implemented");
@@ -244,7 +247,7 @@ class SchedulerNegotiator extends Abstra
     }
 
     @Override
-    public ApplicationID getApplicationID() {
+    public ApplicationId getApplicationID() {
       return appID;
     }
 

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ApplicationID.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ApplicationID.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ApplicationID.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ApplicationID.java Thu Mar 31 22:23:22 2011
@@ -18,32 +18,33 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.resource;
 
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
 public class ApplicationID {
   
-  public static org.apache.hadoop.yarn.ApplicationID create(long clusterTimeStamp,
+  public static org.apache.hadoop.yarn.api.records.ApplicationId create(long clusterTimeStamp,
       int id) {
-    org.apache.hadoop.yarn.ApplicationID applicationId =
-        new org.apache.hadoop.yarn.ApplicationID();
-    applicationId.id = id;
-    applicationId.clusterTimeStamp = clusterTimeStamp;
+    org.apache.hadoop.yarn.api.records.ApplicationId applicationId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(org.apache.hadoop.yarn.api.records.ApplicationId.class);
+    applicationId.setId(id);
+    applicationId.setClusterTimestamp(clusterTimeStamp);
     return applicationId;
   }
   
-  public static org.apache.hadoop.yarn.ApplicationID convert(long clustertimestamp,
+  public static org.apache.hadoop.yarn.api.records.ApplicationId convert(long clustertimestamp,
       CharSequence id) {
-    org.apache.hadoop.yarn.ApplicationID applicationId =
-        new org.apache.hadoop.yarn.ApplicationID();
-    applicationId.id = Integer.valueOf(id.toString());
-    applicationId.clusterTimeStamp = clustertimestamp;
+    org.apache.hadoop.yarn.api.records.ApplicationId applicationId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(org.apache.hadoop.yarn.api.records.ApplicationId.class);
+    applicationId.setId(Integer.valueOf(id.toString()));
+    applicationId.setClusterTimestamp(clustertimestamp);
     return applicationId;
   }
   
   public static class Comparator 
-  implements java.util.Comparator<org.apache.hadoop.yarn.ApplicationID> {
+  implements java.util.Comparator<org.apache.hadoop.yarn.api.records.ApplicationId> {
 
+    
     @Override
-    public int compare(org.apache.hadoop.yarn.ApplicationID a1,
-        org.apache.hadoop.yarn.ApplicationID a2) {
+    public int compare(org.apache.hadoop.yarn.api.records.ApplicationId a1,
+        org.apache.hadoop.yarn.api.records.ApplicationId a2) {
       return a1.compareTo(a2);
     }
     

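The resourcemanager-side ApplicationID helper now hands back the new api.records.ApplicationId built through the record factory, so callers such as ApplicationsManagerImpl.getNewApplicationID() read the id with getId() rather than a public field. A short usage sketch based only on the create(...) signature above (the ApplicationIdSketch class is illustrative, and the timestamp and counter values are stand-ins for ResourceManager.clusterTimeStamp and the application counter):

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.resourcemanager.resource.ApplicationID;

    class ApplicationIdSketch {
      static ApplicationId allocate() {
        long clusterTimeStamp = System.currentTimeMillis();  // stand-in value
        int nextId = 1;                                       // stand-in for the counter
        ApplicationId applicationId = ApplicationID.create(clusterTimeStamp, nextId);
        System.out.println("Allocated new applicationId: " + applicationId.getId());
        return applicationId;
      }
    }
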
Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Container.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Container.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Container.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Container.java Thu Mar 31 22:23:22 2011
@@ -18,57 +18,60 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.resource;
 
-import org.apache.hadoop.yarn.ApplicationID;
-import org.apache.hadoop.yarn.ContainerID;
-import org.apache.hadoop.yarn.ContainerState;
-import org.apache.hadoop.yarn.Resource;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 
 public class Container {
   
+  private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
 
-  private static ContainerID getNewContainerId(ApplicationID applicationId, 
+  private static ContainerId getNewContainerId(ApplicationId applicationId, 
       int containerId) {
-    ContainerID id = new ContainerID();
-    id.appID = applicationId;
-    id.id = containerId;
+    ContainerId id = recordFactory.newRecordInstance(ContainerId.class);
+    id.setAppId(applicationId);
+    id.setId(containerId);
     return id;
   }
   
-  public static org.apache.hadoop.yarn.Container create(
-      org.apache.hadoop.yarn.Container c) {
-    org.apache.hadoop.yarn.Container container = new org.apache.hadoop.yarn.Container();
-    container.id = c.id;
-    container.hostName = c.hostName;
-    container.resource = c.resource;
-    container.state = c.state;
+  public static org.apache.hadoop.yarn.api.records.Container create(
+      org.apache.hadoop.yarn.api.records.Container c) {
+    org.apache.hadoop.yarn.api.records.Container container = recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.Container.class);
+    container.setId(c.getId());
+    container.setHostName(c.getHostName());
+    container.setResource(c.getResource());
+    container.setState(c.getState());
     return container;
   }
 
-  public static org.apache.hadoop.yarn.Container create(
-      ApplicationID applicationId, int containerId, 
+  public static org.apache.hadoop.yarn.api.records.Container create(
+      ApplicationId applicationId, int containerId, 
       String hostName, Resource resource) {
-    ContainerID containerID = getNewContainerId(applicationId, containerId);
+    ContainerId containerID = getNewContainerId(applicationId, containerId);
     return create(containerID, hostName, resource);
   }
 
-  public static org.apache.hadoop.yarn.Container create(
-      ContainerID containerId,
+  public static org.apache.hadoop.yarn.api.records.Container create(
+      ContainerId containerId,
       String hostName, Resource resource) {
-    org.apache.hadoop.yarn.Container container = new org.apache.hadoop.yarn.Container();
-    container.id = containerId;
-    container.hostName = hostName;
-    container.resource = resource;
-    container.state = ContainerState.INTIALIZING;
+    org.apache.hadoop.yarn.api.records.Container container = recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.Container.class);
+    container.setId(containerId);
+    container.setHostName(hostName);
+    container.setResource(resource);
+    container.setState(ContainerState.INITIALIZING);
     return container;
   }
   
   public static class Comparator 
-  implements java.util.Comparator<org.apache.hadoop.yarn.Container> {
+  implements java.util.Comparator<org.apache.hadoop.yarn.api.records.Container> {
 
     @Override
-    public int compare(org.apache.hadoop.yarn.Container c1,
-        org.apache.hadoop.yarn.Container c2) {
-      return c1.id.compareTo(c2.id);
+    public int compare(org.apache.hadoop.yarn.api.records.Container c1,
+        org.apache.hadoop.yarn.api.records.Container c2) {
+      return c1.compareTo(c2);
     }
   }
 }
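
The Container helper now assembles an api.records.Container through the record factory and starts it in ContainerState.INITIALIZING. A brief usage sketch built only on the create(...) overloads above (the ContainerSketch class, host name, and memory size are made-up example values):

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.resourcemanager.resource.Container;
    import org.apache.hadoop.yarn.server.resourcemanager.resource.Resource;

    class ContainerSketch {
      // Builds the first container record for an application; the returned
      // record's state is ContainerState.INITIALIZING.
      static org.apache.hadoop.yarn.api.records.Container firstContainer(ApplicationId appId) {
        return Container.create(appId, 1, "host1.example.com",
            Resource.createResource(1024));
      }
    }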

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Priority.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Priority.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Priority.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Priority.java Thu Mar 31 22:23:22 2011
@@ -18,19 +18,21 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.resource;
 
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+
 public class Priority {
   
-  public static org.apache.hadoop.yarn.Priority create(int prio) {
-    org.apache.hadoop.yarn.Priority priority = new org.apache.hadoop.yarn.Priority();
-    priority.priority = prio;
+  public static org.apache.hadoop.yarn.api.records.Priority create(int prio) {
+    org.apache.hadoop.yarn.api.records.Priority priority = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(org.apache.hadoop.yarn.api.records.Priority.class);
+    priority.setPriority(prio);
     return priority;
   }
   
   public static class Comparator 
-  implements java.util.Comparator<org.apache.hadoop.yarn.Priority> {
+  implements java.util.Comparator<org.apache.hadoop.yarn.api.records.Priority> {
     @Override
-    public int compare(org.apache.hadoop.yarn.Priority o1, org.apache.hadoop.yarn.Priority o2) {
-      return o1.priority - o2.priority;
+    public int compare(org.apache.hadoop.yarn.api.records.Priority o1, org.apache.hadoop.yarn.api.records.Priority o2) {
+      return o1.getPriority() - o2.getPriority();
     }
   }
   

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resource.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resource.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resource.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Resource.java Thu Mar 31 22:23:22 2011
@@ -20,51 +20,54 @@ package org.apache.hadoop.yarn.server.re
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 
 @Private
 @Evolving
 public class Resource {
-  
-  public static final org.apache.hadoop.yarn.Resource NONE = createResource(0);
+  public static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);  
+  public static final org.apache.hadoop.yarn.api.records.Resource NONE = createResource(0);
+
 
-  public static org.apache.hadoop.yarn.Resource createResource(int memory) {
-    org.apache.hadoop.yarn.Resource resource = new org.apache.hadoop.yarn.Resource();
-    resource.memory = memory;
+  public static org.apache.hadoop.yarn.api.records.Resource createResource(int memory) {
+    org.apache.hadoop.yarn.api.records.Resource resource = recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.Resource.class);
+    resource.setMemory(memory);
     return resource;
   }
   
-  public static void addResource(org.apache.hadoop.yarn.Resource lhs, 
-      org.apache.hadoop.yarn.Resource rhs) {
-    lhs.memory += rhs.memory;
+  public static void addResource(org.apache.hadoop.yarn.api.records.Resource lhs, 
+      org.apache.hadoop.yarn.api.records.Resource rhs) {
+    lhs.setMemory(lhs.getMemory() + rhs.getMemory());
   }
   
-  public static void subtractResource(org.apache.hadoop.yarn.Resource lhs, 
-      org.apache.hadoop.yarn.Resource rhs) {
-    lhs.memory -= rhs.memory;
+  public static void subtractResource(org.apache.hadoop.yarn.api.records.Resource lhs, 
+      org.apache.hadoop.yarn.api.records.Resource rhs) {
+    lhs.setMemory(lhs.getMemory() - rhs.getMemory());
   }
   
-  public static boolean equals(org.apache.hadoop.yarn.Resource lhs,
-      org.apache.hadoop.yarn.Resource rhs) {
-    return lhs.memory == rhs.memory;
+  public static boolean equals(org.apache.hadoop.yarn.api.records.Resource lhs,
+      org.apache.hadoop.yarn.api.records.Resource rhs) {
+    return lhs.getMemory() == rhs.getMemory();
   }
 
-  public static boolean lessThan(org.apache.hadoop.yarn.Resource lhs,
-      org.apache.hadoop.yarn.Resource rhs) {
-    return lhs.memory < rhs.memory;
+  public static boolean lessThan(org.apache.hadoop.yarn.api.records.Resource lhs,
+      org.apache.hadoop.yarn.api.records.Resource rhs) {
+    return lhs.getMemory() < rhs.getMemory();
   }
 
-  public static boolean lessThanOrEqual(org.apache.hadoop.yarn.Resource lhs,
-      org.apache.hadoop.yarn.Resource rhs) {
-    return lhs.memory <= rhs.memory;
+  public static boolean lessThanOrEqual(org.apache.hadoop.yarn.api.records.Resource lhs,
+      org.apache.hadoop.yarn.api.records.Resource rhs) {
+    return lhs.getMemory() <= rhs.getMemory();
   }
 
-  public static boolean greaterThan(org.apache.hadoop.yarn.Resource lhs,
-      org.apache.hadoop.yarn.Resource rhs) {
-    return lhs.memory > rhs.memory;
+  public static boolean greaterThan(org.apache.hadoop.yarn.api.records.Resource lhs,
+      org.apache.hadoop.yarn.api.records.Resource rhs) {
+    return lhs.getMemory() > rhs.getMemory();
   }
 
-  public static boolean greaterThanOrEqual(org.apache.hadoop.yarn.Resource lhs,
-      org.apache.hadoop.yarn.Resource rhs) {
-    return lhs.memory >= rhs.memory;
+  public static boolean greaterThanOrEqual(org.apache.hadoop.yarn.api.records.Resource lhs,
+      org.apache.hadoop.yarn.api.records.Resource rhs) {
+    return lhs.getMemory() >= rhs.getMemory();
   }
 }
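
All of these helpers now work purely through getMemory()/setMemory() on the api.records.Resource interface. A short sketch of arithmetic with them, using only the methods defined above (the ResourceMathSketch class and memory figures are arbitrary examples):

    import org.apache.hadoop.yarn.server.resourcemanager.resource.Resource;

    class ResourceMathSketch {
      static void reserve() {
        org.apache.hadoop.yarn.api.records.Resource available = Resource.createResource(8192);
        org.apache.hadoop.yarn.api.records.Resource ask = Resource.createResource(1024);
        if (Resource.greaterThanOrEqual(available, ask)) {
          Resource.subtractResource(available, ask);  // available.getMemory() is now 7168
        }
      }
    }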

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceRequest.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceRequest.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceRequest.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceRequest.java Thu Mar 31 22:23:22 2011
@@ -18,48 +18,50 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.resource;
 
-import org.apache.hadoop.yarn.Priority;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 
 public class ResourceRequest {
   
-  public static org.apache.hadoop.yarn.ResourceRequest create(
-      Priority priority, CharSequence hostName, 
-      org.apache.hadoop.yarn.Resource capability, int numContainers) {
-    org.apache.hadoop.yarn.ResourceRequest request = 
-      new org.apache.hadoop.yarn.ResourceRequest();
-    request.priority = priority;
-    request.hostName = hostName;
-    request.capability = capability;
-    request.numContainers = numContainers;
+  private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+  
+  public static org.apache.hadoop.yarn.api.records.ResourceRequest create(
+      Priority priority, String hostName, 
+      org.apache.hadoop.yarn.api.records.Resource capability, int numContainers) {
+    org.apache.hadoop.yarn.api.records.ResourceRequest request = recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.ResourceRequest.class);
+    request.setPriority(priority);
+    request.setHostName(hostName);
+    request.setCapability(capability);
+    request.setNumContainers(numContainers);
     return request;
   }
   
-  public static org.apache.hadoop.yarn.ResourceRequest create(
-      org.apache.hadoop.yarn.ResourceRequest r) {
-    org.apache.hadoop.yarn.ResourceRequest request = 
-      new org.apache.hadoop.yarn.ResourceRequest();
-    request.priority = r.priority;
-    request.hostName = r.hostName;
-    request.capability = r.capability;
-    request.numContainers = r.numContainers;
+  public static org.apache.hadoop.yarn.api.records.ResourceRequest create(
+      org.apache.hadoop.yarn.api.records.ResourceRequest r) {
+    org.apache.hadoop.yarn.api.records.ResourceRequest request = recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.ResourceRequest.class);
+    request.setPriority(r.getPriority());
+    request.setHostName(r.getHostName());
+    request.setCapability(r.getCapability());
+    request.setNumContainers(r.getNumContainers());
     return request;
   }
   
   public static class Comparator 
-  implements java.util.Comparator<org.apache.hadoop.yarn.ResourceRequest> {
+  implements java.util.Comparator<org.apache.hadoop.yarn.api.records.ResourceRequest> {
     @Override
-    public int compare(org.apache.hadoop.yarn.ResourceRequest r1,
-        org.apache.hadoop.yarn.ResourceRequest r2) {
+    public int compare(org.apache.hadoop.yarn.api.records.ResourceRequest r1,
+        org.apache.hadoop.yarn.api.records.ResourceRequest r2) {
       
       // Compare priority, host and capability
-      int ret = r1.priority.compareTo(r2.priority);
+      int ret = r1.getPriority().compareTo(r2.getPriority());
       if (ret == 0) {
-        String h1 = r1.hostName.toString();
-        String h2 = r2.hostName.toString();
+        String h1 = r1.getHostName();
+        String h2 = r2.getHostName();
         ret = h1.compareTo(h2);
       }
       if (ret == 0) {
-        ret = r1.capability.compareTo(r2.capability);
+        ret = r1.getCapability().compareTo(r2.getCapability());
       }
       return ret;
     }

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeInfo.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeInfo.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeInfo.java Thu Mar 31 22:23:22 2011
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker;
 
 import org.apache.hadoop.net.Node;
-import org.apache.hadoop.yarn.NodeID;
+import org.apache.hadoop.yarn.server.api.records.NodeId;
 
 /**
  * Node managers information on available resources 
@@ -31,7 +31,7 @@ public interface NodeInfo {
   * the node id of this node.
    * @return the node id of this node.
    */
-  public NodeID getNodeID();
+  public NodeId getNodeID();
   /**
    * the hostname for this node.
    * @return the hostname for this node.
@@ -41,7 +41,7 @@ public interface NodeInfo {
    * the total available resource.
    * @return the total available resource.
    */
-  public org.apache.hadoop.yarn.Resource getTotalCapability();
+  public org.apache.hadoop.yarn.api.records.Resource getTotalCapability();
   /**
    * The rack name for this node manager.
    * @return the rack name.
@@ -56,12 +56,12 @@ public interface NodeInfo {
    * the available resource for this node.
    * @return the available resource this node.
    */
-  public org.apache.hadoop.yarn.Resource getAvailableResource();
+  public org.apache.hadoop.yarn.api.records.Resource getAvailableResource();
   /**
    * used resource on this node.
    * @return the used resource on this node.
    */
-  public org.apache.hadoop.yarn.Resource getUsedResource();
+  public org.apache.hadoop.yarn.api.records.Resource getUsedResource();
   /**
    * The current number of containers for this node
    * @return the number of containers

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeInfoTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeInfoTracker.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeInfoTracker.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeInfoTracker.java Thu Mar 31 22:23:22 2011
@@ -18,8 +18,9 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker;
 
-import org.apache.hadoop.yarn.HeartbeatResponse;
-import org.apache.hadoop.yarn.NodeStatus;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
 
 /**
  * Track the node info and heart beat responses for this node.
@@ -28,14 +29,15 @@ import org.apache.hadoop.yarn.NodeStatus
  */
 class NodeInfoTracker {
   private final NodeInfo node;
+  private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
   HeartbeatResponse lastHeartBeatResponse;
-  private NodeStatus nodeStatus = new NodeStatus();
+  private org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus = recordFactory.newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
 
   public NodeInfoTracker(NodeInfo node, HeartbeatResponse lastHeartBeatResponse) {
     this.node = node;
     this.lastHeartBeatResponse = lastHeartBeatResponse;
-    this.nodeStatus.nodeId = node.getNodeID();
-    this.nodeStatus.lastSeen = System.currentTimeMillis();
+    this.nodeStatus.setNodeId(node.getNodeID());
+    this.nodeStatus.setLastSeen(System.currentTimeMillis());
   }
 
   public synchronized NodeInfo getNodeInfo() {
@@ -50,11 +52,11 @@ class NodeInfoTracker {
     this.lastHeartBeatResponse = heartBeatResponse;
   }
 
-  public synchronized void updateNodeStatus(NodeStatus nodeStatus) {
+  public synchronized void updateNodeStatus(org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus) {
     this.nodeStatus = nodeStatus;
   }
 
-  public synchronized NodeStatus getNodeStatus() {
+  public synchronized org.apache.hadoop.yarn.server.api.records.NodeStatus getNodeStatus() {
     return this.nodeStatus;
   }
 }
\ No newline at end of file

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeStatus.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeStatus.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeStatus.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/NodeStatus.java Thu Mar 31 22:23:22 2011
@@ -21,16 +21,17 @@ package org.apache.hadoop.yarn.server.re
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.yarn.Container;
-import org.apache.hadoop.yarn.NodeID;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.records.NodeId;
 
 
 public class NodeStatus {
-  public static org.apache.hadoop.yarn.NodeStatus createNodeStatus(
-      NodeID nodeId, Map<CharSequence, List<Container>> containers) {
-    org.apache.hadoop.yarn.NodeStatus nodeStatus = new org.apache.hadoop.yarn.NodeStatus();
-    nodeStatus.nodeId = nodeId;
-    nodeStatus.containers = containers;
+  public static org.apache.hadoop.yarn.server.api.records.NodeStatus createNodeStatus(
+      NodeId nodeId, Map<String, List<Container>> containers) {
+    org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
+    nodeStatus.setNodeId(nodeId);
+    nodeStatus.addAllContainers(containers);
     return nodeStatus;
   }
 }
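
The NodeStatus helper likewise builds the server-side record via the record factory and copies the container map in with addAllContainers(...), with the map now keyed by String rather than CharSequence. A usage sketch based only on the createNodeStatus signature above (the NodeStatusSketch class is illustrative, and an empty container map is used purely for illustration):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.server.api.records.NodeId;
    import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.NodeStatus;

    class NodeStatusSketch {
      static org.apache.hadoop.yarn.server.api.records.NodeStatus emptyStatus(NodeId nodeId) {
        Map<String, List<Container>> containers = new HashMap<String, List<Container>>();
        return NodeStatus.createNodeStatus(nodeId, containers);
      }
    }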

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/RMResourceTrackerImpl.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/RMResourceTrackerImpl.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/RMResourceTrackerImpl.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/RMResourceTrackerImpl.java Thu Mar 31 22:23:22 2011
@@ -30,7 +30,6 @@ import java.util.concurrent.atomic.Atomi
 
 import javax.crypto.SecretKey;
 
-import org.apache.avro.ipc.AvroRemoteException;
 import org.apache.avro.ipc.Server;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -42,17 +41,23 @@ import org.apache.hadoop.net.NetworkTopo
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.SecurityInfo;
-import org.apache.hadoop.yarn.HeartbeatResponse;
-import org.apache.hadoop.yarn.NodeID;
-import org.apache.hadoop.yarn.NodeStatus;
-import org.apache.hadoop.yarn.RegistrationResponse;
-import org.apache.hadoop.yarn.Resource;
-import org.apache.hadoop.yarn.ResourceTracker;
-import org.apache.hadoop.yarn.YarnClusterMetrics;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.server.RMNMSecurityInfoClass;
 import org.apache.hadoop.yarn.server.YarnServerConfig;
+import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeId;
+import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceListener;
 import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
@@ -70,21 +75,24 @@ ResourceTracker, ResourceContext {
    * so no use garbage collecting. Though admin can break the RM by bouncing 
    * nodemanagers on different ports again and again.
    */
-  private Map<String, NodeID> nodes = new ConcurrentHashMap<String, NodeID>();
-  private final Map<NodeID, NodeInfoTracker> nodeManagers = 
-    new ConcurrentHashMap<NodeID, NodeInfoTracker>();
+  private Map<String, NodeId> nodes = new ConcurrentHashMap<String, NodeId>();
+  private final Map<NodeId, NodeInfoTracker> nodeManagers = 
+    new ConcurrentHashMap<NodeId, NodeInfoTracker>();
   private final HeartBeatThread heartbeatThread;
-  private final TreeSet<NodeStatus> nmExpiryQueue =
-      new TreeSet<NodeStatus>(
-          new Comparator<NodeStatus>() {
-            public int compare(NodeStatus p1, NodeStatus p2) {
-              if (p1.lastSeen < p2.lastSeen) {
+  
+  private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+  
+  private final TreeSet<org.apache.hadoop.yarn.server.api.records.NodeStatus> nmExpiryQueue =
+      new TreeSet<org.apache.hadoop.yarn.server.api.records.NodeStatus>(
+          new Comparator<org.apache.hadoop.yarn.server.api.records.NodeStatus>() {
+            public int compare(org.apache.hadoop.yarn.server.api.records.NodeStatus p1, org.apache.hadoop.yarn.server.api.records.NodeStatus p2) {
+              if (p1.getLastSeen() < p2.getLastSeen()) {
                 return -1;
-              } else if (p1.lastSeen > p2.lastSeen) {
+              } else if (p1.getLastSeen() > p2.getLastSeen()) {
                 return 1;
               } else {
-                return (p1.nodeId.id -
-                    p2.nodeId.id);
+                return (p1.getNodeId().getId() -
+                    p2.getNodeId().getId());
               }
             }
           }
@@ -95,13 +103,13 @@ ResourceTracker, ResourceContext {
   private Server server;
   private final ContainerTokenSecretManager containerTokenSecretManager;
   private final AtomicInteger nodeCounter = new AtomicInteger(0);
-  private static final HeartbeatResponse reboot = new HeartbeatResponse();
+  private static final HeartbeatResponse reboot = recordFactory.newRecordInstance(HeartbeatResponse.class);
   private long nmExpiryInterval;
 
   public RMResourceTrackerImpl(ContainerTokenSecretManager containerTokenSecretManager,
       ResourceListener listener) {
     super(RMResourceTrackerImpl.class.getName());
-    reboot.reboot = true;
+    reboot.setReboot(true);
     this.containerTokenSecretManager = containerTokenSecretManager;
     this.heartbeatThread = new HeartBeatThread();
     this.resourceListener = listener;
@@ -146,9 +154,11 @@ ResourceTracker, ResourceContext {
   }
 
   @Override
-  public RegistrationResponse registerNodeManager(CharSequence node,
-      Resource capability) throws AvroRemoteException {
-    NodeID nodeId = getNodeId(node);
+  public RegisterNodeManagerResponse registerNodeManager(RegisterNodeManagerRequest request) throws YarnRemoteException {
+    String node = request.getNode();
+    Resource capability = request.getResource();
+  
+    NodeId nodeId = getNodeId(node);
     NodeInfoTracker nTracker = null;
     
     synchronized(nodeManagers) {
@@ -157,82 +167,88 @@ ResourceTracker, ResourceContext {
         NodeInfo nodeManager = resourceListener.addNode(nodeId, node.toString(),
             resolve(node.toString()),
             capability);
-        HeartbeatResponse response = new HeartbeatResponse();
-        response.responseId = 0;
+        HeartbeatResponse response = recordFactory.newRecordInstance(HeartbeatResponse.class);
+        response.setResponseId(0);
         nTracker = new NodeInfoTracker(nodeManager, response);
         nodeManagers.put(nodeId, nTracker);
       } else {
         nTracker = nodeManagers.get(nodeId);
-        NodeStatus status = nTracker.getNodeStatus();
-        status.lastSeen = System.currentTimeMillis();
+        org.apache.hadoop.yarn.server.api.records.NodeStatus status = nTracker.getNodeStatus();
+        status.setLastSeen(System.currentTimeMillis());
         nTracker.updateNodeStatus(status);
       }
     }
     addForTracking(nTracker.getNodeStatus());
     LOG.info("NodeManager from node " + node + " registered with capability: " + 
-        capability.memory + ", assigned nodeId " + nodeId.id);
+        capability.getMemory() + ", assigned nodeId " + nodeId.getId());
 
-    RegistrationResponse regResponse = new RegistrationResponse();
-    regResponse.nodeID = nodeId;
+    RegistrationResponse regResponse = recordFactory.newRecordInstance(RegistrationResponse.class);
+    regResponse.setNodeId(nodeId);
     SecretKey secretKey =
       this.containerTokenSecretManager.createAndGetSecretKey(node);
-    regResponse.secretKey = ByteBuffer.wrap(secretKey.getEncoded());
-    return regResponse;
+    regResponse.setSecretKey(ByteBuffer.wrap(secretKey.getEncoded()));
+    RegisterNodeManagerResponse response = recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
+    response.setRegistrationResponse(regResponse);
+    return response;
   }
 
   @Override
-  public HeartbeatResponse nodeHeartbeat(NodeStatus nodeStatus)
-  throws AvroRemoteException {
-    nodeStatus.lastSeen = System.currentTimeMillis();
+  public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnRemoteException {
+    org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus = request.getNodeStatus();
+    nodeStatus.setLastSeen(System.currentTimeMillis());
     NodeInfoTracker nTracker = null;
+    NodeHeartbeatResponse nodeHbResponse = recordFactory.newRecordInstance(NodeHeartbeatResponse.class);
     synchronized(nodeManagers) {
-      nTracker = nodeManagers.get(nodeStatus.nodeId);
+      nTracker = nodeManagers.get(nodeStatus.getNodeId());
     }
     if (nTracker == null) {
       /* node does not exist */
-      LOG.info("Node not found rebooting " + nodeStatus.nodeId);
-      return reboot;
+      LOG.info("Node not found rebooting " + nodeStatus.getNodeId());
+      nodeHbResponse.setHeartbeatResponse(reboot);
+      return nodeHbResponse;
     }
 
     NodeInfo nodeInfo = nTracker.getNodeInfo();
     /* check to see if its an old heartbeat */    
-    if (nodeStatus.responseId + 1 == nTracker.getLastHeartBeatResponse().responseId) {
-      return nTracker.getLastHeartBeatResponse();
-    } else if (nodeStatus.responseId + 1 < nTracker.getLastHeartBeatResponse().responseId) {
+    if (nodeStatus.getResponseId() + 1 == nTracker.getLastHeartBeatResponse().getResponseId()) {
+      nodeHbResponse.setHeartbeatResponse(nTracker.getLastHeartBeatResponse());
+      return nodeHbResponse;
+    } else if (nodeStatus.getResponseId() + 1 < nTracker.getLastHeartBeatResponse().getResponseId()) {
       LOG.info("Too far behind rm response id:" + 
-          nTracker.lastHeartBeatResponse.responseId + " nm response id:" + nodeStatus.responseId);
-      return reboot;
+          nTracker.lastHeartBeatResponse.getResponseId() + " nm response id:" + nodeStatus.getResponseId());
+      nodeHbResponse.setHeartbeatResponse(reboot);
+      return nodeHbResponse;
     }
 
     /* inform any listeners of node heartbeats */
     NodeResponse nodeResponse = resourceListener.nodeUpdate(
-        nodeInfo, nodeStatus.containers);
+        nodeInfo, nodeStatus.getAllContainers());
 
     
-    HeartbeatResponse response = new HeartbeatResponse();
-    response.containersToCleanup = nodeResponse.getContainersToCleanUp();
+    HeartbeatResponse response = recordFactory.newRecordInstance(HeartbeatResponse.class);
+    response.addAllContainersToCleanup(nodeResponse.getContainersToCleanUp());
 
-    
-    response.appplicationsToCleanup = nodeResponse.getFinishedApplications();
-    response.responseId = nTracker.getLastHeartBeatResponse().responseId + 1;
+    response.addAllApplicationsToCleanup(nodeResponse.getFinishedApplications());
+    response.setResponseId(nTracker.getLastHeartBeatResponse().getResponseId() + 1);
 
     nTracker.refreshHeartBeatResponse(response);
     nTracker.updateNodeStatus(nodeStatus);
-    return response;
+    nodeHbResponse.setHeartbeatResponse(response);
+    return nodeHbResponse;
   }
 
   @Private
-  public synchronized NodeInfo getNodeManager(NodeID nodeId) {
+  public synchronized NodeInfo getNodeManager(NodeId nodeId) {
     NodeInfoTracker ntracker = nodeManagers.get(nodeId);
     return (ntracker == null ? null: ntracker.getNodeInfo());
   }
 
-  private synchronized NodeID getNodeId(CharSequence node) {
-    NodeID nodeId;
+  private synchronized NodeId getNodeId(String node) {
+    NodeId nodeId;
     nodeId = nodes.get(node);
     if (nodeId == null) {
-      nodeId = new NodeID();
-      nodeId.id = nodeCounter.getAndIncrement();
+      nodeId = recordFactory.newRecordInstance(NodeId.class);
+      nodeId.setId(nodeCounter.getAndIncrement());
       nodes.put(node.toString(), nodeId);
     }
     return nodeId;
@@ -240,8 +256,8 @@ ResourceTracker, ResourceContext {
 
   @Override
   public synchronized YarnClusterMetrics getClusterMetrics() {
-    YarnClusterMetrics ymetrics = new YarnClusterMetrics();
-    ymetrics.numNodeManagers = nodeManagers.size();
+    YarnClusterMetrics ymetrics = recordFactory.newRecordInstance(YarnClusterMetrics.class);
+    ymetrics.setNumNodeManagers(nodeManagers.size());
     return ymetrics;
   }
 
@@ -264,14 +280,14 @@ ResourceTracker, ResourceContext {
     return infoList;
   }
 
-  protected void addForTracking(NodeStatus status) {
+  protected void addForTracking(org.apache.hadoop.yarn.server.api.records.NodeStatus status) {
     synchronized(nmExpiryQueue) {
       nmExpiryQueue.add(status);
     }
   }
   
-  protected void expireNMs(List<NodeID> nodes) {
-    for (NodeID id: nodes) {
+  protected void expireNMs(List<NodeId> nodes) {
+    for (NodeId id: nodes) {
       synchronized (nodeManagers) {
         NodeInfo nInfo = nodeManagers.get(id).getNodeInfo();
         nodeManagers.remove(id);
@@ -299,29 +315,29 @@ ResourceTracker, ResourceContext {
        * through the expiry queue.
        */
       
-      List<NodeID> expired = new ArrayList<NodeID>();
+      List<NodeId> expired = new ArrayList<NodeId>();
       LOG.info("Starting expiring thread with interval " + nmExpiryInterval);
       
       while (!stop) {
-        NodeStatus leastRecent;
+        org.apache.hadoop.yarn.server.api.records.NodeStatus leastRecent;
         long now = System.currentTimeMillis();
         expired.clear();
         synchronized(nmExpiryQueue) {
           while ((nmExpiryQueue.size() > 0) &&
               (leastRecent = nmExpiryQueue.first()) != null &&
-              ((now - leastRecent.lastSeen) > 
+              ((now - leastRecent.getLastSeen()) > 
               nmExpiryInterval)) {
             nmExpiryQueue.remove(leastRecent);
             NodeInfoTracker info;
             synchronized(nodeManagers) {
-              info = nodeManagers.get(leastRecent.nodeId);
+              info = nodeManagers.get(leastRecent.getNodeId());
             }
             if (info == null) {
               continue;
             }
-            NodeStatus status = info.getNodeStatus();
-            if ((now - status.lastSeen) > nmExpiryInterval) {
-              expired.add(status.nodeId);
+            org.apache.hadoop.yarn.server.api.records.NodeStatus status = info.getNodeStatus();
+            if ((now - status.getLastSeen()) > nmExpiryInterval) {
+              expired.add(status.getNodeId());
             } else {
               nmExpiryQueue.add(status);
             }

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/ResourceContext.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/ResourceContext.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/ResourceContext.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/ResourceContext.java Thu Mar 31 22:23:22 2011
@@ -20,7 +20,8 @@ package org.apache.hadoop.yarn.server.re
 
 import java.util.List;
 
-import org.apache.hadoop.yarn.YarnClusterMetrics;
+import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
+
 
 /**
  * The read-only interface for cluster resource

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Application.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Application.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Application.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Application.java Thu Mar 31 22:23:22 2011
@@ -33,12 +33,14 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.NodeInfo;
-import org.apache.hadoop.yarn.ApplicationID;
-import org.apache.hadoop.yarn.Container;
-import org.apache.hadoop.yarn.Priority;
-import org.apache.hadoop.yarn.Resource;
-import org.apache.hadoop.yarn.ResourceRequest;
 
 /**
  * This class keeps track of all the consumption of an application.
@@ -52,17 +54,18 @@ public class Application {
 
   private AtomicInteger containerCtr = new AtomicInteger(0);
 
-  final ApplicationID applicationId;
+  final ApplicationId applicationId;
   final Queue queue;
   final String user;
+  private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
 
   final Set<Priority> priorities = 
     new TreeSet<Priority>(
         new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator());
   final Map<Priority, Map<String, ResourceRequest>> requests = 
     new HashMap<Priority, Map<String, ResourceRequest>>();
-  final Resource currentConsumption = new Resource();
-  final Resource overallConsumption = new Resource();
+  final Resource currentConsumption = recordFactory.newRecordInstance(Resource.class);
+  final Resource overallConsumption = recordFactory.newRecordInstance(Resource.class);
 
   /* Current consumption */
   List<Container> acquired = new ArrayList<Container>();
@@ -71,13 +74,13 @@ public class Application {
   List<Container> allocated = new ArrayList<Container>(); 
   Set<NodeInfo> applicationOnNodes = new HashSet<NodeInfo>();
   
-  public Application(ApplicationID applicationId, Queue queue, String user) {
+  public Application(ApplicationId applicationId, Queue queue, String user) {
     this.applicationId = applicationId;
     this.queue = queue;
     this.user = user; 
   }
 
-  public ApplicationID getApplicationId() {
+  public ApplicationId getApplicationId() {
     return applicationId;
   }
 
@@ -120,7 +123,7 @@ public class Application {
     // Metrics
     for (Container container : heartbeatContainers) {
       org.apache.hadoop.yarn.server.resourcemanager.resource.Resource.addResource(
-          overallConsumption, container.resource);
+          overallConsumption, container.getResource());
     }
 
     LOG.debug("acquire:" +
@@ -144,8 +147,8 @@ public class Application {
   synchronized public void updateResourceRequests(List<ResourceRequest> requests) {
     // Update resource requests
     for (ResourceRequest request : requests) {
-      Priority priority = request.priority;
-      String hostName = request.hostName.toString();
+      Priority priority = request.getPriority();
+      String hostName = request.getHostName();
 
       Map<String, ResourceRequest> asks = this.requests.get(priority);
 
@@ -171,12 +174,12 @@ public class Application {
       LOG.debug("update: " +
           "application=" + applicationId + " released=" + container);
       org.apache.hadoop.yarn.server.resourcemanager.resource.Resource.subtractResource(
-          currentConsumption, container.resource);
+          currentConsumption, container.getResource());
       for (Iterator<Container> i=acquired.iterator(); i.hasNext();) {
         Container c = i.next();
-        if (c.id.equals(container.id)) {
+        if (c.getId().equals(container.getId())) {
           i.remove();
-          LOG.info("Removed acquired container: " + container.id);
+          LOG.info("Removed acquired container: " + container.getId());
         }
       }
     }
@@ -239,13 +242,13 @@ public class Application {
     allocate(containers);
 
     // Update future requirements
-    nodeLocalRequest.numContainers -= containers.size();
+    nodeLocalRequest.setNumContainers(nodeLocalRequest.getNumContainers() - containers.size());
     ResourceRequest rackLocalRequest = 
       requests.get(priority).get(node.getRackName());
-    rackLocalRequest.numContainers -= containers.size();
+    rackLocalRequest.setNumContainers(rackLocalRequest.getNumContainers() - containers.size());
     ResourceRequest offSwitchRequest = 
       requests.get(priority).get(NodeManager.ANY);
-    offSwitchRequest.numContainers -= containers.size();
+    offSwitchRequest.setNumContainers(offSwitchRequest.getNumContainers() - containers.size());
   }
 
   /**
@@ -261,10 +264,10 @@ public class Application {
     allocate(containers);
 
     // Update future requirements
-    rackLocalRequest.numContainers -= containers.size();
+    rackLocalRequest.setNumContainers(rackLocalRequest.getNumContainers() - containers.size());
     ResourceRequest offSwitchRequest = 
       requests.get(priority).get(NodeManager.ANY);
-    offSwitchRequest.numContainers -= containers.size();
+    offSwitchRequest.setNumContainers(offSwitchRequest.getNumContainers() - containers.size());
   } 
 
   /**
@@ -280,19 +283,19 @@ public class Application {
     allocate(containers);
 
     // Update future requirements
-    offSwitchRequest.numContainers -= containers.size();
+    offSwitchRequest.setNumContainers(offSwitchRequest.getNumContainers() - containers.size());
   }
 
   synchronized private void allocate(List<Container> containers) {
     // Update consumption and track allocations
     for (Container container : containers) {
       org.apache.hadoop.yarn.server.resourcemanager.resource.Resource.addResource(
-          currentConsumption, container.resource);
+          currentConsumption, container.getResource());
 
       allocated.add(container);
 
       LOG.debug("allocate: applicationId=" + applicationId + 
-          " container=" + container.id + " host=" + container.hostName);
+          " container=" + container.getId() + " host=" + container.getHostName());
     }
   }
 

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeManager.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeManager.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeManager.java Thu Mar 31 22:23:22 2011
@@ -31,13 +31,15 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.records.NodeId;
 import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.NodeInfo;
-import org.apache.hadoop.yarn.ApplicationID;
-import org.apache.hadoop.yarn.Container;
-import org.apache.hadoop.yarn.ContainerID;
-import org.apache.hadoop.yarn.ContainerState;
-import org.apache.hadoop.yarn.NodeID;
-import org.apache.hadoop.yarn.Resource;
 
 /**
  * This class is used by ClusterInfo to keep track of all the applications/containers
@@ -48,26 +50,28 @@ import org.apache.hadoop.yarn.Resource;
 @Unstable
 public class NodeManager implements NodeInfo {
   private static final Log LOG = LogFactory.getLog(NodeManager.class);
-  private final NodeID nodeId;
+  private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+  private final NodeId nodeId;
   private final String hostName;
   private Resource totalCapability;
-  private Resource availableResource = new Resource();
-  private Resource usedResource = new Resource();
+  private Resource availableResource = recordFactory.newRecordInstance(Resource.class);
+  private Resource usedResource = recordFactory.newRecordInstance(Resource.class);
   private final Node node;
   
+  
   private static final Container[] EMPTY_CONTAINER_ARRAY = new Container[] {};
   private static final List<Container> EMPTY_CONTAINER_LIST = Arrays.asList(EMPTY_CONTAINER_ARRAY);
-  private static final ApplicationID[] EMPTY_APPLICATION_ARRAY = new ApplicationID[]{};
-  private static final List<ApplicationID> EMPTY_APPLICATION_LIST = Arrays.asList(EMPTY_APPLICATION_ARRAY);
+  private static final ApplicationId[] EMPTY_APPLICATION_ARRAY = new ApplicationId[]{};
+  private static final List<ApplicationId> EMPTY_APPLICATION_LIST = Arrays.asList(EMPTY_APPLICATION_ARRAY);
   
   public static final String ANY = "*";  
   /* set of containers that are allocated containers */
-  private final Map<ContainerID, Container> allocatedContainers = 
-    new TreeMap<ContainerID, Container>();
+  private final Map<ContainerId, Container> allocatedContainers = 
+    new TreeMap<ContainerId, Container>();
     
   /* set of containers that are currently active on a node manager */
-  private final Map<ContainerID, Container> activeContainers =
-    new TreeMap<ContainerID, Container>();
+  private final Map<ContainerId, Container> activeContainers =
+    new TreeMap<ContainerId, Container>();
   
   /* set of containers that need to be cleaned */
   private final Set<Container> containersToClean = 
@@ -75,11 +79,11 @@ public class NodeManager implements Node
 
   
   /* the list of applications that have finished and need to be purged */
-  private final List<ApplicationID> finishedApplications = new ArrayList<ApplicationID>();
+  private final List<ApplicationId> finishedApplications = new ArrayList<ApplicationId>();
   
   private volatile int numContainers;
   
-  public NodeManager(NodeID nodeId, String hostname, 
+  public NodeManager(NodeId nodeId, String hostname, 
       Node node, Resource capability) {
     this.nodeId = nodeId;   
     this.totalCapability = capability; 
@@ -104,7 +108,7 @@ public class NodeManager implements Node
    * @param applicationId application
    * @param containers allocated containers
    */
-  public synchronized void allocateContainer(ApplicationID applicationId, 
+  public synchronized void allocateContainer(ApplicationId applicationId, 
       List<Container> containers) {
     if (containers == null) {
       LOG.error("Adding null containers for application " + applicationId);
@@ -117,8 +121,8 @@ public class NodeManager implements Node
     LOG.info("addContainers:" +
         " node=" + getHostName() + 
         " #containers=" + containers.size() + 
-        " available=" + getAvailableResource().memory + 
-        " used=" + getUsedResource().memory);
+        " available=" + getAvailableResource().getMemory() + 
+        " used=" + getUsedResource().getMemory());
   }
 
   /**
@@ -128,7 +132,7 @@ public class NodeManager implements Node
    * node manager.
    */
   public synchronized NodeResponse 
-    statusUpdate(Map<CharSequence,List<Container>> allContainers) {
+    statusUpdate(Map<String,List<Container>> allContainers) {
 
     if (allContainers == null) {
       return new NodeResponse(EMPTY_APPLICATION_LIST, EMPTY_CONTAINER_LIST,
@@ -137,7 +141,7 @@ public class NodeManager implements Node
        
     List<Container> listContainers = new ArrayList<Container>();
     // Iterate through the running containers and update their status
-    for (Map.Entry<CharSequence, List<Container>> e : 
+    for (Map.Entry<String, List<Container>> e : 
       allContainers.entrySet()) {
       listContainers.addAll(e.getValue());
     }
@@ -154,27 +158,27 @@ public class NodeManager implements Node
   private synchronized NodeResponse update(List<Container> containers) {
     List<Container> completedContainers = new ArrayList<Container>();
     List<Container> containersToCleanUp = new ArrayList<Container>();
-    List<ApplicationID> lastfinishedApplications = new ArrayList<ApplicationID>();
+    List<ApplicationId> lastfinishedApplications = new ArrayList<ApplicationId>();
     
     for (Container container : containers) {
-      if (allocatedContainers.remove(container.id) != null) {
-        activeContainers.put(container.id, container);
-        LOG.info("Activated container " + container.id + " on node " + 
+      if (allocatedContainers.remove(container.getId()) != null) {
+        activeContainers.put(container.getId(), container);
+        LOG.info("Activated container " + container.getId() + " on node " + 
          getHostName());
       }
 
-      if (container.state == ContainerState.COMPLETE) {
-        if (activeContainers.remove(container.id) != null) {
+      if (container.getState() == ContainerState.COMPLETE) {
+        if (activeContainers.remove(container.getId()) != null) {
           updateResource(container);
           LOG.info("Completed container " + container);
         }
         completedContainers.add(container);
-        LOG.info("Removed completed container " + container.id + " on node " + 
+        LOG.info("Removed completed container " + container.getId() + " on node " + 
             getHostName());
       }
-      else if (container.state != ContainerState.COMPLETE && 
-          (!allocatedContainers.containsKey(container.id)) && 
-          !activeContainers.containsKey(container.id)) {
+      else if (container.getState() != ContainerState.COMPLETE && 
+          (!allocatedContainers.containsKey(container.getId())) && 
+          !activeContainers.containsKey(container.getId())) {
         containersToCleanUp.add(container);
       }
     }
@@ -187,28 +191,28 @@ public class NodeManager implements Node
   }
   
   private synchronized void allocateContainer(Container container) {
-    deductAvailableResource(container.resource);
+    deductAvailableResource(container.getResource());
     ++numContainers;
     
-    allocatedContainers.put(container.id, container);
-    LOG.info("Allocated container " + container.id + 
+    allocatedContainers.put(container.getId(), container);
+    LOG.info("Allocated container " + container.getId() + 
         " to node " + getHostName());
     
-    LOG.info("Assigned container " + container.id + 
-        " of capacity " + container.resource + " on host " + getHostName() + 
+    LOG.info("Assigned container " + container.getId() + 
+        " of capacity " + container.getResource() + " on host " + getHostName() + 
         ", which currently has " + numContainers + " containers, " + 
         getUsedResource() + " used and " + 
         getAvailableResource() + " available");
   }
 
   private synchronized boolean isValidContainer(Container c) {    
-    if (activeContainers.containsKey(c.id) || allocatedContainers.containsKey(c.id))
+    if (activeContainers.containsKey(c.getId()) || allocatedContainers.containsKey(c.getId()))
       return true;
     return false;
   }
 
   private synchronized void updateResource(Container container) {
-    addAvailableResource(container.resource);
+    addAvailableResource(container.getResource());
     --numContainers;
   }
   
@@ -227,13 +231,13 @@ public class NodeManager implements Node
     /* remove the containers from the nodemanger */
     
     // Was this container launched?
-    activeContainers.remove(container.id);
-    allocatedContainers.remove(container.id);
+    activeContainers.remove(container.getId());
+    allocatedContainers.remove(container.getId());
     containersToClean.add(container);
     updateResource(container);
 
-    LOG.info("Released container " + container.id + 
-        " of capacity " + container.resource + " on host " + getHostName() + 
+    LOG.info("Released container " + container.getId() + 
+        " of capacity " + container.getResource() + " on host " + getHostName() + 
         ", which currently has " + numContainers + " containers, " + 
         getUsedResource() + " used and " + getAvailableResource()
         + " available" + ", release resources=" + true);
@@ -241,7 +245,7 @@ public class NodeManager implements Node
   }
 
   @Override
-  public NodeID getNodeID() {
+  public NodeId getNodeID() {
     return this.nodeId;
   }
 
@@ -296,7 +300,7 @@ public class NodeManager implements Node
         usedResource, resource);
   }
 
-  public synchronized void notifyFinishedApplication(ApplicationID applicationId) {  
+  public synchronized void notifyFinishedApplication(ApplicationId applicationId) {  
     finishedApplications.add(applicationId);
     /* make sure to iterate through the list and remove all the containers that 
      * belong to this application.
@@ -311,7 +315,7 @@ public class NodeManager implements Node
   @Override
   public String toString() {
     return "host: " + getHostName() + " #containers=" + getNumContainers() +  
-      " available=" + getAvailableResource().memory + 
-      " used=" + getUsedResource().memory;
+      " available=" + getAvailableResource().getMemory() + 
+      " used=" + getUsedResource().getMemory();
   }
  }
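
The container bookkeeping in NodeManager.update() now keys everything by ContainerId and reads state through accessors. The following is a simplified, hypothetical sketch of that allocated-to-active-to-complete flow, not the committed implementation:

import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;

public class ContainerTrackingSketch {
  private final Map<ContainerId, Container> allocated = new TreeMap<ContainerId, Container>();
  private final Map<ContainerId, Container> active = new TreeMap<ContainerId, Container>();

  void onHeartbeatContainer(Container container) {
    // A container reported by the node moves from "allocated" to "active".
    if (allocated.remove(container.getId()) != null) {
      active.put(container.getId(), container);
    }
    // A completed container leaves the "active" set.
    if (container.getState() == ContainerState.COMPLETE) {
      active.remove(container.getId());
    }
  }
}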

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeResponse.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeResponse.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeResponse.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeResponse.java Thu Mar 31 22:23:22 2011
@@ -2,8 +2,9 @@ package org.apache.hadoop.yarn.server.re
 
 import java.util.List;
 
-import org.apache.hadoop.yarn.ApplicationID;
-import org.apache.hadoop.yarn.Container;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+
 
 /**
  * The class that encapsulates response from clusterinfo for 
@@ -12,15 +13,15 @@ import org.apache.hadoop.yarn.Container;
 public class NodeResponse {
   private final List<Container> completed;
   private final List<Container> toCleanUp;
-  private final List<ApplicationID> finishedApplications;
+  private final List<ApplicationId> finishedApplications;
   
-  public NodeResponse(List<ApplicationID> finishedApplications,
+  public NodeResponse(List<ApplicationId> finishedApplications,
       List<Container> completed, List<Container> toKill) {
     this.finishedApplications = finishedApplications;
     this.completed = completed;
     this.toCleanUp = toKill;
   }
-  public List<ApplicationID> getFinishedApplications() {
+  public List<ApplicationId> getFinishedApplications() {
     return this.finishedApplications;
   }
   public List<Container> getCompletedContainers() {

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceListener.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceListener.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceListener.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceListener.java Thu Mar 31 22:23:22 2011
@@ -24,10 +24,10 @@ import java.util.Map;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.api.records.NodeId;
 import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.NodeInfo;
-import org.apache.hadoop.yarn.Container;
-import org.apache.hadoop.yarn.NodeID;
-import org.apache.hadoop.yarn.Resource;
 
 /**
  * This interface is implemented by services which want to get notified
@@ -45,7 +45,7 @@ public interface ResourceListener {
    * @param capability the resource  capability of the node.
    * @return the {@link NodeInfo} object that tracks this nodemanager.
    */
-  public NodeInfo addNode(NodeID nodeId,String hostName,
+  public NodeInfo addNode(NodeId nodeId,String hostName,
       Node node, Resource capability);
   
   /**
@@ -62,5 +62,5 @@ public interface ResourceListener {
    * applications to clean.
    */
   public NodeResponse nodeUpdate(NodeInfo nodeInfo, 
-      Map<CharSequence,List<Container>> containers);
-}
\ No newline at end of file
+      Map<String,List<Container>> containers);
+}

Modified: hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java?rev=1087462&r1=1087461&r2=1087462&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java (original)
+++ hadoop/mapreduce/branches/MR-279/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java Thu Mar 31 22:23:22 2011
@@ -21,9 +21,11 @@ package org.apache.hadoop.yarn.server.re
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.yarn.ApplicationID;
-import org.apache.hadoop.yarn.Container;
-import org.apache.hadoop.yarn.ResourceRequest;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
 
 /**
  * This interface is used by the components to talk to the
@@ -39,7 +41,7 @@ public interface YarnScheduler {
    * @return
    * @throws IOException
    */
-  List<Container> allocate(ApplicationID applicationId,
+  List<Container> allocate(ApplicationId applicationId,
       List<ResourceRequest> ask, List<Container> release)
       throws IOException;
 }
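
A caller of the updated YarnScheduler.allocate() signature would look roughly like the sketch below; the scheduler instance, application id, and ask list are assumed to be supplied by the caller and are hypothetical here.

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;

public class SchedulerCallSketch {
  static List<Container> askOnly(YarnScheduler scheduler, ApplicationId appId,
      List<ResourceRequest> ask) throws IOException {
    // Empty release list: this sketch only requests new containers.
    List<Container> release = Collections.emptyList();
    return scheduler.allocate(appId, ask, release);
  }
}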