You are viewing a plain text version of this content. The canonical link for it is here.
Posted to mapreduce-commits@hadoop.apache.org by to...@apache.org on 2011/12/15 00:34:10 UTC

svn commit: r1214546 [4/5] - in /hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ conf/ hadoop-mapreduce-client/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-map...

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java Wed Dec 14 23:34:04 2011
@@ -22,7 +22,9 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
@@ -56,6 +58,7 @@ import org.apache.hadoop.yarn.server.api
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
@@ -63,10 +66,12 @@ import org.apache.hadoop.yarn.server.sec
 import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 import org.apache.hadoop.yarn.service.Service;
 import org.apache.hadoop.yarn.service.Service.STATE;
+import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import static org.mockito.Mockito.mock;
 
 public class TestNodeStatusUpdater {
 
@@ -216,7 +221,7 @@ public class TestNodeStatusUpdater {
       HeartbeatResponse response = recordFactory
           .newRecordInstance(HeartbeatResponse.class);
       response.setResponseId(heartBeatID);
-      
+
       NodeHeartbeatResponse nhResponse = recordFactory
           .newRecordInstance(NodeHeartbeatResponse.class);
       nhResponse.setHeartbeatResponse(response);
@@ -241,6 +246,48 @@ public class TestNodeStatusUpdater {
       return resourceTracker;
     }
   }
+
+  private class MyNodeStatusUpdater3 extends NodeStatusUpdaterImpl {
+    public ResourceTracker resourceTracker;
+    private Context context;
+
+    public MyNodeStatusUpdater3(Context context, Dispatcher dispatcher,
+        NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics,
+        ContainerTokenSecretManager containerTokenSecretManager) {
+      super(context, dispatcher, healthChecker, metrics,
+          containerTokenSecretManager);
+      this.context = context;
+      this.resourceTracker = new MyResourceTracker3(this.context);
+    }
+
+    @Override
+    protected ResourceTracker getRMClient() {
+      return resourceTracker;
+    }
+    
+    @Override
+    protected boolean isSecurityEnabled() {
+      return true;
+    }
+  }
+
+  private class MyNodeManager extends NodeManager {
+    
+    private MyNodeStatusUpdater3 nodeStatusUpdater;
+    @Override
+    protected NodeStatusUpdater createNodeStatusUpdater(Context context,
+        Dispatcher dispatcher, NodeHealthCheckerService healthChecker,
+        ContainerTokenSecretManager containerTokenSecretManager) {
+      this.nodeStatusUpdater =
+          new MyNodeStatusUpdater3(context, dispatcher, healthChecker, metrics,
+              containerTokenSecretManager);
+      return this.nodeStatusUpdater;
+    }
+
+    protected MyNodeStatusUpdater3 getNodeStatusUpdater() {
+      return this.nodeStatusUpdater;
+    }
+  }
   
   // 
   private class MyResourceTracker2 implements ResourceTracker {
@@ -276,6 +323,65 @@ public class TestNodeStatusUpdater {
     }
   }
   
+  private class MyResourceTracker3 implements ResourceTracker {
+    public NodeAction heartBeatNodeAction = NodeAction.NORMAL;
+    public NodeAction registerNodeAction = NodeAction.NORMAL;
+    private Map<ApplicationId, List<Long>> keepAliveRequests =
+        new HashMap<ApplicationId, List<Long>>();
+    private ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
+    private final Context context;
+
+    MyResourceTracker3(Context context) {
+      this.context = context;
+    }
+    
+    @Override
+    public RegisterNodeManagerResponse registerNodeManager(
+        RegisterNodeManagerRequest request) throws YarnRemoteException {
+
+      RegisterNodeManagerResponse response =
+          recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
+      RegistrationResponse regResponse =
+          recordFactory.newRecordInstance(RegistrationResponse.class);
+      regResponse.setNodeAction(registerNodeAction);
+      response.setRegistrationResponse(regResponse);
+      return response;
+    }
+
+    @Override
+    public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
+        throws YarnRemoteException {
+      LOG.info("Got heartBeatId: [" + heartBeatID +"]");
+      NodeStatus nodeStatus = request.getNodeStatus();
+      nodeStatus.setResponseId(heartBeatID++);
+      HeartbeatResponse response =
+          recordFactory.newRecordInstance(HeartbeatResponse.class);
+      response.setResponseId(heartBeatID);
+      response.setNodeAction(heartBeatNodeAction);
+
+      if (nodeStatus.getKeepAliveApplications() != null
+          && nodeStatus.getKeepAliveApplications().size() > 0) {
+        for (ApplicationId appId : nodeStatus.getKeepAliveApplications()) {
+          List<Long> list = keepAliveRequests.get(appId);
+          if (list == null) {
+            list = new LinkedList<Long>();
+            keepAliveRequests.put(appId, list);
+          }
+          list.add(System.currentTimeMillis());
+        }
+      }
+      if (heartBeatID == 2) {
+        LOG.info("Sending FINISH_APP for application: [" + appId + "]");
+        this.context.getApplications().put(appId, mock(Application.class));
+        response.addAllApplicationsToCleanup(Collections.singletonList(appId));
+      }
+      NodeHeartbeatResponse nhResponse =
+          recordFactory.newRecordInstance(NodeHeartbeatResponse.class);
+      nhResponse.setHeartbeatResponse(response);
+      return nhResponse;
+    }
+  }
+
   @Before
   public void clearError() {
     nmStartError = null;
@@ -456,6 +562,38 @@ public class TestNodeStatusUpdater {
     verifyNodeStartFailure("Starting of RPC Server failed");
   }
 
+  @Test
+  public void testApplicationKeepAlive() throws Exception {
+    MyNodeManager nm = new MyNodeManager();
+    try {
+      YarnConfiguration conf = createNMConfig();
+      conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
+      conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
+          4000l);
+      nm.init(conf);
+      nm.start();
+      // HB 2 -> app cancelled by RM.
+      while (heartBeatID < 12) {
+        Thread.sleep(1000l);
+      }
+      MyResourceTracker3 rt =
+          (MyResourceTracker3) nm.getNodeStatusUpdater().getRMClient();
+      rt.context.getApplications().remove(rt.appId);
+      Assert.assertEquals(1, rt.keepAliveRequests.size());
+      int numKeepAliveRequests = rt.keepAliveRequests.get(rt.appId).size();
+      LOG.info("Number of Keep Alive Requests: [" + numKeepAliveRequests + "]");
+      Assert.assertTrue(numKeepAliveRequests == 2 || numKeepAliveRequests == 3);
+      while (heartBeatID < 20) {
+        Thread.sleep(1000l);
+      }
+      int numKeepAliveRequests2 = rt.keepAliveRequests.get(rt.appId).size();
+      Assert.assertEquals(numKeepAliveRequests, numKeepAliveRequests2);
+    } finally {
+      if (nm.getServiceState() == STATE.STARTED)
+        nm.stop();
+    }
+  }
+
   private void verifyNodeStartFailure(String errMessage) {
     YarnConfiguration conf = createNMConfig();
     nm.init(conf);

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java Wed Dec 14 23:34:04 2011
@@ -68,7 +68,7 @@ public class TestNonAggregatingLogHandle
             + localLogDirs[1].getAbsolutePath();
 
     conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
-    conf.setBoolean(YarnConfiguration.NM_LOG_AGGREGATION_ENABLED, false);
+    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
     conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 0l);
 
     DrainDispatcher dispatcher = createDispatcher(conf);
@@ -142,7 +142,7 @@ public class TestNonAggregatingLogHandle
             + localLogDirs[1].getAbsolutePath();
 
     conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
-    conf.setBoolean(YarnConfiguration.NM_LOG_AGGREGATION_ENABLED, false);
+    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
 
     conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 10800l);
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java Wed Dec 14 23:34:04 2011
@@ -173,7 +173,7 @@ public class RMAppManager implements Eve
     } else {
       // Inform the DelegationTokenRenewer
       if (UserGroupInformation.isSecurityEnabled()) {
-        rmContext.getDelegationTokenRenewer().removeApplication(applicationId);
+        rmContext.getDelegationTokenRenewer().applicationFinished(applicationId);
       }
       
       completedApps.add(applicationId);  

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java Wed Dec 14 23:34:04 2011
@@ -412,7 +412,7 @@ public class ResourceManager extends Com
 
   protected void startWepApp() {
     Builder<ApplicationMasterService> builder = 
-      WebApps.$for("cluster", masterService).at(
+      WebApps.$for("cluster", ApplicationMasterService.class, masterService, "ws").at(
           this.conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS,
           YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS)); 
     if(YarnConfiguration.getRMWebAppHostAndPort(conf).

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java Wed Dec 14 23:34:04 2011
@@ -272,7 +272,8 @@ public class ResourceTrackerService exte
     // 4. Send status to RMNode, saving the latest response.
     this.rmContext.getDispatcher().getEventHandler().handle(
         new RMNodeStatusEvent(nodeId, remoteNodeStatus.getNodeHealthStatus(),
-            remoteNodeStatus.getContainersStatuses(), latestResponse));
+            remoteNodeStatus.getContainersStatuses(), 
+            remoteNodeStatus.getKeepAliveApplications(), latestResponse));
 
     nodeHeartBeatResponse.setHeartbeatResponse(latestResponse);
     return nodeHeartBeatResponse;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java Wed Dec 14 23:34:04 2011
@@ -262,6 +262,16 @@ public class RMNodeImpl implements RMNod
 
   }
 
+  @Private
+  public List<ContainerId> getContainersToCleanUp() {
+    this.readLock.lock();
+    try {
+      return new ArrayList<ContainerId>(containersToClean);
+    } finally {
+      this.readLock.unlock();
+    }
+  }
+  
   @Override
   public List<ContainerId> pullContainersToCleanUp() {
 
@@ -342,7 +352,6 @@ public class RMNodeImpl implements RMNod
 
     @Override
     public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
-
       rmNode.containersToClean.add(((
           RMNodeCleanContainerEvent) event).getContainerId());
     }
@@ -396,8 +405,17 @@ public class RMNodeImpl implements RMNod
       List<ContainerStatus> completedContainers = 
           new ArrayList<ContainerStatus>();
       for (ContainerStatus remoteContainer : statusEvent.getContainers()) {
-        // Process running containers
         ContainerId containerId = remoteContainer.getContainerId();
+        
+        // Don't bother with containers already scheduled for cleanup,
+        // the scheduler doesn't need to know any more about this container
+        if (rmNode.containersToClean.contains(containerId)) {
+          LOG.info("Container " + containerId + " already scheduled for " +
+          		"cleanup, no further processing");
+          continue;
+        }
+        
+        // Process running containers
         if (remoteContainer.getState() == ContainerState.RUNNING) {
           if (!rmNode.justLaunchedContainers.containsKey(containerId)) {
             // Just launched container. RM knows about it the first time.
@@ -414,7 +432,9 @@ public class RMNodeImpl implements RMNod
       rmNode.context.getDispatcher().getEventHandler().handle(
           new NodeUpdateSchedulerEvent(rmNode, newlyLaunchedContainers, 
               completedContainers));
-
+      
+      rmNode.context.getDelegationTokenRenewer().updateKeepAliveApplications(
+          statusEvent.getKeepAliveAppIds());
       return RMNodeState.RUNNING;
     }
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java Wed Dec 14 23:34:04 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.re
 
 import java.util.List;
 
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -28,15 +29,17 @@ import org.apache.hadoop.yarn.server.api
 public class RMNodeStatusEvent extends RMNodeEvent {
 
   private final NodeHealthStatus nodeHealthStatus;
-  private List<ContainerStatus> containersCollection;
+  private final List<ContainerStatus> containersCollection;
   private final HeartbeatResponse latestResponse;
+  private final List<ApplicationId> keepAliveAppIds;
 
   public RMNodeStatusEvent(NodeId nodeId, NodeHealthStatus nodeHealthStatus,
-      List<ContainerStatus> collection,
+      List<ContainerStatus> collection, List<ApplicationId> keepAliveAppIds,
       HeartbeatResponse latestResponse) {
     super(nodeId, RMNodeEventType.STATUS_UPDATE);
     this.nodeHealthStatus = nodeHealthStatus;
     this.containersCollection = collection;
+    this.keepAliveAppIds = keepAliveAppIds;
     this.latestResponse = latestResponse;
   }
 
@@ -51,4 +54,8 @@ public class RMNodeStatusEvent extends R
   public HeartbeatResponse getLatestResponse() {
     return this.latestResponse;
   }
-}
+  
+  public List<ApplicationId> getKeepAliveAppIds() {
+    return this.keepAliveAppIds;
+  }
+}
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java Wed Dec 14 23:34:04 2011
@@ -20,14 +20,19 @@ package org.apache.hadoop.yarn.server.re
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.LinkedBlockingQueue;
 
 import org.apache.commons.logging.Log;
@@ -40,6 +45,7 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.service.AbstractService;
 
 /**
@@ -65,7 +71,16 @@ public class DelegationTokenRenewer exte
   // appId=>List<tokens>
   private Set<DelegationTokenToRenew> delegationTokens = 
     Collections.synchronizedSet(new HashSet<DelegationTokenToRenew>());
+  
+  private final ConcurrentMap<ApplicationId, Long> delayedRemovalMap =
+      new ConcurrentHashMap<ApplicationId, Long>();
 
+  private long tokenRemovalDelayMs;
+  
+  private Thread delayedRemovalThread;
+  
+  private boolean tokenKeepAliveEnabled;
+  
   public DelegationTokenRenewer() {
     super(DelegationTokenRenewer.class.getName());
   }
@@ -73,6 +88,12 @@ public class DelegationTokenRenewer exte
   @Override
   public synchronized void init(Configuration conf) {
     super.init(conf);
+    this.tokenKeepAliveEnabled =
+        conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
+            YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED);
+    this.tokenRemovalDelayMs =
+        conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
+            YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
   }
 
   @Override
@@ -81,6 +102,12 @@ public class DelegationTokenRenewer exte
     
     dtCancelThread.start();
     renewalTimer = new Timer(true);
+    if (tokenKeepAliveEnabled) {
+      delayedRemovalThread =
+          new Thread(new DelayedTokenRemovalRunnable(getConfig()),
+              "DelayedTokenCanceller");
+      delayedRemovalThread.start();
+    }
   }
 
   @Override
@@ -94,6 +121,14 @@ public class DelegationTokenRenewer exte
     } catch (InterruptedException e) {
       e.printStackTrace();
     }
+    if (tokenKeepAliveEnabled && delayedRemovalThread != null) {
+      delayedRemovalThread.interrupt();
+      try {
+        delayedRemovalThread.join(1000);
+      } catch (InterruptedException e) {
+        LOG.info("Interrupted while joining on delayed removal thread.", e);
+      }
+    }
     
     super.stop();
   }
@@ -343,12 +378,38 @@ public class DelegationTokenRenewer exte
     if(t.timerTask!=null)
       t.timerTask.cancel();
   }
-  
+
   /**
    * Removing delegation token for completed applications.
    * @param applicationId completed application
    */
-  public void removeApplication(ApplicationId applicationId) {
+  public void applicationFinished(ApplicationId applicationId) {
+    if (!tokenKeepAliveEnabled) {
+      removeApplicationFromRenewal(applicationId);
+    } else {
+      delayedRemovalMap.put(applicationId, System.currentTimeMillis()
+          + tokenRemovalDelayMs);
+    }
+  }
+
+  /**
+   * Add a list of applications to the keep alive list. If an appId already
+   * exists, update its keep-alive time.
+   * 
+   * @param appIds
+   *          the list of applicationIds to be kept alive.
+   * 
+   */
+  public void updateKeepAliveApplications(List<ApplicationId> appIds) {
+    if (tokenKeepAliveEnabled && appIds != null && appIds.size() > 0) {
+      for (ApplicationId appId : appIds) {
+        delayedRemovalMap.put(appId, System.currentTimeMillis()
+            + tokenRemovalDelayMs);
+      }
+    }
+  }
+
+  private void removeApplicationFromRenewal(ApplicationId applicationId) {
     synchronized (delegationTokens) {
       Iterator<DelegationTokenToRenew> it = delegationTokens.iterator();
       while(it.hasNext()) {
@@ -371,4 +432,50 @@ public class DelegationTokenRenewer exte
       }
     }
   }
+
+  /**
+   * Takes care of cancelling app delegation tokens after the configured
+   * cancellation delay, taking into consideration keep-alive requests.
+   * 
+   */
+  private class DelayedTokenRemovalRunnable implements Runnable {
+
+    private long waitTimeMs;
+
+    DelayedTokenRemovalRunnable(Configuration conf) {
+      waitTimeMs =
+          conf.getLong(
+              YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS,
+              YarnConfiguration.DEFAULT_RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS);
+    }
+
+    @Override
+    public void run() {
+      List<ApplicationId> toCancel = new ArrayList<ApplicationId>();
+      while (!Thread.currentThread().isInterrupted()) {
+        Iterator<Entry<ApplicationId, Long>> it =
+            delayedRemovalMap.entrySet().iterator();
+        toCancel.clear();
+        while (it.hasNext()) {
+          Entry<ApplicationId, Long> e = it.next();
+          if (e.getValue() < System.currentTimeMillis()) {
+            toCancel.add(e.getKey());
+          }
+        }
+        for (ApplicationId appId : toCancel) {
+          removeApplicationFromRenewal(appId);
+          delayedRemovalMap.remove(appId);
+        }
+        synchronized (this) {
+          try {
+            wait(waitTimeMs);
+          } catch (InterruptedException e) {
+            LOG.info("Delayed Deletion Thread Interrupted. Shutting it down");
+            return;
+          }
+        }
+      }
+    }
+  }
+  
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java Wed Dec 14 23:34:04 2011
@@ -18,10 +18,9 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
-import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.util.Times;
-import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -30,25 +29,25 @@ import com.google.inject.Inject;
 public class AboutBlock extends HtmlBlock {
   final ResourceManager rm;
 
-  @Inject 
+  @Inject
   AboutBlock(ResourceManager rm, ViewContext ctx) {
     super(ctx);
     this.rm = rm;
   }
-  
+
   @Override
   protected void render(Block html) {
     html._(MetricsOverviewTable.class);
-    long ts = ResourceManager.clusterTimeStamp;
     ResourceManager rm = getInstance(ResourceManager.class);
+    ClusterInfo cinfo = new ClusterInfo(rm);
     info("Cluster overview").
-      _("Cluster ID:", ts).
-      _("ResourceManager state:", rm.getServiceState()).
-      _("ResourceManager started on:", Times.format(ts)).
-      _("ResourceManager version:", YarnVersionInfo.getBuildVersion() +
-          " on " + YarnVersionInfo.getDate()).
-      _("Hadoop version:", VersionInfo.getBuildVersion() +
-          " on " + VersionInfo.getDate());
+      _("Cluster ID:", cinfo.getClusterId()).
+      _("ResourceManager state:", cinfo.getState()).
+      _("ResourceManager started on:", Times.format(cinfo.getStartedOn())).
+      _("ResourceManager version:", cinfo.getRMBuildVersion() +
+          " on " + cinfo.getRMVersionBuiltOn()).
+      _("Hadoop version:", cinfo.getHadoopBuildVersion() +
+          " on " + cinfo.getHadoopVersionBuiltOn());
     html._(InfoBlock.class);
   }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java Wed Dec 14 23:34:04 2011
@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.web
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
 
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
@@ -56,23 +57,18 @@ class AppsBlock extends HtmlBlock {
         tbody();
     int i = 0;
     for (RMApp app : list.apps.values()) {
-      String appId = app.getApplicationId().toString();
-      String trackingUrl = app.getTrackingUrl();
-      boolean trackingUrlIsNotReady = trackingUrl == null || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl);
-	  String ui = trackingUrlIsNotReady ? "UNASSIGNED" :
-          (app.getFinishTime() == 0 ? 
-              "ApplicationMaster" : "History");
-      String percent = String.format("%.1f", app.getProgress() * 100);
+      AppInfo appInfo = new AppInfo(app, true);
+      String percent = String.format("%.1f", appInfo.getProgress());
       tbody.
         tr().
           td().
-            br().$title(String.valueOf(app.getApplicationId().getId()))._(). // for sorting
-            a(url("app", appId), appId)._().
-          td(app.getUser().toString()).
-          td(app.getName().toString()).
-          td(app.getQueue().toString()).
-          td(app.getState().toString()).
-          td(app.getFinalApplicationStatus().toString()).
+            br().$title(appInfo.getAppIdNum())._(). // for sorting
+            a(url("app", appInfo.getAppId()), appInfo.getAppId())._().
+          td(appInfo.getUser()).
+          td(appInfo.getName()).
+          td(appInfo.getQueue()).
+          td(appInfo.getState()).
+          td(appInfo.getFinalStatus()).
           td().
             br().$title(percent)._(). // for sorting
             div(_PROGRESSBAR).
@@ -80,9 +76,9 @@ class AppsBlock extends HtmlBlock {
               div(_PROGRESSBAR_VALUE).
                 $style(join("width:", percent, '%'))._()._()._().
           td().
-            a(trackingUrlIsNotReady ?
-              "#" : join("http://", trackingUrl), ui)._().
-          td(app.getDiagnostics().toString())._();
+            a(!appInfo.isTrackingUrlReady()?
+              "#" : appInfo.getTrackingUrlPretty(), appInfo.getTrackingUI())._().
+          td(appInfo.getNote())._();
       if (list.rendering != Render.HTML && ++i >= 20) break;
     }
     tbody._()._();

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java Wed Dec 14 23:34:04 2011
@@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentMa
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
 import org.apache.hadoop.yarn.webapp.ToJSON;
 import org.apache.hadoop.yarn.webapp.view.JQueryUI.Render;
@@ -54,31 +55,27 @@ class AppsList implements ToJSON {
     out.append('[');
     boolean first = true;
     for (RMApp app : apps.values()) {
+      AppInfo appInfo = new AppInfo(app, false);
       if (first) {
         first = false;
       } else {
         out.append(",\n");
       }
-      String appID = app.getApplicationId().toString();
-      String trackingUrl = app.getTrackingUrl();
-      boolean trackingUrlIsNotReady = trackingUrl == null
-          || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl);
-      String ui = trackingUrlIsNotReady ? "UNASSIGNED"
-          : (app.getFinishTime() == 0 ? "ApplicationMaster" : "History");
       out.append("[\"");
-      appendSortable(out, app.getApplicationId().getId());
-      appendLink(out, appID, rc.prefix(), "app", appID).append(_SEP).
-          append(escapeHtml(app.getUser().toString())).append(_SEP).
-          append(escapeHtml(app.getName().toString())).append(_SEP).
-          append(escapeHtml(app.getQueue())).append(_SEP).
-          append(app.getState().toString()).append(_SEP).
-          append(app.getFinalApplicationStatus().toString()).append(_SEP);
-      appendProgressBar(out, app.getProgress()).append(_SEP);
-      appendLink(out, ui, rc.prefix(),
-          trackingUrlIsNotReady ?
-            "#" : "http://", trackingUrl).
+      appendSortable(out, appInfo.getAppIdNum());
+      appendLink(out, appInfo.getAppId(), rc.prefix(), "app",
+          appInfo.getAppId()).append(_SEP).
+          append(escapeHtml(appInfo.getUser())).append(_SEP).
+          append(escapeHtml(appInfo.getName())).append(_SEP).
+          append(escapeHtml(appInfo.getQueue())).append(_SEP).
+          append(appInfo.getState()).append(_SEP).
+          append(appInfo.getFinalStatus()).append(_SEP);
+      appendProgressBar(out, appInfo.getProgress()).append(_SEP);
+      appendLink(out, appInfo.getTrackingUI(), rc.prefix(),
+          !appInfo.isTrackingUrlReady() ?
+            "#" : appInfo.getTrackingUrlPretty()).
           append(_SEP).append(escapeJavaScript(escapeHtml(
-                              app.getDiagnostics().toString()))).
+                              appInfo.getNote()))).
           append("\"]");
     }
     out.append(']');

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java Wed Dec 14 23:34:04 2011
@@ -18,19 +18,23 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
-import com.google.inject.Inject;
-import com.google.inject.servlet.RequestScoped;
+import static org.apache.hadoop.yarn.util.StringHelper.join;
 
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
-import static org.apache.hadoop.yarn.util.StringHelper.*;
+import com.google.inject.Inject;
+import com.google.inject.servlet.RequestScoped;
 
 class CapacitySchedulerPage extends RmView {
   static final String _Q = ".ui-state-default.ui-corner-all";
@@ -47,22 +51,21 @@ class CapacitySchedulerPage extends RmVi
 
   public static class QueueBlock extends HtmlBlock {
     final Parent parent;
+    final CapacitySchedulerInfo sinfo;
 
     @Inject QueueBlock(Parent parent) {
       this.parent = parent;
+      sinfo = new CapacitySchedulerInfo(parent.queue);
     }
 
     @Override
     public void render(Block html) {
       UL<Hamlet> ul = html.ul();
-      CSQueue parentQueue = parent.queue;
-      for (CSQueue queue : parentQueue.getChildQueues()) {
-        float used = queue.getUsedCapacity();
-        float set = queue.getCapacity();
+      for (CapacitySchedulerQueueInfo info : sinfo.getSubQueues()) {
+        float used = info.getUsedCapacity() / 100;
+        float set = info.getCapacity() / 100;
         float delta = Math.abs(set - used) + 0.001f;
-        float max = queue.getMaximumCapacity();
-        if (max < EPSILON || max > 1f) max = 1f;
-        //String absMaxPct = percent(queue.getAbsoluteMaximumCapacity());
+        float max = info.getMaxCapacity() / 100;
         LI<UL<Hamlet>> li = ul.
           li().
             a(_Q).$style(width(max * WIDTH_F)).
@@ -72,14 +75,16 @@ class CapacitySchedulerPage extends RmVi
               span().$style(join(width(delta/max), ';',
                 used > set ? OVER : UNDER, ';',
                 used > set ? left(set/max) : left(used/max)))._('.')._().
-              span(".q", queue.getQueuePath().substring(5))._();
-        if (queue instanceof ParentQueue) {
-          parent.queue = queue;
+              span(".q", info.getQueuePath().substring(5))._();
+        if (info.getQueue() instanceof ParentQueue) {
+          // this could be optimized better
+          parent.queue = info.getQueue();
           li.
             _(QueueBlock.class);
         }
         li._();
       }
+
       ul._();
     }
   }
@@ -111,8 +116,9 @@ class CapacitySchedulerPage extends RmVi
       } else {
         CSQueue root = cs.getRootQueue();
         parent.queue = root;
-        float used = root.getUsedCapacity();
-        float set = root.getCapacity();
+        CapacitySchedulerInfo sinfo = new CapacitySchedulerInfo(parent.queue);
+        float used = sinfo.getUsedCapacity() / 100;
+        float set = sinfo.getCapacity() / 100;
         float delta = Math.abs(set - used) + 0.001f;
         ul.
           li().

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java Wed Dec 14 23:34:04 2011
@@ -18,22 +18,20 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
-import com.google.inject.Inject;
+import static org.apache.hadoop.yarn.util.StringHelper.join;
 
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-import org.apache.hadoop.yarn.api.records.QueueInfo;
-import org.apache.hadoop.yarn.api.records.QueueState;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
-import static org.apache.hadoop.yarn.util.StringHelper.*;
+import com.google.inject.Inject;
 
 class DefaultSchedulerPage extends RmView {
   static final String _Q = ".ui-state-default.ui-corner-all";
@@ -44,66 +42,35 @@ class DefaultSchedulerPage extends RmVie
   static final float EPSILON = 1e-8f;
 
   static class QueueInfoBlock extends HtmlBlock {
-    final RMContext rmContext;
-    final FifoScheduler fs;
-    final String qName;
-    final QueueInfo qInfo;
+    final FifoSchedulerInfo sinfo;
 
     @Inject QueueInfoBlock(RMContext context, ViewContext ctx, ResourceManager rm) {
       super(ctx);
-      this.rmContext = context;
-
-      fs = (FifoScheduler) rm.getResourceScheduler();
-      qName = fs.getQueueInfo("",false,false).getQueueName();
-      qInfo = fs.getQueueInfo(qName,true,true);
+      sinfo = new FifoSchedulerInfo(rm);
     }
 
     @Override public void render(Block html) {
-      String minmemoryresource = 
-                Integer.toString(fs.getMinimumResourceCapability().getMemory());
-      String maxmemoryresource = 
-                Integer.toString(fs.getMaximumResourceCapability().getMemory());
-      String qstate = (qInfo.getQueueState() == QueueState.RUNNING) ?
-                       "Running" :
-                           (qInfo.getQueueState() == QueueState.STOPPED) ?
-                                  "Stopped" : "Unknown";
-
-      int usedNodeMem      = 0;
-      int availNodeMem     = 0;
-      int totNodeMem       = 0;
-      int nodeContainers   = 0;
-
-      for (RMNode ni : this.rmContext.getRMNodes().values()) {
-        SchedulerNodeReport report = fs.getNodeReport(ni.getNodeID());
-        usedNodeMem += report.getUsedResource().getMemory();
-        availNodeMem += report.getAvailableResource().getMemory();
-        totNodeMem += ni.getTotalCapability().getMemory();
-        nodeContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers();
-      }
-
-      info("\'" + qName + "\' Queue Status").
-        _("Queue State:" , qstate).
-        _("Minimum Queue Memory Capacity:" , minmemoryresource).
-        _("Maximum Queue Memory Capacity:" , maxmemoryresource).
-        _("Number of Nodes:" , Integer.toString(this.rmContext.getRMNodes().size())).
-        _("Used Node Capacity:" , Integer.toString(usedNodeMem)).
-        _("Available Node Capacity:" , Integer.toString(availNodeMem)).
-        _("Total Node Capacity:" , Integer.toString(totNodeMem)).
-        _("Number of Node Containers:" , Integer.toString(nodeContainers));
+      info("\'" + sinfo.getQueueName() + "\' Queue Status").
+        _("Queue State:" , sinfo.getState()).
+        _("Minimum Queue Memory Capacity:" , Integer.toString(sinfo.getMinQueueMemoryCapacity())).
+        _("Maximum Queue Memory Capacity:" , Integer.toString(sinfo.getMaxQueueMemoryCapacity())).
+        _("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())).
+        _("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())).
+        _("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())).
+        _("Total Node Capacity:" , Integer.toString(sinfo.getTotalNodeCapacity())).
+        _("Number of Node Containers:" , Integer.toString(sinfo.getNumContainers()));
 
       html._(InfoBlock.class);
     }
   }
 
   static class QueuesBlock extends HtmlBlock {
+    final FifoSchedulerInfo sinfo;
     final FifoScheduler fs;
-    final String qName;
-    final QueueInfo qInfo;
 
     @Inject QueuesBlock(ResourceManager rm) {
+      sinfo = new FifoSchedulerInfo(rm);
       fs = (FifoScheduler) rm.getResourceScheduler();
-      qName = fs.getQueueInfo("",false,false).getQueueName();
-      qInfo = fs.getQueueInfo(qName,false,false);
     }
 
     @Override
@@ -123,8 +90,8 @@ class DefaultSchedulerPage extends RmVie
               span().$style(Q_END)._("100% ")._().
               span(".q", "default")._()._();
       } else {
-        float used = qInfo.getCurrentCapacity();
-        float set = qInfo.getCapacity();
+        float used = sinfo.getUsedCapacity();
+        float set = sinfo.getCapacity();
         float delta = Math.abs(set - used) + 0.001f;
         ul.
           li().
@@ -133,7 +100,7 @@ class DefaultSchedulerPage extends RmVie
               span().$style(Q_END)._("100%")._().
               span().$style(join(width(delta), ';', used > set ? OVER : UNDER,
                 ';', used > set ? left(set) : left(used)))._(".")._().
-              span(".q", qName)._().
+              span(".q", sinfo.getQueueName())._().
             _(QueueInfoBlock.class)._();
       }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java Wed Dec 14 23:34:04 2011
@@ -19,11 +19,11 @@
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo;
+
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
@@ -36,12 +36,12 @@ import com.google.inject.Inject;
  * current user is using on the cluster.
  */
 public class MetricsOverviewTable extends HtmlBlock {
-  private static final long BYTES_IN_GB = 1024 * 1024 * 1024;
-  
+  private static final long BYTES_IN_MB = 1024 * 1024;
+
   private final RMContext rmContext;
   private final ResourceManager rm;
 
-  @Inject 
+  @Inject
   MetricsOverviewTable(RMContext context, ResourceManager rm, ViewContext ctx) {
     super(ctx);
     this.rmContext = context;
@@ -55,22 +55,7 @@ public class MetricsOverviewTable extend
     //CSS in the correct spot
     html.style(".metrics {margin-bottom:5px}"); 
     
-    ResourceScheduler rs = rm.getResourceScheduler();
-    QueueMetrics metrics = rs.getRootQueueMetrics();
-    ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
-    
-    int appsSubmitted = metrics.getAppsSubmitted();
-    int reservedGB = metrics.getReservedGB();
-    int availableGB = metrics.getAvailableGB();
-    int allocatedGB = metrics.getAllocatedGB();
-    int containersAllocated = metrics.getAllocatedContainers();
-    int totalGB = availableGB + reservedGB + allocatedGB;
-
-    int totalNodes = clusterMetrics.getNumNMs();
-    int lostNodes = clusterMetrics.getNumLostNMs();
-    int unhealthyNodes = clusterMetrics.getUnhealthyNMs();
-    int decommissionedNodes = clusterMetrics.getNumDecommisionedNMs();
-    int rebootedNodes = clusterMetrics.getNumRebootedNMs();
+    ClusterMetricsInfo clusterMetrics = new ClusterMetricsInfo(this.rm, this.rmContext);
 
     
     DIV<Hamlet> div = html.div().$class("metrics");
@@ -92,30 +77,23 @@ public class MetricsOverviewTable extend
     _().
     tbody().$class("ui-widget-content").
       tr().
-        td(String.valueOf(appsSubmitted)).
-        td(String.valueOf(containersAllocated)).
-        td(StringUtils.byteDesc(allocatedGB * BYTES_IN_GB)).
-        td(StringUtils.byteDesc(totalGB * BYTES_IN_GB)).
-        td(StringUtils.byteDesc(reservedGB * BYTES_IN_GB)).
-        td().a(url("nodes"),String.valueOf(totalNodes))._(). 
-        td().a(url("nodes/decommissioned"),String.valueOf(decommissionedNodes))._(). 
-        td().a(url("nodes/lost"),String.valueOf(lostNodes))._().
-        td().a(url("nodes/unhealthy"),String.valueOf(unhealthyNodes))._().
-        td().a(url("nodes/rebooted"),String.valueOf(rebootedNodes))._().
+        td(String.valueOf(clusterMetrics.getAppsSubmitted())).
+        td(String.valueOf(clusterMetrics.getContainersAllocated())).
+        td(StringUtils.byteDesc(clusterMetrics.getAllocatedMB() * BYTES_IN_MB)).
+        td(StringUtils.byteDesc(clusterMetrics.getTotalMB() * BYTES_IN_MB)).
+        td(StringUtils.byteDesc(clusterMetrics.getReservedMB() * BYTES_IN_MB)).
+        td().a(url("nodes"),String.valueOf(clusterMetrics.getTotalNodes()))._().
+        td().a(url("nodes/decommissioned"),String.valueOf(clusterMetrics.getDecommissionedNodes()))._().
+        td().a(url("nodes/lost"),String.valueOf(clusterMetrics.getLostNodes()))._().
+        td().a(url("nodes/unhealthy"),String.valueOf(clusterMetrics.getUnhealthyNodes()))._().
+        td().a(url("nodes/rebooted"),String.valueOf(clusterMetrics.getRebootedNodes()))._().
       _().
     _()._();
-    
+
     String user = request().getRemoteUser();
     if (user != null) {
-      QueueMetrics userMetrics = metrics.getUserMetrics(user);
-      if(userMetrics != null) {
-        int myAppsSubmitted = userMetrics.getAppsSubmitted();
-        int myRunningContainers = userMetrics.getAllocatedContainers();
-        int myPendingContainers = userMetrics.getPendingContainers();
-        int myReservedContainers = userMetrics.getReservedContainers();
-        int myReservedGB = userMetrics.getReservedGB();
-        int myPendingGB = userMetrics.getPendingGB();
-        int myAllocatedGB = userMetrics.getAllocatedGB();
+      UserMetricsInfo userMetrics = new UserMetricsInfo(this.rm, this.rmContext, user);
+      if (userMetrics.metricsAvailable()) {
         div.table("#usermetricsoverview").
         thead().$class("ui-widget-header").
           tr().
@@ -130,13 +108,13 @@ public class MetricsOverviewTable extend
         _().
         tbody().$class("ui-widget-content").
           tr().
-            td(String.valueOf(myAppsSubmitted)).
-            td(String.valueOf(myRunningContainers)).
-            td(String.valueOf(myPendingContainers)).
-            td(String.valueOf(myReservedContainers)).
-            td(StringUtils.byteDesc(myAllocatedGB * BYTES_IN_GB)).
-            td(StringUtils.byteDesc(myPendingGB * BYTES_IN_GB)).
-            td(StringUtils.byteDesc(myReservedGB * BYTES_IN_GB)).
+            td(String.valueOf(userMetrics.getAppsSubmitted())).
+            td(String.valueOf(userMetrics.getRunningContainers())).
+            td(String.valueOf(userMetrics.getPendingContainers())).
+            td(String.valueOf(userMetrics.getReservedContainers())).
+            td(StringUtils.byteDesc(userMetrics.getAllocatedMB() * BYTES_IN_MB)).
+            td(StringUtils.byteDesc(userMetrics.getPendingMB() * BYTES_IN_MB)).
+            td(StringUtils.byteDesc(userMetrics.getReservedMB() * BYTES_IN_MB)).
           _().
         _()._();
       }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java Wed Dec 14 23:34:04 2011
@@ -25,14 +25,12 @@ import static org.apache.hadoop.yarn.web
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
-import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@@ -45,9 +43,9 @@ import com.google.inject.Inject;
 class NodesPage extends RmView {
 
   static class NodesBlock extends HtmlBlock {
-    private static final long BYTES_IN_MB = 1024 * 1024;
     final RMContext rmContext;
     final ResourceManager rm;
+    private static final long BYTES_IN_MB = 1024 * 1024;
 
     @Inject
     NodesBlock(RMContext context, ResourceManager rm, ViewContext ctx) {
@@ -59,7 +57,7 @@ class NodesPage extends RmView {
     @Override
     protected void render(Block html) {
       html._(MetricsOverviewTable.class);
-      
+
       ResourceScheduler sched = rm.getResourceScheduler();
       String type = $(NODE_STATE);
       TBODY<TABLE<Hamlet>> tbody = html.table("#nodes").
@@ -88,27 +86,18 @@ class NodesPage extends RmView {
             continue;
           }
         }
-        NodeId id = ni.getNodeID();
-        SchedulerNodeReport report = sched.getNodeReport(id);
-        int numContainers = 0;
-        int usedMemory = 0;
-        int availableMemory = 0;
-        if(report != null) {
-          numContainers = report.getNumContainers();
-          usedMemory = report.getUsedResource().getMemory();
-          availableMemory = report.getAvailableResource().getMemory();
-        }
-
-        NodeHealthStatus health = ni.getNodeHealthStatus();
+        NodeInfo info = new NodeInfo(ni, sched);
+        int usedMemory = (int)info.getUsedMemory();
+        int availableMemory = (int)info.getAvailableMemory();
         tbody.tr().
-            td(ni.getRackName()).
-            td(String.valueOf(ni.getState())).
-            td(String.valueOf(ni.getNodeID().toString())).
-            td().a("http://" + ni.getHttpAddress(), ni.getHttpAddress())._().
-            td(health.getIsNodeHealthy() ? "Healthy" : "Unhealthy").
-            td(Times.format(health.getLastHealthReportTime())).
-            td(String.valueOf(health.getHealthReport())).
-            td(String.valueOf(numContainers)).
+            td(info.getRack()).
+            td(info.getState()).
+            td(info.getNodeId()).
+            td().a("http://" + info.getNodeHTTPAddress(), info.getNodeHTTPAddress())._().
+            td(info.getHealthStatus()).
+            td(Times.format(info.getLastHealthUpdate())).
+            td(info.getHealthReport()).
+            td(String.valueOf(info.getNumContainers())).
             td().br().$title(String.valueOf(usedMemory))._().
               _(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._().
             td().br().$title(String.valueOf(usedMemory))._().

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java Wed Dec 14 23:34:04 2011
@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.uti
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.WebApp;
 
 /**
@@ -41,6 +42,9 @@ public class RMWebApp extends WebApp {
 
   @Override
   public void setup() {
+    bind(JAXBContextResolver.class);
+    bind(RMWebServices.class);
+    bind(GenericExceptionHandler.class);
     if (rm != null) {
       bind(ResourceManager.class).toInstance(rm);
       bind(RMContext.class).toInstance(rm.getRMContext());

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java Wed Dec 14 23:34:04 2011
@@ -26,17 +26,16 @@ import javax.servlet.http.HttpServletRes
 
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
-import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.Apps;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.Controller;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
@@ -73,13 +72,14 @@ public class RmController extends Contro
     }
     ApplicationId appID = Apps.toAppID(aid);
     RMContext context = getInstance(RMContext.class);
-    RMApp app = context.getRMApps().get(appID);
-    if (app == null) {
+    RMApp rmApp = context.getRMApps().get(appID);
+    if (rmApp == null) {
       // TODO: handle redirect to jobhistory server
       setStatus(HttpServletResponse.SC_NOT_FOUND);
       setTitle("Application not found: "+ aid);
       return;
     }
+    AppInfo app = new AppInfo(rmApp, true);
 
     // Check for the authorization.
     String remoteUser = request().getRemoteUser();
@@ -98,32 +98,22 @@ public class RmController extends Contro
     }
 
     setTitle(join("Application ", aid));
-    String trackingUrl = app.getTrackingUrl();
-    boolean trackingUrlIsNotReady = trackingUrl == null
-        || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl);
-    String ui = trackingUrlIsNotReady ? "UNASSIGNED" :
-        (app.getFinishTime() == 0 ? "ApplicationMaster" : "History");
 
     ResponseInfo info = info("Application Overview").
       _("User:", app.getUser()).
       _("Name:", app.getName()).
-      _("State:", app.getState().toString()).
-      _("FinalStatus:", app.getFinalApplicationStatus().toString()).
+      _("State:", app.getState()).
+      _("FinalStatus:", app.getFinalStatus()).
       _("Started:", Times.format(app.getStartTime())).
       _("Elapsed:", StringUtils.formatTime(
         Times.elapsed(app.getStartTime(), app.getFinishTime()))).
-      _("Tracking URL:", trackingUrlIsNotReady ?
-        "#" : join("http://", trackingUrl), ui).
-      _("Diagnostics:", app.getDiagnostics());
-    Container masterContainer = app.getCurrentAppAttempt()
-        .getMasterContainer();
-    if (masterContainer != null) {
-      String url = join("http://", masterContainer.getNodeHttpAddress(),
-          "/node", "/containerlogs/",
-          ConverterUtils.toString(masterContainer.getId()));
-      info._("AM container logs:", url, url);
+      _("Tracking URL:", !app.isTrackingUrlReady() ?
+        "#" : app.getTrackingUrlPretty(), app.getTrackingUI()).
+      _("Diagnostics:", app.getNote());
+    if (app.amContainerLogsExist()) {
+      info._("AM container logs:", app.getAMContainerLogs(), app.getAMContainerLogs());
     } else {
-      info._("AM container logs:", "AM not yet registered with RM");
+      info._("AM container logs:", "");
     }
     render(AppPage.class);
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java Wed Dec 14 23:34:04 2011
@@ -30,7 +30,9 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
@@ -40,12 +42,16 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 
-
 public class MockRM extends ResourceManager {
 
   public MockRM() {
@@ -59,48 +65,50 @@ public class MockRM extends ResourceMana
     rootLogger.setLevel(Level.DEBUG);
   }
 
-  public void waitForState(ApplicationId appId, RMAppState finalState) 
+  public void waitForState(ApplicationId appId, RMAppState finalState)
       throws Exception {
     RMApp app = getRMContext().getRMApps().get(appId);
     Assert.assertNotNull("app shouldn't be null", app);
     int timeoutSecs = 0;
-    while (!finalState.equals(app.getState()) &&
-        timeoutSecs++ < 20) {
-      System.out.println("App State is : " + app.getState() +
-          " Waiting for state : " + finalState);
+    while (!finalState.equals(app.getState()) && timeoutSecs++ < 20) {
+      System.out.println("App State is : " + app.getState()
+          + " Waiting for state : " + finalState);
       Thread.sleep(500);
     }
     System.out.println("App State is : " + app.getState());
-    Assert.assertEquals("App state is not correct (timedout)",
-        finalState, app.getState());
+    Assert.assertEquals("App state is not correct (timedout)", finalState,
+        app.getState());
   }
-  
-  // get new application id 
+
+  // get new application id
   public GetNewApplicationResponse getNewAppId() throws Exception {
     ClientRMProtocol client = getClientRMService();
-    return client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));	  
+    return client.getNewApplication(Records
+        .newRecord(GetNewApplicationRequest.class));
   }
 
-  //client
+  // client
   public RMApp submitApp(int masterMemory) throws Exception {
     ClientRMProtocol client = getClientRMService();
-    GetNewApplicationResponse resp = client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));
+    GetNewApplicationResponse resp = client.getNewApplication(Records
+        .newRecord(GetNewApplicationRequest.class));
     ApplicationId appId = resp.getApplicationId();
-    
-    SubmitApplicationRequest req = Records.newRecord(SubmitApplicationRequest.class);
-    ApplicationSubmissionContext sub = 
-        Records.newRecord(ApplicationSubmissionContext.class);
+
+    SubmitApplicationRequest req = Records
+        .newRecord(SubmitApplicationRequest.class);
+    ApplicationSubmissionContext sub = Records
+        .newRecord(ApplicationSubmissionContext.class);
     sub.setApplicationId(appId);
     sub.setApplicationName("");
     sub.setUser("");
-    ContainerLaunchContext clc = 
-        Records.newRecord(ContainerLaunchContext.class);
-    Resource capability = Records.newRecord(Resource.class);    
+    ContainerLaunchContext clc = Records
+        .newRecord(ContainerLaunchContext.class);
+    Resource capability = Records.newRecord(Resource.class);
     capability.setMemory(masterMemory);
     clc.setResource(capability);
     sub.setAMContainerSpec(clc);
     req.setApplicationSubmissionContext(sub);
-    
+
     client.submitApplication(req);
     // make sure app is immediately available after submit
     waitForState(appId, RMAppState.ACCEPTED);
@@ -113,28 +121,54 @@ public class MockRM extends ResourceMana
     return nm;
   }
 
+  public void sendNodeStarted(MockNM nm) throws Exception {
+    RMNodeImpl node = (RMNodeImpl) getRMContext().getRMNodes().get(
+        nm.getNodeId());
+    node.handle(new RMNodeEvent(nm.getNodeId(), RMNodeEventType.STARTED));
+  }
+
+  public void NMwaitForState(NodeId nodeid, RMNodeState finalState)
+      throws Exception {
+    RMNode node = getRMContext().getRMNodes().get(nodeid);
+    Assert.assertNotNull("node shouldn't be null", node);
+    int timeoutSecs = 0;
+    while (!finalState.equals(node.getState()) && timeoutSecs++ < 20) {
+      System.out.println("Node State is : " + node.getState()
+          + " Waiting for state : " + finalState);
+      Thread.sleep(500);
+    }
+    System.out.println("Node State is : " + node.getState());
+    Assert.assertEquals("Node state is not correct (timedout)", finalState,
+        node.getState());
+  }
+
   public void killApp(ApplicationId appId) throws Exception {
     ClientRMProtocol client = getClientRMService();
-    KillApplicationRequest req = Records.newRecord(KillApplicationRequest.class);
+    KillApplicationRequest req = Records
+        .newRecord(KillApplicationRequest.class);
     req.setApplicationId(appId);
     client.forceKillApplication(req);
   }
 
-  //from AMLauncher
-  public MockAM sendAMLaunched(ApplicationAttemptId appAttemptId) throws Exception {
+  // from AMLauncher
+  public MockAM sendAMLaunched(ApplicationAttemptId appAttemptId)
+      throws Exception {
     MockAM am = new MockAM(getRMContext(), masterService, appAttemptId);
     am.waitForState(RMAppAttemptState.ALLOCATED);
-    getRMContext().getDispatcher().getEventHandler().handle(
-        new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.LAUNCHED));
+    getRMContext()
+        .getDispatcher()
+        .getEventHandler()
+        .handle(
+            new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.LAUNCHED));
     return am;
   }
 
-  
-  public void sendAMLaunchFailed(ApplicationAttemptId appAttemptId) throws Exception {
+  public void sendAMLaunchFailed(ApplicationAttemptId appAttemptId)
+      throws Exception {
     MockAM am = new MockAM(getRMContext(), masterService, appAttemptId);
     am.waitForState(RMAppAttemptState.ALLOCATED);
-    getRMContext().getDispatcher().getEventHandler().handle(
-        new RMAppAttemptLaunchFailedEvent(appAttemptId, "Failed"));
+    getRMContext().getDispatcher().getEventHandler()
+        .handle(new RMAppAttemptLaunchFailedEvent(appAttemptId, "Failed"));
   }
 
   @Override
@@ -143,8 +177,9 @@ public class MockRM extends ResourceMana
         rmAppManager, applicationACLsManager) {
       @Override
       public void start() {
-        //override to not start rpc handler
+        // override to not start rpc handler
       }
+
       @Override
       public void stop() {
         // don't do anything
@@ -155,11 +190,12 @@ public class MockRM extends ResourceMana
   @Override
   protected ResourceTrackerService createResourceTrackerService() {
     return new ResourceTrackerService(getRMContext(), nodesListManager,
-        this.nmLivelinessMonitor, this.containerTokenSecretManager){
+        this.nmLivelinessMonitor, this.containerTokenSecretManager) {
       @Override
       public void start() {
-        //override to not start rpc handler
+        // override to not start rpc handler
       }
+
       @Override
       public void stop() {
         // don't do anything
@@ -173,8 +209,9 @@ public class MockRM extends ResourceMana
         this.appTokenSecretManager, scheduler) {
       @Override
       public void start() {
-        //override to not start rpc handler
+        // override to not start rpc handler
       }
+
       @Override
       public void stop() {
         // don't do anything
@@ -184,17 +221,18 @@ public class MockRM extends ResourceMana
 
   @Override
   protected ApplicationMasterLauncher createAMLauncher() {
-    return new ApplicationMasterLauncher(
-        this.appTokenSecretManager, this.clientToAMSecretManager,
-        getRMContext()) {
+    return new ApplicationMasterLauncher(this.appTokenSecretManager,
+        this.clientToAMSecretManager, getRMContext()) {
       @Override
       public void start() {
-        //override to not start rpc handler
+        // override to not start rpc handler
       }
+
       @Override
-      public void  handle(AMLauncherEvent appEvent) {
-        //don't do anything
+      public void handle(AMLauncherEvent appEvent) {
+        // don't do anything
       }
+
       @Override
       public void stop() {
         // don't do anything
@@ -203,31 +241,31 @@ public class MockRM extends ResourceMana
   }
 
   @Override
-  protected AdminService createAdminService(
-      ClientRMService clientRMService, 
+  protected AdminService createAdminService(ClientRMService clientRMService,
       ApplicationMasterService applicationMasterService,
       ResourceTrackerService resourceTrackerService) {
-    return new AdminService(
-        getConfig(), scheduler, getRMContext(), this.nodesListManager,
-        clientRMService, applicationMasterService, resourceTrackerService){
+    return new AdminService(getConfig(), scheduler, getRMContext(),
+        this.nodesListManager, clientRMService, applicationMasterService,
+        resourceTrackerService) {
       @Override
       public void start() {
-        //override to not start rpc handler
+        // override to not start rpc handler
       }
+
       @Override
       public void stop() {
         // don't do anything
       }
     };
   }
-  
+
   public NodesListManager getNodesListManager() {
     return this.nodesListManager;
   }
 
   @Override
   protected void startWepApp() {
-    //override to disable webapp
+    // override to disable webapp
   }
 
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java Wed Dec 14 23:34:04 2011
@@ -284,6 +284,11 @@ public abstract class MockAsm extends Mo
       public FinalApplicationStatus getFinalApplicationStatus() {
         return FinalApplicationStatus.UNDEFINED;
       }
+
+      @Override
+      public RMAppAttempt getCurrentAppAttempt() {
+        return null;
+      }
       
     };
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java?rev=1214546&r1=1214545&r2=1214546&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java Wed Dec 14 23:34:04 2011
@@ -20,12 +20,12 @@ package org.apache.hadoop.yarn.server.re
 
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Collections;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,6 +44,7 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -328,7 +329,7 @@ public class TestDelegationTokenRenewer 
 
     ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1);
     delegationTokenRenewer.addApplication(applicationId_1, ts, true);
-    delegationTokenRenewer.removeApplication(applicationId_1);
+    delegationTokenRenewer.applicationFinished(applicationId_1);
     
     numberOfExpectedRenewals = Renewer.counter; // number of renewals so far
     try {
@@ -343,7 +344,7 @@ public class TestDelegationTokenRenewer 
     // also renewing of the cancelled token should fail
     try {
       token4.renew(conf);
-      assertTrue("Renewal of canceled token didn't fail", false);
+      fail("Renewal of cancelled token should have failed");
     } catch (InvalidToken ite) {
       //expected
     }
@@ -377,7 +378,7 @@ public class TestDelegationTokenRenewer 
 
     ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1);
     delegationTokenRenewer.addApplication(applicationId_1, ts, false);
-    delegationTokenRenewer.removeApplication(applicationId_1);
+    delegationTokenRenewer.applicationFinished(applicationId_1);
     
     int numberOfExpectedRenewals = Renewer.counter; // number of renewals so far
     try {
@@ -393,4 +394,123 @@ public class TestDelegationTokenRenewer 
     // been canceled
     token1.renew(conf);
   }
+  
+  /**
+   * Basic idea of the test:
+   * 0. Setup token KEEP_ALIVE
+   * 1. create tokens.
+   * 2. register them for renewal - to be cancelled on app complete
+   * 3. Complete app.
+   * 4. Verify token is alive within the KEEP_ALIVE time
+   * 5. Verify token has been cancelled after the KEEP_ALIVE_TIME
+   * @throws IOException
+   * @throws URISyntaxException
+   */
+  @Test
+  public void testDTKeepAlive1 () throws Exception {
+    DelegationTokenRenewer localDtr = new DelegationTokenRenewer();
+    Configuration lconf = new Configuration(conf);
+    lconf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
+    //Keep tokens alive for 6 seconds.
+    lconf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 6000l);
+    //Try removing tokens every second.
+    lconf.setLong(
+        YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS,
+        1000l);
+    localDtr.init(lconf);
+    localDtr.start();
+    
+    MyFS dfs = (MyFS)FileSystem.get(lconf);
+    LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+lconf.hashCode());
+    
+    Credentials ts = new Credentials();
+    // get the delegation tokens
+    MyToken token1 = dfs.getDelegationToken(new Text("user1"));
+
+    String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0";
+    ts.addToken(new Text(nn1), token1);
+
+    // register the tokens for renewal
+    ApplicationId applicationId_0 =  BuilderUtils.newApplicationId(0, 0);
+    localDtr.addApplication(applicationId_0, ts, true);
+    localDtr.applicationFinished(applicationId_0);
+ 
+    Thread.sleep(3000l);
+
+    //Token should still be around. Renewal should not fail.
+    token1.renew(lconf);
+
+    //Allow the keepalive time to run out
+    Thread.sleep(6000l);
+
+    //The token should have been cancelled at this point. Renewal will fail.
+    try {
+      token1.renew(lconf);
+      fail("Renewal of cancelled token should have failed");
+    } catch (InvalidToken ite) {}
+  }
+
+  /**
+   * Basic idea of the test:
+   * 0. Setup token KEEP_ALIVE
+   * 1. create tokens.
+   * 2. register them for renewal - to be cancelled on app complete
+   * 3. Complete app.
+   * 4. Verify token is alive within the KEEP_ALIVE time
+   * 5. Send an explicit KEEP_ALIVE_REQUEST
+   * 6. Verify token KEEP_ALIVE time is renewed.
+   * 7. Verify token has been cancelled after the renewed KEEP_ALIVE_TIME.
+   * @throws IOException
+   * @throws URISyntaxException
+   */
+  @Test
+  public void testDTKeepAlive2() throws Exception {
+    DelegationTokenRenewer localDtr = new DelegationTokenRenewer();
+    Configuration lconf = new Configuration(conf);
+    lconf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
+    //Keep tokens alive for 6 seconds.
+    lconf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 6000l);
+    //Try removing tokens every second.
+    lconf.setLong(
+        YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS,
+        1000l);
+    localDtr.init(lconf);
+    localDtr.start();
+    
+    MyFS dfs = (MyFS)FileSystem.get(lconf);
+    LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+lconf.hashCode());
+
+    Credentials ts = new Credentials();
+    // get the delegation tokens
+    MyToken token1 = dfs.getDelegationToken(new Text("user1"));
+    
+    String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0";
+    ts.addToken(new Text(nn1), token1);
+
+    // register the tokens for renewal
+    ApplicationId applicationId_0 =  BuilderUtils.newApplicationId(0, 0);
+    localDtr.addApplication(applicationId_0, ts, true);
+    localDtr.applicationFinished(applicationId_0);
+
+    Thread.sleep(4000l);
+
+    //Send another keep alive.
+    localDtr.updateKeepAliveApplications(Collections
+        .singletonList(applicationId_0));
+    //Renewal should not fail.
+    token1.renew(lconf);
+
+    //Token should be around after this. 
+    Thread.sleep(4500l);
+    //Renewal should not fail. - ~1.5 seconds for keepalive timeout.
+    token1.renew(lconf);
+
+    //Allow the keepalive time to run out
+    Thread.sleep(3000l);
+    //The token should have been cancelled at this point. Renewal will fail.
+    try {
+      token1.renew(lconf);
+      fail("Renewal of cancelled token should have failed");
+    } catch (InvalidToken ite) {}
+  }
 }