Posted to mapreduce-commits@hadoop.apache.org by st...@apache.org on 2009/11/28 21:26:22 UTC

svn commit: r885145 [17/34] - in /hadoop/mapreduce/branches/MAPREDUCE-233: ./ .eclipse.templates/ .eclipse.templates/.launches/ conf/ ivy/ lib/ src/benchmarks/gridmix/ src/benchmarks/gridmix/pipesort/ src/benchmarks/gridmix2/ src/benchmarks/gridmix2/sr...

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java Sat Nov 28 20:26:01 2009
@@ -25,6 +25,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
 
 /**
@@ -71,7 +72,7 @@
   @Override
   public synchronized void setConf(Configuration conf) {
     super.setConf(conf);
-    padFraction = conf.getFloat("mapred.jobtracker.taskalloc.capacitypad", 
+    padFraction = conf.getFloat(JTConfig.JT_TASK_ALLOC_PAD_FRACTION, 
                                  0.01f);
     this.eagerTaskInitializationListener =
       new EagerTaskInitializationListener(conf);
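
The hunk above replaces the literal configuration key "mapred.jobtracker.taskalloc.capacitypad" with the JTConfig constant imported at the top of the file. As a rough sketch of how a scheduler or a test might read the padding fraction through that constant (only the constant, the Configuration getFloat/setFloat calls, and the 0.01f default come from the diff; the 0.05f override and the class below are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;

    public class PadFractionExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical override of the padding fraction; without it the
        // scheduler falls back to the 0.01f default seen in setConf().
        conf.setFloat(JTConfig.JT_TASK_ALLOC_PAD_FRACTION, 0.05f);

        // Same lookup JobQueueTaskScheduler.setConf() now performs, keyed
        // by the JTConfig constant instead of the old literal string.
        float padFraction =
            conf.getFloat(JTConfig.JT_TASK_ALLOC_PAD_FRACTION, 0.01f);
        System.out.println("capacity pad fraction = " + padFraction);
      }
    }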

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobStatus.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobStatus.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobStatus.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobStatus.java Sat Nov 28 20:26:01 2009
@@ -17,41 +17,32 @@
  */
 package org.apache.hadoop.mapred;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableUtils;
-
 /**************************************************
  * Describes the current status of a job.  This is
  * not intended to be a comprehensive piece of data.
  * For that, look at JobProfile.
- **************************************************/
-public class JobStatus implements Writable, Cloneable {
-
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (JobStatus.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new JobStatus(); }
-       });
-  }
-
-  public static final int RUNNING = 1;
-  public static final int SUCCEEDED = 2;
-  public static final int FAILED = 3;
-  public static final int PREP = 4;
-  public static final int KILLED = 5;
+ *************************************************
+ *@deprecated Use {@link org.apache.hadoop.mapreduce.JobStatus} instead
+ **/
+@Deprecated
+public class JobStatus extends org.apache.hadoop.mapreduce.JobStatus {
+
+  public static final int RUNNING = 
+    org.apache.hadoop.mapreduce.JobStatus.State.RUNNING.getValue();
+  public static final int SUCCEEDED = 
+    org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED.getValue();
+  public static final int FAILED = 
+    org.apache.hadoop.mapreduce.JobStatus.State.FAILED.getValue();
+  public static final int PREP = 
+    org.apache.hadoop.mapreduce.JobStatus.State.PREP.getValue();
+  public static final int KILLED = 
+    org.apache.hadoop.mapreduce.JobStatus.State.KILLED.getValue();
 
   private static final String UNKNOWN = "UNKNOWN";
-  private static final String[] runStates =
-      {UNKNOWN, "RUNNING", "SUCCEEDED", "FAILED", "PREP", "KILLED"};
   
+  private static final String[] runStates =
+    {UNKNOWN, "RUNNING", "SUCCEEDED", "FAILED", "PREP", "KILLED"};
+
   /**
    * Helper method to get human-readable state of the job.
    * @param state job state
@@ -64,25 +55,17 @@
     return runStates[state];
   }
   
-  private JobID jobid;
-  private float mapProgress;
-  private float reduceProgress;
-  private float cleanupProgress;
-  private float setupProgress;
-  private int runState;
-  private long startTime;
-  private String user;
-  private JobPriority priority;
-  private String schedulingInfo="NA";
-
-  private String jobName;
-  private String jobFile;
-  private long finishTime;
-  private boolean isRetired;
-  private String historyFile = "";
-  private String trackingUrl ="";
-
-    
+  static org.apache.hadoop.mapreduce.JobStatus.State getEnum(int state) {
+    switch (state) {
+      case 1: return org.apache.hadoop.mapreduce.JobStatus.State.RUNNING;
+      case 2: return org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED;
+      case 3: return org.apache.hadoop.mapreduce.JobStatus.State.FAILED;
+      case 4: return org.apache.hadoop.mapreduce.JobStatus.State.PREP;
+      case 5: return org.apache.hadoop.mapreduce.JobStatus.State.KILLED;
+    }
+    return null;
+  }
+  
   /**
    */
   public JobStatus() {
@@ -164,295 +147,179 @@
                     float reduceProgress, float cleanupProgress, 
                     int runState, JobPriority jp, String user, String jobName, 
                     String jobFile, String trackingUrl) {
-     this.jobid = jobid;
-     this.setupProgress = setupProgress;
-     this.mapProgress = mapProgress;
-     this.reduceProgress = reduceProgress;
-     this.cleanupProgress = cleanupProgress;
-     this.runState = runState;
-     this.user = user;
-     if (jp == null) {
-       throw new IllegalArgumentException("Job Priority cannot be null.");
-     }
-     priority = jp;
-     this.jobName = jobName;
-     this.jobFile = jobFile;
-     this.trackingUrl = trackingUrl;
+     super(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress,
+       getEnum(runState), org.apache.hadoop.mapreduce.JobPriority.valueOf(jp.name()),
+       user, jobName, jobFile, trackingUrl);
    }
    
+  public static JobStatus downgrade(org.apache.hadoop.mapreduce.JobStatus stat){
+    JobStatus old = new JobStatus(JobID.downgrade(stat.getJobID()),
+      stat.getSetupProgress(), stat.getMapProgress(), stat.getReduceProgress(),
+      stat.getCleanupProgress(), stat.getState().getValue(), 
+      JobPriority.valueOf(stat.getPriority().name()),
+      stat.getUsername(), stat.getJobName(), stat.getJobFile(),
+      stat.getTrackingUrl());
+    old.setStartTime(stat.getStartTime());
+    old.setFinishTime(stat.getFinishTime());
+    old.setSchedulingInfo(stat.getSchedulingInfo());
+    old.setHistoryFile(stat.getHistoryFile());
+    return old;
+  }
   /**
    * @deprecated use getJobID instead
    */
   @Deprecated
-  public String getJobId() { return jobid.toString(); }
+  public String getJobId() { return getJobID().toString(); }
   
   /**
    * @return The jobid of the Job
    */
-  public JobID getJobID() { return jobid; }
-    
-  /**
-   * @return Percentage of progress in maps 
-   */
-  public synchronized float mapProgress() { return mapProgress; }
-    
-  /**
-   * Sets the map progress of this job
-   * @param p The value of map progress to set to
-   */
-  synchronized void setMapProgress(float p) { 
-    this.mapProgress = (float) Math.min(1.0, Math.max(0.0, p)); 
-  }
-
-  /**
-   * @return Percentage of progress in cleanup 
-   */
-  public synchronized float cleanupProgress() { return cleanupProgress; }
-    
-  /**
-   * Sets the cleanup progress of this job
-   * @param p The value of cleanup progress to set to
-   */
-  synchronized void setCleanupProgress(float p) { 
-    this.cleanupProgress = (float) Math.min(1.0, Math.max(0.0, p)); 
-  }
-
-  /**
-   * @return Percentage of progress in setup 
-   */
-  public synchronized float setupProgress() { return setupProgress; }
-    
-  /**
-   * Sets the setup progress of this job
-   * @param p The value of setup progress to set to
-   */
-  synchronized void setSetupProgress(float p) { 
-    this.setupProgress = (float) Math.min(1.0, Math.max(0.0, p)); 
-  }
-
-  /**
-   * @return Percentage of progress in reduce 
-   */
-  public synchronized float reduceProgress() { return reduceProgress; }
-    
-  /**
-   * Sets the reduce progress of this Job
-   * @param p The value of reduce progress to set to
-   */
-  synchronized void setReduceProgress(float p) { 
-    this.reduceProgress = (float) Math.min(1.0, Math.max(0.0, p)); 
-  }
-    
-  /**
-   * @return running state of the job
-   */
-  public synchronized int getRunState() { return runState; }
-    
-  /**
-   * Change the current run state of the job.
-   */
-  public synchronized void setRunState(int state) {
-    this.runState = state;
-  }
-
-  /** 
-   * Set the start time of the job
-   * @param startTime The startTime of the job
-   */
-  synchronized void setStartTime(long startTime) { this.startTime = startTime;}
-    
-  /**
-   * @return start time of the job
-   */
-  synchronized public long getStartTime() { return startTime;}
-
-  @Override
-  public Object clone() {
-    try {
-      return super.clone();
-    } catch (CloneNotSupportedException cnse) {
-      // Shouldn't happen since we do implement Cloneable
-      throw new InternalError(cnse.toString());
-    }
-  }
-  
-  /**
-   * @param user The username of the job
-   */
-  synchronized void setUsername(String userName) { this.user = userName;}
-
-  /**
-   * @return the username of the job
-   */
-  public synchronized String getUsername() { return this.user;}
-  
-  /**
-   * Gets the Scheduling information associated to a particular Job.
-   * @return the scheduling information of the job
-   */
-  public synchronized String getSchedulingInfo() {
-   return schedulingInfo;
-  }
-
-  /**
-   * Used to set the scheduling information associated to a particular Job.
-   * 
-   * @param schedulingInfo Scheduling information of the job
-   */
-  public synchronized void setSchedulingInfo(String schedulingInfo) {
-    this.schedulingInfo = schedulingInfo;
-  }
+  public JobID getJobID() { return JobID.downgrade(super.getJobID()); }
   
   /**
    * Return the priority of the job
    * @return job priority
    */
-   public synchronized JobPriority getJobPriority() { return priority; }
-  
-  /**
-   * Set the priority of the job, defaulting to NORMAL.
-   * @param jp new job priority
-   */
-   public synchronized void setJobPriority(JobPriority jp) {
-     if (jp == null) {
-       throw new IllegalArgumentException("Job priority cannot be null.");
-     }
-     priority = jp;
+   public synchronized JobPriority getJobPriority() { 
+     return JobPriority.valueOf(super.getPriority().name());
    }
-  
+
    /**
-    * Returns true if the status is for a completed job.
+    * Sets the map progress of this job
+    * @param p The value of map progress to set to
     */
-   public synchronized boolean isJobComplete() {
-     return (runState == JobStatus.SUCCEEDED || runState == JobStatus.FAILED 
-             || runState == JobStatus.KILLED);
-   }
-
-  ///////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////
-  public synchronized void write(DataOutput out) throws IOException {
-    jobid.write(out);
-    out.writeFloat(setupProgress);
-    out.writeFloat(mapProgress);
-    out.writeFloat(reduceProgress);
-    out.writeFloat(cleanupProgress);
-    out.writeInt(runState);
-    out.writeLong(startTime);
-    Text.writeString(out, user);
-    WritableUtils.writeEnum(out, priority);
-    Text.writeString(out, schedulingInfo);
-    out.writeLong(finishTime);
-    out.writeBoolean(isRetired);
-    Text.writeString(out, historyFile);
-    Text.writeString(out, jobName);
-    Text.writeString(out, trackingUrl);
-    Text.writeString(out, jobFile);
-  }
-
-  public synchronized void readFields(DataInput in) throws IOException {
-    this.jobid = JobID.read(in);
-    this.setupProgress = in.readFloat();
-    this.mapProgress = in.readFloat();
-    this.reduceProgress = in.readFloat();
-    this.cleanupProgress = in.readFloat();
-    this.runState = in.readInt();
-    this.startTime = in.readLong();
-    this.user = Text.readString(in);
-    this.priority = WritableUtils.readEnum(in, JobPriority.class);
-    this.schedulingInfo = Text.readString(in);
-    this.finishTime = in.readLong();
-    this.isRetired = in.readBoolean();
-    this.historyFile = Text.readString(in);
-    this.jobName = Text.readString(in);
-    this.trackingUrl = Text.readString(in);
-    this.jobFile = Text.readString(in);
-  }
+   protected synchronized void setMapProgress(float p) { 
+     super.setMapProgress(p); 
+   }
 
-  /**
-   * Get the user-specified job name.
-   */
-  public String getJobName() {
-    return jobName;
-  }
+   /**
+    * Sets the cleanup progress of this job
+    * @param p The value of cleanup progress to set to
+    */
+   protected synchronized void setCleanupProgress(float p) { 
+     super.setCleanupProgress(p); 
+   }
 
-  /**
-   * Get the configuration file for the job.
-   */
-  public String getJobFile() {
-    return jobFile;
-  }
+   /**
+    * Sets the setup progress of this job
+    * @param p The value of setup progress to set to
+    */
+   protected synchronized void setSetupProgress(float p) { 
+     super.setSetupProgress(p); 
+   }
 
-  /**
-   * Get the link to the web-ui for details of the job.
-   */
-  public synchronized String getTrackingUrl() {
-    return trackingUrl;
-  }
+   /**
+    * Sets the reduce progress of this Job
+    * @param p The value of reduce progress to set to
+    */
+   protected synchronized void setReduceProgress(float p) { 
+     super.setReduceProgress(p); 
+   }
+     
+   /** 
+    * Set the finish time of the job
+    * @param finishTime The finishTime of the job
+    */
+   protected synchronized void setFinishTime(long finishTime) {
+     super.setFinishTime(finishTime);
+   }
 
-  /**
-   * Get the finish time of the job.
-   */
-  public synchronized long getFinishTime() { 
-    return finishTime;
-  }
+   /**
+    * Set the job history file url for a completed job
+    */
+   protected synchronized void setHistoryFile(String historyFile) {
+     super.setHistoryFile(historyFile);
+   }
 
-  /**
-   * Check whether the job has retired.
-   */
-  public synchronized boolean isRetired() {
-    return isRetired;
-  }
+   /**
+    * Set the link to the web-ui for details of the job.
+    */
+   protected synchronized void setTrackingUrl(String trackingUrl) {
+     super.setTrackingUrl(trackingUrl);
+   }
 
-  /**
-   * @return the job history file name for a completed job. If job is not 
-   * completed or history file not available then return null.
-   */
-  public synchronized String getHistoryFile() {
-    return historyFile;
-  }
+   /**
+    * Set the job retire flag to true.
+    */
+   protected synchronized void setRetired() {
+     super.setRetired();
+   }
 
- /** 
-   * Set the finish time of the job
-   * @param finishTime The finishTime of the job
-   */
-  synchronized void setFinishTime(long finishTime) {
-    this.finishTime = finishTime;
-  }
+   /**
+    * Change the current run state of the job.
+    */
+   protected synchronized void setRunState(int state) {
+     super.setState(getEnum(state));
+   }
 
-  /**
-   * Set the job history file url for a completed job
-   */
-  synchronized void setHistoryFile(String historyFile) {
-    this.historyFile = historyFile;
-  }
+   /**
+    * @return running state of the job
+    */
+   public synchronized int getRunState() { return super.getState().getValue(); }
+     
 
-  /**
-   * Set the link to the web-ui for details of the job.
-   */
-  synchronized void setTrackingUrl(String trackingUrl) {
-    this.trackingUrl = trackingUrl;
-  }
+   /** 
+    * Set the start time of the job
+    * @param startTime The startTime of the job
+    */
+   protected synchronized void setStartTime(long startTime) { 
+     super.setStartTime(startTime);
+   }
+     
+   /**
+    * @param userName The username of the job
+    */
+   protected synchronized void setUsername(String userName) { 
+     super.setUsername(userName);
+   }
 
+   /**
+    * Used to set the scheduling information associated to a particular Job.
+    * 
+    * @param schedulingInfo Scheduling information of the job
+    */
+   protected synchronized void setSchedulingInfo(String schedulingInfo) {
+     super.setSchedulingInfo(schedulingInfo);
+   }
+   
   /**
-   * Set the job retire flag to true.
+   * Set the priority of the job, defaulting to NORMAL.
+   * @param jp new job priority
    */
-  synchronized void setRetired() {
-    this.isRetired = true;
-  }
+   public synchronized void setJobPriority(JobPriority jp) {
+     super.setPriority(
+       org.apache.hadoop.mapreduce.JobPriority.valueOf(jp.name()));
+   }
+  
+   /**
+    * @return Percentage of progress in maps 
+    */
+   public synchronized float mapProgress() { return super.getMapProgress(); }
+     
+   /**
+    * @return Percentage of progress in cleanup 
+    */
+   public synchronized float cleanupProgress() { 
+     return super.getCleanupProgress(); 
+   }
+     
+   /**
+    * @return Percentage of progress in setup 
+    */
+   public synchronized float setupProgress() { 
+     return super.getSetupProgress(); 
+   }
+     
+   /**
+    * @return Percentage of progress in reduce 
+    */
+   public synchronized float reduceProgress() { 
+     return super.getReduceProgress(); 
+   }
 
-  public String toString() {
-    StringBuffer buffer = new StringBuffer();
-    buffer.append("job-id : " + jobid);
-    buffer.append("map-progress : " + mapProgress);
-    buffer.append("reduce-progress : " + reduceProgress);
-    buffer.append("cleanup-progress : " + cleanupProgress);
-    buffer.append("setup-progress : " + setupProgress);
-    buffer.append("runstate : " + runState);
-    buffer.append("start-time : " + startTime);
-    buffer.append("user-name : " + user);
-    buffer.append("priority : " + priority);
-    buffer.append("scheduling-info : " + schedulingInfo);
-    return buffer.toString();
-  }
-}
+   // A utility to convert new job runstates to the old ones.
+   static int getOldNewJobRunState(
+     org.apache.hadoop.mapreduce.JobStatus.State state) {
+     return state.getValue();
+   }
+}
\ No newline at end of file
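
With this change org.apache.hadoop.mapred.JobStatus becomes a deprecated shim over org.apache.hadoop.mapreduce.JobStatus: the old int run states are derived from the new State enum, downgrade() adapts a new-API status for legacy callers, and the hand-written write()/readFields() disappear, presumably because serialization now lives in the superclass. A minimal sketch of that mapping, using only members visible in the diff (the wrapper class and method names below are hypothetical):

    import org.apache.hadoop.mapred.JobStatus;

    public class JobStatusShimExample {

      // Hands legacy code the deprecated mapred type, given a status
      // obtained from the new mapreduce API.
      static JobStatus toOldApi(org.apache.hadoop.mapreduce.JobStatus stat) {
        return JobStatus.downgrade(stat);
      }

      public static void main(String[] args) {
        // The old int constants are now just the new enum's values, so the
        // numeric run states seen by existing callers do not change.
        System.out.println(JobStatus.RUNNING
            == org.apache.hadoop.mapreduce.JobStatus.State.RUNNING.getValue());
      }
    }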

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java Sat Nov 28 20:26:01 2009
@@ -1,230 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ipc.VersionedProtocol;
-
-/** 
- * Protocol that a JobClient and the central JobTracker use to communicate.  The
- * JobClient can use these methods to submit a Job for execution, and learn about
- * the current system status.
- */ 
-interface JobSubmissionProtocol extends VersionedProtocol {
-  /* 
-   *Changing the versionID to 2L since the getTaskCompletionEvents method has
-   *changed.
-   *Changed to 4 since killTask(String,boolean) is added
-   *Version 4: added jobtracker state to ClusterStatus
-   *Version 5: max_tasks in ClusterStatus is replaced by
-   * max_map_tasks and max_reduce_tasks for HADOOP-1274
-   * Version 6: change the counters representation for HADOOP-2248
-   * Version 7: added getAllJobs for HADOOP-2487
-   * Version 8: change {job|task}id's to use corresponding objects rather than strings.
-   * Version 9: change the counter representation for HADOOP-1915
-   * Version 10: added getSystemDir for HADOOP-3135
-   * Version 11: changed JobProfile to include the queue name for HADOOP-3698
-   * Version 12: Added getCleanupTaskReports and 
-   *             cleanupProgress to JobStatus as part of HADOOP-3150
-   * Version 13: Added getJobQueueInfos and getJobQueueInfo(queue name)
-   *             and getAllJobs(queue) as a part of HADOOP-3930
-   * Version 14: Added setPriority for HADOOP-4124
-   * Version 15: Added KILLED status to JobStatus as part of HADOOP-3924            
-   * Version 16: Added getSetupTaskReports and 
-   *             setupProgress to JobStatus as part of HADOOP-4261           
-   * Version 17: getClusterStatus returns the amount of memory used by 
-   *             the server. HADOOP-4435
-   * Version 18: Added blacklisted trackers to the ClusterStatus 
-   *             for HADOOP-4305
-   * Version 19: Modified TaskReport to have TIP status and modified the
-   *             method getClusterStatus() to take a boolean argument
-   *             for HADOOP-4807
-   * Version 20: Modified ClusterStatus to have the tasktracker expiry
-   *             interval for HADOOP-4939
-   * Version 21: Modified TaskID to be aware of the new TaskTypes                                 
-   * Version 22: Added method getQueueAclsForCurrentUser to get queue acls info
-   *             for a user
-   * Version 23: Modified the JobQueueInfo class to include queue state.
-   *             Part of HADOOP-5913.  
-   * Version 24: Modified ClusterStatus to include BlackListInfo class which 
-   *             encapsulates reasons and report for blacklisted node.          
-   * Version 25: Added fields to JobStatus for HADOOP-817.
-   */
-  public static final long versionID = 25L;
-
-  /**
-   * Allocate a name for the job.
-   * @return a unique job name for submitting jobs.
-   * @throws IOException
-   */
-  public JobID getNewJobId() throws IOException;
-
-  /**
-   * Submit a Job for execution.  Returns the latest profile for
-   * that job.
-   * The job files should be submitted in <b>system-dir</b>/<b>jobName</b>.
-   */
-  public JobStatus submitJob(JobID jobName) throws IOException;
-
-  /**
-   * Get the current status of the cluster
-   * @param detailed if true then report tracker names as well
-   * @return summary of the state of the cluster
-   */
-  public ClusterStatus getClusterStatus(boolean detailed) throws IOException;
-  
-    
-  /**
-   * Kill the indicated job
-   */
-  public void killJob(JobID jobid) throws IOException;
-
-  /**
-   * Set the priority of the specified job
-   * @param jobid ID of the job
-   * @param priority Priority to be set for the job
-   */
-  public void setJobPriority(JobID jobid, String priority) 
-                                                      throws IOException;
-  /**
-   * Kill indicated task attempt.
-   * @param taskId the id of the task to kill.
-   * @param shouldFail if true the task is failed and added to failed tasks list, otherwise
-   * it is just killed, w/o affecting job failure status.  
-   */ 
-  public boolean killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException;
-  
-  /**
-   * Grab a handle to a job that is already known to the JobTracker.
-   * @return Profile of the job, or null if not found. 
-   */
-  public JobProfile getJobProfile(JobID jobid) throws IOException;
-
-  /**
-   * Grab a handle to a job that is already known to the JobTracker.
-   * @return Status of the job, or null if not found.
-   */
-  public JobStatus getJobStatus(JobID jobid) throws IOException;
-
-  /**
-   * Grab the current job counters
-   */
-  public Counters getJobCounters(JobID jobid) throws IOException;
-    
-  /**
-   * Grab a bunch of info on the map tasks that make up the job
-   */
-  public TaskReport[] getMapTaskReports(JobID jobid) throws IOException;
-
-  /**
-   * Grab a bunch of info on the reduce tasks that make up the job
-   */
-  public TaskReport[] getReduceTaskReports(JobID jobid) throws IOException;
-
-  /**
-   * Grab a bunch of info on the cleanup tasks that make up the job
-   */
-  public TaskReport[] getCleanupTaskReports(JobID jobid) throws IOException;
-
-  /**
-   * Grab a bunch of info on the setup tasks that make up the job
-   */
-  public TaskReport[] getSetupTaskReports(JobID jobid) throws IOException;
-
-  /**
-   * A MapReduce system always operates on a single filesystem.  This 
-   * function returns the fs name.  ('local' if the localfs; 'addr:port' 
-   * if dfs).  The client can then copy files into the right locations 
-   * prior to submitting the job.
-   */
-  public String getFilesystemName() throws IOException;
-
-  /** 
-   * Get the jobs that are not completed and not failed
-   * @return array of JobStatus for the running/to-be-run
-   * jobs.
-   */
-  public JobStatus[] jobsToComplete() throws IOException;
-    
-  /** 
-   * Get all the jobs submitted. 
-   * @return array of JobStatus for the submitted jobs
-   */
-  public JobStatus[] getAllJobs() throws IOException;
-  
-  /**
-   * Get task completion events for the jobid, starting from fromEventId. 
-   * Returns empty array if no events are available. 
-   * @param jobid job id 
-   * @param fromEventId event id to start from.
-   * @param maxEvents the max number of events we want to look at 
-   * @return array of task completion events. 
-   * @throws IOException
-   */
-  public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid
-      , int fromEventId, int maxEvents) throws IOException;
-    
-  /**
-   * Get the diagnostics for a given task in a given job
-   * @param taskId the id of the task
-   * @return an array of the diagnostic messages
-   */
-  public String[] getTaskDiagnostics(TaskAttemptID taskId) 
-  throws IOException;
-
-  /**
-   * Grab the jobtracker system directory path where job-specific files are to be placed.
-   * 
-   * @return the system directory where job-specific files are to be placed.
-   */
-  public String getSystemDir();  
-  
-  /**
-   * Gets set of Job Queues associated with the Job Tracker
-   * 
-   * @return Array of the Job Queue Information Object
-   * @throws IOException 
-   */
-  public JobQueueInfo[] getQueues() throws IOException;
-  
-  /**
-   * Gets scheduling information associated with the particular Job queue
-   * 
-   * @param queue Queue Name
-   * @return Scheduling Information of the Queue
-   * @throws IOException 
-   */
-  public JobQueueInfo getQueueInfo(String queue) throws IOException;
-  
-  /**
-   * Gets all the jobs submitted to the particular Queue
-   * @param queue Queue name
-   * @return array of JobStatus for the submitted jobs
-   * @throws IOException
-   */
-  public JobStatus[] getJobsFromQueue(String queue) throws IOException;
-  
-  /**
-   * Gets the Queue ACLs for current user
-   * @return array of QueueAclsInfo object for current user.
-   * @throws IOException
-   */
-  public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException;
-}
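
The deleted JobSubmissionProtocol (versionID 25 at removal) was the private JobClient-to-JobTracker RPC interface; user code was expected to go through the JobClient facade rather than the protocol itself. A minimal, hedged sketch of how the same queries look through JobClient (the JobTracker address below is hypothetical, and the exact client API in this branch may differ):

    import org.apache.hadoop.mapred.ClusterStatus;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.JobStatus;

    public class ClusterInfoExample {
      public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf();
        // Hypothetical JobTracker address; leave unset for the local runner.
        conf.set("mapred.job.tracker", "jobtracker.example.com:8021");

        JobClient client = new JobClient(conf);
        try {
          // Roughly what the protocol's getClusterStatus(boolean) exposed.
          ClusterStatus cluster = client.getClusterStatus(true);
          System.out.println("map slots: " + cluster.getMaxMapTasks()
              + ", reduce slots: " + cluster.getMaxReduceTasks());

          // Roughly what the protocol's getAllJobs() exposed.
          for (JobStatus status : client.getAllJobs()) {
            System.out.println(status.getJobID()
                + " state=" + status.getRunState());
          }
        } finally {
          client.close();
        }
      }
    }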