Posted to common-commits@hadoop.apache.org by ma...@apache.org on 2011/10/06 00:37:53 UTC

svn commit: r1179465 [3/3] - in /hadoop/common/branches/branch-0.20-security-205/src: core/org/apache/hadoop/util/ mapred/ mapred/org/apache/hadoop/mapred/ mapred/org/apache/hadoop/mapreduce/ test/org/apache/hadoop/mapred/ test/org/apache/hadoop/util/

Modified: hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapred/TaskTrackerStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapred/TaskTrackerStatus.java?rev=1179465&r1=1179464&r2=1179465&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapred/TaskTrackerStatus.java (original)
+++ hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapred/TaskTrackerStatus.java Wed Oct  5 22:37:52 2011
@@ -55,6 +55,7 @@ public class TaskTrackerStatus implement
   private int maxReduceTasks;
   private TaskTrackerHealthStatus healthStatus;
    
+  public static final int UNAVAILABLE = -1;
   /**
    * Class representing a collection of resources on this tasktracker.
    */
@@ -66,6 +67,13 @@ public class TaskTrackerStatus implement
     private long reduceSlotMemorySizeOnTT;
     private long availableSpace;
     
+    private long availableVirtualMemory = UNAVAILABLE; // in bytes
+    private long availablePhysicalMemory = UNAVAILABLE; // in bytes
+    private int numProcessors = UNAVAILABLE;
+    private long cumulativeCpuTime = UNAVAILABLE; // in milliseconds
+    private long cpuFrequency = UNAVAILABLE; // in kHz
+    private float cpuUsage = UNAVAILABLE; // in %
+
     ResourceStatus() {
       totalVirtualMemory = JobConf.DISABLED_MEMORY_LIMIT;
       totalPhysicalMemory = JobConf.DISABLED_MEMORY_LIMIT;
@@ -172,21 +180,160 @@ public class TaskTrackerStatus implement
     long getAvailableSpace() {
       return availableSpace;
     }
+
+    /**
+     * Set the amount of available virtual memory on the tasktracker.
+     * If the input is not a valid number, it will be set to UNAVAILABLE
+     *
+     * @param availableMem amount of available virtual memory on the tasktracker
+     *                    in bytes.
+     */
+    void setAvailableVirtualMemory(long availableMem) {
+      availableVirtualMemory = availableMem > 0 ?
+                               availableMem : UNAVAILABLE;
+    }
+
+    /**
+     * Get the amount of available virtual memory on the tasktracker.
+     * Will return UNAVAILABLE if it cannot be obtained
+     *
+     * @return the amount of available virtual memory on the tasktracker
+     *             in bytes.
+     */
+    long getAvailableVirtualMemory() {
+      return availableVirtualMemory;
+    }
+
+    /**
+     * Set the amount of available physical memory on the tasktracker.
+     * If the input is not a valid number, it will be set to UNAVAILABLE
+     *
+     * @param availableRAM amount of available physical memory on the
+     *                     tasktracker in bytes.
+     */
+    void setAvailablePhysicalMemory(long availableRAM) {
+      availablePhysicalMemory = availableRAM > 0 ?
+                                availableRAM : UNAVAILABLE;
+    }
+
+    /**
+     * Get the amount of available physical memory on the tasktracker.
+     * Will return UNAVAILABLE if it cannot be obtained
+     *
+     * @return amount of available physical memory on the tasktracker in bytes.
+     */
+    long getAvailablePhysicalMemory() {
+      return availablePhysicalMemory;
+    }
+
+    /**
+     * Set the CPU frequency of this TaskTracker
+     * If the input is not a valid number, it will be set to UNAVAILABLE
+     *
+     * @param cpuFrequency CPU frequency in kHz
+     */
+    public void setCpuFrequency(long cpuFrequency) {
+      this.cpuFrequency = cpuFrequency > 0 ?
+                          cpuFrequency : UNAVAILABLE;
+    }
+
+    /**
+     * Get the CPU frequency of this TaskTracker
+     * Will return UNAVAILABLE if it cannot be obtained
+     *
+     * @return CPU frequency in kHz
+     */
+    public long getCpuFrequency() {
+      return cpuFrequency;
+    }
+
+    /**
+     * Set the number of processors on this TaskTracker
+     * If the input is not a valid number, it will be set to UNAVAILABLE
+     *
+     * @param numProcessors number of processors
+     */
+    public void setNumProcessors(int numProcessors) {
+      this.numProcessors = numProcessors > 0 ?
+                           numProcessors : UNAVAILABLE;
+    }
+
+    /**
+     * Get the number of processors on this TaskTracker
+     * Will return UNAVAILABLE if it cannot be obtained
+     *
+     * @return number of processors
+     */
+    public int getNumProcessors() {
+      return numProcessors;
+    }
+
+    /**
+     * Set the cumulative CPU time used on this TaskTracker since it started.
+     * It can be set to UNAVAILABLE if it is currently unavailable.
+     *
+     * @param cumulativeCpuTime Used CPU time in milliseconds
+     */
+    public void setCumulativeCpuTime(long cumulativeCpuTime) {
+      this.cumulativeCpuTime = cumulativeCpuTime > 0 ?
+                               cumulativeCpuTime : UNAVAILABLE;
+    }
+
+    /**
+     * Get the cumulative CPU time used on this TaskTracker since it started.
+     * Will return UNAVAILABLE if it cannot be obtained
+     *
+     * @return used CPU time in milliseconds
+     */
+    public long getCumulativeCpuTime() {
+      return cumulativeCpuTime;
+    }
+    
+    /**
+     * Set the CPU usage on this TaskTracker
+     * 
+     * @param cpuUsage CPU usage in %
+     */
+    public void setCpuUsage(float cpuUsage) {
+      this.cpuUsage = cpuUsage;
+    }
+
+    /**
+     * Get the CPU usage on this TaskTracker
+     * Will return UNAVAILABLE if it cannot be obtained
+     *
+     * @return CPU usage in %
+     */
+    public float getCpuUsage() {
+      return cpuUsage;
+    }
     
     public void write(DataOutput out) throws IOException {
       WritableUtils.writeVLong(out, totalVirtualMemory);
       WritableUtils.writeVLong(out, totalPhysicalMemory);
+      WritableUtils.writeVLong(out, availableVirtualMemory);
+      WritableUtils.writeVLong(out, availablePhysicalMemory);
       WritableUtils.writeVLong(out, mapSlotMemorySizeOnTT);
       WritableUtils.writeVLong(out, reduceSlotMemorySizeOnTT);
       WritableUtils.writeVLong(out, availableSpace);
+      WritableUtils.writeVLong(out, cumulativeCpuTime);
+      WritableUtils.writeVLong(out, cpuFrequency);
+      WritableUtils.writeVInt(out, numProcessors);
+      out.writeFloat(getCpuUsage());
     }
     
     public void readFields(DataInput in) throws IOException {
       totalVirtualMemory = WritableUtils.readVLong(in);
       totalPhysicalMemory = WritableUtils.readVLong(in);
+      availableVirtualMemory = WritableUtils.readVLong(in);
+      availablePhysicalMemory = WritableUtils.readVLong(in);
       mapSlotMemorySizeOnTT = WritableUtils.readVLong(in);
       reduceSlotMemorySizeOnTT = WritableUtils.readVLong(in);
       availableSpace = WritableUtils.readVLong(in);
+      cumulativeCpuTime = WritableUtils.readVLong(in);
+      cpuFrequency = WritableUtils.readVLong(in);
+      numProcessors = WritableUtils.readVInt(in);
+      setCpuUsage(in.readFloat());
     }
   }
   

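Note: the new fields are added to write() and readFields() in matching
positions; a mismatch in order between the two methods would silently corrupt
every field deserialized afterwards. A minimal round-trip sketch of the idiom,
standalone apart from hadoop-core (only the two new memory fields are shown,
and the values are illustrative):

    import java.io.*;
    import org.apache.hadoop.io.WritableUtils;

    public class VLongRoundTrip {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        // write in a fixed order, as ResourceStatus.write() does...
        WritableUtils.writeVLong(out, 4L * 1024 * 1024 * 1024); // vmem
        WritableUtils.writeVLong(out, 2L * 1024 * 1024 * 1024); // pmem

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray()));
        // ...and read back in the same order, as readFields() does
        System.out.println(WritableUtils.readVLong(in));  // vmem
        System.out.println(WritableUtils.readVLong(in));  // pmem
      }
    }
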
Modified: hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapred/Task_Counter.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapred/Task_Counter.properties?rev=1179465&r1=1179464&r2=1179465&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapred/Task_Counter.properties (original)
+++ hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapred/Task_Counter.properties Wed Oct  5 22:37:52 2011
@@ -17,4 +17,8 @@ REDUCE_OUTPUT_RECORDS.name=    Reduce ou
 REDUCE_SKIPPED_RECORDS.name=   Reduce skipped records
 REDUCE_SKIPPED_GROUPS.name=    Reduce skipped groups
 SPILLED_RECORDS.name=          Spilled Records
+COMMITTED_HEAP_BYTES.name=     Total committed heap usage (bytes)
+CPU_MILLISECONDS.name=         CPU time spent (ms)
+PHYSICAL_MEMORY_BYTES.name=    Physical memory (bytes) snapshot
+VIRTUAL_MEMORY_BYTES.name=     Virtual memory (bytes) snapshot
 

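Note: the four new display names above correspond to the task counters this
patch adds (committed heap, CPU time, and physical/virtual memory snapshots).
A short sketch of reading them from a finished job; the class lives in package
org.apache.hadoop.mapred only so that the Task.Counter enum is accessible,
mirroring how the tests below use it:

    package org.apache.hadoop.mapred;

    import java.io.IOException;

    public class PrintResourceCounters {
      static void print(RunningJob job) throws IOException {
        Counters counters = job.getCounters();
        System.out.println("CPU (ms): "
            + counters.findCounter(Task.Counter.CPU_MILLISECONDS).getValue());
        System.out.println("Heap (bytes): "
            + counters.getCounter(Task.Counter.COMMITTED_HEAP_BYTES));
        System.out.println("PMem (bytes): "
            + counters.getCounter(Task.Counter.PHYSICAL_MEMORY_BYTES));
        System.out.println("VMem (bytes): "
            + counters.getCounter(Task.Counter.VIRTUAL_MEMORY_BYTES));
      }
    }
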
Modified: hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapreduce/Counter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapreduce/Counter.java?rev=1179465&r1=1179464&r2=1179465&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapreduce/Counter.java (original)
+++ hadoop/common/branches/branch-0.20-security-205/src/mapred/org/apache/hadoop/mapreduce/Counter.java Wed Oct  5 22:37:52 2011
@@ -104,6 +104,14 @@ public class Counter implements Writable
   }
     
   /**
+   * Set this counter to the given value
+   * @param value the value to set
+   */
+  public synchronized void setValue(long value) {
+    this.value = value;
+  }
+
+  /**
    * Increment this counter by the given value
    * @param incr the value to increase this counter by
    */

Modified: hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestCounters.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestCounters.java?rev=1179465&r1=1179464&r2=1179465&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestCounters.java (original)
+++ hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestCounters.java Wed Oct  5 22:37:52 2011
@@ -20,6 +20,9 @@ package org.apache.hadoop.mapred;
 import junit.framework.TestCase;
 import java.io.IOException;
 import java.text.ParseException;
+import java.util.Random;
+
+import org.apache.hadoop.mapred.Counters.Counter;
 
 /**
  * TestCounters checks the sanity and recoverability of {@code Counters}
@@ -90,6 +93,33 @@ public class TestCounters extends TestCa
     }
   }
   
+  /**
+   * Verify that counter values are initialized, incremented and set correctly
+   */
+  public void testCounterValue() {
+    final int NUMBER_TESTS = 100;
+    final int NUMBER_INC = 10;
+    final Random rand = new Random();
+    for (int i = 0; i < NUMBER_TESTS; i++) {
+      long initValue = rand.nextInt();
+      long expectedValue = initValue;
+      Counter counter = new Counter("foo", "bar", expectedValue);
+      assertEquals("Counter value is not initialized correctly",
+                   expectedValue, counter.getValue());
+      for (int j = 0; j < NUMBER_INC; j++) {
+        int incValue = rand.nextInt();
+        counter.increment(incValue);
+        expectedValue += incValue;
+        assertEquals("Counter value is not incremented correctly",
+                     expectedValue, counter.getValue());
+      }
+      expectedValue = rand.nextInt();
+      counter.setValue(expectedValue);
+      assertEquals("Counter value is not set correctly",
+                   expectedValue, counter.getValue());
+    }
+  }
+  
   public static void main(String[] args) throws IOException {
     new TestCounters().testCounters();
   }

Modified: hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestJobCounters.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestJobCounters.java?rev=1179465&r1=1179464&r2=1179465&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestJobCounters.java (original)
+++ hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestJobCounters.java Wed Oct  5 22:37:52 2011
@@ -23,12 +23,12 @@ import java.io.FileWriter;
 import java.io.Writer;
 import java.io.BufferedWriter;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
 import java.util.StringTokenizer;
 
 import junit.framework.TestCase;
-import junit.extensions.TestSetup;
-import junit.framework.Test;
-import junit.framework.TestSuite;
 
 import static org.apache.hadoop.mapred.Task.Counter.SPILLED_RECORDS;
 import static org.apache.hadoop.mapred.Task.Counter.MAP_INPUT_RECORDS;
@@ -36,12 +36,17 @@ import static org.apache.hadoop.mapred.T
 import static org.apache.hadoop.mapred.Task.Counter.MAP_INPUT_BYTES;
 import static org.apache.hadoop.mapred.Task.Counter.MAP_OUTPUT_BYTES;
 import static org.apache.hadoop.mapred.Task.Counter.MAP_OUTPUT_MATERIALIZED_BYTES;
+import static org.apache.hadoop.mapred.Task.Counter.COMMITTED_HEAP_BYTES;
 
+import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.Reducer;
@@ -450,4 +455,249 @@ public class TestJobCounters extends Tes
       }
     }
   }
+  
+  /** 
+   * Increases the JVM's heap usage to the specified target value.
+   */
+  static class MemoryLoader {
+    private static final int DEFAULT_UNIT_LOAD_SIZE = 10 * 1024 * 1024; // 10mb
+    
+    // the target value to reach
+    private long targetValue;
+    // a list to hold the load objects
+    private List<String> loadObjects = new ArrayList<String>();
+    
+    MemoryLoader(long targetValue) {
+      this.targetValue = targetValue;
+    }
+    
+    /**
+     * Loads the memory to the target value.
+     */
+    void load() {
+      while (Runtime.getRuntime().totalMemory() < targetValue) {
+        System.out.println("Loading memory with " + DEFAULT_UNIT_LOAD_SIZE 
+                           + " characters. Current usage : " 
+                           + Runtime.getRuntime().totalMemory());
+        // load some objects in the memory
+        loadObjects.add(RandomStringUtils.random(DEFAULT_UNIT_LOAD_SIZE));
+
+        // sleep for 100ms
+        try {
+          Thread.sleep(100);
+        } catch (InterruptedException ie) {}
+      }
+    }
+  }
+
+  /**
+   * A mapper that increases the JVM's heap usage to a target value configured 
+   * via {@link MemoryLoaderMapper#TARGET_VALUE} using a {@link MemoryLoader}.
+   */
+  @SuppressWarnings({"deprecation", "unchecked"})
+  static class MemoryLoaderMapper 
+  extends MapReduceBase 
+  implements org.apache.hadoop.mapred.Mapper<WritableComparable, Writable, 
+                    WritableComparable, Writable> {
+    static final String TARGET_VALUE = "map.memory-loader.target-value";
+    
+    private static MemoryLoader loader = null;
+    
+    public void map(WritableComparable key, Writable val, 
+                    OutputCollector<WritableComparable, Writable> output,
+                    Reporter reporter)
+    throws IOException {
+      assertNotNull("Mapper not configured!", loader);
+      
+      // load the memory
+      loader.load();
+      
+      // work as identity mapper
+      output.collect(key, val);
+    }
+
+    public void configure(JobConf conf) {
+      loader = new MemoryLoader(conf.getLong(TARGET_VALUE, -1));
+    }
+  }
+
+  /** 
+   * A reducer that increases the JVM's heap usage to a target value configured 
+   * via {@link MemoryLoaderReducer#TARGET_VALUE} using a {@link MemoryLoader}.
+   */
+  @SuppressWarnings({"deprecation", "unchecked"})
+  static class MemoryLoaderReducer extends MapReduceBase 
+  implements org.apache.hadoop.mapred.Reducer<WritableComparable, Writable, 
+                     WritableComparable, Writable> {
+    static final String TARGET_VALUE = "reduce.memory-loader.target-value";
+    private static MemoryLoader loader = null;
+    
+    public void reduce(WritableComparable key, Iterator<Writable> val, 
+                       OutputCollector<WritableComparable, Writable> output,
+                       Reporter reporter)
+    throws IOException {
+      assertNotNull("Reducer not configured!", loader);
+      
+      // load the memory
+      loader.load();
+      
+      // work as identity reducer
+      output.collect(key, key);
+    }
+
+    public void configure(JobConf conf) {
+      loader = new MemoryLoader(conf.getLong(TARGET_VALUE, -1));
+    }
+  }
+
+  @SuppressWarnings("deprecation")
+  private long getTaskCounterUsage(JobClient client, JobID id, int numReports,
+                                    int taskId, boolean isMap) 
+  throws Exception {
+    TaskReport[] reports = null;
+    if (isMap) {
+      reports = client.getMapTaskReports(id);
+    } else {
+      reports = client.getReduceTaskReports(id);
+    }
+    
+    assertNotNull("No reports found for " + (isMap? "map" : "reduce") 
+                  + " tasks in job " + id, reports);
+    // make sure that the total number of reports matches the expected count
+    assertEquals("Mismatch in number of task reports", numReports,
+                 reports.length);
+    
+    Counters counters = reports[taskId].getCounters();
+    
+    return counters.getCounter(COMMITTED_HEAP_BYTES);
+  }
+
+  // set up heap options, target value for memory loader and the output 
+  // directory before running the job
+  @SuppressWarnings("deprecation")
+  private static RunningJob runHeapUsageTestJob(JobConf conf, Path testRootDir,
+                              String heapOptions, long targetMapValue,
+                              long targetReduceValue, FileSystem fs, 
+                              JobClient client, Path inDir) 
+  throws IOException {
+    // define a job
+    JobConf jobConf = new JobConf(conf);
+    
+    // configure the job
+    jobConf.setNumMapTasks(1);
+    jobConf.setNumReduceTasks(1);
+    jobConf.setMapperClass(MemoryLoaderMapper.class);
+    jobConf.setReducerClass(MemoryLoaderReducer.class);
+    jobConf.setInputFormat(TextInputFormat.class);
+    jobConf.setOutputKeyClass(LongWritable.class);
+    jobConf.setOutputValueClass(Text.class);
+    jobConf.setMaxMapAttempts(1);
+    jobConf.setMaxReduceAttempts(1);
+    jobConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, heapOptions);
+    
+    // set the targets
+    jobConf.setLong(MemoryLoaderMapper.TARGET_VALUE, targetMapValue);
+    jobConf.setLong(MemoryLoaderReducer.TARGET_VALUE, targetReduceValue);
+    
+    // set the input directory for the job
+    FileInputFormat.setInputPaths(jobConf, inDir);
+    
+    // define job output folder
+    Path outDir = new Path(testRootDir, "out");
+    fs.delete(outDir, true);
+    FileOutputFormat.setOutputPath(jobConf, outDir);
+    
+    // run the job
+    RunningJob job = client.submitJob(jobConf);
+    job.waitForCompletion();
+    JobID jobID = job.getID();
+    assertTrue("Job " + jobID + " failed!", job.isSuccessful());
+    
+    return job;
+  }
+
+  /**
+   * Tests the {@link Task.Counter#COMMITTED_HEAP_BYTES} counter. 
+   * The test runs a low-memory job that barely touches the heap, then a 
+   * high-memory job that loads it, and verifies that the COMMITTED_HEAP_BYTES 
+   * reported for the low-memory job is smaller than that reported for the 
+   * high-memory job.
+   * @throws Exception
+   */
+  @SuppressWarnings("deprecation")
+  public void testHeapUsageCounter() throws Exception {
+    JobConf conf = new JobConf();
+    // create a local filesystem handle
+    FileSystem fileSystem = FileSystem.getLocal(conf);
+    
+    // define test root directories
+    File rootDir =
+      new File(System.getProperty("test.build.data", "/tmp"));
+    File testRootDir = new File(rootDir, "testHeapUsageCounter");
+    // cleanup the test root directory
+    Path testRootDirPath = new Path(testRootDir.toString());
+    fileSystem.delete(testRootDirPath, true);
+    // set the current working directory
+    fileSystem.setWorkingDirectory(testRootDirPath);
+    
+    fileSystem.deleteOnExit(testRootDirPath);
+    
+    // create a mini cluster using the local file system
+    MiniMRCluster mrCluster = 
+      new MiniMRCluster(1, fileSystem.getUri().toString(), 1);
+    
+    try {
+      conf = mrCluster.createJobConf();
+      JobClient jobClient = new JobClient(conf);
+
+      // define job input
+      File file = new File(testRootDir, "in");
+      Path inDir = new Path(file.toString());
+      // create input data
+      createWordsFile(file);
+
+      // configure and run a low memory job which will run without loading the
+      // jvm's heap
+      RunningJob lowMemJob = 
+        runHeapUsageTestJob(conf, testRootDirPath, "-Xms32m -Xmx1G", 
+                            0, 0, fileSystem, jobClient, inDir);
+      JobID lowMemJobID = lowMemJob.getID();
+      long lowMemJobMapHeapUsage = getTaskCounterUsage(jobClient, lowMemJobID, 
+                                                       1, 0, true);
+      System.out.println("Job1 (low memory job) map task heap usage: " 
+                         + lowMemJobMapHeapUsage);
+      long lowMemJobReduceHeapUsage =
+        getTaskCounterUsage(jobClient, lowMemJobID, 1, 0, false);
+      System.out.println("Job1 (low memory job) reduce task heap usage: " 
+                         + lowMemJobReduceHeapUsage);
+
+      // configure and run a high memory job which will load the jvm's heap
+      RunningJob highMemJob = 
+        runHeapUsageTestJob(conf, testRootDirPath, "-Xms32m -Xmx1G", 
+                            lowMemJobMapHeapUsage + 256*1024*1024, 
+                            lowMemJobReduceHeapUsage + 256*1024*1024,
+                            fileSystem, jobClient, inDir);
+      JobID highMemJobID = highMemJob.getID();
+
+      long highMemJobMapHeapUsage = getTaskCounterUsage(jobClient, highMemJobID,
+                                                        1, 0, true);
+      System.out.println("Job2 (high memory job) map task heap usage: " 
+                         + highMemJobMapHeapUsage);
+      long highMemJobReduceHeapUsage =
+        getTaskCounterUsage(jobClient, highMemJobID, 1, 0, false);
+      System.out.println("Job2 (high memory job) reduce task heap usage: " 
+                         + highMemJobReduceHeapUsage);
+
+      assertTrue("Incorrect map heap usage reported by the map task", 
+                 lowMemJobMapHeapUsage < highMemJobMapHeapUsage);
+
+      assertTrue("Incorrect reduce heap usage reported by the reduce task", 
+                 lowMemJobReduceHeapUsage < highMemJobReduceHeapUsage);
+    } finally {
+      // shutdown the mr cluster
+      mrCluster.shutdown();
+      try {
+        fileSystem.delete(testRootDirPath, true);
+      } catch (IOException ioe) {} 
+    }
+  }
 }
\ No newline at end of file

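Note: MemoryLoader above works because retaining newly allocated objects in a
live list forces the JVM to grow its committed heap, which is what
Runtime.totalMemory() (and hence COMMITTED_HEAP_BYTES) reports. A standalone
sketch of that mechanism, with illustrative sizes:

    import java.util.ArrayList;
    import java.util.List;

    public class HeapGrowth {
      public static void main(String[] args) {
        long target = Runtime.getRuntime().totalMemory() + 64L * 1024 * 1024;
        List<byte[]> retained = new ArrayList<byte[]>();
        // strong references keep the allocations from being collected,
        // so the committed heap has to grow toward the target
        while (Runtime.getRuntime().totalMemory() < target) {
          retained.add(new byte[1024 * 1024]); // ~1 MB per step
        }
        System.out.println("Committed heap is now "
            + Runtime.getRuntime().totalMemory() + " bytes");
      }
    }

Run it with a large enough -Xmx (for example -Xmx256m), just as the test pins
the task heap via MAPRED_TASK_JAVA_OPTS.
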
Added: hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java?rev=1179465&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java (added)
+++ hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java Wed Oct  5 22:37:52 2011
@@ -0,0 +1,366 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.examples.SleepJob;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobTracker;
+import org.apache.hadoop.mapred.Task;
+import org.apache.hadoop.mapred.TaskStatus;
+import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
+import org.apache.hadoop.mapred.TaskTrackerStatus;
+import org.apache.hadoop.mapred.Task.Counter;
+import org.apache.hadoop.util.DummyResourceCalculatorPlugin;
+import org.apache.hadoop.util.LinuxResourceCalculatorPlugin;
+import org.apache.hadoop.util.ResourceCalculatorPlugin;
+import org.apache.hadoop.util.ToolRunner;
+
+import junit.framework.TestCase;
+import org.junit.Test;
+import org.junit.After;
+
+/**
+ * This test class tests the functionality related to configuring, reporting
+ * and computing memory-related parameters in a Map/Reduce cluster.
+ * 
+ * Each test sets up a {@link MiniMRCluster} with a locally defined 
+ * {@link org.apache.hadoop.mapred.TaskScheduler}. This scheduler validates 
+ * that the memory-related configuration is correctly computed and reported 
+ * from the tasktracker in 
+ * {@link org.apache.hadoop.mapred.TaskScheduler#assignTasks(TaskTrackerStatus)}.
+ */
+public class TestTTResourceReporting extends TestCase {
+
+  static final Log LOG = LogFactory.getLog(TestTTResourceReporting.class);
+  
+  private MiniMRCluster miniMRCluster;
+
+  /**
+   * Fake scheduler to test the proper reporting of memory values by TT
+   */
+  public static class FakeTaskScheduler extends JobQueueTaskScheduler {
+    
+    private boolean hasPassed = true;
+    private boolean hasDynamicValuePassed = true;
+    private String message;
+    
+    public FakeTaskScheduler() {
+      super();
+    }
+    
+    public boolean hasTestPassed() {
+      return hasPassed;
+    }
+
+    public boolean hasDynamicTestPassed() {
+      return hasDynamicValuePassed;
+    }
+    
+    public String getFailureMessage() {
+      return message;
+    }
+    
+    @Override
+    public List<Task> assignTasks(TaskTracker taskTracker)
+        throws IOException {
+      TaskTrackerStatus status = taskTracker.getStatus();
+      long totalVirtualMemoryOnTT =
+          getConf().getLong("totalVmemOnTT", -1);
+      long totalPhysicalMemoryOnTT =
+          getConf().getLong("totalPmemOnTT", -1);
+      long mapSlotMemorySize =
+          getConf().getLong("mapSlotMemorySize", -1);
+      long reduceSlotMemorySize =
+          getConf().getLong("reduceSlotMemorySize", -1);
+      long availableVirtualMemoryOnTT =
+          getConf().getLong("availableVmemOnTT", -1);
+      long availablePhysicalMemoryOnTT =
+          getConf().getLong("availablePmemOnTT", -1);
+      long cumulativeCpuTime =
+          getConf().getLong("cumulativeCpuTime", -1);
+      long cpuFrequency =
+          getConf().getLong("cpuFrequency", -1);
+      int numProcessors =
+          getConf().getInt("numProcessors", -1);
+      float cpuUsage =
+          getConf().getFloat("cpuUsage", -1);
+
+      long reportedTotalVirtualMemoryOnTT =
+          status.getResourceStatus().getTotalVirtualMemory();
+      long reportedTotalPhysicalMemoryOnTT =
+          status.getResourceStatus().getTotalPhysicalMemory();
+      long reportedMapSlotMemorySize =
+          status.getResourceStatus().getMapSlotMemorySizeOnTT();
+      long reportedReduceSlotMemorySize =
+          status.getResourceStatus().getReduceSlotMemorySizeOnTT();
+      long reportedAvailableVirtualMemoryOnTT =
+          status.getResourceStatus().getAvailableVirtualMemory();
+      long reportedAvailablePhysicalMemoryOnTT =
+          status.getResourceStatus().getAvailablePhysicalMemory();
+      long reportedCumulativeCpuTime =
+          status.getResourceStatus().getCumulativeCpuTime();
+      long reportedCpuFrequency = status.getResourceStatus().getCpuFrequency();
+      int reportedNumProcessors = status.getResourceStatus().getNumProcessors();
+      float reportedCpuUsage = status.getResourceStatus().getCpuUsage();
+
+      message =
+          "expected memory values : "
+              + "(totalVirtualMemoryOnTT, totalPhysicalMemoryOnTT, "
+              + "availableVirtualMemoryOnTT, availablePhysicalMemoryOnTT, "
+              + "mapSlotMemSize, reduceSlotMemorySize, cumulativeCpuTime, "
+              + "cpuFrequency, numProcessors, cpuUsage) = ("
+              + totalVirtualMemoryOnTT + ", "
+              + totalPhysicalMemoryOnTT + ","
+              + availableVirtualMemoryOnTT + ", "
+              + availablePhysicalMemoryOnTT + ","
+              + mapSlotMemorySize + ","
+              + reduceSlotMemorySize + ","
+              + cumulativeCpuTime + ","
+              + cpuFrequency + ","
+              + numProcessors + ","
+              + cpuUsage
+              +")";
+      message +=
+          "\nreported memory values : "
+              + "(totalVirtualMemoryOnTT, totalPhysicalMemoryOnTT, "
+              + "availableVirtualMemoryOnTT, availablePhysicalMemoryOnTT, "
+              + "reportedMapSlotMemorySize, reportedReduceSlotMemorySize, "
+              + "reportedCumulativeCpuTime, reportedCpuFrequency, "
+              + "reportedNumProcessors, cpuUsage) = ("
+              + reportedTotalVirtualMemoryOnTT + ", "
+              + reportedTotalPhysicalMemoryOnTT + ","
+              + reportedAvailableVirtualMemoryOnTT + ", "
+              + reportedAvailablePhysicalMemoryOnTT + ","
+              + reportedMapSlotMemorySize + ","
+              + reportedReduceSlotMemorySize + ","
+              + reportedCumulativeCpuTime + ","
+              + reportedCpuFrequency + ","
+              + reportedNumProcessors + ","
+              + reportedCpuUsage
+               + ")";
+      LOG.info(message);
+      hasDynamicValuePassed = true;
+      // Check task resource status in task reports
+      for (TaskStatus taskStatus : status.getTaskReports()) {
+        Counters counters = taskStatus.getCounters();
+        // This should be zero because the initial CPU time is subtracted.
+        long procCumulativeCpuTime = 0;
+        long procVirtualMemorySize =
+          getConf().getLong("procVirtualMemorySize", -1);
+        long procPhysicalMemorySize =
+          getConf().getLong("procPhysicalMemorySize", -1);
+        long reportedProcCumulativeCpuTime =
+          counters.findCounter(Task.Counter.CPU_MILLISECONDS).getValue();
+        long reportedProcVirtualMemorySize =
+          counters.findCounter(Task.Counter.VIRTUAL_MEMORY_BYTES).getValue();
+        long reportedProcPhysicalMemorySize =
+          counters.findCounter(Task.Counter.PHYSICAL_MEMORY_BYTES).getValue();
+        String procMessage =
+          "expected values : "
+              + "(procCumulativeCpuTime, procVirtualMemorySize,"
+              + " procPhysicalMemorySize) = ("
+              + procCumulativeCpuTime + ", "
+              + procVirtualMemorySize + ", "
+              + procPhysicalMemorySize + ")";
+        procMessage +=
+          "\nreported values : "
+              + "(procCumulativeCpuTime, procVirtualMemorySize,"
+              + " procPhysicalMemorySize) = ("
+              + reportedProcCumulativeCpuTime + ", "
+              + reportedProcVirtualMemorySize + ", "
+              + reportedProcPhysicalMemorySize + ")";
+        LOG.info(procMessage);
+        message += "\n" + procMessage;
+        if (procCumulativeCpuTime != reportedProcCumulativeCpuTime ||
+            procVirtualMemorySize != reportedProcVirtualMemorySize ||
+            procPhysicalMemorySize != reportedProcPhysicalMemorySize) {
+          hasDynamicValuePassed = false;
+        }
+      }
+      hasPassed = true;
+      if (totalVirtualMemoryOnTT != reportedTotalVirtualMemoryOnTT
+          || totalPhysicalMemoryOnTT != reportedTotalPhysicalMemoryOnTT
+          || mapSlotMemorySize != reportedMapSlotMemorySize
+          || reduceSlotMemorySize != reportedReduceSlotMemorySize
+          || numProcessors != reportedNumProcessors) {
+        hasPassed = false;
+      }
+      // These values change every moment on the node, so they can only be
+      // tested with DummyResourceCalculatorPlugin. Check them separately.
+      if (availableVirtualMemoryOnTT != reportedAvailableVirtualMemoryOnTT
+          || availablePhysicalMemoryOnTT != reportedAvailablePhysicalMemoryOnTT
+          || cumulativeCpuTime != reportedCumulativeCpuTime
+          || cpuFrequency != reportedCpuFrequency
+          || cpuUsage != reportedCpuUsage) {
+        hasDynamicValuePassed = false;
+      }
+      return super.assignTasks(taskTracker);
+    }
+  }
+
+  /**
+   * Test that verifies default values are configured and reported correctly.
+   * 
+   * @throws Exception
+   */
+  public void testDefaultResourceValues()
+      throws Exception {
+    JobConf conf = new JobConf();
+    try {
+      // Memory values are disabled by default.
+      conf.setClass(
+          org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
+          DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+      setUpCluster(conf);
+      JobConf jobConf = miniMRCluster.createJobConf();
+      jobConf.setClass(
+          org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
+          DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+      runSleepJob(jobConf);
+      verifyTestResults();
+    } finally {
+      tearDownCluster();
+    }
+  }
+
+  /**
+   * Test that verifies that configured values are reported correctly.
+   * 
+   * @throws Exception
+   */
+  public void testConfiguredResourceValues()
+      throws Exception {
+    JobConf conf = new JobConf();
+    conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
+    conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
+    conf.setLong("mapSlotMemorySize", 1 * 512L);
+    conf.setLong("reduceSlotMemorySize", 1 * 1024L);
+    conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
+    conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
+    conf.setLong("cumulativeCpuTime", 10000L);
+    conf.setLong("cpuFrequency", 2000000L);
+    conf.setInt("numProcessors", 8);
+    conf.setFloat("cpuUsage", 15.5F);
+    conf.setLong("procCumulativeCpuTime", 1000L);
+    conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
+    conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);
+
+    conf.setClass(
+        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
+        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+    conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
+        4 * 1024 * 1024 * 1024L);
+    conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
+        2 * 1024 * 1024 * 1024L);
+    conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
+    conf.setLong(JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
+    conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
+    conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
+    conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
+    conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
+    try {
+      setUpCluster(conf);
+      JobConf jobConf = miniMRCluster.createJobConf();
+      jobConf.setMemoryForMapTask(1 * 1024L);
+      jobConf.setMemoryForReduceTask(2 * 1024L);
+      jobConf.setClass(
+        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
+        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+      jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
+      jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
+                      2 * 1024 * 1024 * 1024L);
+      jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
+                      1024 * 1024 * 1024L);
+      runSleepJob(jobConf);
+      verifyTestResults();
+    } finally {
+      tearDownCluster();
+    }
+  }
+
+  /**
+   * Test that verifies that total memory values are calculated and reported
+   * correctly.
+   * 
+   * @throws Exception
+   */
+  public void testResourceValuesOnLinux()
+      throws Exception {
+    if (!System.getProperty("os.name").startsWith("Linux")) {
+      return;
+    }
+
+    JobConf conf = new JobConf();
+    LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
+    // In this case, we only check these three fields because they are static
+    conf.setLong("totalVmemOnTT", plugin.getVirtualMemorySize());
+    conf.setLong("totalPmemOnTT", plugin.getPhysicalMemorySize());
+    conf.setLong("numProcessors", plugin.getNumProcessors());
+
+    try {
+      setUpCluster(conf);
+      runSleepJob(miniMRCluster.createJobConf());
+      verifyTestResults(true);
+    } finally {
+      tearDownCluster();
+    }
+  }
+
+  private void setUpCluster(JobConf conf) throws Exception {
+    conf.setClass("mapred.jobtracker.taskScheduler",
+        TestTTResourceReporting.FakeTaskScheduler.class, TaskScheduler.class);
+    conf.set("mapred.job.tracker.handler.count", "1");
+    miniMRCluster = new MiniMRCluster(1, "file:///", 3, null, null, conf);
+  }
+  
+  private void runSleepJob(JobConf conf) throws Exception {
+    String[] args = { "-m", "1", "-r", "1",
+                      "-mt", "10", "-rt", "10" };
+    ToolRunner.run(conf, new SleepJob(), args);
+  }
+
+  private void verifyTestResults() {
+    verifyTestResults(false);
+  }
+
+  private void verifyTestResults(boolean excludeDynamic) {
+    FakeTaskScheduler scheduler = 
+      (FakeTaskScheduler)miniMRCluster.getJobTrackerRunner().
+                              getJobTracker().getTaskScheduler();
+    assertTrue(scheduler.getFailureMessage(), scheduler.hasTestPassed());
+    if (!excludeDynamic) {
+      assertTrue(scheduler.getFailureMessage(),
+                 scheduler.hasDynamicTestPassed());
+    }
+  }
+  
+  // TODO: convert this to an @After method
+  private void tearDownCluster() {
+    if (miniMRCluster != null) {
+      miniMRCluster.shutdown();
+    }
+  }
+}

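Note: setUpCluster() above selects the scheduler class (and the tests select
the resource calculator) through Hadoop's reflective configuration mechanism.
A generic sketch of that idiom; the property name and classes here are
stand-ins, not Hadoop API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ReflectionUtils;

    public class PluginSelection {
      public static class Base {}                 // stand-in base type
      public static class Custom extends Base {}  // stand-in implementation

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // record the implementation class under a configuration key
        conf.setClass("example.plugin.class", Custom.class, Base.class);
        // later, resolve and instantiate it reflectively
        Base plugin = ReflectionUtils.newInstance(
            conf.getClass("example.plugin.class", Base.class, Base.class),
            conf);
        System.out.println(plugin.getClass().getName()); // ...$Custom
      }
    }
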
Added: hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/DummyResourceCalculatorPlugin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/DummyResourceCalculatorPlugin.java?rev=1179465&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/DummyResourceCalculatorPlugin.java (added)
+++ hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/DummyResourceCalculatorPlugin.java Wed Oct  5 22:37:52 2011
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+/**
+ * Plugin class to test resource information reported by TT. Use
+ * configuration items {@link #MAXVMEM_TESTING_PROPERTY} and
+ * {@link #MAXPMEM_TESTING_PROPERTY} to tell TT the total vmem and the total
+ * pmem. Use configuration items {@link #NUM_PROCESSORS},
+ * {@link #CPU_FREQUENCY}, {@link #CUMULATIVE_CPU_TIME} and {@link #CPU_USAGE}
+ * to tell TT the CPU information.
+ */
+public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+
+  /** max vmem on the TT */
+  public static final String MAXVMEM_TESTING_PROPERTY =
+      "mapred.tasktracker.maxvmem.testing";
+  /** max pmem on the TT */
+  public static final String MAXPMEM_TESTING_PROPERTY =
+      "mapred.tasktracker.maxpmem.testing";
+  /** number of processors for testing */
+  public static final String NUM_PROCESSORS =
+      "mapred.tasktracker.numprocessors.testing";
+  /** CPU frequency for testing */
+  public static final String CPU_FREQUENCY =
+      "mapred.tasktracker.cpufrequency.testing";
+  /** cumulative CPU usage time for testing */
+  public static final String CUMULATIVE_CPU_TIME =
+      "mapred.tasktracker.cumulativecputime.testing";
+  /** CPU usage percentage for testing */
+  public static final String CPU_USAGE =
+      "mapred.tasktracker.cpuusage.testing";
+  /** process cumulative CPU usage time for testing */
+  public static final String PROC_CUMULATIVE_CPU_TIME =
+      "mapred.tasktracker.proccumulativecputime.testing";
+  /** process pmem for testing */
+  public static final String PROC_PMEM_TESTING_PROPERTY =
+      "mapred.tasktracker.procpmem.testing";
+  /** process vmem for testing */
+  public static final String PROC_VMEM_TESTING_PROPERTY =
+      "mapred.tasktracker.procvmem.testing";
+
+  /** {@inheritDoc} */
+  @Override
+  public long getVirtualMemorySize() {
+    return getConf().getLong(MAXVMEM_TESTING_PROPERTY, -1);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public long getPhysicalMemorySize() {
+    return getConf().getLong(MAXPMEM_TESTING_PROPERTY, -1);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public long getAvailableVirtualMemorySize() {
+    return getConf().getLong(MAXVMEM_TESTING_PROPERTY, -1);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public long getAvailablePhysicalMemorySize() {
+    return getConf().getLong(MAXPMEM_TESTING_PROPERTY, -1);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public int getNumProcessors() {
+    return getConf().getInt(NUM_PROCESSORS, -1);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public long getCpuFrequency() {
+    return getConf().getLong(CPU_FREQUENCY, -1);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public long getCumulativeCpuTime() {
+    return getConf().getLong(CUMULATIVE_CPU_TIME, -1);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public float getCpuUsage() {
+    return getConf().getFloat(CPU_USAGE, -1);
+  }
+
+  @Override
+  public ProcResourceValues getProcResourceValues() {
+    long cpuTime = getConf().getLong(PROC_CUMULATIVE_CPU_TIME, -1);
+    long pMem = getConf().getLong(PROC_PMEM_TESTING_PROPERTY, -1);
+    long vMem = getConf().getLong(PROC_VMEM_TESTING_PROPERTY, -1);
+    return new ProcResourceValues(cpuTime, pMem, vMem);
+  }
+}

Added: hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/TestLinuxResourceCalculatorPlugin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/TestLinuxResourceCalculatorPlugin.java?rev=1179465&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/TestLinuxResourceCalculatorPlugin.java (added)
+++ hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/TestLinuxResourceCalculatorPlugin.java Wed Oct  5 22:37:52 2011
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.TaskTrackerStatus;
+import org.junit.Test;
+
+/**
+ * A JUnit test for {@link LinuxResourceCalculatorPlugin}.
+ * It creates fake /proc/ files and verifies the parsing and calculation.
+ */
+public class TestLinuxResourceCalculatorPlugin extends TestCase {
+  /**
+   * LinuxResourceCalculatorPlugin with a fake timer
+   */
+  static class FakeLinuxResourceCalculatorPlugin extends
+      LinuxResourceCalculatorPlugin {
+    
+    long currentTime = 0;
+    public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
+                                             String procfsCpuFile,
+                                             String procfsStatFile,
+                                             long jiffyLengthInMillis) {
+      super(procfsMemFile, procfsCpuFile, procfsStatFile, jiffyLengthInMillis);
+    }
+    @Override
+    long getCurrentTime() {
+      return currentTime;
+    }
+    public void advanceTime(long adv) {
+      currentTime += adv * jiffyLengthInMillis;
+    }
+  }
+  private static final FakeLinuxResourceCalculatorPlugin plugin;
+  private static String TEST_ROOT_DIR = new Path(System.getProperty(
+         "test.build.data", "/tmp")).toString().replace(' ', '+');
+  private static final String FAKE_MEMFILE;
+  private static final String FAKE_CPUFILE;
+  private static final String FAKE_STATFILE;
+  private static final long FAKE_JIFFY_LENGTH = 10L;
+  static {
+    int randomNum = (new Random()).nextInt(1000000000);
+    FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
+    FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
+    FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
+    plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
+                                                   FAKE_STATFILE,
+                                                   FAKE_JIFFY_LENGTH);
+  }
+  static final String MEMINFO_FORMAT =
+    "MemTotal:      %d kB\n" +
+    "MemFree:         %d kB\n" +
+    "Buffers:        138244 kB\n" +
+    "Cached:         947780 kB\n" +
+    "SwapCached:     142880 kB\n" +
+    "Active:        3229888 kB\n" +
+    "Inactive:       %d kB\n" +
+    "SwapTotal:     %d kB\n" +
+    "SwapFree:      %d kB\n" +
+    "Dirty:          122012 kB\n" +
+    "Writeback:           0 kB\n" +
+    "AnonPages:     2710792 kB\n" +
+    "Mapped:          24740 kB\n" +
+    "Slab:           132528 kB\n" +
+    "SReclaimable:   105096 kB\n" +
+    "SUnreclaim:      27432 kB\n" +
+    "PageTables:      11448 kB\n" +
+    "NFS_Unstable:        0 kB\n" +
+    "Bounce:              0 kB\n" +
+    "CommitLimit:   4125904 kB\n" +
+    "Committed_AS:  4143556 kB\n" +
+    "VmallocTotal: 34359738367 kB\n" +
+    "VmallocUsed:      1632 kB\n" +
+    "VmallocChunk: 34359736375 kB\n" +
+    "HugePages_Total:     0\n" +
+    "HugePages_Free:      0\n" +
+    "HugePages_Rsvd:      0\n" +
+    "Hugepagesize:     2048 kB";
+  
+  static final String CPUINFO_FORMAT =
+    "processor : %s\n" +
+    "vendor_id : AuthenticAMD\n" +
+    "cpu family  : 15\n" +
+    "model   : 33\n" +
+    "model name  : Dual Core AMD Opteron(tm) Processor 280\n" +
+    "stepping  : 2\n" +
+    "cpu MHz   : %f\n" +
+    "cache size  : 1024 KB\n" +
+    "physical id : 0\n" +
+    "siblings  : 2\n" +
+    "core id   : 0\n" +
+    "cpu cores : 2\n" +
+    "fpu   : yes\n" +
+    "fpu_exception : yes\n" +
+    "cpuid level : 1\n" +
+    "wp    : yes\n" +
+    "flags   : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov " +
+    "pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt lm " +
+    "3dnowext 3dnow pni lahf_lm cmp_legacy\n" +
+    "bogomips  : 4792.41\n" +
+    "TLB size  : 1024 4K pages\n" +
+    "clflush size  : 64\n" +
+    "cache_alignment : 64\n" +
+    "address sizes : 40 bits physical, 48 bits virtual\n" +
+    "power management: ts fid vid ttp";
+  
+  static final String STAT_FILE_FORMAT = 
+    "cpu  %d %d %d 1646495089 831319 48713 164346 0\n" +
+    "cpu0 15096055 30805 3823005 411456015 206027 13 14269 0\n" +
+    "cpu1 14760561 89890 6432036 408707910 456857 48074 130857 0\n" +
+    "cpu2 12761169 20842 3758639 413976772 98028 411 10288 0\n" +
+    "cpu3 12355207 47322 5789691 412354390 70406 213 8931 0\n" +
+    "intr 114648668 20010764 2 0 945665 2 0 0 0 0 0 0 0 4 0 0 0 0 0 0\n" +
+    "ctxt 242017731764\n" +
+    "btime 1257808753\n" +
+    "processes 26414943\n" +
+    "procs_running 1\n" +
+    "procs_blocked 0\n";
+  
+  /**
+   * Test parsing /proc/stat and /proc/cpuinfo
+   * @throws IOException
+   */
+  public void testParsingProcStatAndCpuFile() throws IOException {
+    // Write fake /proc/cpuinfo file.
+    long numProcessors = 8;
+    long cpuFrequencyKHz = 2392781;
+    String fileContent = "";
+    for (int i = 0; i < numProcessors; i++) {
+      fileContent += String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D) +
+                     "\n";
+    }
+    File tempFile = new File(FAKE_CPUFILE);
+    tempFile.deleteOnExit();
+    FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
+    fWriter.write(fileContent);
+    fWriter.close();
+    assertEquals(plugin.getNumProcessors(), numProcessors);
+    assertEquals(plugin.getCpuFrequency(), cpuFrequencyKHz);
+    
+    // Write fake /proc/stat file.
+    long uTime = 54972994;
+    long nTime = 188860;
+    long sTime = 19803373;
+    tempFile = new File(FAKE_STATFILE);
+    tempFile.deleteOnExit();
+    updateStatFile(uTime, nTime, sTime);
+    assertEquals(plugin.getCumulativeCpuTime(),
+                 FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+    assertEquals(plugin.getCpuUsage(), (float)(TaskTrackerStatus.UNAVAILABLE));
+    
+    // Advance the time and sample again to test the CPU usage calculation
+    uTime += 100L;
+    plugin.advanceTime(200L);
+    updateStatFile(uTime, nTime, sTime);
+    assertEquals(plugin.getCumulativeCpuTime(),
+                 FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+    assertEquals(plugin.getCpuUsage(), 6.25F);
+    
+    // Advance the time and sample again. This time, we call getCpuUsage() only.
+    uTime += 600L;
+    plugin.advanceTime(300L);
+    updateStatFile(uTime, nTime, sTime);
+    assertEquals(plugin.getCpuUsage(), 25F);
+    
+    // Advance a very short period of time (one jiffy length).
+    // In this case, CPU usage should not be updated.
+    uTime += 1L;
+    plugin.advanceTime(1L);
+    updateStatFile(uTime, nTime, sTime);
+    assertEquals(plugin.getCumulativeCpuTime(),
+                 FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+    assertEquals(plugin.getCpuUsage(), 25F); // CPU usage is not updated.
+  }
+  
+  /**
+   * Write information to fake /proc/stat file
+   */
+  private void updateStatFile(long uTime, long nTime, long sTime)
+    throws IOException {
+    FileWriter fWriter = new FileWriter(FAKE_STATFILE);
+    fWriter.write(String.format(STAT_FILE_FORMAT, uTime, nTime, sTime));
+    fWriter.close();
+  }
+  
+  /**
+   * Test parsing /proc/meminfo
+   * @throws IOException
+   */
+  public void testParsingProcMemFile() throws IOException {
+    long memTotal = 4058864L;
+    long memFree = 99632L;
+    long inactive = 567732L;
+    long swapTotal = 2096472L;
+    long swapFree = 1818480L;
+    File tempFile = new File(FAKE_MEMFILE);
+    tempFile.deleteOnExit();
+    FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
+    fWriter.write(String.format(MEMINFO_FORMAT,
+      memTotal, memFree, inactive, swapTotal, swapFree));
+    
+    fWriter.close();
+    assertEquals(plugin.getAvailablePhysicalMemorySize(),
+                 1024L * (memFree + inactive));
+    assertEquals(plugin.getAvailableVirtualMemorySize(),
+                 1024L * (memFree + inactive + swapFree));
+    assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
+    assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
+  }
+}

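Note: the 6.25F and 25F values asserted in testParsingProcStatAndCpuFile above
are consistent with the plugin deriving usage as the growth in cumulative CPU
time divided by the elapsed wall-clock time across all processors (a reading
inferred from the numbers in the test, not stated in the patch):

    first sample:  uTime grows by 100 jiffies * 10 ms = 1000 ms of CPU time,
                   while the clock advances 200 jiffies * 10 ms = 2000 ms
                   on 8 processors: 1000 / (2000 * 8) = 6.25%

    second sample: 600 jiffies * 10 ms = 6000 ms of CPU time over
                   300 jiffies * 10 ms = 3000 ms on 8 processors:
                   6000 / (3000 * 8) = 25%
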
Modified: hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/TestProcfsBasedProcessTree.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/TestProcfsBasedProcessTree.java?rev=1179465&r1=1179464&r2=1179465&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/TestProcfsBasedProcessTree.java (original)
+++ hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/util/TestProcfsBasedProcessTree.java Wed Oct  5 22:37:52 2011
@@ -22,6 +22,7 @@ import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Random;
 import java.util.Vector;
 import java.util.regex.Matcher;
@@ -182,12 +183,12 @@ public class TestProcfsBasedProcessTree 
 
     LOG.info("Process-tree dump follows: \n" + processTreeDump);
     assertTrue("Process-tree dump doesn't start with a proper header",
-        processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME "
-            + "VMEM_USAGE(BYTES) FULL_CMD_LINE\n"));
+        processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " +
+        "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " +
+        "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
     for (int i = N; i >= 0; i--) {
-      String cmdLineDump =
-          "\\|- [0-9]+ [0-9]+ [0-9]+ [0-9]+ \\(sh\\) [0-9]+ sh " + shellScript
-              + " " + i;
+      String cmdLineDump = "\\|- [0-9]+ [0-9]+ [0-9]+ [0-9]+ \\(sh\\)" +
+          " [0-9]+ [0-9]+ [0-9]+ [0-9]+ sh " + shellScript + " " + i;
       Pattern pat = Pattern.compile(cmdLineDump);
       Matcher mat = pat.matcher(processTreeDump);
       assertTrue("Process-tree dump doesn't contain the cmdLineDump of " + i
@@ -221,7 +222,10 @@ public class TestProcfsBasedProcessTree 
     String ppid;
     String pgrpId;
     String session;
-    String vmem;
+    String vmem = "0";
+    String rssmemPage = "0";
+    String utime = "0";
+    String stime = "0";
     
     public ProcessStatInfo(String[] statEntries) {
       pid = statEntries[0];
@@ -230,27 +234,35 @@ public class TestProcfsBasedProcessTree 
       pgrpId = statEntries[3];
       session = statEntries[4];
       vmem = statEntries[5];
+      if (statEntries.length > 6) {
+        rssmemPage = statEntries[6];
+      }
+      if (statEntries.length > 7) {
+        utime = statEntries[7];
+        stime = statEntries[8];
+      }
     }
     
     // construct a line that mimics the procfs stat file.
     // all unused numerical entries are set to 0.
     public String getStatLine() {
       return String.format("%s (%s) S %s %s %s 0 0 0" +
-                      " 0 0 0 0 0 0 0 0 0 0 0 0 0 %s 0 0 0" +
+                      " 0 0 0 0 %s %s 0 0 0 0 0 0 0 %s %s 0 0" +
                       " 0 0 0 0 0 0 0 0" +
                       " 0 0 0 0 0", 
-                      pid, name, ppid, pgrpId, session, vmem);
+                      pid, name, ppid, pgrpId, session,
+                      utime, stime, vmem, rssmemPage);
     }
   }
   
   /**
    * A basic test that creates a few process directories and writes
-   * stat files. Verifies that the virtual memory is correctly  
+   * stat files. Verifies that the cpu time and memory are correctly
    * computed.
    * @throws IOException if there was a problem setting up the
    *                      fake procfs directories or files.
    */
-  public void testVirtualMemoryForProcessTree() throws IOException {
+  public void testCpuAndMemoryForProcessTree() throws IOException {
 
     // test processes
     String[] pids = { "100", "200", "300", "400" };
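
The format string in getStatLine above is laid out to match the real proc(5) stat layout: utime and stime land at fields 14 and 15 (in jiffies), vsize at field 23 (bytes) and rss at field 24 (pages), with every unused field zeroed. A hedged sketch of reading those fields back out of such a line; field numbering follows proc(5), and this is illustrative rather than how ProcfsBasedProcessTree itself parses:

    // Everything after ") " is space-separated starting at field 3 (the
    // state), so stat field N sits at index N - 3 of the split array.
    static long[] readCpuAndMem(String statLine) {
      String[] f = statLine.substring(statLine.indexOf(") ") + 2).split(" ");
      long utime = Long.parseLong(f[14 - 3]); // jiffies in user mode
      long stime = Long.parseLong(f[15 - 3]); // jiffies in kernel mode
      long vsize = Long.parseLong(f[23 - 3]); // virtual memory, bytes
      long rss   = Long.parseLong(f[24 - 3]); // resident set, pages
      return new long[] { utime, stime, vsize, rss };
    }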
@@ -265,13 +277,13 @@ public class TestProcfsBasedProcessTree 
       // assuming processes 100, 200, 300 are in tree and 400 is not.
       ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
       procInfos[0] = new ProcessStatInfo(new String[] 
-                                  {"100", "proc1", "1", "100", "100", "100000"});
+          {"100", "proc1", "1", "100", "100", "100000", "100", "1000", "200"});
       procInfos[1] = new ProcessStatInfo(new String[] 
-                                  {"200", "proc2", "100", "100", "100", "200000"});
+          {"200", "proc2", "100", "100", "100", "200000", "200", "2000", "400"});
       procInfos[2] = new ProcessStatInfo(new String[] 
-                                  {"300", "proc3", "200", "100", "100", "300000"});
+          {"300", "proc3", "200", "100", "100", "300000", "300", "3000", "600"});
       procInfos[3] = new ProcessStatInfo(new String[] 
-                                  {"400", "proc4", "1", "400", "400", "400000"});
+          {"400", "proc4", "1", "400", "400", "400000", "400", "4000", "800"});
       
       writeStatFiles(procfsRootDir, pids, procInfos);
       
@@ -282,8 +294,36 @@ public class TestProcfsBasedProcessTree 
       processTree.getProcessTree();
       
       // verify cumulative memory
-      assertEquals("Cumulative memory does not match", 
-              Long.parseLong("600000"), processTree.getCumulativeVmem());
+      assertEquals("Cumulative virtual memory does not match", 600000L,
+                   processTree.getCumulativeVmem());
+
+      // verify rss memory
+      long cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+                        600L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+      assertEquals("Cumulative rss memory does not match",
+                   cumuRssMem, processTree.getCumulativeRssmem());
+
+      // verify cumulative cpu time
+      long cumuCpuTime = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ?
+             7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
+      assertEquals("Cumulative cpu time does not match",
+                   cumuCpuTime, processTree.getCumulativeCpuTime());
+
+      // test the cpu time again to see that it accumulates
+      procInfos[0] = new ProcessStatInfo(new String[]
+          {"100", "proc1", "1", "100", "100", "100000", "100", "2000", "300"});
+      procInfos[1] = new ProcessStatInfo(new String[]
+          {"200", "proc2", "100", "100", "100", "200000", "200", "3000", "500"});
+      writeStatFiles(procfsRootDir, pids, procInfos);
+
+      // build the process tree.
+      processTree.getProcessTree();
+
+      // verify cumulative cpu time again
+      cumuCpuTime = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ?
+             9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
+      assertEquals("Cumulative cpu time does not match",
+                   cumuCpuTime, processTree.getCumulativeCpuTime());
     } finally {
       FileUtil.fullyDelete(procfsRootDir);
     }
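
The expected constants above come straight from the fake stat files: the three in-tree processes contribute (1000+200) + (2000+400) + (3000+600) = 7200 jiffies of utime plus stime and 100 + 200 + 300 = 600 rss pages, and after procs 100 and 200 are rewritten the jiffy total becomes (2000+300) + (3000+500) + (3000+600) = 9400. Since proc(5) reports CPU in jiffies and rss in pages, the assertions scale by the platform constants and fall back to 0 where a constant is unavailable (negative). A minimal sketch of the same scaling:

    // Sketch of the conversions the assertions rely on; the two constants
    // are the ones ProcfsBasedProcessTree exposes, negative when unknown.
    long jiffies = (1000 + 200) + (2000 + 400) + (3000 + 600); // 7200
    long cpuMillis = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ?
        jiffies * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
    long rssBytes = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
        (100L + 200L + 300L) * ProcfsBasedProcessTree.PAGE_SIZE : 0L;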
@@ -295,7 +335,7 @@ public class TestProcfsBasedProcessTree 
    * @throws IOException if there was a problem setting up the
    *                      fake procfs directories or files.
    */
-  public void testVMemForOlderProcesses() throws IOException {
+  public void testMemForOlderProcesses() throws IOException {
     // initial list of processes
     String[] pids = { "100", "200", "300", "400" };
     // create the fake procfs root directory. 
@@ -309,13 +349,13 @@ public class TestProcfsBasedProcessTree 
       // assuming 100, 200 and 400 are in tree, 300 is not.
       ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
       procInfos[0] = new ProcessStatInfo(new String[] 
-                                  {"100", "proc1", "1", "100", "100", "100000"});
+                        {"100", "proc1", "1", "100", "100", "100000", "100"});
       procInfos[1] = new ProcessStatInfo(new String[] 
-                                  {"200", "proc2", "100", "100", "100", "200000"});
+                        {"200", "proc2", "100", "100", "100", "200000", "200"});
       procInfos[2] = new ProcessStatInfo(new String[] 
-                                  {"300", "proc3", "1", "300", "300", "300000"});
+                        {"300", "proc3", "1", "300", "300", "300000", "300"});
       procInfos[3] = new ProcessStatInfo(new String[] 
-                                  {"400", "proc4", "100", "100", "100", "400000"});
+                        {"400", "proc4", "100", "100", "100", "400000", "400"});
       
       writeStatFiles(procfsRootDir, pids, procInfos);
       
@@ -326,51 +366,69 @@ public class TestProcfsBasedProcessTree 
       processTree.getProcessTree();
       
       // verify cumulative memory
-      assertEquals("Cumulative memory does not match", 
-              Long.parseLong("700000"), processTree.getCumulativeVmem());
-      
+      assertEquals("Cumulative memory does not match",
+                   700000L, processTree.getCumulativeVmem());
+
       // write one more process as child of 100.
       String[] newPids = { "500" };
       setupPidDirs(procfsRootDir, newPids);
       
       ProcessStatInfo[] newProcInfos = new ProcessStatInfo[1];
       newProcInfos[0] = new ProcessStatInfo(new String[]
-                             {"500", "proc5", "100", "100", "100", "500000"});
+                      {"500", "proc5", "100", "100", "100", "500000", "500"});
       writeStatFiles(procfsRootDir, newPids, newProcInfos);
       
-      // check vmem includes the new process.
+      // check that vmem and rssmem include the new process.
       processTree.getProcessTree();
-      assertEquals("Cumulative memory does not include new process",
-              Long.parseLong("1200000"), processTree.getCumulativeVmem());
+      assertEquals("Cumulative vmem does not include new process",
+                   1200000L, processTree.getCumulativeVmem());
+      long cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+                        1200L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+      assertEquals("Cumulative rssmem does not include new process",
+                   cumuRssMem, processTree.getCumulativeRssmem());
       
       // however processes older than 1 iteration will retain the older value
-      assertEquals("Cumulative memory shouldn't have included new process",
-              Long.parseLong("700000"), processTree.getCumulativeVmem(1));
-      
+      assertEquals("Cumulative vmem shouldn't have included new process",
+                   700000L, processTree.getCumulativeVmem(1));
+      cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+                   700L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+      assertEquals("Cumulative rssmem shouldn't have included new process",
+                   cumuRssMem, processTree.getCumulativeRssmem(1));
+
       // one more process
       newPids = new String[]{ "600" };
       setupPidDirs(procfsRootDir, newPids);
       
       newProcInfos = new ProcessStatInfo[1];
       newProcInfos[0] = new ProcessStatInfo(new String[]
-                                     {"600", "proc6", "100", "100", "100", "600000"});
+                      {"600", "proc6", "100", "100", "100", "600000", "600"});
       writeStatFiles(procfsRootDir, newPids, newProcInfos);
 
       // refresh process tree
       processTree.getProcessTree();
       
       // processes older than 2 iterations should be same as before.
-      assertEquals("Cumulative memory shouldn't have included new processes",
-          Long.parseLong("700000"), processTree.getCumulativeVmem(2));
-      
+      assertEquals("Cumulative vmem shouldn't have included new processes",
+                   700000L, processTree.getCumulativeVmem(2));
+      cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+                   700L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+      assertEquals("Cumulative rssmem shouldn't have included new processes",
+                   cumuRssMem, processTree.getCumulativeRssmem(2));
+
       // processes older than 1 iteration should not include new process,
       // but include process 500
-      assertEquals("Cumulative memory shouldn't have included new processes",
-          Long.parseLong("1200000"), processTree.getCumulativeVmem(1));
-      
+      assertEquals("Cumulative vmem shouldn't have included new processes",
+                   1200000L, processTree.getCumulativeVmem(1));
+      cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
+                   1200L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
+      assertEquals("Cumulative rssmem shouldn't have included new processes",
+                   cumuRssMem, processTree.getCumulativeRssmem(1));
+
       // no processes older than 3 iterations, this should be 0
       assertEquals("Getting non-zero vmem for processes older than 3 iterations",
                     0L, processTree.getCumulativeVmem(3));
+      assertEquals("Getting non-zero rssmem for processes older than 3 iterations",
+                    0L, processTree.getCumulativeRssmem(3));
     } finally {
       FileUtil.fullyDelete(procfsRootDir);
     }
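
The olderThanAge variants exercised above only count processes that have survived strictly more than olderThanAge refreshes of the tree, which is why 500 is excluded from the olderThanAge=1 query right after it first appears but included once one more refresh has passed, and why a cutoff of 3 returns 0 when nothing has been tracked that long. A hedged sketch of that filter; the ProcInfo type and its age field are hypothetical stand-ins for the tree's internal bookkeeping:

    import java.util.List;

    // Hypothetical ProcInfo carries vmemBytes plus an age that starts at 1
    // when a pid is first seen and is bumped on each getProcessTree().
    static long cumulativeVmem(List<ProcInfo> tree, int olderThanAge) {
      long total = 0;
      for (ProcInfo p : tree) {
        if (p.age > olderThanAge) { // strictly older than the cutoff
          total += p.vmemBytes;
        }
      }
      return total;
    }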
@@ -395,24 +453,18 @@ public class TestProcfsBasedProcessTree 
       int numProcesses = pids.length;
       // Processes 200, 300, 400 and 500 are descendants of 100. 600 is not.
       ProcessStatInfo[] procInfos = new ProcessStatInfo[numProcesses];
-      procInfos[0] =
-          new ProcessStatInfo(new String[] { "100", "proc1", "1", "100",
-              "100", "100000" });
-      procInfos[1] =
-          new ProcessStatInfo(new String[] { "200", "proc2", "100", "100",
-              "100", "200000" });
-      procInfos[2] =
-          new ProcessStatInfo(new String[] { "300", "proc3", "200", "100",
-              "100", "300000" });
-      procInfos[3] =
-          new ProcessStatInfo(new String[] { "400", "proc4", "200", "100",
-              "100", "400000" });
-      procInfos[4] =
-          new ProcessStatInfo(new String[] { "500", "proc5", "400", "100",
-              "100", "400000" });
-      procInfos[5] =
-          new ProcessStatInfo(new String[] { "600", "proc6", "1", "1", "1",
-              "400000" });
+      procInfos[0] = new ProcessStatInfo(new String[] {
+          "100", "proc1", "1", "100", "100", "100000", "100", "1000", "200"});
+      procInfos[1] = new ProcessStatInfo(new String[] {
+          "200", "proc2", "100", "100", "100", "200000", "200", "2000", "400"});
+      procInfos[2] = new ProcessStatInfo(new String[] {
+          "300", "proc3", "200", "100", "100", "300000", "300", "3000", "600"});
+      procInfos[3] = new ProcessStatInfo(new String[] {
+          "400", "proc4", "200", "100", "100", "400000", "400", "4000", "800"});
+      procInfos[4] = new ProcessStatInfo(new String[] {
+          "500", "proc5", "400", "100", "100", "400000", "400", "4000", "800"});
+      procInfos[5] = new ProcessStatInfo(new String[] {
+          "600", "proc6", "1", "1", "1", "400000", "400", "4000", "800"});
 
       String[] cmdLines = new String[numProcesses];
       cmdLines[0] = "proc1 arg1 arg2";
@@ -435,15 +487,17 @@ public class TestProcfsBasedProcessTree 
 
       LOG.info("Process-tree dump follows: \n" + processTreeDump);
       assertTrue("Process-tree dump doesn't start with a proper header",
-          processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME "
-              + "VMEM_USAGE(BYTES) FULL_CMD_LINE\n"));
+          processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " +
+          "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " +
+          "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
       for (int i = 0; i < 5; i++) {
         ProcessStatInfo p = procInfos[i];
         assertTrue(
             "Process-tree dump doesn't contain the cmdLineDump of process "
                 + p.pid, processTreeDump.contains("\t|- " + p.pid + " "
                 + p.ppid + " " + p.pgrpId + " " + p.session + " (" + p.name
-                + ") " + p.vmem + " " + cmdLines[i]));
+                + ") " + p.utime + " " + p.stime + " " + p.vmem + " "
+                + p.rssmemPage + " " + cmdLines[i]));
       }
 
       // 600 should not be in the dump
@@ -452,7 +506,7 @@ public class TestProcfsBasedProcessTree 
           "Process-tree dump shouldn't contain the cmdLineDump of process "
               + p.pid, processTreeDump.contains("\t|- " + p.pid + " " + p.ppid
               + " " + p.pgrpId + " " + p.session + " (" + p.name + ") "
-              + p.vmem + " " + cmdLines[5]));
+              + p.utime + " " + p.stime + " " + p.vmem + " " + cmdLines[5]));
     } finally {
       FileUtil.fullyDelete(procfsRootDir);
     }
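
Process 600 stays out of the dump because its parent is pid 1, so it is never reachable from the tree rooted at 100, while 200 through 500 all chain back to the root. A hedged illustration of that containment rule; the real tree is built once from a parent-to-children map, and walking ppid links as below is just the simplest way to state the condition:

    import java.util.Map;

    // True iff following ppid links from pid reaches root. proc6 has
    // ppid 1, never reaches 100, and so is excluded from the dump.
    static boolean inTree(Map<String, String> ppidOf, String pid, String root) {
      for (String p = pid; p != null; p = ppidOf.get(p)) {
        if (p.equals(root)) {
          return true;
        }
      }
      return false;
    }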