You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by am...@apache.org on 2011/09/28 14:42:29 UTC
svn commit: r1176853 - in /hadoop/common/branches/branch-0.20-security/src:
core/org/apache/hadoop/util/ test/org/apache/hadoop/mapred/
test/org/apache/hadoop/util/
Author: amarrk
Date: Wed Sep 28 12:42:28 2011
New Revision: 1176853
URL: http://svn.apache.org/viewvc?rev=1176853&view=rev
Log:
MAPREDUCE-2777. Adding new files
Added:
hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/LinuxResourceCalculatorPlugin.java
hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/ResourceCalculatorPlugin.java
hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java
hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/DummyResourceCalculatorPlugin.java
hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/TestLinuxResourceCalculatorPlugin.java
Added: hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/LinuxResourceCalculatorPlugin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/LinuxResourceCalculatorPlugin.java?rev=1176853&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/LinuxResourceCalculatorPlugin.java (added)
+++ hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/LinuxResourceCalculatorPlugin.java Wed Sep 28 12:42:28 2011
@@ -0,0 +1,407 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Plugin to calculate resource information on Linux systems.
+ */
+public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+ private static final Log LOG =
+ LogFactory.getLog(LinuxResourceCalculatorPlugin.class);
+
+ /**
+ * proc's meminfo virtual file has keys-values in the format
+ * "key:[ \t]*value[ \t]kB".
+ */
+ private static final String PROCFS_MEMFILE = "/proc/meminfo";
+ private static final Pattern PROCFS_MEMFILE_FORMAT =
+ Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");
+
+ // We need the values for the following keys in meminfo
+ private static final String MEMTOTAL_STRING = "MemTotal";
+ private static final String SWAPTOTAL_STRING = "SwapTotal";
+ private static final String MEMFREE_STRING = "MemFree";
+ private static final String SWAPFREE_STRING = "SwapFree";
+ private static final String INACTIVE_STRING = "Inactive";
+ private static final int UNAVAILABLE = -1;
+
+ /**
+ * Patterns for parsing /proc/cpuinfo
+ */
+ private static final String PROCFS_CPUINFO = "/proc/cpuinfo";
+ private static final Pattern PROCESSOR_FORMAT =
+ Pattern.compile("^processor[ \t]:[ \t]*([0-9]*)");
+ private static final Pattern FREQUENCY_FORMAT =
+ Pattern.compile("^cpu MHz[ \t]*:[ \t]*([0-9.]*)");
+
+ /**
+ * Pattern for parsing /proc/stat
+ */
+ private static final String PROCFS_STAT = "/proc/stat";
+ // Captures user (group 1), nice (group 2) and system (group 3) CPU
+ // times from the aggregate "cpu" line; values are in jiffies.
+ private static final Pattern CPU_TIME_FORMAT =
+ Pattern.compile("^cpu[ \t]*([0-9]*)" +
+ "[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*");
+
+ private String procfsMemFile;
+ private String procfsCpuFile;
+ private String procfsStatFile;
+ long jiffyLengthInMillis;
+
+ private long ramSize = 0;
+ private long swapSize = 0;
+ private long ramSizeFree = 0; // free ram space on the machine (kB)
+ private long swapSizeFree = 0; // free swap space on the machine (kB)
+ private long inactiveSize = 0; // inactive cache memory (kB)
+ private int numProcessors = 0; // number of processors on the system
+ private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
+ private long cumulativeCpuTime = 0L; // CPU used time since system is on (ms)
+ private long lastCumulativeCpuTime = 0L; // CPU used time read last time (ms)
+ // Unix timestamp while reading the CPU time (ms)
+ private float cpuUsage = UNAVAILABLE; // CPU usage (%); UNAVAILABLE until two samples exist
+ private long sampleTime = UNAVAILABLE;
+ private long lastSampleTime = UNAVAILABLE;
+ private ProcfsBasedProcessTree pTree = null;
+
+ boolean readMemInfoFile = false;
+ boolean readCpuInfoFile = false;
+
+ /**
+ * Get current time
+ * @return Unix time stamp in millisecond
+ */
+ long getCurrentTime() {
+ return System.currentTimeMillis();
+ }
+
+ // Default constructor: read the real /proc files; the process tree is
+ // rooted at the pid published in the JVM_PID environment variable.
+ public LinuxResourceCalculatorPlugin() {
+ procfsMemFile = PROCFS_MEMFILE;
+ procfsCpuFile = PROCFS_CPUINFO;
+ procfsStatFile = PROCFS_STAT;
+ jiffyLengthInMillis = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS;
+ String pid = System.getenv().get("JVM_PID");
+ pTree = new ProcfsBasedProcessTree(pid);
+ }
+
+ /**
+ * Constructor which allows assigning the /proc/ directories. This will be
+ * used only in unit tests
+ * @param procfsMemFile fake file for /proc/meminfo
+ * @param procfsCpuFile fake file for /proc/cpuinfo
+ * @param procfsStatFile fake file for /proc/stat
+ * @param jiffyLengthInMillis fake jiffy length value
+ */
+ public LinuxResourceCalculatorPlugin(String procfsMemFile,
+ String procfsCpuFile,
+ String procfsStatFile,
+ long jiffyLengthInMillis) {
+ this.procfsMemFile = procfsMemFile;
+ this.procfsCpuFile = procfsCpuFile;
+ this.procfsStatFile = procfsStatFile;
+ this.jiffyLengthInMillis = jiffyLengthInMillis;
+ String pid = System.getenv().get("JVM_PID");
+ pTree = new ProcfsBasedProcessTree(pid);
+ }
+
+ /**
+ * Read /proc/meminfo, parse and compute memory information only once
+ */
+ private void readProcMemInfoFile() {
+ readProcMemInfoFile(false);
+ }
+
+ /**
+ * Read /proc/meminfo, parse and compute memory information
+ * @param readAgain if false, read only on the first time
+ */
+ private void readProcMemInfoFile(boolean readAgain) {
+
+ if (readMemInfoFile && !readAgain) {
+ return;
+ }
+
+ // Read "/proc/meminfo" file
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader = new FileReader(procfsMemFile);
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+
+ Matcher mat = null;
+
+ try {
+ String str = in.readLine();
+ while (str != null) {
+ mat = PROCFS_MEMFILE_FORMAT.matcher(str);
+ if (mat.find()) {
+ if (mat.group(1).equals(MEMTOTAL_STRING)) {
+ ramSize = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
+ swapSize = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(MEMFREE_STRING)) {
+ ramSizeFree = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(SWAPFREE_STRING)) {
+ swapSizeFree = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(INACTIVE_STRING)) {
+ inactiveSize = Long.parseLong(mat.group(2));
+ }
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ // NOTE(review): fReader is closed before the BufferedReader that
+ // wraps it; closing only `in` would suffice and is the usual order.
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+
+ readMemInfoFile = true;
+ }
+
+ /**
+ * Read /proc/cpuinfo, parse and calculate CPU information
+ */
+ private void readProcCpuInfoFile() {
+ // This directory needs to be read only once
+ if (readCpuInfoFile) {
+ return;
+ }
+ // Read "/proc/cpuinfo" file
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader = new FileReader(procfsCpuFile);
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+ Matcher mat = null;
+ try {
+ numProcessors = 0;
+ String str = in.readLine();
+ while (str != null) {
+ mat = PROCESSOR_FORMAT.matcher(str);
+ if (mat.find()) {
+ numProcessors++;
+ }
+ mat = FREQUENCY_FORMAT.matcher(str);
+ if (mat.find()) {
+ cpuFrequency = (long)(Double.parseDouble(mat.group(1)) * 1000); // kHz
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+ readCpuInfoFile = true;
+ }
+
+ /**
+ * Read /proc/stat file, parse and calculate cumulative CPU
+ */
+ private void readProcStatFile() {
+ // Read "/proc/stat" file
+ BufferedReader in = null;
+ FileReader fReader = null;
+ try {
+ fReader = new FileReader(procfsStatFile);
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+
+ Matcher mat = null;
+ try {
+ String str = in.readLine();
+ while (str != null) {
+ mat = CPU_TIME_FORMAT.matcher(str);
+ if (mat.find()) {
+ long uTime = Long.parseLong(mat.group(1));
+ long nTime = Long.parseLong(mat.group(2));
+ long sTime = Long.parseLong(mat.group(3));
+ cumulativeCpuTime = uTime + nTime + sTime; // in jiffies here; scaled to ms below
+ break;
+ }
+ str = in.readLine();
+ }
+ // NOTE(review): if CPU_TIME_FORMAT never matched, the previous
+ // (already-scaled) value is multiplied again — confirm /proc/stat
+ // always contains an aggregate "cpu" line.
+ cumulativeCpuTime *= jiffyLengthInMillis;
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getPhysicalMemorySize() {
+ readProcMemInfoFile();
+ return ramSize * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getVirtualMemorySize() {
+ readProcMemInfoFile();
+ return (ramSize + swapSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailablePhysicalMemorySize() {
+ readProcMemInfoFile(true);
+ return (ramSizeFree + inactiveSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailableVirtualMemorySize() {
+ readProcMemInfoFile(true);
+ return (ramSizeFree + swapSizeFree + inactiveSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getNumProcessors() {
+ readProcCpuInfoFile();
+ return numProcessors;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCpuFrequency() {
+ readProcCpuInfoFile();
+ return cpuFrequency;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCumulativeCpuTime() {
+ readProcStatFile();
+ return cumulativeCpuTime;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public float getCpuUsage() {
+ readProcStatFile();
+ sampleTime = getCurrentTime();
+ if (lastSampleTime == UNAVAILABLE ||
+ lastSampleTime > sampleTime) {
+ // lastSampleTime > sampleTime may happen when the system time is changed
+ lastSampleTime = sampleTime;
+ lastCumulativeCpuTime = cumulativeCpuTime;
+ return cpuUsage;
+ }
+ // When lastSampleTime is sufficiently old, update cpuUsage.
+ // Also take a sample of the current time and cumulative CPU time for the
+ // use of the next calculation.
+ final long MINIMUM_UPDATE_INTERVAL = 10 * jiffyLengthInMillis;
+ if (sampleTime > lastSampleTime + MINIMUM_UPDATE_INTERVAL) {
+ cpuUsage = (float)(cumulativeCpuTime - lastCumulativeCpuTime) * 100F /
+ ((float)(sampleTime - lastSampleTime) * getNumProcessors());
+ lastSampleTime = sampleTime;
+ lastCumulativeCpuTime = cumulativeCpuTime;
+ }
+ return cpuUsage;
+ }
+
+ /**
+ * Test the {@link LinuxResourceCalculatorPlugin}
+ *
+ * @param args
+ */
+ public static void main(String[] args) {
+ LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
+ System.out.println("Physical memory Size (bytes) : "
+ + plugin.getPhysicalMemorySize());
+ System.out.println("Total Virtual memory Size (bytes) : "
+ + plugin.getVirtualMemorySize());
+ System.out.println("Available Physical memory Size (bytes) : "
+ + plugin.getAvailablePhysicalMemorySize());
+ System.out.println("Total Available Virtual memory Size (bytes) : "
+ + plugin.getAvailableVirtualMemorySize());
+ System.out.println("Number of Processors : " + plugin.getNumProcessors());
+ System.out.println("CPU frequency (kHz) : " + plugin.getCpuFrequency());
+ System.out.println("Cumulative CPU time (ms) : " +
+ plugin.getCumulativeCpuTime());
+ try {
+ // Sleep so we can compute the CPU usage
+ Thread.sleep(500L);
+ } catch (InterruptedException e) {
+ // do nothing
+ }
+ System.out.println("CPU usage % : " + plugin.getCpuUsage());
+ }
+
+ /**
+ * {@inheritDoc}
+ * Values are taken from the process tree rooted at the pid supplied via
+ * the JVM_PID environment variable at construction time.
+ */
+ @Override
+ public ProcResourceValues getProcResourceValues() {
+ pTree = pTree.getProcessTree();
+ long cpuTime = pTree.getCumulativeCpuTime();
+ long pMem = pTree.getCumulativeRssmem();
+ long vMem = pTree.getCumulativeVmem();
+ return new ProcResourceValues(cpuTime, pMem, vMem);
+ }
+}
Added: hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/ResourceCalculatorPlugin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/ResourceCalculatorPlugin.java?rev=1176853&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/ResourceCalculatorPlugin.java (added)
+++ hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/util/ResourceCalculatorPlugin.java Wed Sep 28 12:42:28 2011
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * Plugin to calculate resource information on the system.
+ *
+ */
+public abstract class ResourceCalculatorPlugin extends Configured {
+
+ /**
+ * Obtain the total size of the virtual memory present in the system.
+ *
+ * @return virtual memory size in bytes.
+ */
+ public abstract long getVirtualMemorySize();
+
+ /**
+ * Obtain the total size of the physical memory present in the system.
+ *
+ * @return physical memory size in bytes.
+ */
+ public abstract long getPhysicalMemorySize();
+
+ /**
+ * Obtain the total size of the available virtual memory present
+ * in the system.
+ *
+ * @return available virtual memory size in bytes.
+ */
+ public abstract long getAvailableVirtualMemorySize();
+
+ /**
+ * Obtain the total size of the available physical memory present
+ * in the system.
+ *
+ * @return available physical memory size in bytes.
+ */
+ public abstract long getAvailablePhysicalMemorySize();
+
+ /**
+ * Obtain the total number of processors present on the system.
+ *
+ * @return number of processors
+ */
+ public abstract int getNumProcessors();
+
+ /**
+ * Obtain the CPU frequency of the system.
+ *
+ * @return CPU frequency in kHz
+ */
+ public abstract long getCpuFrequency();
+
+ /**
+ * Obtain the cumulative CPU time since the system is on.
+ *
+ * @return cumulative CPU time in milliseconds
+ */
+ public abstract long getCumulativeCpuTime();
+
+ /**
+ * Obtain the CPU usage % of the machine. Return -1 if it is unavailable
+ *
+ * @return CPU usage in %
+ */
+ public abstract float getCpuUsage();
+
+ /**
+ * Obtain resource status used by current process tree.
+ *
+ * @return a {@link ProcResourceValues} snapshot for the current process tree.
+ */
+ public abstract ProcResourceValues getProcResourceValues();
+
+ /**
+ * Immutable holder for the CPU time, physical memory and virtual memory
+ * used by a process tree.
+ */
+ public static class ProcResourceValues {
+ private final long cumulativeCpuTime;
+ private final long physicalMemorySize;
+ private final long virtualMemorySize;
+ public ProcResourceValues(long cumulativeCpuTime, long physicalMemorySize,
+ long virtualMemorySize) {
+ this.cumulativeCpuTime = cumulativeCpuTime;
+ this.physicalMemorySize = physicalMemorySize;
+ this.virtualMemorySize = virtualMemorySize;
+ }
+ /**
+ * Obtain the physical memory size used by current process tree.
+ * @return physical memory size in bytes.
+ */
+ public long getPhysicalMemorySize() {
+ return physicalMemorySize;
+ }
+
+ /**
+ * Obtain the virtual memory size used by the current process tree.
+ * @return virtual memory size in bytes.
+ */
+ public long getVirtualMemorySize() {
+ return virtualMemorySize;
+ }
+
+ /**
+ * Obtain the cumulative CPU time used by the current process tree.
+ * @return cumulative CPU time in milliseconds
+ */
+ public long getCumulativeCpuTime() {
+ return cumulativeCpuTime;
+ }
+ }
+
+ /**
+ * Get the ResourceCalculatorPlugin from the class name and configure it. If
+ * class name is null, this method will try and return a memory calculator
+ * plugin available for this system.
+ *
+ * @param clazz class-name
+ * @param conf configure the plugin with this.
+ * @return ResourceCalculatorPlugin, or null if no plugin is available for
+ * this operating system (or the OS name cannot be read).
+ */
+ public static ResourceCalculatorPlugin getResourceCalculatorPlugin(
+ Class<? extends ResourceCalculatorPlugin> clazz, Configuration conf) {
+
+ if (clazz != null) {
+ return ReflectionUtils.newInstance(clazz, conf);
+ }
+
+ // No class given, try a os specific class
+ try {
+ String osName = System.getProperty("os.name");
+ if (osName.startsWith("Linux")) {
+ return new LinuxResourceCalculatorPlugin();
+ }
+ } catch (SecurityException se) {
+ // Failed to get Operating System name.
+ return null;
+ }
+
+ // Not supported on this system.
+ return null;
+ }
+}
Added: hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java?rev=1176853&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java (added)
+++ hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java Wed Sep 28 12:42:28 2011
@@ -0,0 +1,366 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.examples.SleepJob;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobTracker;
+import org.apache.hadoop.mapred.Task;
+import org.apache.hadoop.mapred.TaskStatus;
+import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
+import org.apache.hadoop.mapred.TaskTrackerStatus;
+import org.apache.hadoop.mapred.Task.Counter;
+import org.apache.hadoop.util.DummyResourceCalculatorPlugin;
+import org.apache.hadoop.util.LinuxResourceCalculatorPlugin;
+import org.apache.hadoop.util.ResourceCalculatorPlugin;
+import org.apache.hadoop.util.ToolRunner;
+
+import junit.framework.TestCase;
+import org.junit.Test;
+import org.junit.After;
+
+/**
+ * This test class tests the functionality related to configuring, reporting
+ * and computing memory related parameters in a Map/Reduce cluster.
+ *
+ * Each test sets up a {@link MiniMRCluster} with a locally defined
+ * {@link org.apache.hadoop.mapred.TaskScheduler}. This scheduler validates
+ * the memory related configuration is correctly computed and reported from
+ * the tasktracker in
+ * {@link org.apache.hadoop.mapred.TaskScheduler#assignTasks(TaskTrackerStatus)}.
+ */
+public class TestTTResourceReporting extends TestCase {
+
+ static final Log LOG = LogFactory.getLog(TestTTResourceReporting.class);
+
+ private MiniMRCluster miniMRCluster;
+
+ /**
+ * Fake scheduler to test the proper reporting of memory values by TT
+ */
+ public static class FakeTaskScheduler extends JobQueueTaskScheduler {
+
+ private boolean hasPassed = true; // static resource values matched expectations
+ private boolean hasDynamicValuePassed = true; // dynamic (per-moment) values matched
+ private String message; // failure diagnostics, built on each assignTasks call
+
+ public FakeTaskScheduler() {
+ super();
+ }
+
+ public boolean hasTestPassed() {
+ return hasPassed;
+ }
+
+ public boolean hasDynamicTestPassed() {
+ return hasDynamicValuePassed;
+ }
+
+ public String getFailureMessage() {
+ return message;
+ }
+
+ // Compares the resource values reported by the TT (via its status and
+ // task counters) against the expected values stashed in the conf, then
+ // delegates actual task assignment to the real scheduler.
+ @Override
+ public List<Task> assignTasks(TaskTracker taskTracker)
+ throws IOException {
+ TaskTrackerStatus status = taskTracker.getStatus();
+ long totalVirtualMemoryOnTT =
+ getConf().getLong("totalVmemOnTT", -1);
+ long totalPhysicalMemoryOnTT =
+ getConf().getLong("totalPmemOnTT", -1);
+ long mapSlotMemorySize =
+ getConf().getLong("mapSlotMemorySize", -1);
+ long reduceSlotMemorySize =
+ getConf()
+ .getLong("reduceSlotMemorySize", -1);
+ long availableVirtualMemoryOnTT =
+ getConf().getLong("availableVmemOnTT", -1);
+ long availablePhysicalMemoryOnTT =
+ getConf().getLong("availablePmemOnTT", -1);
+ long cumulativeCpuTime =
+ getConf().getLong("cumulativeCpuTime", -1);
+ long cpuFrequency =
+ getConf().getLong("cpuFrequency", -1);
+ int numProcessors =
+ getConf().getInt("numProcessors", -1);
+ float cpuUsage =
+ getConf().getFloat("cpuUsage", -1);
+
+ long reportedTotalVirtualMemoryOnTT =
+ status.getResourceStatus().getTotalVirtualMemory();
+ long reportedTotalPhysicalMemoryOnTT =
+ status.getResourceStatus().getTotalPhysicalMemory();
+ long reportedMapSlotMemorySize =
+ status.getResourceStatus().getMapSlotMemorySizeOnTT();
+ long reportedReduceSlotMemorySize =
+ status.getResourceStatus().getReduceSlotMemorySizeOnTT();
+ long reportedAvailableVirtualMemoryOnTT =
+ status.getResourceStatus().getAvailableVirtualMemory();
+ long reportedAvailablePhysicalMemoryOnTT =
+ status.getResourceStatus().getAvailablePhysicalMemory();
+ long reportedCumulativeCpuTime =
+ status.getResourceStatus().getCumulativeCpuTime();
+ long reportedCpuFrequency = status.getResourceStatus().getCpuFrequency();
+ int reportedNumProcessors = status.getResourceStatus().getNumProcessors();
+ float reportedCpuUsage = status.getResourceStatus().getCpuUsage();
+
+ message =
+ "expected memory values : "
+ + "(totalVirtualMemoryOnTT, totalPhysicalMemoryOnTT, "
+ + "availableVirtualMemoryOnTT, availablePhysicalMemoryOnTT, "
+ + "mapSlotMemSize, reduceSlotMemorySize, cumulativeCpuTime, "
+ + "cpuFrequency, numProcessors, cpuUsage) = ("
+ + totalVirtualMemoryOnTT + ", "
+ + totalPhysicalMemoryOnTT + ","
+ + availableVirtualMemoryOnTT + ", "
+ + availablePhysicalMemoryOnTT + ","
+ + mapSlotMemorySize + ","
+ + reduceSlotMemorySize + ","
+ + cumulativeCpuTime + ","
+ + cpuFrequency + ","
+ + numProcessors + ","
+ + cpuUsage
+ +")";
+ message +=
+ "\nreported memory values : "
+ + "(totalVirtualMemoryOnTT, totalPhysicalMemoryOnTT, "
+ + "availableVirtualMemoryOnTT, availablePhysicalMemoryOnTT, "
+ + "reportedMapSlotMemorySize, reportedReduceSlotMemorySize, "
+ + "reportedCumulativeCpuTime, reportedCpuFrequency, "
+ + "reportedNumProcessors, cpuUsage) = ("
+ + reportedTotalVirtualMemoryOnTT + ", "
+ + reportedTotalPhysicalMemoryOnTT + ","
+ + reportedAvailableVirtualMemoryOnTT + ", "
+ + reportedAvailablePhysicalMemoryOnTT + ","
+ + reportedMapSlotMemorySize + ","
+ + reportedReduceSlotMemorySize + ","
+ + reportedCumulativeCpuTime + ","
+ + reportedCpuFrequency + ","
+ + reportedNumProcessors + ","
+ + reportedCpuUsage
+ + ")";
+ LOG.info(message);
+ hasDynamicValuePassed = true;
+ // Check task resource status in task reports
+ for (TaskStatus taskStatus : status.getTaskReports()) {
+ Counters counters = taskStatus.getCounters();
+ // This should be zero because the initial CPU time is subtracted.
+ long procCumulativeCpuTime = 0;
+ long procVirtualMemorySize =
+ getConf().getLong("procVirtualMemorySize", -1);
+ long procPhysicalMemorySize =
+ getConf().getLong("procPhysicalMemorySize", -1);
+ long reportedProcCumulativeCpuTime =
+ counters.findCounter(Task.Counter.CPU_MILLISECONDS).getValue();
+ long reportedProcVirtualMemorySize =
+ counters.findCounter(Task.Counter.VIRTUAL_MEMORY_BYTES).getValue();
+ long reportedProcPhysicalMemorySize =
+ counters.findCounter(Task.Counter.PHYSICAL_MEMORY_BYTES).getValue();
+ String procMessage =
+ "expected values : "
+ + "(procCumulativeCpuTime, procVirtualMemorySize,"
+ + " procPhysicalMemorySize) = ("
+ + procCumulativeCpuTime + ", "
+ + procVirtualMemorySize + ", "
+ + procPhysicalMemorySize + ")";
+ procMessage +=
+ "\nreported values : "
+ + "(procCumulativeCpuTime, procVirtualMemorySize,"
+ + " procPhysicalMemorySize) = ("
+ + reportedProcCumulativeCpuTime + ", "
+ + reportedProcVirtualMemorySize + ", "
+ + reportedProcPhysicalMemorySize + ")";
+ LOG.info(procMessage);
+ message += "\n" + procMessage;
+ if (procCumulativeCpuTime != reportedProcCumulativeCpuTime ||
+ procVirtualMemorySize != reportedProcVirtualMemorySize ||
+ procPhysicalMemorySize != reportedProcPhysicalMemorySize) {
+ hasDynamicValuePassed = false;
+ }
+ }
+ hasPassed = true;
+ if (totalVirtualMemoryOnTT != reportedTotalVirtualMemoryOnTT
+ || totalPhysicalMemoryOnTT != reportedTotalPhysicalMemoryOnTT
+ || mapSlotMemorySize != reportedMapSlotMemorySize
+ || reduceSlotMemorySize != reportedReduceSlotMemorySize
+ || numProcessors != reportedNumProcessors) {
+ hasPassed = false;
+ }
+ // These values change every moment on the node so they can only be
+ // tested by DummyResourceCalculatorPlugin. Need to check them separately
+ if (availableVirtualMemoryOnTT != reportedAvailableVirtualMemoryOnTT
+ || availablePhysicalMemoryOnTT != reportedAvailablePhysicalMemoryOnTT
+ || cumulativeCpuTime != reportedCumulativeCpuTime
+ || cpuFrequency != reportedCpuFrequency
+ || cpuUsage != reportedCpuUsage) {
+ hasDynamicValuePassed = false;
+ }
+ return super.assignTasks(taskTracker);
+ }
+ }
+
+ /**
+ * Test that verifies default values are configured and reported correctly.
+ *
+ * @throws Exception
+ */
+ public void testDefaultResourceValues()
+ throws Exception {
+ JobConf conf = new JobConf();
+ try {
+ // Memory values are disabled by default.
+ conf.setClass(
+ org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
+ DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+ setUpCluster(conf);
+ JobConf jobConf = miniMRCluster.createJobConf();
+ jobConf.setClass(
+ org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
+ DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+ runSleepJob(jobConf);
+ verifyTestResults();
+ } finally {
+ tearDownCluster();
+ }
+ }
+
+ /**
+ * Test that verifies that configured values are reported correctly.
+ *
+ * @throws Exception
+ */
+ public void testConfiguredResourceValues()
+ throws Exception {
+ JobConf conf = new JobConf();
+ conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
+ conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
+ conf.setLong("mapSlotMemorySize", 1 * 512L);
+ conf.setLong("reduceSlotMemorySize", 1 * 1024L);
+ conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
+ conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
+ conf.setLong("cumulativeCpuTime", 10000L);
+ conf.setLong("cpuFrequency", 2000000L);
+ conf.setInt("numProcessors", 8);
+ conf.setFloat("cpuUsage", 15.5F);
+ conf.setLong("procCumulativeCpuTime", 1000L);
+ conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
+ conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);
+
+ conf.setClass(
+ org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
+ DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+ conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
+ 4 * 1024 * 1024 * 1024L);
+ conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
+ 2 * 1024 * 1024 * 1024L);
+ conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
+ conf.setLong(JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
+ conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
+ conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
+ conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
+ conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
+ try {
+ setUpCluster(conf);
+ JobConf jobConf = miniMRCluster.createJobConf();
+ jobConf.setMemoryForMapTask(1 * 1024L);
+ jobConf.setMemoryForReduceTask(2 * 1024L);
+ jobConf.setClass(
+ org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
+ DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+ jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
+ jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
+ 2 * 1024 * 1024 * 1024L);
+ jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
+ 1024 * 1024 * 1024L);
+ runSleepJob(jobConf);
+ verifyTestResults();
+ } finally {
+ tearDownCluster();
+ }
+ }
+
+ /**
+ * Test that verifies that total memory values are calculated and reported
+ * correctly.
+ *
+ * @throws Exception
+ */
+ public void testResourceValuesOnLinux()
+ throws Exception {
+ if (!System.getProperty("os.name").startsWith("Linux")) {
+ return;
+ }
+
+ JobConf conf = new JobConf();
+ LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
+ // In this case, we only check these four fields because they are static
+ conf.setLong("totalVmemOnTT", plugin.getVirtualMemorySize());
+ conf.setLong("totalPmemOnTT", plugin.getPhysicalMemorySize());
+ conf.setLong("numProcessors", plugin.getNumProcessors());
+
+ try {
+ setUpCluster(conf);
+ runSleepJob(miniMRCluster.createJobConf());
+ verifyTestResults(true);
+ } finally {
+ tearDownCluster();
+ }
+ }
+
+ // Starts a single-tracker MiniMRCluster wired to the FakeTaskScheduler.
+ private void setUpCluster(JobConf conf)
+ throws Exception {
+ conf.setClass("mapred.jobtracker.taskScheduler",
+ TestTTResourceReporting.FakeTaskScheduler.class, TaskScheduler.class);
+ conf.set("mapred.job.tracker.handler.count", "1");
+ miniMRCluster = new MiniMRCluster(1, "file:///", 3, null, null, conf);
+ }
+
+ // Runs a tiny 1-map/1-reduce SleepJob so the TT reports at least once.
+ private void runSleepJob(JobConf conf) throws Exception {
+ String[] args = { "-m", "1", "-r", "1",
+ "-mt", "10", "-rt", "10" };
+ ToolRunner.run(conf, new SleepJob(), args);
+ }
+
+ private void verifyTestResults() {
+ verifyTestResults(false);
+ }
+
+ // Asserts the flags recorded by FakeTaskScheduler; dynamic-value checks
+ // are skipped when excludeDynamic is true (real-hardware runs).
+ private void verifyTestResults(boolean excludeDynamic) {
+ FakeTaskScheduler scheduler =
+ (FakeTaskScheduler)miniMRCluster.getJobTrackerRunner().
+ getJobTracker().getTaskScheduler();
+ assertTrue(scheduler.getFailureMessage(), scheduler.hasTestPassed());
+ if (!excludeDynamic) {
+ assertTrue(scheduler.getFailureMessage(),
+ scheduler.hasDynamicTestPassed());
+ }
+ }
+
+ // TODO: run this as an @After teardown method (org.junit.After is
+ // imported above but currently unused).
+ private void tearDownCluster() {
+ if (miniMRCluster != null) {
+ miniMRCluster.shutdown();
+ }
+ }
+}
Added: hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/DummyResourceCalculatorPlugin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/DummyResourceCalculatorPlugin.java?rev=1176853&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/DummyResourceCalculatorPlugin.java (added)
+++ hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/DummyResourceCalculatorPlugin.java Wed Sep 28 12:42:28 2011
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+/**
+ * Plugin class to test resource information reported by TT. Use
+ * configuration items {@link #MAXVMEM_TESTING_PROPERTY} and
+ * {@link #MAXPMEM_TESTING_PROPERTY} to tell TT the total vmem and the total
+ * pmem. Use configuration items {@link #NUM_PROCESSORS},
+ * {@link #CPU_FREQUENCY}, {@link #CUMULATIVE_CPU_TIME} and {@link #CPU_USAGE}
+ * to tell TT the CPU information.
+ * <p>
+ * Every getter returns -1 when its configuration property is unset.
+ */
+public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+
+ /** max vmem on the TT */
+ public static final String MAXVMEM_TESTING_PROPERTY =
+ "mapred.tasktracker.maxvmem.testing";
+ /** max pmem on the TT */
+ public static final String MAXPMEM_TESTING_PROPERTY =
+ "mapred.tasktracker.maxpmem.testing";
+ /** number of processors for testing */
+ public static final String NUM_PROCESSORS =
+ "mapred.tasktracker.numprocessors.testing";
+ /** CPU frequency for testing */
+ public static final String CPU_FREQUENCY =
+ "mapred.tasktracker.cpufrequency.testing";
+ /** cumulative CPU usage time for testing */
+ public static final String CUMULATIVE_CPU_TIME =
+ "mapred.tasktracker.cumulativecputime.testing";
+ /** CPU usage percentage for testing */
+ public static final String CPU_USAGE =
+ "mapred.tasktracker.cpuusage.testing";
+ /** process cumulative CPU usage time for testing */
+ public static final String PROC_CUMULATIVE_CPU_TIME =
+ "mapred.tasktracker.proccumulativecputime.testing";
+ /** process pmem for testing*/
+ public static final String PROC_PMEM_TESTING_PROPERTY =
+ "mapred.tasktracker.procpmem.testing";
+ /** process vmem for testing*/
+ public static final String PROC_VMEM_TESTING_PROPERTY =
+ "mapred.tasktracker.procvmem.testing";
+
+ /** {@inheritDoc} */
+ @Override
+ public long getVirtualMemorySize() {
+ return getConf().getLong(MAXVMEM_TESTING_PROPERTY, -1);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getPhysicalMemorySize() {
+ return getConf().getLong(MAXPMEM_TESTING_PROPERTY, -1);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailableVirtualMemorySize() {
+ // In this dummy, "available" deliberately reads the same property as
+ // the *total* vmem, i.e. available == total.
+ return getConf().getLong(MAXVMEM_TESTING_PROPERTY, -1);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailablePhysicalMemorySize() {
+ // Same as above: available pmem == total pmem for testing purposes.
+ return getConf().getLong(MAXPMEM_TESTING_PROPERTY, -1);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getNumProcessors() {
+ return getConf().getInt(NUM_PROCESSORS, -1);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCpuFrequency() {
+ return getConf().getLong(CPU_FREQUENCY, -1);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCumulativeCpuTime() {
+ return getConf().getLong(CUMULATIVE_CPU_TIME, -1);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public float getCpuUsage() {
+ return getConf().getFloat(CPU_USAGE, -1);
+ }
+
+ /**
+ * Builds per-process resource values (cumulative CPU time, pmem, vmem)
+ * from the corresponding testing properties; -1 for any unset property.
+ */
+ @Override
+ public ProcResourceValues getProcResourceValues() {
+ long cpuTime = getConf().getLong(PROC_CUMULATIVE_CPU_TIME, -1);
+ long pMem = getConf().getLong(PROC_PMEM_TESTING_PROPERTY, -1);
+ long vMem = getConf().getLong(PROC_VMEM_TESTING_PROPERTY, -1);
+ return new ProcResourceValues(cpuTime, pMem, vMem);
+ }
+}
Added: hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/TestLinuxResourceCalculatorPlugin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/TestLinuxResourceCalculatorPlugin.java?rev=1176853&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/TestLinuxResourceCalculatorPlugin.java (added)
+++ hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/util/TestLinuxResourceCalculatorPlugin.java Wed Sep 28 12:42:28 2011
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.TaskTrackerStatus;
+import org.junit.Test;
+
+/**
+ * A JUnit test to test {@link LinuxResourceCalculatorPlugin}
+ * Create the fake /proc/ information and verify the parsing and calculation
+ */
+public class TestLinuxResourceCalculatorPlugin extends TestCase {
+ /**
+ * LinuxResourceCalculatorPlugin with a fake timer
+ */
+ static class FakeLinuxResourceCalculatorPlugin extends
+ LinuxResourceCalculatorPlugin {
+
+ // Fake "wall clock", advanced explicitly by the test via advanceTime().
+ long currentTime = 0;
+ public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
+ String procfsCpuFile,
+ String procfsStatFile,
+ long jiffyLengthInMillis) {
+ super(procfsMemFile, procfsCpuFile, procfsStatFile, jiffyLengthInMillis);
+ }
+ // Overridden so the test controls time instead of the system clock.
+ @Override
+ long getCurrentTime() {
+ return currentTime;
+ }
+ // Advances the fake clock by 'adv' jiffies, i.e.
+ // adv * jiffyLengthInMillis milliseconds.
+ public void advanceTime(long adv) {
+ currentTime += adv * jiffyLengthInMillis;
+ }
+ }
+ // A single plugin instance, reading the fake /proc files below, is shared
+ // by all test methods in this class.
+ private static final FakeLinuxResourceCalculatorPlugin plugin;
+ private static String TEST_ROOT_DIR = new Path(System.getProperty(
+ "test.build.data", "/tmp")).toString().replace(' ', '+');
+ // Paths of the fake /proc files; the random suffix chosen in the static
+ // block avoids collisions between runs sharing the same test dir.
+ private static final String FAKE_MEMFILE;
+ private static final String FAKE_CPUFILE;
+ private static final String FAKE_STATFILE;
+ // One jiffy == 10 ms for the fake plugin.
+ private static final long FAKE_JIFFY_LENGTH = 10L;
+ static {
+ int randomNum = (new Random()).nextInt(1000000000);
+ FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
+ FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
+ FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
+ plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
+ FAKE_STATFILE,
+ FAKE_JIFFY_LENGTH);
+ }
+ // Template of a fake /proc/meminfo; the %d slots are, in order:
+ // MemTotal, MemFree, Inactive, SwapTotal, SwapFree.
+ static final String MEMINFO_FORMAT =
+ "MemTotal: %d kB\n" +
+ "MemFree: %d kB\n" +
+ "Buffers: 138244 kB\n" +
+ "Cached: 947780 kB\n" +
+ "SwapCached: 142880 kB\n" +
+ "Active: 3229888 kB\n" +
+ "Inactive: %d kB\n" +
+ "SwapTotal: %d kB\n" +
+ "SwapFree: %d kB\n" +
+ "Dirty: 122012 kB\n" +
+ "Writeback: 0 kB\n" +
+ "AnonPages: 2710792 kB\n" +
+ "Mapped: 24740 kB\n" +
+ "Slab: 132528 kB\n" +
+ "SReclaimable: 105096 kB\n" +
+ "SUnreclaim: 27432 kB\n" +
+ "PageTables: 11448 kB\n" +
+ "NFS_Unstable: 0 kB\n" +
+ "Bounce: 0 kB\n" +
+ "CommitLimit: 4125904 kB\n" +
+ "Committed_AS: 4143556 kB\n" +
+ "VmallocTotal: 34359738367 kB\n" +
+ "VmallocUsed: 1632 kB\n" +
+ "VmallocChunk: 34359736375 kB\n" +
+ "HugePages_Total: 0\n" +
+ "HugePages_Free: 0\n" +
+ "HugePages_Rsvd: 0\n" +
+ "Hugepagesize: 2048 kB";
+
+ // Template of one processor entry in a fake /proc/cpuinfo; the %s slot is
+ // the processor id and the %f slot is the CPU frequency in MHz.
+ static final String CPUINFO_FORMAT =
+ "processor : %s\n" +
+ "vendor_id : AuthenticAMD\n" +
+ "cpu family : 15\n" +
+ "model : 33\n" +
+ "model name : Dual Core AMD Opteron(tm) Processor 280\n" +
+ "stepping : 2\n" +
+ "cpu MHz : %f\n" +
+ "cache size : 1024 KB\n" +
+ "physical id : 0\n" +
+ "siblings : 2\n" +
+ "core id : 0\n" +
+ "cpu cores : 2\n" +
+ "fpu : yes\n" +
+ "fpu_exception : yes\n" +
+ "cpuid level : 1\n" +
+ "wp : yes\n" +
+ "flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov " +
+ "pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt lm " +
+ "3dnowext 3dnow pni lahf_lm cmp_legacy\n" +
+ "bogomips : 4792.41\n" +
+ "TLB size : 1024 4K pages\n" +
+ "clflush size : 64\n" +
+ "cache_alignment : 64\n" +
+ "address sizes : 40 bits physical, 48 bits virtual\n" +
+ "power management: ts fid vid ttp";
+
+ // Template of a fake /proc/stat; the three %d slots on the aggregate
+ // "cpu" line are the user, nice and system jiffy counters.
+ static final String STAT_FILE_FORMAT =
+ "cpu %d %d %d 1646495089 831319 48713 164346 0\n" +
+ "cpu0 15096055 30805 3823005 411456015 206027 13 14269 0\n" +
+ "cpu1 14760561 89890 6432036 408707910 456857 48074 130857 0\n" +
+ "cpu2 12761169 20842 3758639 413976772 98028 411 10288 0\n" +
+ "cpu3 12355207 47322 5789691 412354390 70406 213 8931 0\n" +
+ "intr 114648668 20010764 2 0 945665 2 0 0 0 0 0 0 0 4 0 0 0 0 0 0\n" +
+ "ctxt 242017731764\n" +
+ "btime 1257808753\n" +
+ "processes 26414943\n" +
+ "procs_running 1\n" +
+ "procs_blocked 0\n";
+
+ /**
+ * Test parsing /proc/stat and /proc/cpuinfo
+ * @throws IOException
+ */
+ public void testParsingProcStatAndCpuFile() throws IOException {
+ // Write fake /proc/cpuinfo file.
+ long numProcessors = 8;
+ long cpuFrequencyKHz = 2392781;
+ String fileContent = "";
+ for (int i = 0; i < numProcessors; i++) {
+ fileContent += String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D) +
+ "\n";
+ }
+ File tempFile = new File(FAKE_CPUFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
+ fWriter.write(fileContent);
+ fWriter.close();
+ // NOTE(review): throughout this test the assertEquals arguments are
+ // (actual, expected) -- reversed from the usual JUnit convention.
+ assertEquals(plugin.getNumProcessors(), numProcessors);
+ assertEquals(plugin.getCpuFrequency(), cpuFrequencyKHz);
+
+ // Write fake /proc/stat file.
+ long uTime = 54972994;
+ long nTime = 188860;
+ long sTime = 19803373;
+ tempFile = new File(FAKE_STATFILE);
+ tempFile.deleteOnExit();
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ // First read reports UNAVAILABLE -- presumably because there is no
+ // earlier sample to diff against yet; confirm against the plugin impl.
+ assertEquals(plugin.getCpuUsage(), (float)(TaskTrackerStatus.UNAVAILABLE));
+
+ // Advance the time and sample again to test the CPU usage calculation
+ uTime += 100L;
+ plugin.advanceTime(200L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ // 100 jiffies of CPU over 200 jiffies of wall time on 8 CPUs:
+ // 100 / (200 * 8) = 6.25%.
+ assertEquals(plugin.getCpuUsage(), 6.25F);
+
+ // Advance the time and sample again. This time, we call getCpuUsage() only.
+ uTime += 600L;
+ plugin.advanceTime(300L);
+ updateStatFile(uTime, nTime, sTime);
+ // 600 / (300 * 8) = 25%.
+ assertEquals(plugin.getCpuUsage(), 25F);
+
+ // Advance very short period of time (one jiffy length).
+ // In this case, CPU usage should not be updated.
+ uTime += 1L;
+ plugin.advanceTime(1L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), 25F); // CPU usage is not updated.
+ }
+
+ /**
+ * Write information to fake /proc/stat file
+ */
+ private void updateStatFile(long uTime, long nTime, long sTime)
+ throws IOException {
+ FileWriter fWriter = new FileWriter(FAKE_STATFILE);
+ fWriter.write(String.format(STAT_FILE_FORMAT, uTime, nTime, sTime));
+ fWriter.close();
+ }
+
+ /**
+ * Test parsing /proc/meminfo
+ * @throws IOException
+ */
+ public void testParsingProcMemFile() throws IOException {
+ long memTotal = 4058864L;
+ long memFree = 99632L;
+ long inactive = 567732L;
+ long swapTotal = 2096472L;
+ long swapFree = 1818480L;
+ File tempFile = new File(FAKE_MEMFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
+ fWriter.write(String.format(MEMINFO_FORMAT,
+ memTotal, memFree, inactive, swapTotal, swapFree));
+
+ fWriter.close();
+ // Expected values (all converted from kB to bytes via * 1024):
+ // available pmem = MemFree + Inactive
+ assertEquals(plugin.getAvailablePhysicalMemorySize(),
+ 1024L * (memFree + inactive));
+ // available vmem = MemFree + Inactive + SwapFree
+ assertEquals(plugin.getAvailableVirtualMemorySize(),
+ 1024L * (memFree + inactive + swapFree));
+ // total pmem = MemTotal; total vmem = MemTotal + SwapTotal
+ assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
+ assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
+ }
+}