Posted to commits@ambari.apache.org by sw...@apache.org on 2014/09/22 20:02:11 UTC

[01/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Repository: ambari
Updated Branches:
  refs/heads/branch-metrics-dev [created] 865d187e3


http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/setup.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/setup.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/setup.py
new file mode 100644
index 0000000..98b24a1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/setup.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""psutil is a cross-platform library for retrieving information on
+running processes and system utilization (CPU, memory, disks, network)
+in Python.
+"""
+
+import os
+import sys
+try:
+    from setuptools import setup, Extension
+except ImportError:
+    from distutils.core import setup, Extension
+
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+
+def get_version():
+    INIT = os.path.join(HERE, 'psutil/__init__.py')
+    f = open(INIT, 'r')
+    try:
+        for line in f:
+            if line.startswith('__version__'):
+                ret = eval(line.strip().split(' = ')[1])
+                assert ret.count('.') == 2, ret
+                for num in ret.split('.'):
+                    assert num.isdigit(), ret
+                return ret
+        else:
+            raise ValueError("couldn't find version string")
+    finally:
+        f.close()
+
+
+def get_description():
+    README = os.path.join(HERE, 'README')
+    f = open(README, 'r')
+    try:
+        return f.read()
+    finally:
+        f.close()
+
+
+# POSIX
+if os.name == 'posix':
+    posix_extension = Extension(
+        '_psutil_posix',
+        sources=['psutil/_psutil_posix.c'],
+    )
+# Windows
+if sys.platform.startswith("win32"):
+
+    def get_winver():
+        maj, min = sys.getwindowsversion()[0:2]
+        return '0x0%s' % ((maj * 100) + min)
+
+    extensions = [Extension(
+        '_psutil_windows',
+        sources=[
+            'psutil/_psutil_windows.c',
+            'psutil/_psutil_common.c',
+            'psutil/arch/windows/process_info.c',
+            'psutil/arch/windows/process_handles.c',
+            'psutil/arch/windows/security.c',
+        ],
+        define_macros=[
+            # be nice to mingw, see:
+            # http://www.mingw.org/wiki/Use_more_recent_defined_functions
+            ('_WIN32_WINNT', get_winver()),
+            ('_AVAIL_WINVER_', get_winver()),
+            # see: https://code.google.com/p/psutil/issues/detail?id=348
+            ('PSAPI_VERSION', 1),
+        ],
+        libraries=[
+            "psapi", "kernel32", "advapi32", "shell32", "netapi32", "iphlpapi",
+            "wtsapi32",
+        ],
+        # extra_compile_args=["/Z7"],
+        # extra_link_args=["/DEBUG"]
+    )]
+# OS X
+elif sys.platform.startswith("darwin"):
+    extensions = [Extension(
+        '_psutil_osx',
+        sources=[
+            'psutil/_psutil_osx.c',
+            'psutil/_psutil_common.c',
+            'psutil/arch/osx/process_info.c'
+        ],
+        extra_link_args=[
+            '-framework', 'CoreFoundation', '-framework', 'IOKit'
+        ],
+    ),
+        posix_extension,
+    ]
+# FreeBSD
+elif sys.platform.startswith("freebsd"):
+    extensions = [Extension(
+        '_psutil_bsd',
+        sources=[
+            'psutil/_psutil_bsd.c',
+            'psutil/_psutil_common.c',
+            'psutil/arch/bsd/process_info.c'
+        ],
+        libraries=["devstat"]),
+        posix_extension,
+    ]
+# Linux
+elif sys.platform.startswith("linux"):
+    extensions = [Extension(
+        '_psutil_linux',
+        sources=['psutil/_psutil_linux.c']),
+        posix_extension,
+    ]
+# Solaris
+elif sys.platform.lower().startswith('sunos'):
+    extensions = [Extension(
+        '_psutil_sunos',
+        sources=['psutil/_psutil_sunos.c'],
+        libraries=['kstat', 'nsl'],),
+        posix_extension,
+    ]
+else:
+    sys.exit('platform %s is not supported' % sys.platform)
+
+
+def main():
+    setup_args = dict(
+        name='psutil',
+        version=get_version(),
+        description=__doc__,
+        long_description=get_description(),
+        keywords=[
+            'ps', 'top', 'kill', 'free', 'lsof', 'netstat', 'nice',
+            'tty', 'ionice', 'uptime', 'taskmgr', 'process', 'df',
+            'iotop', 'iostat', 'ifconfig', 'taskset', 'who', 'pidof',
+            'pmap', 'smem', 'monitoring', 'ulimit', 'prlimit',
+        ],
+        author='Giampaolo Rodola',
+        author_email='g.rodola <at> gmail <dot> com',
+        url='http://code.google.com/p/psutil/',
+        platforms='Platform Independent',
+        license='BSD',
+        packages=['psutil'],
+        # see: python setup.py register --list-classifiers
+        classifiers=[
+            'Development Status :: 5 - Production/Stable',
+            'Environment :: Console',
+            'Environment :: Win32 (MS Windows)',
+            'Intended Audience :: Developers',
+            'Intended Audience :: Information Technology',
+            'Intended Audience :: System Administrators',
+            'License :: OSI Approved :: BSD License',
+            'Operating System :: MacOS :: MacOS X',
+            'Operating System :: Microsoft :: Windows :: Windows NT/2000',
+            'Operating System :: Microsoft',
+            'Operating System :: OS Independent',
+            'Operating System :: POSIX :: BSD :: FreeBSD',
+            'Operating System :: POSIX :: Linux',
+            'Operating System :: POSIX :: SunOS/Solaris',
+            'Operating System :: POSIX',
+            'Programming Language :: C',
+            'Programming Language :: Python :: 2',
+            'Programming Language :: Python :: 2.4',
+            'Programming Language :: Python :: 2.5',
+            'Programming Language :: Python :: 2.6',
+            'Programming Language :: Python :: 2.7',
+            'Programming Language :: Python :: 3',
+            'Programming Language :: Python :: 3.0',
+            'Programming Language :: Python :: 3.1',
+            'Programming Language :: Python :: 3.2',
+            'Programming Language :: Python :: 3.3',
+            'Programming Language :: Python :: 3.4',
+            'Programming Language :: Python :: Implementation :: CPython',
+            'Programming Language :: Python :: Implementation :: PyPy',
+            'Programming Language :: Python',
+            'Topic :: Software Development :: Libraries :: Python Modules',
+            'Topic :: Software Development :: Libraries',
+            'Topic :: System :: Benchmark',
+            'Topic :: System :: Hardware',
+            'Topic :: System :: Monitoring',
+            'Topic :: System :: Networking :: Monitoring',
+            'Topic :: System :: Networking',
+            'Topic :: System :: Systems Administration',
+            'Topic :: Utilities',
+        ],
+    )
+    if extensions is not None:
+        setup_args["ext_modules"] = extensions
+    setup(**setup_args)
+
+if __name__ == '__main__':
+    main()
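
A note on get_version() above: it evals the right-hand side of the
__version__ assignment instead of importing psutil, since importing would
require the C extensions to be built first. A rough standalone equivalent of
the same parsing rule, for illustration only (read_version and the regex are
not part of this commit):

    import re

    def read_version(init_path):
        # Find the __version__ assignment and validate a three-part
        # dotted number, mirroring the asserts in setup.py above.
        with open(init_path) as f:
            for line in f:
                m = re.match(r'__version__ = [\'"](\d+\.\d+\.\d+)[\'"]', line)
                if m:
                    return m.group(1)
        raise ValueError("couldn't find version string")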

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/pom.xml b/ambari-metrics/pom.xml
new file mode 100644
index 0000000..4bc6c82
--- /dev/null
+++ b/ambari-metrics/pom.xml
@@ -0,0 +1,149 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                             http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <groupId>org.apache.ambari</groupId>
+    <artifactId>ambari-project</artifactId>
+    <version>1.3.0-SNAPSHOT</version>
+    <relativePath>../ambari-project</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>ambari-metrics</artifactId>
+  <packaging>pom</packaging>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <python.ver>python &gt;= 2.6</python.ver>
+    <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
+  </properties>
+  <repositories>
+    <repository>
+      <id>apache-hadoop</id>
+      <name>hdp</name>
+      <url>http://54.235.92.15/nexus/content/groups/public/</url>
+    </repository>
+  </repositories>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>none</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.8</version>
+        <executions>
+          <execution>
+            <id>parse-version</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>parse-version</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>regex-property</id>
+            <goals>
+              <goal>regex-property</goal>
+            </goals>
+            <configuration>
+              <name>ambariVersion</name>
+              <value>${project.version}</value>
+              <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*</regex>
+              <replacement>$1.$2.$3</replacement>
+              <failIfNoMatch>false</failIfNoMatch>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-clean-plugin</artifactId>
+        <configuration>
+          <filesets>
+            <fileset>
+              <directory>${basedir}</directory>
+              <followSymlinks>false</followSymlinks>
+              <includes>
+                <include>**/*.pyc</include>
+              </includes>
+            </fileset>
+          </filesets>
+        </configuration>
+      </plugin>
+      <!--
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptors>
+            <descriptor>src/main/assemblies/metrics.xml</descriptor>
+          </descriptors>
+          <tarLongFileMode>gnu</tarLongFileMode>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      -->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>rpm-maven-plugin</artifactId>
+        <version>2.0.1</version>
+        <executions>
+          <execution>
+            <!-- unbinds rpm creation from maven lifecycle -->
+            <phase>none</phase>
+            <goals>
+              <goal>rpm</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <copyright>2012, Apache Software Foundation</copyright>
+          <group>Development</group>
+          <description>Maven Recipe: RPM Package.</description>
+          <autoRequires>false</autoRequires>
+          <requires>
+            <require>${python.ver}</require>
+            <require>gcc</require>
+            <require>python-devel</require>
+          </requires>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+  </dependencies>
+  <profiles>
+    <profile>
+      <id>metrics2-standalone</id>
+    </profile>
+  </profiles>
+
+  <modules>
+    <module>ambari-metrics-hadoop-sink</module>
+    <module>ambari-metrics-hadoop-timelineservice</module>
+    <module>ambari-metrics-host-monitoring</module>
+  </modules>
+
+</project>
\ No newline at end of file
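
A note on the rpm-maven-plugin configuration above: binding the execution to
phase "none" detaches RPM creation from the default lifecycle, so the package
is presumably only built when the goal is invoked explicitly (e.g. with
"mvn rpm:rpm"); the requires list then pulls in python >= 2.6, gcc and
python-devel so the bundled psutil C extensions can be compiled on the
target host.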

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index cb47889..5544ef5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,6 +23,9 @@
  <name>Ambari Main</name>
  <version>1.3.0-SNAPSHOT</version>
  <description>Ambari</description>
+  <modules>
+    <module>ambari-metrics</module>
+  </modules>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <clover.license>${user.home}/clover.license</clover.license>
@@ -305,6 +311,7 @@
         <module>ambari-agent</module>
         <module>ambari-client</module>
         <module>ambari-shell</module>
+        <module>ambari-metrics</module>
       </modules>
     </profile>
     <profile>


[02/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.h
new file mode 100644
index 0000000..546704e
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+#include <windows.h>
+
+// --- per-process functions
+
+static PyObject* psutil_proc_cmdline(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_affinity_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_affinity_set(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_create_time(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_create_time_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cwd(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_exe(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_io_counters_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_is_suspended(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_kill(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_info(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_info_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_maps(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_ctx_switches(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_handles(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_handles_2(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_open_files(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_priority_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_priority_set(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_resume(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_suspend(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_username(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_wait(PyObject* self, PyObject* args);
+
+#if (PSUTIL_WINVER >= 0x0600)  // Windows Vista
+static PyObject* psutil_proc_io_priority_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_io_priority_set(PyObject* self, PyObject* args);
+#endif
+
+// --- system-related functions
+
+static PyObject* psutil_boot_time(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_logical(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_phys(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_usage(PyObject* self, PyObject* args);
+static PyObject* psutil_net_connections(PyObject* self, PyObject* args);
+static PyObject* psutil_net_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_per_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_pid_exists(PyObject* self, PyObject* args);
+static PyObject* psutil_pids(PyObject* self, PyObject* args);
+static PyObject* psutil_ppid_map(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);
+static PyObject* psutil_virtual_mem(PyObject* self, PyObject* args);
+
+// --- windows API bindings
+
+static PyObject* psutil_win32_QueryDosDevice(PyObject* self, PyObject* args);
+
+// --- internal
+
+int psutil_proc_suspend_or_resume(DWORD pid, int suspend);
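
Note the paired declarations above (psutil_proc_cpu_times/..._2,
psutil_proc_create_time/..._2, and so on): as the Python layer in
_pswindows.py below shows, the *_2 variants are alternate implementations
that the module falls back to when the primary call fails with an
access-denied error.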

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pswindows.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pswindows.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pswindows.py
new file mode 100644
index 0000000..1a786f1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pswindows.py
@@ -0,0 +1,485 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Windows platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil._common import conn_tmap, usage_percent, isfile_strict
+from psutil._compat import PY3, xrange, wraps, lru_cache, namedtuple
+import _psutil_windows as cext
+
+# process priority constants, import from __init__.py:
+# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
+__extra__all__ = ["ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
+                  "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
+                  "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS",
+                  #
+                  "CONN_DELETE_TCB",
+                  ]
+
+# --- module level constants (gets pushed up to psutil module)
+
+CONN_DELETE_TCB = "DELETE_TCB"
+WAIT_TIMEOUT = 0x00000102  # 258 in decimal
+ACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,
+                               cext.ERROR_ACCESS_DENIED])
+
+TCP_STATUSES = {
+    cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
+    cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
+    cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
+    cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
+    cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
+    cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
+    cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
+    cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle'])
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+pextmem = namedtuple(
+    'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
+                'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
+                'pagefile', 'peak_pagefile', 'private'])
+pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+@lru_cache(maxsize=512)
+def _win32_QueryDosDevice(s):
+    return cext.win32_QueryDosDevice(s)
+
+
+def _convert_raw_path(s):
+    # convert paths using native DOS format like:
+    # "\Device\HarddiskVolume1\Windows\systemew\file.txt"
+    # into: "C:\Windows\systemew\file.txt"
+    if PY3 and not isinstance(s, str):
+        s = s.decode('utf8')
+    rawdrive = '\\'.join(s.split('\\')[:3])
+    driveletter = _win32_QueryDosDevice(rawdrive)
+    return os.path.join(driveletter, s[len(rawdrive):])
+
+
+# --- public functions
+
+
+def virtual_memory():
+    """System virtual memory as a namedtuple."""
+    mem = cext.virtual_mem()
+    totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
+    #
+    total = totphys
+    avail = availphys
+    free = availphys
+    used = total - avail
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free)
+
+
+def swap_memory():
+    """Swap system memory as a (total, used, free, sin, sout) tuple."""
+    mem = cext.virtual_mem()
+    total = mem[2]
+    free = mem[3]
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent, 0, 0)
+
+
+def disk_usage(path):
+    """Return disk usage associated with path."""
+    try:
+        total, free = cext.disk_usage(path)
+    except WindowsError:
+        if not os.path.exists(path):
+            msg = "No such file or directory: '%s'" % path
+            raise OSError(errno.ENOENT, msg)
+        raise
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return _common.sdiskusage(total, used, free, percent)
+
+
+def disk_partitions(all):
+    """Return disk partitions."""
+    rawlist = cext.disk_partitions(all)
+    return [_common.sdiskpart(*x) for x in rawlist]
+
+
+def cpu_times():
+    """Return system CPU times as a named tuple."""
+    user, system, idle = cext.cpu_times()
+    return scputimes(user, system, idle)
+
+
+def per_cpu_times():
+    """Return system per-CPU times as a list of named tuples."""
+    ret = []
+    for cpu_t in cext.per_cpu_times():
+        user, system, idle = cpu_t
+        item = scputimes(user, system, idle)
+        ret.append(item)
+    return ret
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    return cext.cpu_count_logical()
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    return cext.cpu_count_phys()
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def net_connections(kind, _pid=-1):
+    """Return socket connections.  If pid == -1 return system-wide
+    connections (as opposed to connections opened by one process only).
+    """
+    if kind not in conn_tmap:
+        raise ValueError("invalid %r kind argument; choose between %s"
+                         % (kind, ', '.join([repr(x) for x in conn_tmap])))
+    families, types = conn_tmap[kind]
+    rawlist = cext.net_connections(_pid, families, types)
+    ret = []
+    for item in rawlist:
+        fd, fam, type, laddr, raddr, status, pid = item
+        status = TCP_STATUSES[status]
+        if _pid == -1:
+            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
+        else:
+            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
+        ret.append(nt)
+    return ret
+
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, hostname, tstamp = item
+        nt = _common.suser(user, None, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+pids = cext.pids
+pid_exists = cext.pid_exists
+net_io_counters = cext.net_io_counters
+disk_io_counters = cext.disk_io_counters
+ppid_map = cext.ppid_map  # not meant to be public
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError and WindowsError
+    exceptions into NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except OSError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                raise AccessDenied(self.pid, self._name)
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        """Return process name, which on Windows is always the final
+        part of the executable.
+        """
+        # This is how PIDs 0 and 4 are always represented in taskmgr
+        # and process-hacker.
+        if self.pid == 0:
+            return "System Idle Process"
+        elif self.pid == 4:
+            return "System"
+        else:
+            return os.path.basename(self.exe())
+
+    @wrap_exceptions
+    def exe(self):
+        # Note: os.path.exists(path) may return False even if the file
+        # is there, see:
+        # http://stackoverflow.com/questions/3112546/os-path-exists-lies
+        return _convert_raw_path(cext.proc_exe(self.pid))
+
+    @wrap_exceptions
+    def cmdline(self):
+        return cext.proc_cmdline(self.pid)
+
+    def ppid(self):
+        try:
+            return ppid_map()[self.pid]
+        except KeyError:
+            raise NoSuchProcess(self.pid, self._name)
+
+    def _get_raw_meminfo(self):
+        try:
+            return cext.proc_memory_info(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                return cext.proc_memory_info_2(self.pid)
+            raise
+
+    @wrap_exceptions
+    def memory_info(self):
+        # on Windows RSS == WorkingSetSize and VMS == PagefileUsage
+        # fields of PROCESS_MEMORY_COUNTERS struct:
+        # http://msdn.microsoft.com/en-us/library/windows/desktop/
+        #     ms684877(v=vs.85).aspx
+        t = self._get_raw_meminfo()
+        return _common.pmem(t[2], t[7])
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        return pextmem(*self._get_raw_meminfo())
+
+    def memory_maps(self):
+        try:
+            raw = cext.proc_memory_maps(self.pid)
+        except OSError:
+            # XXX - can't use wrap_exceptions decorator as we're
+            # returning a generator; probably needs refactoring.
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                raise AccessDenied(self.pid, self._name)
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            raise
+        else:
+            for addr, perm, path, rss in raw:
+                path = _convert_raw_path(path)
+                addr = hex(addr)
+                yield (addr, perm, path, rss)
+
+    @wrap_exceptions
+    def kill(self):
+        return cext.proc_kill(self.pid)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        if timeout is None:
+            timeout = cext.INFINITE
+        else:
+            # WaitForSingleObject() expects time in milliseconds
+            timeout = int(timeout * 1000)
+        ret = cext.proc_wait(self.pid, timeout)
+        if ret == WAIT_TIMEOUT:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise RuntimeError("timeout expired")
+            raise TimeoutExpired(timeout, self.pid, self._name)
+        return ret
+
+    @wrap_exceptions
+    def username(self):
+        if self.pid in (0, 4):
+            return 'NT AUTHORITY\\SYSTEM'
+        return cext.proc_username(self.pid)
+
+    @wrap_exceptions
+    def create_time(self):
+        # special case for kernel process PIDs; return system boot time
+        if self.pid in (0, 4):
+            return boot_time()
+        try:
+            return cext.proc_create_time(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                return cext.proc_create_time_2(self.pid)
+            raise
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_num_threads(self.pid)
+
+    @wrap_exceptions
+    def threads(self):
+        rawlist = cext.proc_threads(self.pid)
+        retlist = []
+        for thread_id, utime, stime in rawlist:
+            ntuple = _common.pthread(thread_id, utime, stime)
+            retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def cpu_times(self):
+        try:
+            ret = cext.proc_cpu_times(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                ret = cext.proc_cpu_times_2(self.pid)
+            else:
+                raise
+        return _common.pcputimes(*ret)
+
+    @wrap_exceptions
+    def suspend(self):
+        return cext.proc_suspend(self.pid)
+
+    @wrap_exceptions
+    def resume(self):
+        return cext.proc_resume(self.pid)
+
+    @wrap_exceptions
+    def cwd(self):
+        if self.pid in (0, 4):
+            raise AccessDenied(self.pid, self._name)
+        # return a normalized pathname since the native C function appends
+        # "\\" at the and of the path
+        path = cext.proc_cwd(self.pid)
+        return os.path.normpath(path)
+
+    @wrap_exceptions
+    def open_files(self):
+        if self.pid in (0, 4):
+            return []
+        retlist = []
+        # Filenames come in native DOS format like:
+        # "\Device\HarddiskVolume1\Windows\system32\file.txt"
+        # Convert the first part into the corresponding drive letter
+        # (e.g. "C:\") by using Windows's QueryDosDevice().
+        raw_file_names = cext.proc_open_files(self.pid)
+        for file in raw_file_names:
+            file = _convert_raw_path(file)
+            if isfile_strict(file) and file not in retlist:
+                ntuple = _common.popenfile(file, -1)
+                retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        return net_connections(kind, _pid=self.pid)
+
+    @wrap_exceptions
+    def nice_get(self):
+        return cext.proc_priority_get(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return cext.proc_priority_set(self.pid, value)
+
+    # available on Windows >= Vista
+    if hasattr(cext, "proc_io_priority_get"):
+        @wrap_exceptions
+        def ionice_get(self):
+            return cext.proc_io_priority_get(self.pid)
+
+        @wrap_exceptions
+        def ionice_set(self, value, _):
+            if _:
+                raise TypeError("set_proc_ionice() on Windows takes only "
+                                "1 argument (2 given)")
+            if value not in (2, 1, 0):
+                raise ValueError("value must be 2 (normal), 1 (low) or 0 "
+                                 "(very low); got %r" % value)
+            return cext.proc_io_priority_set(self.pid, value)
+
+    @wrap_exceptions
+    def io_counters(self):
+        try:
+            ret = cext.proc_io_counters(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                ret = cext.proc_io_counters_2(self.pid)
+            else:
+                raise
+        return _common.pio(*ret)
+
+    @wrap_exceptions
+    def status(self):
+        suspended = cext.proc_is_suspended(self.pid)
+        if suspended:
+            return _common.STATUS_STOPPED
+        else:
+            return _common.STATUS_RUNNING
+
+    @wrap_exceptions
+    def cpu_affinity_get(self):
+        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
+        bitmask = cext.proc_cpu_affinity_get(self.pid)
+        return from_bitmask(bitmask)
+
+    @wrap_exceptions
+    def cpu_affinity_set(self, value):
+        def to_bitmask(l):
+            if not l:
+                raise ValueError("invalid argument %r" % l)
+            out = 0
+            for b in l:
+                out |= 2 ** b
+            return out
+
+        # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER
+        # is returned for an invalid CPU but this seems not to be true,
+        # therefore we check CPU validity beforehand.
+        allcpus = list(range(len(per_cpu_times())))
+        for cpu in value:
+            if cpu not in allcpus:
+                raise ValueError("invalid CPU %r" % cpu)
+
+        bitmask = to_bitmask(value)
+        cext.proc_cpu_affinity_set(self.pid, bitmask)
+
+    @wrap_exceptions
+    def num_handles(self):
+        try:
+            return cext.proc_num_handles(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                return cext.proc_num_handles_2(self.pid)
+            raise
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        tupl = cext.proc_num_ctx_switches(self.pid)
+        return _common.pctxsw(*tupl)
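
For orientation, this module is not meant to be used directly: the psutil
package dispatches to it on Windows and injects the NoSuchProcess,
AccessDenied and TimeoutExpired classes that are set to None above. An
illustrative session through the public API, assuming the extensions build
(not part of this commit):

    import psutil

    p = psutil.Process()               # current process
    print(p.name(), p.num_threads())
    print(psutil.cpu_times())          # scputimes(user=..., system=..., idle=...)

Also worth calling out: cpu_affinity_get() expands the Windows processor
bitmask into CPU indices (a mask of 0b101, i.e. 5, maps to [0, 2]), and
cpu_affinity_set() rebuilds the mask by OR-ing 2 ** cpu for each requested
CPU after validating it against the per-CPU count.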

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.c
new file mode 100644
index 0000000..1c19556
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Helper functions related to fetching process information.
+ * Used by _psutil_bsd module methods.
+ */
+
+
+#include <Python.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/param.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#include <signal.h>
+
+#include "process_info.h"
+
+
+/*
+ * Returns a list of all BSD processes on the system.  This routine
+ * allocates the list and puts it in *procList and a count of the
+ * number of entries in *procCount.  You are responsible for freeing
+ * this list (use "free" from System framework).
+ * On success, the function returns 0.
+ * On error, the function returns a BSD errno value.
+ */
+int
+psutil_get_proc_list(struct kinfo_proc **procList, size_t *procCount)
+{
+    int err;
+    struct kinfo_proc *result;
+    int done;
+    static const int name[] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC, 0 };
+    // Declaring name as const requires us to cast it when passing it to
+    // sysctl because the prototype doesn't include the const modifier.
+    size_t              length;
+
+    assert( procList != NULL);
+    assert(*procList == NULL);
+    assert(procCount != NULL);
+
+    *procCount = 0;
+
+    /*
+     * We start by calling sysctl with result == NULL and length == 0.
+     * That will succeed, and set length to the appropriate length.
+     * We then allocate a buffer of that size and call sysctl again
+     * with that buffer.  If that succeeds, we're done.  If that fails
+     * with ENOMEM, we have to throw away our buffer and loop.  Note
+     * that the loop causes us to call sysctl with NULL again; this
+     * is necessary because the ENOMEM failure case sets length to
+     * the amount of data returned, not the amount of data that
+     * could have been returned.
+     */
+    result = NULL;
+    done = 0;
+    do {
+        assert(result == NULL);
+        // Call sysctl with a NULL buffer.
+        length = 0;
+        err = sysctl((int *)name, (sizeof(name) / sizeof(*name)) - 1,
+                     NULL, &length, NULL, 0);
+        if (err == -1)
+            err = errno;
+
+        // Allocate an appropriately sized buffer based on the results
+        // from the previous call.
+        if (err == 0) {
+            result = malloc(length);
+            if (result == NULL)
+                err = ENOMEM;
+        }
+
+        // Call sysctl again with the new buffer.  If we get an ENOMEM
+        // error, toss away our buffer and start again.
+        if (err == 0) {
+            err = sysctl((int *) name, (sizeof(name) / sizeof(*name)) - 1,
+                         result, &length, NULL, 0);
+            if (err == -1)
+                err = errno;
+            if (err == 0) {
+                done = 1;
+            }
+            else if (err == ENOMEM) {
+                assert(result != NULL);
+                free(result);
+                result = NULL;
+                err = 0;
+            }
+        }
+    } while (err == 0 && ! done);
+
+    // Clean up and establish post conditions.
+    if (err != 0 && result != NULL) {
+        free(result);
+        result = NULL;
+    }
+
+    *procList = result;
+    *procCount = length / sizeof(struct kinfo_proc);
+
+    assert((err == 0) == (*procList != NULL));
+    return err;
+}
+
+
+char
+*psutil_get_cmd_path(long pid, size_t *pathsize)
+{
+    int mib[4];
+    char *path;
+    size_t size = 0;
+
+    /*
+     * Make a sysctl() call to get the raw argument space of the process.
+     */
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PATHNAME;
+    mib[3] = pid;
+
+    // call with a null buffer first to determine if we need a buffer
+    if (sysctl(mib, 4, NULL, &size, NULL, 0) == -1) {
+        return NULL;
+    }
+
+    path = malloc(size);
+    if (path == NULL) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    *pathsize = size;
+    if (sysctl(mib, 4, path, &size, NULL, 0) == -1) {
+        free(path);
+        return NULL;       // Insufficient privileges
+    }
+
+    return path;
+}
+
+
+/*
+ * XXX no longer used; it probably makes sense to remove it.
+ * Borrowed from psi Python System Information project
+ *
+ * Get command arguments and environment variables.
+ *
+ * Based on code from ps.
+ *
+ * Returns:
+ *      the raw argument buffer on success;
+ *      NULL on failure (Python exception raised) or on
+ *      insufficient privileges.
+ */
+char
+*psutil_get_cmd_args(long pid, size_t *argsize)
+{
+    int mib[4], argmax;
+    size_t size = sizeof(argmax);
+    char *procargs = NULL;
+
+    // Get the maximum process arguments size.
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_ARGMAX;
+
+    size = sizeof(argmax);
+    if (sysctl(mib, 2, &argmax, &size, NULL, 0) == -1)
+        return NULL;
+
+    // Allocate space for the arguments.
+    procargs = (char *)malloc(argmax);
+    if (procargs == NULL) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    /*
+     * Make a sysctl() call to get the raw argument space of the process.
+     */
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_ARGS;
+    mib[3] = pid;
+
+    size = argmax;
+    if (sysctl(mib, 4, procargs, &size, NULL, 0) == -1) {
+        free(procargs);
+        return NULL;       // Insufficient privileges
+    }
+
+    // return string and set the length of arguments
+    *argsize = size;
+    return procargs;
+}
+
+
+// returns the command line as a python list object
+PyObject *
+psutil_get_arg_list(long pid)
+{
+    char *argstr = NULL;
+    int pos = 0;
+    size_t argsize = 0;
+    PyObject *retlist = Py_BuildValue("[]");
+    PyObject *item = NULL;
+
+    if (pid < 0) {
+        return retlist;
+    }
+
+    argstr = psutil_get_cmd_args(pid, &argsize);
+    if (argstr == NULL) {
+        goto error;
+    }
+
+    // args are returned as a flattened string with \0 separators between
+    // arguments; add each string to the list, then step forward to the
+    // next separator
+    if (argsize > 0) {
+        while (pos < argsize) {
+            item = Py_BuildValue("s", &argstr[pos]);
+            if (!item)
+                goto error;
+            if (PyList_Append(retlist, item))
+                goto error;
+            Py_DECREF(item);
+            pos = pos + strlen(&argstr[pos]) + 1;
+        }
+    }
+
+    free(argstr);
+    return retlist;
+
+error:
+    Py_XDECREF(item);
+    Py_DECREF(retlist);
+    if (argstr != NULL)
+        free(argstr);
+    return NULL;
+}
+
+
+/*
+ * Return 1 if PID exists in the current process list, else 0.
+ */
+int
+psutil_pid_exists(long pid)
+{
+    int kill_ret;
+    if (pid < 0) {
+        return 0;
+    }
+
+    // if kill returns success or permission denied we know it's a valid PID
+    kill_ret = kill(pid , 0);
+    if ((0 == kill_ret) || (EPERM == errno)) {
+        return 1;
+    }
+
+    // otherwise return 0 for PID not found
+    return 0;
+}
+
+
+/*
+ * Set exception to AccessDenied if pid exists else NoSuchProcess.
+ */
+void
+psutil_raise_ad_or_nsp(long pid) {
+    if (psutil_pid_exists(pid) == 0) {
+        NoSuchProcess();
+    }
+    else {
+        AccessDenied();
+    }
+}
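
The kill(pid, 0) probe in psutil_pid_exists() is the usual POSIX liveness
check: signal 0 performs permission and existence checking without actually
delivering a signal, so EPERM still means the process exists. A rough Python
rendition of the same logic, for illustration only (not part of the commit):

    import errno
    import os

    def pid_exists(pid):
        # Signal 0 performs error checking only; EPERM means the PID is
        # alive but owned by another user, any other error means gone.
        if pid < 0:
            return False
        try:
            os.kill(pid, 0)
        except OSError as e:
            return e.errno == errno.EPERM
        return True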

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.h
new file mode 100644
index 0000000..858bd88
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/bsd/process_info.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+typedef struct kinfo_proc kinfo_proc;
+
+char *psutil_get_cmd_args(long pid, size_t *argsize);
+char *psutil_get_cmd_path(long pid, size_t *pathsize);
+int psutil_get_proc_list(struct kinfo_proc **procList, size_t *procCount);
+int psutil_pid_exists(long pid);
+PyObject* psutil_get_arg_list(long pid);

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.c
new file mode 100644
index 0000000..be8092e
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Helper functions related to fetching process information.
+ * Used by _psutil_osx module methods.
+ */
+
+
+#include <Python.h>
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>  // for INT_MAX
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <sys/sysctl.h>
+#include <libproc.h>
+
+#include "process_info.h"
+#include "../../_psutil_common.h"
+
+
+/*
+ * Return 1 if PID exists in the current process list, else 0.
+ */
+int
+psutil_pid_exists(long pid)
+{
+    int kill_ret;
+
+    // save some time if it's an invalid PID
+    if (pid < 0) {
+        return 0;
+    }
+
+    // if kill returns success or permission denied we know it's a valid PID
+    kill_ret = kill(pid , 0);
+    if ( (0 == kill_ret) || (EPERM == errno) ) {
+        return 1;
+    }
+
+    // otherwise return 0 for PID not found
+    return 0;
+}
+
+
+/*
+ * Returns a list of all BSD processes on the system.  This routine
+ * allocates the list and puts it in *procList and a count of the
+ * number of entries in *procCount.  You are responsible for freeing
+ * this list (use "free" from System framework).
+ * On success, the function returns 0.
+ * On error, the function returns a BSD errno value.
+ */
+int
+psutil_get_proc_list(kinfo_proc **procList, size_t *procCount)
+{
+    // Declaring mib as const requires use of a cast since the
+    // sysctl prototype doesn't include the const modifier.
+    static const int mib3[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
+    size_t           size, size2;
+    void            *ptr;
+    int              err, lim = 8;  // some limit
+
+    assert( procList != NULL);
+    assert(*procList == NULL);
+    assert(procCount != NULL);
+
+    *procCount = 0;
+
+    /*
+     * We start by calling sysctl with ptr == NULL and size == 0.
+     * That will succeed, and set size to the appropriate length.
+     * We then allocate a buffer of at least that size and call
+     * sysctl with that buffer.  If that succeeds, we're done.
+     * If that call fails with ENOMEM, we throw the buffer away
+     * and try again.
+     * Note that the loop calls sysctl with NULL again.  This
+     * is necessary because the ENOMEM failure case sets size to
+     * the amount of data returned, not the amount of data that
+     * could have been returned.
+     */
+    while (lim-- > 0) {
+        size = 0;
+        if (sysctl((int *)mib3, 3, NULL, &size, NULL, 0) == -1) {
+            return errno;
+        }
+
+        size2 = size + (size >> 3);  // add some
+        if (size2 > size) {
+            ptr = malloc(size2);
+            if (ptr == NULL) {
+                ptr = malloc(size);
+            } else {
+                size = size2;
+            }
+        }
+        else {
+            ptr = malloc(size);
+        }
+        if (ptr == NULL) {
+            return ENOMEM;
+        }
+
+        if (sysctl((int *)mib3, 3, ptr, &size, NULL, 0) == -1) {
+            err = errno;
+            free(ptr);
+            if (err != ENOMEM) {
+                return err;
+            }
+
+        } else {
+            *procList = (kinfo_proc *)ptr;
+            *procCount = size / sizeof(kinfo_proc);
+            return 0;
+        }
+    }
+    return ENOMEM;
+}
+
+
+// Read the maximum argument size for processes
+int
+psutil_get_argmax()
+{
+    int argmax;
+    int mib[] = { CTL_KERN, KERN_ARGMAX };
+    size_t size = sizeof(argmax);
+
+    if (sysctl(mib, 2, &argmax, &size, NULL, 0) == 0) {
+        return argmax;
+    }
+    return 0;
+}
+
+
+// return process args as a python list
+PyObject *
+psutil_get_arg_list(long pid)
+{
+    int mib[3];
+    int nargs;
+    int len;
+    char *procargs = NULL;
+    char *arg_ptr;
+    char *arg_end;
+    char *curr_arg;
+    size_t argmax;
+    PyObject *arg = NULL;
+    PyObject *arglist = NULL;
+
+    // special case for PID 0 (kernel_task) where cmdline cannot be fetched
+    if (pid == 0) {
+        return Py_BuildValue("[]");
+    }
+
+    // read argmax and allocate memory for argument space.
+    argmax = psutil_get_argmax();
+    if (! argmax) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    procargs = (char *)malloc(argmax);
+    if (NULL == procargs) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    // read argument space
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROCARGS2;
+    mib[2] = pid;
+    if (sysctl(mib, 3, procargs, &argmax, NULL, 0) < 0) {
+        if (EINVAL == errno) {
+            // EINVAL == access denied OR nonexistent PID
+            if ( psutil_pid_exists(pid) ) {
+                AccessDenied();
+            } else {
+                NoSuchProcess();
+            }
+        }
+        goto error;
+    }
+
+    arg_end = &procargs[argmax];
+    // copy the number of arguments to nargs
+    memcpy(&nargs, procargs, sizeof(nargs));
+
+    arg_ptr = procargs + sizeof(nargs);
+    len = strlen(arg_ptr);
+    arg_ptr += len + 1;
+
+    if (arg_ptr == arg_end) {
+        free(procargs);
+        return Py_BuildValue("[]");
+    }
+
+    // skip ahead to the first argument
+    for (; arg_ptr < arg_end; arg_ptr++) {
+        if (*arg_ptr != '\0') {
+            break;
+        }
+    }
+
+    // iterate through arguments
+    curr_arg = arg_ptr;
+    arglist = Py_BuildValue("[]");
+    if (!arglist)
+        goto error;
+    while (arg_ptr < arg_end && nargs > 0) {
+        if (*arg_ptr++ == '\0') {
+            arg = Py_BuildValue("s", curr_arg);
+            if (!arg)
+                goto error;
+            if (PyList_Append(arglist, arg))
+                goto error;
+            Py_DECREF(arg);
+            // iterate to next arg and decrement # of args
+            curr_arg = arg_ptr;
+            nargs--;
+        }
+    }
+
+    free(procargs);
+    return arglist;
+
+error:
+    Py_XDECREF(arg);
+    Py_XDECREF(arglist);
+    if (procargs != NULL)
+        free(procargs);
+    return NULL;
+}
+
+
+int
+psutil_get_kinfo_proc(pid_t pid, struct kinfo_proc *kp)
+{
+    int mib[4];
+    size_t len;
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PID;
+    mib[3] = pid;
+
+    // fetch the info with sysctl()
+    len = sizeof(struct kinfo_proc);
+
+    // now read the data from sysctl
+    if (sysctl(mib, 4, kp, &len, NULL, 0) == -1) {
+        // raise an exception and throw errno as the error
+        PyErr_SetFromErrno(PyExc_OSError);
+        return -1;
+    }
+
+    // sysctl succeeds but len is zero, happens when process has gone away
+    if (len == 0) {
+        NoSuchProcess();
+        return -1;
+    }
+    return 0;
+}
+
+
+/*
+ * A thin wrapper around proc_pidinfo()
+ */
+int
+psutil_proc_pidinfo(long pid, int flavor, void *pti, int size)
+{
+    int ret = proc_pidinfo((int)pid, flavor, 0, pti, size);
+    if (ret == 0) {
+        if (! psutil_pid_exists(pid)) {
+            NoSuchProcess();
+            return 0;
+        }
+        else {
+            AccessDenied();
+            return 0;
+        }
+    }
+    else if (ret != size) {
+        AccessDenied();
+        return 0;
+    }
+    else {
+        return 1;
+    }
+}
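
Two details worth noting in the OS X variant: unlike the FreeBSD code above,
the grow-and-retry sysctl loop here gives up after a fixed number of attempts
(lim = 8) instead of looping until ENOMEM clears, and psutil_get_arg_list()
has to decode the KERN_PROCARGS2 layout by hand: a leading argument count,
then the executable path, NUL padding, then the NUL-separated argv strings.
A rough Python sketch of that decoding step (illustrative only;
parse_procargs2 is a hypothetical helper, not part of this commit):

    import struct

    def parse_procargs2(buf):
        # buf is the raw bytes returned by sysctl(KERN_PROCARGS2, pid).
        nargs = struct.unpack("@i", buf[:4])[0]  # leading argument count
        rest = buf[4:]
        rest = rest[rest.index(b"\0"):]          # skip the executable path
        rest = rest.lstrip(b"\0")                # and the NUL padding after it
        return rest.split(b"\0")[:nargs]         # first nargs strings are argv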

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.h
new file mode 100644
index 0000000..c89c857
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/osx/process_info.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+typedef struct kinfo_proc kinfo_proc;
+
+int psutil_get_argmax(void);
+int psutil_get_kinfo_proc(pid_t pid, struct kinfo_proc *kp);
+int psutil_get_proc_list(kinfo_proc **procList, size_t *procCount);
+int psutil_pid_exists(long pid);
+int psutil_proc_pidinfo(long pid, int flavor, void *pti, int size);
+PyObject* psutil_get_arg_list(long pid);

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/glpi.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/glpi.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/glpi.h
new file mode 100644
index 0000000..6f98483
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/glpi.h
@@ -0,0 +1,41 @@
+// mingw headers are missing this
+
+typedef enum _LOGICAL_PROCESSOR_RELATIONSHIP {
+    RelationProcessorCore,
+    RelationNumaNode,
+    RelationCache,
+    RelationProcessorPackage,
+    RelationGroup,
+    RelationAll=0xffff
+} LOGICAL_PROCESSOR_RELATIONSHIP;
+
+typedef enum _PROCESSOR_CACHE_TYPE {
+    CacheUnified,CacheInstruction,CacheData,CacheTrace
+} PROCESSOR_CACHE_TYPE;
+
+typedef struct _CACHE_DESCRIPTOR {
+    BYTE Level;
+    BYTE Associativity;
+    WORD LineSize;
+    DWORD Size;
+    PROCESSOR_CACHE_TYPE Type;
+} CACHE_DESCRIPTOR,*PCACHE_DESCRIPTOR;
+
+typedef struct _SYSTEM_LOGICAL_PROCESSOR_INFORMATION {
+    ULONG_PTR ProcessorMask;
+    LOGICAL_PROCESSOR_RELATIONSHIP Relationship;
+    union {
+        struct {
+            BYTE Flags;
+        } ProcessorCore;
+        struct {
+            DWORD NodeNumber;
+        } NumaNode;
+        CACHE_DESCRIPTOR Cache;
+        ULONGLONG Reserved[2];
+    };
+} SYSTEM_LOGICAL_PROCESSOR_INFORMATION,*PSYSTEM_LOGICAL_PROCESSOR_INFORMATION;
+
+WINBASEAPI WINBOOL WINAPI
+GetLogicalProcessorInformation(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION Buffer,
+                               PDWORD ReturnedLength);
\ No newline at end of file
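
This glpi.h shim exists because, as its first line says, the mingw headers
lack these declarations; GetLogicalProcessorInformation() with
RelationProcessorCore is presumably what backs the psutil_cpu_count_phys()
binding declared in _psutil_windows.h above.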

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/ntextapi.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/ntextapi.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/ntextapi.h
new file mode 100644
index 0000000..298c078
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/ntextapi.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+typedef enum _KTHREAD_STATE {
+    Initialized,
+    Ready,
+    Running,
+    Standby,
+    Terminated,
+    Waiting,
+    Transition,
+    DeferredReady,
+    GateWait,
+    MaximumThreadState
+} KTHREAD_STATE, *PKTHREAD_STATE;
+
+typedef enum _KWAIT_REASON {
+    Executive = 0,
+    FreePage = 1,
+    PageIn = 2,
+    PoolAllocation = 3,
+    DelayExecution = 4,
+    Suspended = 5,
+    UserRequest = 6,
+    WrExecutive = 7,
+    WrFreePage = 8,
+    WrPageIn = 9,
+    WrPoolAllocation = 10,
+    WrDelayExecution = 11,
+    WrSuspended = 12,
+    WrUserRequest = 13,
+    WrEventPair = 14,
+    WrQueue = 15,
+    WrLpcReceive = 16,
+    WrLpcReply = 17,
+    WrVirtualMemory = 18,
+    WrPageOut = 19,
+    WrRendezvous = 20,
+    Spare2 = 21,
+    Spare3 = 22,
+    Spare4 = 23,
+    Spare5 = 24,
+    WrCalloutStack = 25,
+    WrKernel = 26,
+    WrResource = 27,
+    WrPushLock = 28,
+    WrMutex = 29,
+    WrQuantumEnd = 30,
+    WrDispatchInt = 31,
+    WrPreempted = 32,
+    WrYieldExecution = 33,
+    WrFastMutex = 34,
+    WrGuardedMutex = 35,
+    WrRundown = 36,
+    MaximumWaitReason = 37
+} KWAIT_REASON, *PKWAIT_REASON;
+
+typedef struct _CLIENT_ID {
+    HANDLE UniqueProcess;
+    HANDLE UniqueThread;
+} CLIENT_ID, *PCLIENT_ID;
+
+
+typedef struct _UNICODE_STRING {
+    USHORT Length;
+    USHORT MaximumLength;
+    PWSTR Buffer;
+} UNICODE_STRING, *PUNICODE_STRING;
+
+typedef struct _SYSTEM_TIMEOFDAY_INFORMATION {
+    LARGE_INTEGER BootTime;
+    LARGE_INTEGER CurrentTime;
+    LARGE_INTEGER TimeZoneBias;
+    ULONG TimeZoneId;
+    ULONG Reserved;
+    ULONGLONG BootTimeBias;
+    ULONGLONG SleepTimeBias;
+} SYSTEM_TIMEOFDAY_INFORMATION, *PSYSTEM_TIMEOFDAY_INFORMATION;
+
+typedef struct _SYSTEM_THREAD_INFORMATION {
+    LARGE_INTEGER KernelTime;
+    LARGE_INTEGER UserTime;
+    LARGE_INTEGER CreateTime;
+    ULONG WaitTime;
+    PVOID StartAddress;
+    CLIENT_ID ClientId;
+    LONG Priority;
+    LONG BasePriority;
+    ULONG ContextSwitches;
+    ULONG ThreadState;
+    KWAIT_REASON WaitReason;
+} SYSTEM_THREAD_INFORMATION, *PSYSTEM_THREAD_INFORMATION;
+
+typedef struct _TEB *PTEB;
+
+// private
+typedef struct _SYSTEM_EXTENDED_THREAD_INFORMATION {
+    SYSTEM_THREAD_INFORMATION ThreadInfo;
+    PVOID StackBase;
+    PVOID StackLimit;
+    PVOID Win32StartAddress;
+    PTEB TebBase;
+    ULONG_PTR Reserved2;
+    ULONG_PTR Reserved3;
+    ULONG_PTR Reserved4;
+} SYSTEM_EXTENDED_THREAD_INFORMATION, *PSYSTEM_EXTENDED_THREAD_INFORMATION;
+
+typedef struct _SYSTEM_PROCESS_INFORMATION {
+    ULONG NextEntryOffset;
+    ULONG NumberOfThreads;
+    LARGE_INTEGER SpareLi1;
+    LARGE_INTEGER SpareLi2;
+    LARGE_INTEGER SpareLi3;
+    LARGE_INTEGER CreateTime;
+    LARGE_INTEGER UserTime;
+    LARGE_INTEGER KernelTime;
+    UNICODE_STRING ImageName;
+    LONG BasePriority;
+    HANDLE UniqueProcessId;
+    HANDLE InheritedFromUniqueProcessId;
+    ULONG HandleCount;
+    ULONG SessionId;
+    ULONG_PTR PageDirectoryBase;
+    SIZE_T PeakVirtualSize;
+    SIZE_T VirtualSize;
+    DWORD PageFaultCount;
+    SIZE_T PeakWorkingSetSize;
+    SIZE_T WorkingSetSize;
+    SIZE_T QuotaPeakPagedPoolUsage;
+    SIZE_T QuotaPagedPoolUsage;
+    SIZE_T QuotaPeakNonPagedPoolUsage;
+    SIZE_T QuotaNonPagedPoolUsage;
+    SIZE_T PagefileUsage;
+    SIZE_T PeakPagefileUsage;
+    SIZE_T PrivatePageCount;
+    LARGE_INTEGER ReadOperationCount;
+    LARGE_INTEGER WriteOperationCount;
+    LARGE_INTEGER OtherOperationCount;
+    LARGE_INTEGER ReadTransferCount;
+    LARGE_INTEGER WriteTransferCount;
+    LARGE_INTEGER OtherTransferCount;
+    SYSTEM_THREAD_INFORMATION Threads[1];
+} SYSTEM_PROCESS_INFORMATION, *PSYSTEM_PROCESS_INFORMATION;
+
+
+// structures and enums from winternl.h (not available under mingw)
+typedef struct _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION {
+    LARGE_INTEGER IdleTime;
+    LARGE_INTEGER KernelTime;
+    LARGE_INTEGER UserTime;
+    LARGE_INTEGER Reserved1[2];
+    ULONG Reserved2;
+} SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION,
+    *PSYSTEM_PROCESSOR_PERFORMANCE_INFORMATION;
+
+
+typedef enum _SYSTEM_INFORMATION_CLASS {
+    SystemBasicInformation = 0,
+    SystemPerformanceInformation = 2,
+    SystemTimeOfDayInformation = 3,
+    SystemProcessInformation = 5,
+    SystemProcessorPerformanceInformation = 8,
+    SystemInterruptInformation = 23,
+    SystemExceptionInformation = 33,
+    SystemRegistryQuotaInformation = 37,
+    SystemLookasideInformation = 45
+} SYSTEM_INFORMATION_CLASS;
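+
+// Illustrative sketch (not part of the original header): these class
+// values are passed to ntdll's NtQuerySystemInformation(). Walking the
+// list returned for SystemProcessInformation, assuming "buffer" was sized
+// via the usual STATUS_INFO_LENGTH_MISMATCH retry loop:
+//
+//     PSYSTEM_PROCESS_INFORMATION p = (PSYSTEM_PROCESS_INFORMATION)buffer;
+//     for (;;) {
+//         // use p->UniqueProcessId, p->ImageName, p->NumberOfThreads, ...
+//         if (p->NextEntryOffset == 0)
+//             break;
+//         p = (PSYSTEM_PROCESS_INFORMATION)((PCHAR)p + p->NextEntryOffset);
+//     }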
+
+
+// ================================================
+// psutil.users() support
+// ================================================
+
+typedef struct _WINSTATION_INFO {
+    BYTE Reserved1[72];
+    ULONG SessionId;
+    BYTE Reserved2[4];
+    FILETIME ConnectTime;
+    FILETIME DisconnectTime;
+    FILETIME LastInputTime;
+    FILETIME LoginTime;
+    BYTE Reserved3[1096];
+    FILETIME CurrentTime;
+} WINSTATION_INFO, *PWINSTATION_INFO;
+
+typedef enum _WINSTATIONINFOCLASS {
+     WinStationInformation = 8
+} WINSTATIONINFOCLASS;
+
+typedef BOOLEAN (WINAPI * PWINSTATIONQUERYINFORMATIONW)
+                 (HANDLE,ULONG,WINSTATIONINFOCLASS,PVOID,ULONG,PULONG);
+
+typedef struct _WINSTATIONINFORMATIONW {
+    BYTE Reserved2[70];
+    ULONG LogonId;
+    BYTE Reserved3[1140];
+} WINSTATIONINFORMATIONW, *PWINSTATIONINFORMATIONW;
+
+// mingw support:
+// http://www.koders.com/c/fid7C02CAE627C526914CDEB427405B51DF393A5EFA.aspx
+#ifndef _INC_WTSAPI
+typedef struct _WTS_CLIENT_ADDRESS {
+    DWORD AddressFamily;  // AF_INET, AF_IPX, AF_NETBIOS, AF_UNSPEC
+    BYTE  Address[20];    // client network address
+} WTS_CLIENT_ADDRESS, * PWTS_CLIENT_ADDRESS;
+
+HANDLE WINAPI WTSOpenServerA(IN LPSTR pServerName);
+
+VOID WINAPI WTSCloseServer(IN HANDLE hServer);
+#endif
+
+
+/*
+ * NtQueryInformationProcess code taken from
+ * http://wj32.wordpress.com/2009/01/24/howto-get-the-command-line-of-processes/
+ * typedefs needed to compile against ntdll functions not exposed in the API
+ */
+typedef LONG NTSTATUS;
+
+typedef NTSTATUS (NTAPI *_NtQueryInformationProcess)(
+    HANDLE ProcessHandle,
+    DWORD ProcessInformationClass,
+    PVOID ProcessInformation,
+    DWORD ProcessInformationLength,
+    PDWORD ReturnLength
+);
+
+typedef NTSTATUS (NTAPI *_NtSetInformationProcess)(
+    HANDLE ProcessHandle,
+    DWORD ProcessInformationClass,
+    PVOID ProcessInformation,
+    DWORD ProcessInformationLength
+);
+
+typedef struct _PROCESS_BASIC_INFORMATION {
+    PVOID Reserved1;
+    PVOID PebBaseAddress;
+    PVOID Reserved2[2];
+    ULONG_PTR UniqueProcessId;
+    PVOID Reserved3;
+} PROCESS_BASIC_INFORMATION, *PPROCESS_BASIC_INFORMATION;
+
+typedef enum _PROCESSINFOCLASS {
+    ProcessBasicInformation,
+    ProcessQuotaLimits,
+    ProcessIoCounters,
+    ProcessVmCounters,
+    ProcessTimes,
+    ProcessBasePriority,
+    ProcessRaisePriority,
+    ProcessDebugPort,
+    ProcessExceptionPort,
+    ProcessAccessToken,
+    ProcessLdtInformation,
+    ProcessLdtSize,
+    ProcessDefaultHardErrorMode,
+    ProcessIoPortHandlers,
+    ProcessPooledUsageAndLimits,
+    ProcessWorkingSetWatch,
+    ProcessUserModeIOPL,
+    ProcessEnableAlignmentFaultFixup,
+    ProcessPriorityClass,
+    ProcessWx86Information,
+    ProcessHandleCount,
+    ProcessAffinityMask,
+    ProcessPriorityBoost,
+    ProcessDeviceMap,
+    ProcessSessionInformation,
+    ProcessForegroundInformation,
+    ProcessWow64Information,
+    /* added after XP+ */
+    ProcessImageFileName,
+    ProcessLUIDDeviceMapsEnabled,
+    ProcessBreakOnTermination,
+    ProcessDebugObjectHandle,
+    ProcessDebugFlags,
+    ProcessHandleTracing,
+    ProcessIoPriority,
+    ProcessExecuteFlags,
+    ProcessResourceManagement,
+    ProcessCookie,
+    ProcessImageInformation,
+    MaxProcessInfoClass
+} PROCESSINFOCLASS;
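+
+// Illustrative sketch (not from the original header): resolving and calling
+// the private API, in the spirit of the wj32 article cited above; the
+// (status >= 0) test is the usual NT_SUCCESS check:
+//
+//     _NtQueryInformationProcess fn = (_NtQueryInformationProcess)
+//         GetProcAddress(GetModuleHandleA("ntdll.dll"),
+//                        "NtQueryInformationProcess");
+//     PROCESS_BASIC_INFORMATION pbi;
+//     if (fn && fn(hProcess, ProcessBasicInformation,
+//                  &pbi, sizeof(pbi), NULL) >= 0) {
+//         // pbi.PebBaseAddress points at the remote process's PEB
+//     }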

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.c
new file mode 100644
index 0000000..a8a9de1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ */
+
+#ifndef UNICODE
+#define UNICODE
+#endif
+
+#include <Python.h>
+#include <windows.h>
+#include <stdio.h>
+#include "process_handles.h"
+
+#ifndef NT_SUCCESS
+#define NT_SUCCESS(x) ((x) >= 0)
+#endif
+#define STATUS_INFO_LENGTH_MISMATCH 0xc0000004
+
+#define SystemHandleInformation 16
+#define ObjectBasicInformation 0
+#define ObjectNameInformation 1
+#define ObjectTypeInformation 2
+
+
+typedef LONG NTSTATUS;
+
+typedef struct _UNICODE_STRING {
+    USHORT Length;
+    USHORT MaximumLength;
+    PWSTR Buffer;
+} UNICODE_STRING, *PUNICODE_STRING;
+
+typedef NTSTATUS (NTAPI *_NtQuerySystemInformation)(
+    ULONG SystemInformationClass,
+    PVOID SystemInformation,
+    ULONG SystemInformationLength,
+    PULONG ReturnLength
+);
+
+typedef NTSTATUS (NTAPI *_NtDuplicateObject)(
+    HANDLE SourceProcessHandle,
+    HANDLE SourceHandle,
+    HANDLE TargetProcessHandle,
+    PHANDLE TargetHandle,
+    ACCESS_MASK DesiredAccess,
+    ULONG Attributes,
+    ULONG Options
+);
+
+typedef NTSTATUS (NTAPI *_NtQueryObject)(
+    HANDLE ObjectHandle,
+    ULONG ObjectInformationClass,
+    PVOID ObjectInformation,
+    ULONG ObjectInformationLength,
+    PULONG ReturnLength
+);
+
+typedef struct _SYSTEM_HANDLE {
+    ULONG ProcessId;
+    BYTE ObjectTypeNumber;
+    BYTE Flags;
+    USHORT Handle;
+    PVOID Object;
+    ACCESS_MASK GrantedAccess;
+} SYSTEM_HANDLE, *PSYSTEM_HANDLE;
+
+typedef struct _SYSTEM_HANDLE_INFORMATION {
+    ULONG HandleCount;
+    SYSTEM_HANDLE Handles[1];
+} SYSTEM_HANDLE_INFORMATION, *PSYSTEM_HANDLE_INFORMATION;
+
+typedef enum _POOL_TYPE {
+    NonPagedPool,
+    PagedPool,
+    NonPagedPoolMustSucceed,
+    DontUseThisType,
+    NonPagedPoolCacheAligned,
+    PagedPoolCacheAligned,
+    NonPagedPoolCacheAlignedMustS
+} POOL_TYPE, *PPOOL_TYPE;
+
+typedef struct _OBJECT_TYPE_INFORMATION {
+    UNICODE_STRING Name;
+    ULONG TotalNumberOfObjects;
+    ULONG TotalNumberOfHandles;
+    ULONG TotalPagedPoolUsage;
+    ULONG TotalNonPagedPoolUsage;
+    ULONG TotalNamePoolUsage;
+    ULONG TotalHandleTableUsage;
+    ULONG HighWaterNumberOfObjects;
+    ULONG HighWaterNumberOfHandles;
+    ULONG HighWaterPagedPoolUsage;
+    ULONG HighWaterNonPagedPoolUsage;
+    ULONG HighWaterNamePoolUsage;
+    ULONG HighWaterHandleTableUsage;
+    ULONG InvalidAttributes;
+    GENERIC_MAPPING GenericMapping;
+    ULONG ValidAccess;
+    BOOLEAN SecurityRequired;
+    BOOLEAN MaintainHandleCount;
+    USHORT MaintainTypeList;
+    POOL_TYPE PoolType;
+    ULONG PagedPoolUsage;
+    ULONG NonPagedPoolUsage;
+} OBJECT_TYPE_INFORMATION, *POBJECT_TYPE_INFORMATION;
+
+
+PVOID
+GetLibraryProcAddress(PSTR LibraryName, PSTR ProcName)
+{
+    return GetProcAddress(GetModuleHandleA(LibraryName), ProcName);
+}
+
+
+PyObject *
+psutil_get_open_files(long pid, HANDLE processHandle)
+{
+    _NtQuerySystemInformation NtQuerySystemInformation =
+        GetLibraryProcAddress("ntdll.dll", "NtQuerySystemInformation");
+    _NtQueryObject NtQueryObject =
+        GetLibraryProcAddress("ntdll.dll", "NtQueryObject");
+
+    NTSTATUS                    status;
+    PSYSTEM_HANDLE_INFORMATION  handleInfo;
+    ULONG                       handleInfoSize = 0x10000;
+    ULONG                       i;
+    ULONG                       fileNameLength;
+    PyObject                    *filesList = Py_BuildValue("[]");
+    PyObject                    *arg = NULL;
+    PyObject                    *fileFromWchar = NULL;
+
+    if (filesList == NULL)
+        return NULL;
+
+    handleInfo = (PSYSTEM_HANDLE_INFORMATION)malloc(handleInfoSize);
+    if (handleInfo == NULL) {
+        Py_DECREF(filesList);
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    // NtQuerySystemInformation won't give us the correct buffer size,
+    // so we guess by doubling the buffer size.
+    while ((status = NtQuerySystemInformation(
+                         SystemHandleInformation,
+                         handleInfo,
+                         handleInfoSize,
+                         NULL
+                     )) == STATUS_INFO_LENGTH_MISMATCH)
+    {
+        handleInfo = (PSYSTEM_HANDLE_INFORMATION) \
+            realloc(handleInfo, handleInfoSize *= 2);
+    }
+
+    // NtQuerySystemInformation stopped giving us STATUS_INFO_LENGTH_MISMATCH
+    if (!NT_SUCCESS(status)) {
+        Py_DECREF(filesList);
+        free(handleInfo);
+        return NULL;
+    }
+
+    for (i = 0; i < handleInfo->HandleCount; i++) {
+        SYSTEM_HANDLE            handle = handleInfo->Handles[i];
+        HANDLE                   dupHandle = NULL;
+        HANDLE                   mapHandle = NULL;
+        POBJECT_TYPE_INFORMATION objectTypeInfo = NULL;
+        PVOID                    objectNameInfo;
+        UNICODE_STRING           objectName;
+        ULONG                    returnLength;
+        DWORD                    error = 0;
+        fileFromWchar = NULL;
+        arg = NULL;
+
+        // Check if this handle belongs to the PID the user specified.
+        if (handle.ProcessId != pid)
+            continue;
+
+        // Skip handles with the following access codes as the next call
+        // to NtDuplicateObject() or NtQueryObject() might hang forever.
+        if ((handle.GrantedAccess == 0x0012019f)
+                || (handle.GrantedAccess == 0x001a019f)
+                || (handle.GrantedAccess == 0x00120189)
+                || (handle.GrantedAccess == 0x00100000)) {
+            continue;
+        }
+
+        if (!DuplicateHandle(processHandle,
+                             handle.Handle,
+                             GetCurrentProcess(),
+                             &dupHandle,
+                             0,
+                             TRUE,
+                             DUPLICATE_SAME_ACCESS))
+         {
+             //printf("[%#x] Error: %d \n", handle.Handle, GetLastError());
+             continue;
+         }
+
+
+        mapHandle = CreateFileMapping(dupHandle,
+                                      NULL,
+                                      PAGE_READONLY,
+                                      0,
+                                      0,
+                                      NULL);
+        if (mapHandle == NULL) {
+            error = GetLastError();
+            // skip handle types CreateFileMapping() cannot map
+            if (error == ERROR_INVALID_HANDLE ||
+                error == ERROR_BAD_EXE_FORMAT) {
+                CloseHandle(dupHandle);
+                //printf("CreateFileMapping Error: %d\n", error);
+                continue;
+            }
+        }
+        if (mapHandle != NULL)
+            CloseHandle(mapHandle);
+
+        // Query the object type.
+        objectTypeInfo = (POBJECT_TYPE_INFORMATION)malloc(0x1000);
+        if (!NT_SUCCESS(NtQueryObject(
+                            dupHandle,
+                            ObjectTypeInformation,
+                            objectTypeInfo,
+                            0x1000,
+                            NULL
+                        )))
+        {
+            free(objectTypeInfo);
+            CloseHandle(dupHandle);
+            continue;
+        }
+
+        objectNameInfo = malloc(0x1000);
+        if (!NT_SUCCESS(NtQueryObject(
+                            dupHandle,
+                            ObjectNameInformation,
+                            objectNameInfo,
+                            0x1000,
+                            &returnLength
+                        )))
+        {
+            // Reallocate the buffer and try again.
+            objectNameInfo = realloc(objectNameInfo, returnLength);
+            if (!NT_SUCCESS(NtQueryObject(
+                                dupHandle,
+                                ObjectNameInformation,
+                                objectNameInfo,
+                                returnLength,
+                                NULL
+                            )))
+            {
+                // We have the type name, so just display that.
+                /*
+                printf(
+                    "[%#x] %.*S: (could not get name)\n",
+                    handle.Handle,
+                    objectTypeInfo->Name.Length / 2,
+                    objectTypeInfo->Name.Buffer
+                    );
+                */
+                free(objectTypeInfo);
+                free(objectNameInfo);
+                CloseHandle(dupHandle);
+                continue;
+
+            }
+        }
+
+        // Cast our buffer into a UNICODE_STRING.
+        objectName = *(PUNICODE_STRING)objectNameInfo;
+
+        // Print the information!
+        if (objectName.Length)
+        {
+            // The object has a name. Make sure it is a file; otherwise
+            // ignore it.
+            fileNameLength = objectName.Length / 2;
+            if (wcscmp(objectTypeInfo->Name.Buffer, L"File") == 0) {
+                // printf("%.*S\n", objectName.Length / 2, objectName.Buffer);
+                fileFromWchar = PyUnicode_FromWideChar(objectName.Buffer,
+                                                       fileNameLength);
+                if (fileFromWchar == NULL)
+                    goto error_py_fun;
+#if PY_MAJOR_VERSION >= 3
+                arg = Py_BuildValue("N",
+                                    PyUnicode_AsUTF8String(fileFromWchar));
+#else
+                arg = Py_BuildValue("N",
+                                    PyUnicode_FromObject(fileFromWchar));
+#endif
+                if (!arg)
+                    goto error_py_fun;
+                Py_XDECREF(fileFromWchar);
+                fileFromWchar = NULL;
+                if (PyList_Append(filesList, arg))
+                    goto error_py_fun;
+                Py_XDECREF(arg);
+            }
+            /*
+            printf(
+                "[%#x] %.*S: %.*S\n",
+                handle.Handle,
+                objectTypeInfo->Name.Length / 2,
+                objectTypeInfo->Name.Buffer,
+                objectName.Length / 2,
+                objectName.Buffer
+                );
+            */
+        }
+        else
+        {
+            // Print something else.
+            /*
+            printf(
+                "[%#x] %.*S: (unnamed)\n",
+                handle.Handle,
+                objectTypeInfo->Name.Length / 2,
+                objectTypeInfo->Name.Buffer
+                );
+            */
+            ;
+        }
+        free(objectTypeInfo);
+        free(objectNameInfo);
+        CloseHandle(dupHandle);
+    }
+    free(handleInfo);
+    CloseHandle(processHandle);
+    return filesList;
+
+error_py_fun:
+    Py_XDECREF(arg);
+    Py_XDECREF(fileFromWchar);
+    Py_DECREF(filesList);
+    return NULL;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.h
new file mode 100644
index 0000000..342ce8f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_handles.h
@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+#include <windows.h>
+
+PyObject* psutil_get_open_files(long pid, HANDLE processHandle);
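+
+// Illustrative sketch (names below are hypothetical, not psutil's): wiring
+// the helper into a CPython extension method. Note psutil_get_open_files()
+// closes the process handle itself on success.
+//
+//     static PyObject *
+//     example_open_files(PyObject *self, PyObject *args) {
+//         long pid;
+//         HANDLE h;
+//         if (!PyArg_ParseTuple(args, "l", &pid))
+//             return NULL;
+//         h = OpenProcess(PROCESS_DUP_HANDLE | PROCESS_QUERY_INFORMATION,
+//                         FALSE, (DWORD)pid);
+//         if (h == NULL)
+//             return PyErr_SetFromWindowsErr(0);
+//         return psutil_get_open_files(pid, h);
+//     }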

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.c
new file mode 100644
index 0000000..8298b16
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Helper functions related to fetching process information. Used by
+ * _psutil_windows module methods.
+ */
+
+#include <Python.h>
+#include <windows.h>
+#include <Psapi.h>
+#include <tlhelp32.h>
+
+#include "security.h"
+#include "process_info.h"
+#include "ntextapi.h"
+#include "../../_psutil_common.h"
+
+
+/*
+ * A wrapper around OpenProcess setting NSP exception if process
+ * no longer exists.
+ * "pid" is the process pid, "dwDesiredAccess" is the first argument
+ * expected by OpenProcess.
+ * Return a process handle or NULL.
+ */
+HANDLE
+psutil_handle_from_pid_waccess(DWORD pid, DWORD dwDesiredAccess)
+{
+    HANDLE hProcess;
+    DWORD processExitCode = 0;
+
+    if (pid == 0) {
+        // otherwise we'd get NoSuchProcess
+        return AccessDenied();
+    }
+
+    hProcess = OpenProcess(dwDesiredAccess, FALSE, pid);
+    if (hProcess == NULL) {
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            NoSuchProcess();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+        }
+        return NULL;
+    }
+
+    // make sure the process is running
+    GetExitCodeProcess(hProcess, &processExitCode);
+    if (processExitCode == 0) {
+        NoSuchProcess();
+        CloseHandle(hProcess);
+        return NULL;
+    }
+    return hProcess;
+}
+
+
+/*
+ * Same as psutil_handle_from_pid_waccess but implicitly uses
+ * PROCESS_QUERY_INFORMATION | PROCESS_VM_READ as dwDesiredAccess
+ * parameter for OpenProcess.
+ */
+HANDLE
+psutil_handle_from_pid(DWORD pid) {
+    DWORD dwDesiredAccess = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
+    return psutil_handle_from_pid_waccess(pid, dwDesiredAccess);
+}
+
+
+// fetch the PEB base address from NtQueryInformationProcess()
+PVOID
+psutil_get_peb_address(HANDLE ProcessHandle)
+{
+    _NtQueryInformationProcess NtQueryInformationProcess =
+        (_NtQueryInformationProcess)GetProcAddress(
+            GetModuleHandleA("ntdll.dll"), "NtQueryInformationProcess");
+    PROCESS_BASIC_INFORMATION pbi;
+
+    NtQueryInformationProcess(ProcessHandle, 0, &pbi, sizeof(pbi), NULL);
+    return pbi.PebBaseAddress;
+}
+
+
+DWORD *
+psutil_get_pids(DWORD *numberOfReturnedPIDs) {
+    // Win32 SDK says the only way to know if our process array
+    // wasn't large enough is to check the returned size and make
+    // sure that it doesn't match the size of the array.
+    // If it does, we allocate a larger array and try again.
+
+    // Stores the actual array
+    DWORD *procArray = NULL;
+    DWORD procArrayByteSz;
+    int procArraySz = 0;
+
+    // Stores the byte size of the returned array from enumprocesses
+    DWORD enumReturnSz = 0;
+
+    do {
+        procArraySz += 1024;
+        free(procArray);
+        procArrayByteSz = procArraySz * sizeof(DWORD);
+        procArray = malloc(procArrayByteSz);
+        if (procArray == NULL) {
+            PyErr_NoMemory();
+            return NULL;
+        }
+        if (! EnumProcesses(procArray, procArrayByteSz, &enumReturnSz)) {
+            free(procArray);
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    } while (enumReturnSz == procArraySz * sizeof(DWORD));
+
+    // The number of elements is the returned size / size of each element
+    *numberOfReturnedPIDs = enumReturnSz / sizeof(DWORD);
+
+    return procArray;
+}
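+
+// Illustrative sketch (not part of the original file): typical use of
+// psutil_get_pids(); the caller owns and frees the returned array.
+//
+//     DWORD n = 0, i;
+//     DWORD *pids = psutil_get_pids(&n);
+//     if (pids != NULL) {
+//         for (i = 0; i < n; i++)
+//             printf("%lu\n", pids[i]);
+//         free(pids);
+//     }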
+
+
+int
+psutil_pid_is_running(DWORD pid)
+{
+    HANDLE hProcess;
+    DWORD exitCode;
+
+    // Special case for PID 0 System Idle Process
+    if (pid == 0) {
+        return 1;
+    }
+
+    if (pid < 0) {
+        return 0;
+    }
+
+    hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
+                           FALSE, pid);
+    if (NULL == hProcess) {
+        // invalid parameter is no such process
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            CloseHandle(hProcess);
+            return 0;
+        }
+
+        // access denied obviously means there's a process to deny access to...
+        if (GetLastError() == ERROR_ACCESS_DENIED) {
+            CloseHandle(hProcess);
+            return 1;
+        }
+
+        CloseHandle(hProcess);
+        PyErr_SetFromWindowsErr(0);
+        return -1;
+    }
+
+    if (GetExitCodeProcess(hProcess, &exitCode)) {
+        CloseHandle(hProcess);
+        return (exitCode == STILL_ACTIVE);
+    }
+
+    // access denied means there's a process there so we'll assume
+    // it's running
+    if (GetLastError() == ERROR_ACCESS_DENIED) {
+        CloseHandle(hProcess);
+        return 1;
+    }
+
+    PyErr_SetFromWindowsErr(0);
+    CloseHandle(hProcess);
+    return -1;
+}
+
+
+int
+psutil_pid_in_proclist(DWORD pid)
+{
+    DWORD *proclist = NULL;
+    DWORD numberOfReturnedPIDs;
+    DWORD i;
+
+    proclist = psutil_get_pids(&numberOfReturnedPIDs);
+    if (NULL == proclist) {
+        return -1;
+    }
+
+    for (i = 0; i < numberOfReturnedPIDs; i++) {
+        if (pid == proclist[i]) {
+            free(proclist);
+            return 1;
+        }
+    }
+
+    free(proclist);
+    return 0;
+}
+
+
+// Check the exit code from a process handle. Also returns FALSE on error.
+// XXX - not used anymore
+int
+handlep_is_running(HANDLE hProcess)
+{
+    DWORD dwCode;
+    if (NULL == hProcess) {
+        return 0;
+    }
+    if (GetExitCodeProcess(hProcess, &dwCode)) {
+        if (dwCode == STILL_ACTIVE) {
+            return 1;
+        }
+    }
+    return 0;
+}
+
+
+/*
+ * returns a Python list representing the arguments for the process
+ * with given pid or NULL on error.
+ */
+PyObject *
+psutil_get_arg_list(long pid)
+{
+    int nArgs, i;
+    LPWSTR *szArglist = NULL;
+    HANDLE hProcess = NULL;
+    PVOID pebAddress;
+    PVOID rtlUserProcParamsAddress;
+    UNICODE_STRING commandLine;
+    WCHAR *commandLineContents = NULL;
+    PyObject *arg = NULL;
+    PyObject *arg_from_wchar = NULL;
+    PyObject *argList = NULL;
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    pebAddress = psutil_get_peb_address(hProcess);
+
+    // get the address of ProcessParameters
+#ifdef _WIN64
+    if (!ReadProcessMemory(hProcess, (PCHAR)pebAddress + 32,
+                           &rtlUserProcParamsAddress, sizeof(PVOID), NULL))
+#else
+    if (!ReadProcessMemory(hProcess, (PCHAR)pebAddress + 0x10,
+                           &rtlUserProcParamsAddress, sizeof(PVOID), NULL))
+#endif
+    {
+        ////printf("Could not read the address of ProcessParameters!\n");
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    // read the CommandLine UNICODE_STRING structure
+#ifdef _WIN64
+    if (!ReadProcessMemory(hProcess, (PCHAR)rtlUserProcParamsAddress + 112,
+                           &commandLine, sizeof(commandLine), NULL))
+#else
+    if (!ReadProcessMemory(hProcess, (PCHAR)rtlUserProcParamsAddress + 0x40,
+                           &commandLine, sizeof(commandLine), NULL))
+#endif
+    {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+
+    // allocate memory to hold the command line
+    commandLineContents = (WCHAR *)malloc(commandLine.Length + 1);
+    if (commandLineContents == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    // read the command line
+    if (!ReadProcessMemory(hProcess, commandLine.Buffer,
+                           commandLineContents, commandLine.Length, NULL))
+    {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    // Null-terminate the string to prevent wcslen from returning an
+    // incorrect length; the index is in characters, while
+    // commandLine.Length is in bytes.
+    commandLineContents[(commandLine.Length / sizeof(WCHAR))] = '\0';
+
+    // attempt to parse the command line using the Win32 API and fall
+    // back on the plain cmdline string otherwise
+    szArglist = CommandLineToArgvW(commandLineContents, &nArgs);
+    if (NULL == szArglist) {
+        // failed to parse arglist
+        // encode as a UTF8 Python string object from WCHAR string
+        arg_from_wchar = PyUnicode_FromWideChar(commandLineContents,
+                                                commandLine.Length / 2);
+        if (arg_from_wchar == NULL)
+            goto error;
+#if PY_MAJOR_VERSION >= 3
+        argList = Py_BuildValue("N", PyUnicode_AsUTF8String(arg_from_wchar));
+#else
+        argList = Py_BuildValue("N", PyUnicode_FromObject(arg_from_wchar));
+#endif
+        if (!argList)
+            goto error;
+    }
+    else {
+        // arglist parsed as array of UNICODE_STRING, so convert each to
+        // Python string object and add to arg list
+        argList = Py_BuildValue("[]");
+        if (argList == NULL)
+            goto error;
+        for (i = 0; i < nArgs; i++) {
+            arg_from_wchar = NULL;
+            arg = NULL;
+            arg_from_wchar = PyUnicode_FromWideChar(szArglist[i],
+                                                    wcslen(szArglist[i]));
+            if (arg_from_wchar == NULL)
+                goto error;
+#if PY_MAJOR_VERSION >= 3
+            arg = PyUnicode_FromObject(arg_from_wchar);
+#else
+            arg = PyUnicode_AsUTF8String(arg_from_wchar);
+#endif
+            if (arg == NULL)
+                goto error;
+            Py_XDECREF(arg_from_wchar);
+            if (PyList_Append(argList, arg))
+                goto error;
+            Py_XDECREF(arg);
+        }
+    }
+
+    if (szArglist != NULL)
+        LocalFree(szArglist);
+    free(commandLineContents);
+    CloseHandle(hProcess);
+    return argList;
+
+error:
+    Py_XDECREF(arg);
+    Py_XDECREF(arg_from_wchar);
+    Py_XDECREF(argList);
+    if (hProcess != NULL)
+        CloseHandle(hProcess);
+    if (commandLineContents != NULL)
+        free(commandLineContents);
+    if (szArglist != NULL)
+        LocalFree(szArglist);
+    return NULL;
+}
+
+
+#define PH_FIRST_PROCESS(Processes) ((PSYSTEM_PROCESS_INFORMATION)(Processes))
+#define PH_NEXT_PROCESS(Process) ( \
+   ((PSYSTEM_PROCESS_INFORMATION)(Process))->NextEntryOffset ? \
+   (PSYSTEM_PROCESS_INFORMATION)((PCHAR)(Process) + \
+        ((PSYSTEM_PROCESS_INFORMATION)(Process))->NextEntryOffset) : \
+   NULL)
+
+const int STATUS_INFO_LENGTH_MISMATCH = 0xC0000004;
+const int STATUS_BUFFER_TOO_SMALL = 0xC0000023L;
+
+/*
+ * Given a process PID and a PSYSTEM_PROCESS_INFORMATION structure
+ * fills the structure with process information.
+ * On success return 1, else 0 with Python exception already set.
+ */
+int
+psutil_get_proc_info(DWORD pid, PSYSTEM_PROCESS_INFORMATION *retProcess,
+                 PVOID *retBuffer)
+{
+    static ULONG initialBufferSize = 0x4000;
+    NTSTATUS status;
+    PVOID buffer;
+    ULONG bufferSize;
+    PSYSTEM_PROCESS_INFORMATION process;
+
+    // get NtQuerySystemInformation
+    typedef DWORD (_stdcall * NTQSI_PROC) (int, PVOID, ULONG, PULONG);
+    NTQSI_PROC NtQuerySystemInformation;
+    HINSTANCE hNtDll;
+    hNtDll = LoadLibrary(TEXT("ntdll.dll"));
+    NtQuerySystemInformation = (NTQSI_PROC)GetProcAddress(
+        hNtDll, "NtQuerySystemInformation");
+
+    bufferSize = initialBufferSize;
+    buffer = malloc(bufferSize);
+    if (buffer == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    while (TRUE) {
+        status = NtQuerySystemInformation(SystemProcessInformation, buffer,
+                                          bufferSize, &bufferSize);
+
+        if (status == STATUS_BUFFER_TOO_SMALL ||
+                status == STATUS_INFO_LENGTH_MISMATCH)
+        {
+            free(buffer);
+            buffer = malloc(bufferSize);
+            if (buffer == NULL) {
+                PyErr_NoMemory();
+                goto error;
+            }
+        }
+        else {
+            break;
+        }
+    }
+
+    if (status != 0) {
+        PyErr_Format(PyExc_RuntimeError, "NtQuerySystemInformation() failed");
+        goto error;
+    }
+
+    if (bufferSize <= 0x20000) {
+        initialBufferSize = bufferSize;
+    }
+
+    process = PH_FIRST_PROCESS(buffer);
+    do {
+        if (process->UniqueProcessId == (HANDLE)pid) {
+            *retProcess = process;
+            *retBuffer = buffer;
+            return 1;
+        }
+    } while ( (process = PH_NEXT_PROCESS(process)) );
+
+    NoSuchProcess();
+    goto error;
+
+error:
+    FreeLibrary(hNtDll);
+    if (buffer != NULL)
+        free(buffer);
+    return 0;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.h
new file mode 100644
index 0000000..9544f5d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/process_info.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+#include <windows.h>
+
+DWORD* psutil_get_pids(DWORD *numberOfReturnedPIDs);
+HANDLE psutil_handle_from_pid(DWORD pid);
+HANDLE psutil_handle_from_pid_waccess(DWORD pid, DWORD dwDesiredAccess);
+int psutil_handlep_is_running(HANDLE hProcess);
+int psutil_pid_in_proclist(DWORD pid);
+int psutil_pid_is_running(DWORD pid);
+PVOID psutil_get_peb_address(HANDLE ProcessHandle);
+PyObject* psutil_get_arg_list(long pid);

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.c
new file mode 100644
index 0000000..a837dfe
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Security related functions for Windows platform (Set privileges such as
+ * SeDebug), as well as security helper functions.
+ */
+
+#include <windows.h>
+#include <Python.h>
+
+
+/*
+ * Convert a process handle to a process token handle.
+ */
+HANDLE
+psutil_token_from_handle(HANDLE hProcess) {
+    HANDLE hToken = NULL;
+
+    if (! OpenProcessToken(hProcess, TOKEN_QUERY, &hToken)) {
+        return PyErr_SetFromWindowsErr(0);
+    }
+    return hToken;
+}
+
+
+/*
+ * http://www.ddj.com/windows/184405986
+ *
+ * There's a way to determine whether we're running under the Local System
+ * account. However (you guessed it), we have to call more Win32 functions to
+ * determine this. Backing up through the code listing, we need to make another
+ * call to GetTokenInformation, but instead of passing through the TOKEN_USER
+ * constant, we pass through the TOKEN_PRIVILEGES constant. This value returns
+ * an array of privileges that the account has in the environment. Iterating
+ * through the array, we call the function LookupPrivilegeName looking for the
+ * string "SeTcbPrivilege". If the function returns this string, then this
+ * account has Local System privileges
+ */
+int
+psutil_has_system_privilege(HANDLE hProcess) {
+    DWORD i;
+    DWORD dwSize = 0;
+    DWORD dwRetval = 0;
+    TCHAR privName[256];
+    DWORD dwNameSize = 256;
+    // PTOKEN_PRIVILEGES tp = NULL;
+    BYTE *pBuffer = NULL;
+    TOKEN_PRIVILEGES *tp = NULL;
+    HANDLE hToken = psutil_token_from_handle(hProcess);
+
+    if (NULL == hToken) {
+        return -1;
+    }
+
+    // call GetTokenInformation first to get the buffer size
+    if (! GetTokenInformation(hToken, TokenPrivileges, NULL, 0, &dwSize)) {
+        dwRetval = GetLastError();
+        // if it failed for a reason other than the buffer, bail out
+        if (dwRetval != ERROR_INSUFFICIENT_BUFFER ) {
+            PyErr_SetFromWindowsErr(dwRetval);
+            return 0;
+        }
+    }
+
+    // allocate buffer and call GetTokenInformation again
+    // tp = (PTOKEN_PRIVILEGES) GlobalAlloc(GPTR, dwSize);
+    pBuffer = (BYTE *) malloc(dwSize);
+    if (pBuffer == NULL) {
+        PyErr_NoMemory();
+        return -1;
+    }
+
+    if (! GetTokenInformation(hToken, TokenPrivileges, pBuffer,
+                              dwSize, &dwSize))
+    {
+        PyErr_SetFromWindowsErr(0);
+        free(pBuffer);
+        return -1;
+    }
+
+    // convert the BYTE buffer to a TOKEN_PRIVILEGES struct pointer
+    tp = (TOKEN_PRIVILEGES *)pBuffer;
+
+    // check all the privileges looking for SeTcbPrivilege
+    for (i = 0; i < tp->PrivilegeCount; i++) {
+        // reset the buffer contents and the buffer size
+        strcpy(privName, "");
+        dwNameSize = sizeof(privName) / sizeof(TCHAR);
+        if (! LookupPrivilegeName(NULL,
+                                  &tp->Privileges[i].Luid,
+                                  (LPTSTR)privName,
+                                  &dwNameSize))
+        {
+            PyErr_SetFromWindowsErr(0);
+            free(pBuffer);
+            return -1;
+        }
+
+        // if we find the SeTcbPrivilege then it's a LocalSystem process
+        if (! lstrcmpi(privName, TEXT("SeTcbPrivilege"))) {
+            free(pBuffer);
+            return 1;
+        }
+    }
+
+    free(pBuffer);
+    return 0;
+}
+
+
+BOOL
+psutil_set_privilege(HANDLE hToken, LPCTSTR Privilege, BOOL bEnablePrivilege)
+{
+    TOKEN_PRIVILEGES tp;
+    LUID luid;
+    TOKEN_PRIVILEGES tpPrevious;
+    DWORD cbPrevious = sizeof(TOKEN_PRIVILEGES);
+
+    if (!LookupPrivilegeValue( NULL, Privilege, &luid )) return FALSE;
+
+    // first pass.  get current privilege setting
+    tp.PrivilegeCount = 1;
+    tp.Privileges[0].Luid = luid;
+    tp.Privileges[0].Attributes = 0;
+
+    AdjustTokenPrivileges(
+        hToken,
+        FALSE,
+        &tp,
+        sizeof(TOKEN_PRIVILEGES),
+        &tpPrevious,
+        &cbPrevious
+    );
+
+    if (GetLastError() != ERROR_SUCCESS) return FALSE;
+
+    // second pass. set privilege based on previous setting
+    tpPrevious.PrivilegeCount = 1;
+    tpPrevious.Privileges[0].Luid = luid;
+
+    if (bEnablePrivilege) {
+        tpPrevious.Privileges[0].Attributes |= (SE_PRIVILEGE_ENABLED);
+    }
+
+    else {
+        tpPrevious.Privileges[0].Attributes ^=
+            (SE_PRIVILEGE_ENABLED & tpPrevious.Privileges[0].Attributes);
+    }
+
+    AdjustTokenPrivileges(
+        hToken,
+        FALSE,
+        &tpPrevious,
+        cbPrevious,
+        NULL,
+        NULL
+    );
+
+    if (GetLastError() != ERROR_SUCCESS) return FALSE;
+
+    return TRUE;
+}
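+
+// Illustrative sketch (not part of the original file): enabling a
+// privilege on the current process token, assuming it was opened with
+// TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY:
+//
+//     HANDLE tok;
+//     if (OpenProcessToken(GetCurrentProcess(),
+//                          TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &tok)) {
+//         psutil_set_privilege(tok, SE_DEBUG_NAME, TRUE);
+//         // ... do privileged work ...
+//         psutil_set_privilege(tok, SE_DEBUG_NAME, FALSE);
+//         CloseHandle(tok);
+//     }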
+
+
+int
+psutil_set_se_debug()
+{
+    HANDLE hToken = NULL;
+    if (! OpenThreadToken(GetCurrentThread(),
+                          TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+                          FALSE,
+                          &hToken)
+       ) {
+        if (GetLastError() == ERROR_NO_TOKEN) {
+            if (!ImpersonateSelf(SecurityImpersonation)) {
+                CloseHandle(hToken);
+                return 0;
+            }
+            if (!OpenThreadToken(GetCurrentThread(),
+                                 TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+                                 FALSE,
+                                 &hToken)
+               ) {
+                RevertToSelf();
+                CloseHandle(hToken);
+                return 0;
+            }
+        }
+    }
+
+    // enable SeDebugPrivilege (open any process)
+    if (! psutil_set_privilege(hToken, SE_DEBUG_NAME, TRUE)) {
+        RevertToSelf();
+        CloseHandle(hToken);
+        return 0;
+    }
+
+    RevertToSelf();
+    CloseHandle(hToken);
+    return 1;
+}
+
+
+int
+psutil_unset_se_debug()
+{
+    HANDLE hToken;
+    if (! OpenThreadToken(GetCurrentThread(),
+                          TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+                          FALSE,
+                          &hToken)
+       ) {
+        if (GetLastError() == ERROR_NO_TOKEN) {
+            if (! ImpersonateSelf(SecurityImpersonation)) {
+                return 0;
+            }
+
+            if (!OpenThreadToken(GetCurrentThread(),
+                                 TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
+                                 FALSE,
+                                 &hToken)
+               )
+            {
+                return 0;
+            }
+        }
+    }
+
+    // now disable SeDebug
+    if (! psutil_set_privilege(hToken, SE_DEBUG_NAME, FALSE)) {
+        return 0;
+    }
+
+    CloseHandle(hToken);
+    return 1;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.h
new file mode 100644
index 0000000..aa8a22a
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/arch/windows/security.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Security related functions for Windows platform (Set privileges such as
+ * SeDebug), as well as security helper functions.
+ */
+
+#include <windows.h>
+
+BOOL psutil_set_privilege(HANDLE hToken, LPCTSTR Privilege, BOOL bEnablePrivilege);
+HANDLE psutil_token_from_handle(HANDLE hProcess);
+int psutil_has_system_privilege(HANDLE hProcess);
+int psutil_set_se_debug();
+int psutil_unset_se_debug();
+


[11/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psposix.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psposix.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psposix.py
new file mode 100644
index 0000000..1188c9f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psposix.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Routines common to all posix systems."""
+
+import errno
+import glob
+import os
+import sys
+import time
+
+from psutil._common import sdiskusage, usage_percent, memoize
+from psutil._compat import PY3, unicode
+
+
+class TimeoutExpired(Exception):
+    pass
+
+
+def pid_exists(pid):
+    """Check whether pid exists in the current process table."""
+    if pid == 0:
+        # According to "man 2 kill" PID 0 has a special meaning:
+        # it refers to <<every process in the process group of the
+        # calling process>> so we don't want to go any further.
+        # If we get here it means this UNIX platform *does* have
+        # a process with id 0.
+        return True
+    try:
+        os.kill(pid, 0)
+    except OSError:
+        err = sys.exc_info()[1]
+        if err.errno == errno.ESRCH:
+            # ESRCH == No such process
+            return False
+        elif err.errno == errno.EPERM:
+            # EPERM clearly means there's a process to deny access to
+            return True
+        else:
+            # According to "man 2 kill" possible error values are
+            # (EINVAL, EPERM, ESRCH) therefore we should never get
+            # here. If we do let's be explicit in considering this
+            # an error.
+            raise err
+    else:
+        return True
+
+
+def wait_pid(pid, timeout=None):
+    """Wait for process with pid 'pid' to terminate and return its
+    exit status code as an integer.
+
+    If pid is not a child of os.getpid() (the current process), just
+    wait until the process disappears and return None.
+
+    If pid does not exist at all, return None immediately.
+
+    Raise TimeoutExpired if the timeout expires.
+    """
+    def check_timeout(delay):
+        if timeout is not None:
+            if timer() >= stop_at:
+                raise TimeoutExpired()
+        time.sleep(delay)
+        return min(delay * 2, 0.04)
+
+    timer = getattr(time, 'monotonic', time.time)
+    if timeout is not None:
+        waitcall = lambda: os.waitpid(pid, os.WNOHANG)
+        stop_at = timer() + timeout
+    else:
+        waitcall = lambda: os.waitpid(pid, 0)
+
+    delay = 0.0001
+    while 1:
+        try:
+            retpid, status = waitcall()
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.EINTR:
+                delay = check_timeout(delay)
+                continue
+            elif err.errno == errno.ECHILD:
+                # This has two meanings:
+                # - pid is not a child of os.getpid() in which case
+                #   we keep polling until it's gone
+                # - pid never existed in the first place
+                # In both cases we'll eventually return None as we
+                # can't determine its exit status code.
+                while 1:
+                    if pid_exists(pid):
+                        delay = check_timeout(delay)
+                    else:
+                        return
+            else:
+                raise
+        else:
+            if retpid == 0:
+                # WNOHANG was used, pid is still running
+                delay = check_timeout(delay)
+                continue
+            # process exited due to a signal; return the integer of
+            # that signal
+            if os.WIFSIGNALED(status):
+                return os.WTERMSIG(status)
+            # process exited using exit(2) system call; return the
+            # integer exit(2) system call has been called with
+            elif os.WIFEXITED(status):
+                return os.WEXITSTATUS(status)
+            else:
+                # should never happen
+                raise RuntimeError("unknown process exit status")
+
+
+def disk_usage(path):
+    """Return disk usage associated with path."""
+    try:
+        st = os.statvfs(path)
+    except UnicodeEncodeError:
+        if not PY3 and isinstance(path, unicode):
+            # this is a bug with os.statvfs() and unicode on
+            # Python 2, see:
+            # - https://code.google.com/p/psutil/issues/detail?id=416
+            # - http://bugs.python.org/issue18695
+            try:
+                path = path.encode(sys.getfilesystemencoding())
+            except UnicodeEncodeError:
+                pass
+            st = os.statvfs(path)
+        else:
+            raise
+    free = (st.f_bavail * st.f_frsize)
+    total = (st.f_blocks * st.f_frsize)
+    used = (st.f_blocks - st.f_bfree) * st.f_frsize
+    percent = usage_percent(used, total, _round=1)
+    # NB: the percentage is about 5% lower than what is shown by df
+    # due to reserved blocks that we are currently not considering:
+    # http://goo.gl/sWGbH
+    return sdiskusage(total, used, free, percent)
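+
+# Illustrative sketch (not part of the original module): typical use of
+# the helpers above on a POSIX system; the PID value is hypothetical.
+#
+#     if pid_exists(1234):
+#         status = wait_pid(1234, timeout=5)  # may raise TimeoutExpired
+#     usage = disk_usage('/')  # -> sdiskusage(total, used, free, percent)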
+
+
+@memoize
+def _get_terminal_map():
+    ret = {}
+    ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
+    for name in ls:
+        assert name not in ret
+        try:
+            ret[os.stat(name).st_rdev] = name
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno != errno.ENOENT:
+                raise
+    return ret

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pssunos.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pssunos.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pssunos.py
new file mode 100644
index 0000000..bc18427
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pssunos.py
@@ -0,0 +1,533 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Sun OS Solaris platform implementation."""
+
+import errno
+import os
+import socket
+import subprocess
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import (conn_tmap, usage_percent, isfile_strict)
+from psutil._compat import namedtuple, PY3
+import _psutil_posix
+import _psutil_sunos as cext
+
+
+__extra__all__ = ["CONN_IDLE", "CONN_BOUND"]
+
+PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
+
+CONN_IDLE = "IDLE"
+CONN_BOUND = "BOUND"
+
+PROC_STATUSES = {
+    cext.SSLEEP: _common.STATUS_SLEEPING,
+    cext.SRUN: _common.STATUS_RUNNING,
+    cext.SZOMB: _common.STATUS_ZOMBIE,
+    cext.SSTOP: _common.STATUS_STOPPED,
+    cext.SIDL: _common.STATUS_IDLE,
+    cext.SONPROC: _common.STATUS_RUNNING,  # same as run
+    cext.SWAIT: _common.STATUS_WAITING,
+}
+
+TCP_STATUSES = {
+    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
+    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.TCPS_CLOSED: _common.CONN_CLOSE,
+    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.TCPS_LISTEN: _common.CONN_LISTEN,
+    cext.TCPS_CLOSING: _common.CONN_CLOSING,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+    cext.TCPS_IDLE: CONN_IDLE,  # sunos specific
+    cext.TCPS_BOUND: CONN_BOUND,  # sunos specific
+}
+
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+pextmem = namedtuple('pextmem', ['rss', 'vms'])
+pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked'])
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+# --- functions
+
+disk_io_counters = cext.disk_io_counters
+net_io_counters = cext.net_io_counters
+disk_usage = _psposix.disk_usage
+
+
+def virtual_memory():
+    # we could have done this with kstat, but imho this is good enough
+    total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
+    # note: there's no difference on Solaris
+    free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return svmem(total, avail, percent, used, free)
+
+
+def swap_memory():
+    sin, sout = cext.swap_mem()
+    # XXX
+    # we are supposed to get total/free by doing so:
+    # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
+    #     usr/src/cmd/swap/swap.c
+    # ...nevertheless I can't manage to obtain the same numbers as 'swap'
+    # cmdline utility, so let's parse its output (sigh!)
+    p = subprocess.Popen(['swap', '-l', '-k'], stdout=subprocess.PIPE)
+    stdout, stderr = p.communicate()
+    if PY3:
+        stdout = stdout.decode(sys.stdout.encoding)
+    if p.returncode != 0:
+        raise RuntimeError("'swap -l -k' failed (retcode=%s)" % p.returncode)
+
+    lines = stdout.strip().split('\n')[1:]
+    if not lines:
+        raise RuntimeError('no swap device(s) configured')
+    total = free = 0
+    for line in lines:
+        line = line.split()
+        t, f = line[-2:]
+        t = t.replace('K', '')
+        f = f.replace('K', '')
+        total += int(int(t) * 1024)
+        free += int(int(f) * 1024)
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent,
+                       sin * PAGE_SIZE, sout * PAGE_SIZE)
+
+
+def pids():
+    """Returns a list of PIDs currently running on the system."""
+    return [int(x) for x in os.listdir('/proc') if x.isdigit()]
+
+
+def pid_exists(pid):
+    """Check for the existence of a unix pid."""
+    return _psposix.pid_exists(pid)
+
+
+def cpu_times():
+    """Return system-wide CPU times as a named tuple"""
+    ret = cext.per_cpu_times()
+    return scputimes(*[sum(x) for x in zip(*ret)])
+
+
+def per_cpu_times():
+    """Return system per-CPU times as a list of named tuples"""
+    ret = cext.per_cpu_times()
+    return [scputimes(*x) for x in ret]
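+
+# Illustrative sketch (not part of the original module): cpu_times()
+# simply sums the per-CPU tuples column-wise, e.g. for two CPUs:
+#
+#     >>> ret = [(1.0, 0.5, 8.0, 0.0), (2.0, 0.5, 7.5, 0.0)]
+#     >>> scputimes(*[sum(x) for x in zip(*ret)])
+#     scputimes(user=3.0, system=1.0, idle=15.5, iowait=0.0)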
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    try:
+        return os.sysconf("SC_NPROCESSORS_ONLN")
+    except ValueError:
+        # mimic os.cpu_count() behavior
+        return None
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    return cext.cpu_count_phys()
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    localhost = (':0.0', ':0')
+    for item in rawlist:
+        user, tty, hostname, tstamp, user_process = item
+        # note: the underlying C function includes entries about
+        # system boot, run level and others.  We might want
+        # to use them in the future.
+        if not user_process:
+            continue
+        if hostname in localhost:
+            hostname = 'localhost'
+        nt = _common.suser(user, tty, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def disk_partitions(all=False):
+    """Return system disk partitions."""
+    # TODO - the filtering logic should be better checked so that
+    # it tries to reflect 'df' as much as possible
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            # Differently from, say, Linux, we don't have a list of
+            # common fs types so the best we can do, AFAIK, is to
+            # filter by filesystem having a total size > 0.
+            if not disk_usage(mountpoint).total:
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+def net_connections(kind, _pid=-1):
+    """Return socket connections.  If pid == -1 return system-wide
+    connections (as opposed to connections opened by one process only).
+    Only INET sockets are returned (UNIX are not).
+    """
+    cmap = _common.conn_tmap.copy()
+    if _pid == -1:
+        cmap.pop('unix', 0)
+    if kind not in cmap:
+        raise ValueError("invalid %r kind argument; choose between %s"
+                         % (kind, ', '.join([repr(x) for x in cmap])))
+    families, types = _common.conn_tmap[kind]
+    rawlist = cext.net_connections(_pid, families, types)
+    ret = []
+    for item in rawlist:
+        fd, fam, type_, laddr, raddr, status, pid = item
+        if fam not in families:
+            continue
+        if type_ not in types:
+            continue
+        status = TCP_STATUSES[status]
+        if _pid == -1:
+            nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
+        else:
+            nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
+        ret.append(nt)
+    return ret
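+
+# Illustrative sketch (not part of the original module): fetching
+# system-wide TCP connections; all field values below are hypothetical.
+#
+#     >>> net_connections('tcp')[0]
+#     sconn(fd=-1, family=2, type=1, laddr=('10.0.0.5', 22),
+#           raddr=('10.0.0.9', 51234), status='ESTABLISHED', pid=1234)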
+
+
+def wrap_exceptions(fun):
+    """Call callable into a try/except clause and translate ENOENT,
+    EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
+    """
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except EnvironmentError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            # ENOENT (no such file or directory) gets raised on open().
+            # ESRCH (no such process) can get raised on read() if the
+            # process is gone in the meantime.
+            err = sys.exc_info()[1]
+            if err.errno in (errno.ENOENT, errno.ESRCH):
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        # note: max len == 15
+        return cext.proc_name_and_args(self.pid)[0]
+
+    @wrap_exceptions
+    def exe(self):
+        # The exe is guessed later from cmdline, but we want to
+        # explicitly invoke cmdline() here in order to get an
+        # AccessDenied exception if the user does not have enough
+        # privileges.
+        self.cmdline()
+        return ""
+
+    @wrap_exceptions
+    def cmdline(self):
+        return cext.proc_name_and_args(self.pid)[1].split(' ')
+
+    @wrap_exceptions
+    def create_time(self):
+        return cext.proc_basic_info(self.pid)[3]
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_basic_info(self.pid)[5]
+
+    @wrap_exceptions
+    def nice_get(self):
+        # For some reason getpriority(3) returns ESRCH (no such process)
+        # for certain low-pid processes, no matter what (even as root).
+        # The process actually exists though, as it has a name,
+        # creation time, etc.
+        # The best thing we can do here appears to be raising
+        # AccessDenied.
+        # Note: tested on Solaris 11; on OpenSolaris 5 everything is
+        # fine.
+        try:
+            return _psutil_posix.getpriority(self.pid)
+        except EnvironmentError:
+            err = sys.exc_info()[1]
+            if err.errno in (errno.ENOENT, errno.ESRCH):
+                if pid_exists(self.pid):
+                    raise AccessDenied(self.pid, self._name)
+            raise
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        if self.pid in (2, 3):
+            # Special case PIDs: internally setpriority(3) returns ESRCH
+            # (no such process), no matter what.
+            # The process actually exists though, as it has a name,
+            # creation time, etc.
+            raise AccessDenied(self.pid, self._name)
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def ppid(self):
+        return cext.proc_basic_info(self.pid)[0]
+
+    @wrap_exceptions
+    def uids(self):
+        real, effective, saved, _, _, _ = cext.proc_cred(self.pid)
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        _, _, _, real, effective, saved = cext.proc_cred(self.pid)
+        return _common.pgids(real, effective, saved)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        user, system = cext.proc_cpu_times(self.pid)
+        return _common.pcputimes(user, system)
+
+    @wrap_exceptions
+    def terminal(self):
+        hit_enoent = False
+        tty = cext.proc_basic_info(self.pid)[0]
+        if tty != cext.PRNODEV:
+            for x in (0, 1, 2, 255):
+                try:
+                    return os.readlink('/proc/%d/path/%d' % (self.pid, x))
+                except OSError:
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+
+    @wrap_exceptions
+    def cwd(self):
+        # /proc/PID/path/cwd may not be resolved by readlink() even if
+        # it exists (ls shows it). If that's the case and the process
+        # is still alive return None (we can return None also on BSD).
+        # Reference: http://goo.gl/55XgO
+        try:
+            return os.readlink("/proc/%s/path/cwd" % self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.ENOENT:
+                os.stat("/proc/%s" % self.pid)
+                return None
+            raise
+
+    @wrap_exceptions
+    def memory_info(self):
+        ret = cext.proc_basic_info(self.pid)
+        rss, vms = ret[1] * 1024, ret[2] * 1024
+        return _common.pmem(rss, vms)
+
+    # it seems Solaris uses rss and vms only
+    memory_info_ex = memory_info
+
+    @wrap_exceptions
+    def status(self):
+        code = cext.proc_basic_info(self.pid)[6]
+        # XXX is '?' legit? (we're not supposed to return it anyway)
+        return PROC_STATUSES.get(code, '?')
+
+    @wrap_exceptions
+    def threads(self):
+        ret = []
+        tids = os.listdir('/proc/%d/lwp' % self.pid)
+        hit_enoent = False
+        for tid in tids:
+            tid = int(tid)
+            try:
+                utime, stime = cext.query_process_thread(
+                    self.pid, tid)
+            except EnvironmentError:
+                # ENOENT == thread gone in the meantime
+                err = sys.exc_info()[1]
+                if err.errno == errno.ENOENT:
+                    hit_enoent = True
+                    continue
+                raise
+            else:
+                nt = _common.pthread(tid, utime, stime)
+                ret.append(nt)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return ret
+
+    @wrap_exceptions
+    def open_files(self):
+        retlist = []
+        hit_enoent = False
+        pathdir = '/proc/%d/path' % self.pid
+        for fd in os.listdir('/proc/%d/fd' % self.pid):
+            path = os.path.join(pathdir, fd)
+            if os.path.islink(path):
+                try:
+                    file = os.readlink(path)
+                except OSError:
+                    # ENOENT == file disappeared in the meantime
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+                else:
+                    if isfile_strict(file):
+                        retlist.append(_common.popenfile(file, int(fd)))
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    def _get_unix_sockets(self, pid):
+        """Get UNIX sockets used by process by parsing 'pfiles' output."""
+        # TODO: rewrite this in C (...but the damn netstat source code
+        # does not include this part! Argh!!)
+        cmd = "pfiles %s" % pid
+        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        if PY3:
+            stdout, stderr = [x.decode(sys.stdout.encoding)
+                              for x in (stdout, stderr)]
+        if p.returncode != 0:
+            if 'permission denied' in stderr.lower():
+                raise AccessDenied(self.pid, self._name)
+            if 'no such process' in stderr.lower():
+                raise NoSuchProcess(self.pid, self._name)
+            raise RuntimeError("%r command error\n%s" % (cmd, stderr))
+
+        lines = stdout.split('\n')[2:]
+        for i, line in enumerate(lines):
+            line = line.lstrip()
+            if line.startswith('sockname: AF_UNIX'):
+                path = line.split(' ', 2)[2]
+                type = lines[i - 2].strip()
+                if type == 'SOCK_STREAM':
+                    type = socket.SOCK_STREAM
+                elif type == 'SOCK_DGRAM':
+                    type = socket.SOCK_DGRAM
+                else:
+                    type = -1
+                yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
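+
+    # The parser above expects 'pfiles' blocks shaped roughly like this
+    # (illustrative), with the socket type two lines above 'sockname:':
+    #
+    #   4: S_IFSOCK mode:0666 dev:551,0 ino:48639 uid:0 gid:0 size:0
+    #        SOCK_STREAM
+    #        SO_REUSEADDR
+    #        sockname: AF_UNIX /var/run/dbus/system_bus_socket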
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        ret = net_connections(kind, _pid=self.pid)
+        # The underlying C implementation retrieves all OS connections
+        # and filters them by PID.  At this point we can't tell whether
+        # an empty list means there were no connections for the process
+        # or the process is no longer active, so we force NSP in case
+        # the PID is no longer there.
+        if not ret:
+            os.stat('/proc/%s' % self.pid)  # will raise NSP if process is gone
+
+        # UNIX sockets
+        if kind in ('all', 'unix'):
+            ret.extend([_common.pconn(*conn) for conn in
+                        self._get_unix_sockets(self.pid)])
+        return ret
+
+    nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
+    nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
+
+    @wrap_exceptions
+    def memory_maps(self):
+        def toaddr(start, end):
+            return '%s-%s' % (hex(start)[2:].strip('L'),
+                              hex(end)[2:].strip('L'))
+
+        retlist = []
+        rawlist = cext.proc_memory_maps(self.pid)
+        hit_enoent = False
+        for item in rawlist:
+            addr, addrsize, perm, name, rss, anon, locked = item
+            addr = toaddr(addr, addrsize)
+            if not name.startswith('['):
+                try:
+                    name = os.readlink('/proc/%s/path/%s' % (self.pid, name))
+                except OSError:
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        # sometimes the link may not be resolved by
+                        # readlink() even if it exists (ls shows it).
+                        # If that's the case we just return the
+                        # unresolved link path.
+                        # This seems to be an inconsistency with /proc
+                        # similar to: http://goo.gl/55XgO
+                        name = '/proc/%s/path/%s' % (self.pid, name)
+                        hit_enoent = True
+                    else:
+                        raise
+            retlist.append((addr, perm, name, rss, anon, locked))
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    @wrap_exceptions
+    def num_fds(self):
+        return len(os.listdir("/proc/%s/fd" % self.pid))
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
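+
+# Minimal usage sketch (illustrative; this class is normally driven
+# through the public psutil.Process wrapper):
+#
+#     >>> import os
+#     >>> p = Process(os.getpid())
+#     >>> p.name(), p.num_threads()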

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pswindows.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pswindows.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pswindows.py
new file mode 100644
index 0000000..1a786f1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pswindows.py
@@ -0,0 +1,485 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Windows platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil._common import conn_tmap, usage_percent, isfile_strict
+from psutil._compat import PY3, xrange, wraps, lru_cache, namedtuple
+import _psutil_windows as cext
+
+# process priority constants, imported by __init__.py:
+# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
+__extra__all__ = ["ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
+                  "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
+                  "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS",
+                  #
+                  "CONN_DELETE_TCB",
+                  ]
+
+# --- module level constants (get pushed up to the psutil module)
+
+CONN_DELETE_TCB = "DELETE_TCB"
+WAIT_TIMEOUT = 0x00000102  # 258 in decimal
+ACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,
+                               cext.ERROR_ACCESS_DENIED])
+
+TCP_STATUSES = {
+    cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
+    cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
+    cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
+    cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
+    cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
+    cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
+    cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
+    cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle'])
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+pextmem = namedtuple(
+    'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
+                'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
+                'pagefile', 'peak_pagefile', 'private'])
+pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+@lru_cache(maxsize=512)
+def _win32_QueryDosDevice(s):
+    return cext.win32_QueryDosDevice(s)
+
+
+def _convert_raw_path(s):
+    # Convert a path in native DOS format, e.g.
+    # "\Device\HarddiskVolume1\Windows\system32\file.txt"
+    # into: "C:\Windows\system32\file.txt"
+    if PY3 and not isinstance(s, str):
+        s = s.decode('utf8')
+    rawdrive = '\\'.join(s.split('\\')[:3])
+    driveletter = _win32_QueryDosDevice(rawdrive)
+    return os.path.join(driveletter, s[len(rawdrive):])
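+
+# Illustrative example (the actual drive mapping depends on the machine):
+#
+#     >>> _convert_raw_path(r"\Device\HarddiskVolume1\Windows\notepad.exe")
+#     'C:\\Windows\\notepad.exe'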
+
+
+# --- public functions
+
+
+def virtual_memory():
+    """System virtual memory as a namedtuple."""
+    mem = cext.virtual_mem()
+    totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
+    #
+    total = totphys
+    avail = availphys
+    free = availphys
+    used = total - avail
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free)
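+
+# Example output (illustrative values):
+#
+#     >>> virtual_memory()
+#     svmem(total=8589934592, available=3221225472, percent=62.5,
+#           used=5368709120, free=3221225472)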
+
+
+def swap_memory():
+    """Swap system memory as a (total, used, free, sin, sout) tuple."""
+    mem = cext.virtual_mem()
+    total = mem[2]
+    free = mem[3]
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent, 0, 0)
+
+
+def disk_usage(path):
+    """Return disk usage associated with path."""
+    try:
+        total, free = cext.disk_usage(path)
+    except WindowsError:
+        if not os.path.exists(path):
+            msg = "No such file or directory: '%s'" % path
+            raise OSError(errno.ENOENT, msg)
+        raise
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return _common.sdiskusage(total, used, free, percent)
+
+
+def disk_partitions(all):
+    """Return disk partitions."""
+    rawlist = cext.disk_partitions(all)
+    return [_common.sdiskpart(*x) for x in rawlist]
+
+
+def cpu_times():
+    """Return system CPU times as a named tuple."""
+    user, system, idle = cext.cpu_times()
+    return scputimes(user, system, idle)
+
+
+def per_cpu_times():
+    """Return system per-CPU times as a list of named tuples."""
+    ret = []
+    for cpu_t in cext.per_cpu_times():
+        user, system, idle = cpu_t
+        item = scputimes(user, system, idle)
+        ret.append(item)
+    return ret
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    return cext.cpu_count_logical()
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    return cext.cpu_count_phys()
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def net_connections(kind, _pid=-1):
+    """Return socket connections.  If pid == -1 return system-wide
+    connections (as opposed to connections opened by one process only).
+    """
+    if kind not in conn_tmap:
+        raise ValueError("invalid %r kind argument; choose between %s"
+                         % (kind, ', '.join([repr(x) for x in conn_tmap])))
+    families, types = conn_tmap[kind]
+    rawlist = cext.net_connections(_pid, families, types)
+    ret = []
+    for item in rawlist:
+        fd, fam, type, laddr, raddr, status, pid = item
+        status = TCP_STATUSES[status]
+        if _pid == -1:
+            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
+        else:
+            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
+        ret.append(nt)
+    return ret
+
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, hostname, tstamp = item
+        nt = _common.suser(user, None, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+pids = cext.pids
+pid_exists = cext.pid_exists
+net_io_counters = cext.net_io_counters
+disk_io_counters = cext.disk_io_counters
+ppid_map = cext.ppid_map  # not meant to be public
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError and WindowsError
+    exceptions into NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except OSError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                raise AccessDenied(self.pid, self._name)
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        """Return process name, which on Windows is always the final
+        part of the executable.
+        """
+        # This is how PIDs 0 and 4 are always represented in taskmgr
+        # and process-hacker.
+        if self.pid == 0:
+            return "System Idle Process"
+        elif self.pid == 4:
+            return "System"
+        else:
+            return os.path.basename(self.exe())
+
+    @wrap_exceptions
+    def exe(self):
+        # Note: os.path.exists(path) may return False even if the file
+        # is there, see:
+        # http://stackoverflow.com/questions/3112546/os-path-exists-lies
+        return _convert_raw_path(cext.proc_exe(self.pid))
+
+    @wrap_exceptions
+    def cmdline(self):
+        return cext.proc_cmdline(self.pid)
+
+    def ppid(self):
+        try:
+            return ppid_map()[self.pid]
+        except KeyError:
+            raise NoSuchProcess(self.pid, self._name)
+
+    def _get_raw_meminfo(self):
+        try:
+            return cext.proc_memory_info(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                return cext.proc_memory_info_2(self.pid)
+            raise
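+
+    # Note: the proc_*_2() variants used here and below are fallbacks
+    # the C module provides for processes we are not allowed to open
+    # with full access rights.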
+
+    @wrap_exceptions
+    def memory_info(self):
+        # on Windows RSS == WorkingSetSize and VMS == PagefileUsage
+        # fields of PROCESS_MEMORY_COUNTERS struct:
+        # http://msdn.microsoft.com/en-us/library/windows/desktop/
+        #     ms684877(v=vs.85).aspx
+        t = self._get_raw_meminfo()
+        return _common.pmem(t[2], t[7])
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        return pextmem(*self._get_raw_meminfo())
+
+    def memory_maps(self):
+        try:
+            raw = cext.proc_memory_maps(self.pid)
+        except OSError:
+            # XXX - can't use wrap_exceptions decorator as we're
+            # returning a generator; probably needs refactoring.
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                raise AccessDenied(self.pid, self._name)
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            raise
+        else:
+            for addr, perm, path, rss in raw:
+                path = _convert_raw_path(path)
+                addr = hex(addr)
+                yield (addr, perm, path, rss)
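+
+    # memory_maps() is a generator; callers typically materialize it,
+    # e.g. (illustrative): list(Process(pid).memory_maps())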
+
+    @wrap_exceptions
+    def kill(self):
+        return cext.proc_kill(self.pid)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        if timeout is None:
+            timeout = cext.INFINITE
+        else:
+            # WaitForSingleObject() expects time in milliseconds
+            timeout = int(timeout * 1000)
+        ret = cext.proc_wait(self.pid, timeout)
+        if ret == WAIT_TIMEOUT:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise RuntimeError("timeout expired")
+            raise TimeoutExpired(timeout, self.pid, self._name)
+        return ret
+
+    @wrap_exceptions
+    def username(self):
+        if self.pid in (0, 4):
+            return 'NT AUTHORITY\\SYSTEM'
+        return cext.proc_username(self.pid)
+
+    @wrap_exceptions
+    def create_time(self):
+        # special case for kernel process PIDs; return system boot time
+        if self.pid in (0, 4):
+            return boot_time()
+        try:
+            return cext.proc_create_time(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                return cext.proc_create_time_2(self.pid)
+            raise
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_num_threads(self.pid)
+
+    @wrap_exceptions
+    def threads(self):
+        rawlist = cext.proc_threads(self.pid)
+        retlist = []
+        for thread_id, utime, stime in rawlist:
+            ntuple = _common.pthread(thread_id, utime, stime)
+            retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def cpu_times(self):
+        try:
+            ret = cext.proc_cpu_times(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                ret = cext.proc_cpu_times_2(self.pid)
+            else:
+                raise
+        return _common.pcputimes(*ret)
+
+    @wrap_exceptions
+    def suspend(self):
+        return cext.proc_suspend(self.pid)
+
+    @wrap_exceptions
+    def resume(self):
+        return cext.proc_resume(self.pid)
+
+    @wrap_exceptions
+    def cwd(self):
+        if self.pid in (0, 4):
+            raise AccessDenied(self.pid, self._name)
+        # return a normalized pathname since the native C function appends
+        # "\\" at the end of the path
+        path = cext.proc_cwd(self.pid)
+        return os.path.normpath(path)
+
+    @wrap_exceptions
+    def open_files(self):
+        if self.pid in (0, 4):
+            return []
+        retlist = []
+        # Filenames come in native format like:
+        # "\Device\HarddiskVolume1\Windows\system32\file.txt"
+        # Convert the first part into the corresponding drive letter
+        # (e.g. "C:\") by using Windows' QueryDosDevice().
+        raw_file_names = cext.proc_open_files(self.pid)
+        for file in raw_file_names:
+            file = _convert_raw_path(file)
+            if isfile_strict(file) and file not in retlist:
+                ntuple = _common.popenfile(file, -1)
+                retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        return net_connections(kind, _pid=self.pid)
+
+    @wrap_exceptions
+    def nice_get(self):
+        return cext.proc_priority_get(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return cext.proc_priority_set(self.pid, value)
+
+    # available on Windows >= Vista
+    if hasattr(cext, "proc_io_priority_get"):
+        @wrap_exceptions
+        def ionice_get(self):
+            return cext.proc_io_priority_get(self.pid)
+
+        @wrap_exceptions
+        def ionice_set(self, value, _):
+            if _:
+                raise TypeError("set_proc_ionice() on Windows takes only "
+                                "1 argument (2 given)")
+            if value not in (2, 1, 0):
+                raise ValueError("value must be 2 (normal), 1 (low) or 0 "
+                                 "(very low); got %r" % value)
+            return cext.proc_io_priority_set(self.pid, value)
+
+    @wrap_exceptions
+    def io_counters(self):
+        try:
+            ret = cext.proc_io_counters(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                ret = cext.proc_io_counters_2(self.pid)
+            else:
+                raise
+        return _common.pio(*ret)
+
+    @wrap_exceptions
+    def status(self):
+        suspended = cext.proc_is_suspended(self.pid)
+        if suspended:
+            return _common.STATUS_STOPPED
+        else:
+            return _common.STATUS_RUNNING
+
+    @wrap_exceptions
+    def cpu_affinity_get(self):
+        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
+        bitmask = cext.proc_cpu_affinity_get(self.pid)
+        return from_bitmask(bitmask)
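+
+    # e.g. a bitmask of 5 (0b101) maps to CPUs [0, 2]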
+
+    @wrap_exceptions
+    def cpu_affinity_set(self, value):
+        def to_bitmask(l):
+            if not l:
+                raise ValueError("invalid argument %r" % l)
+            out = 0
+            for b in l:
+                out |= 2 ** b
+            return out
+
+        # The docs for SetProcessAffinityMask() state that
+        # ERROR_INVALID_PARAMETER is returned for an invalid CPU, but
+        # this seems not to be true; therefore we check CPU validity
+        # beforehand.
+        allcpus = list(range(len(per_cpu_times())))
+        for cpu in value:
+            if cpu not in allcpus:
+                raise ValueError("invalid CPU %r" % cpu)
+
+        bitmask = to_bitmask(value)
+        cext.proc_cpu_affinity_set(self.pid, bitmask)
+
+    @wrap_exceptions
+    def num_handles(self):
+        try:
+            return cext.proc_num_handles(self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno in ACCESS_DENIED_SET:
+                return cext.proc_num_handles_2(self.pid)
+            raise
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        tupl = cext.proc_num_ctx_switches(self.pid)
+        return _common.pctxsw(*tupl)

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/Makefile
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/Makefile b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/Makefile
new file mode 100644
index 0000000..b23ab4b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/psutil.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/psutil.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/psutil"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/psutil"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/README
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/README b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/README
new file mode 100644
index 0000000..9e2f36e
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/README
@@ -0,0 +1,15 @@
+About
+=====
+
+This directory contains the reStructuredText (reST) sources to the psutil
+documentation.  You don't need to build them yourself; prebuilt versions
+are available at http://psutil.readthedocs.org/en/latest/.
+In case you want to build them yourself, you need to install Sphinx first:
+
+    $ pip install sphinx
+
+Then run:
+
+    $ make html
+
+You'll then have an HTML version of the doc at _build/html/index.html.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/copybutton.js
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/copybutton.js b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/copybutton.js
new file mode 100644
index 0000000..5d82c67
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/copybutton.js
@@ -0,0 +1,57 @@
+$(document).ready(function() {
+    /* Add a [>>>] button on the top-right corner of code samples to hide
+     * the >>> and ... prompts and the output and thus make the code
+     * copyable. */
+    var div = $('.highlight-python .highlight,' +
+                '.highlight-python3 .highlight')
+    var pre = div.find('pre');
+
+    // get the styles from the current theme
+    pre.parent().parent().css('position', 'relative');
+    var hide_text = 'Hide the prompts and output';
+    var show_text = 'Show the prompts and output';
+    var border_width = pre.css('border-top-width');
+    var border_style = pre.css('border-top-style');
+    var border_color = pre.css('border-top-color');
+    var button_styles = {
+        'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0',
+        'border-color': border_color, 'border-style': border_style,
+        'border-width': border_width, 'color': border_color, 'text-size': '75%',
+        'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em',
+        'border-radius': '0 3px 0 0'
+    }
+
+    // create and add the button to all the code blocks that contain >>>
+    div.each(function(index) {
+        var jthis = $(this);
+        if (jthis.find('.gp').length > 0) {
+            var button = $('<span class="copybutton">&gt;&gt;&gt;</span>');
+            button.css(button_styles)
+            button.attr('title', hide_text);
+            jthis.prepend(button);
+        }
+        // tracebacks (.gt) contain bare text elements that need to be
+        // wrapped in a span to work with .nextUntil() (see later)
+        jthis.find('pre:has(.gt)').contents().filter(function() {
+            return ((this.nodeType == 3) && (this.data.trim().length > 0));
+        }).wrap('<span>');
+    });
+
+    // define the behavior of the button when it's clicked
+    $('.copybutton').toggle(
+        function() {
+            var button = $(this);
+            button.parent().find('.go, .gp, .gt').hide();
+            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
+            button.css('text-decoration', 'line-through');
+            button.attr('title', show_text);
+        },
+        function() {
+            var button = $(this);
+            button.parent().find('.go, .gp, .gt').show();
+            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
+            button.css('text-decoration', 'none');
+            button.attr('title', hide_text);
+        });
+});
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/sidebar.js
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/sidebar.js b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/sidebar.js
new file mode 100644
index 0000000..3376963
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_static/sidebar.js
@@ -0,0 +1,161 @@
+/*
+ * sidebar.js
+ * ~~~~~~~~~~
+ *
+ * This script makes the Sphinx sidebar collapsible.
+ *
+ * .sphinxsidebar contains .sphinxsidebarwrapper.  This script adds in
+ * .sphinxsidebar, after .sphinxsidebarwrapper, the #sidebarbutton used to
+ * collapse and expand the sidebar.
+ *
+ * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden and the
+ * width of the sidebar and the margin-left of the document are decreased.
+ * When the sidebar is expanded the opposite happens.  This script saves a
+ * per-browser/per-session cookie used to remember the position of the sidebar
+ * among the pages.  Once the browser is closed the cookie is deleted and the
+ * position reset to the default (expanded).
+ *
+ * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+$(function() {
+  // global elements used by the functions.
+  // the 'sidebarbutton' element is defined as global after its
+  // creation, in the add_sidebar_button function
+  var bodywrapper = $('.bodywrapper');
+  var sidebar = $('.sphinxsidebar');
+  var sidebarwrapper = $('.sphinxsidebarwrapper');
+
+  // original margin-left of the bodywrapper and width of the sidebar
+  // with the sidebar expanded
+  var bw_margin_expanded = bodywrapper.css('margin-left');
+  var ssb_width_expanded = sidebar.width();
+
+  // margin-left of the bodywrapper and width of the sidebar
+  // with the sidebar collapsed
+  var bw_margin_collapsed = '.8em';
+  var ssb_width_collapsed = '.8em';
+
+  // colors used by the current theme
+  var dark_color = '#AAAAAA';
+  var light_color = '#CCCCCC';
+
+  function sidebar_is_collapsed() {
+    return sidebarwrapper.is(':not(:visible)');
+  }
+
+  function toggle_sidebar() {
+    if (sidebar_is_collapsed())
+      expand_sidebar();
+    else
+      collapse_sidebar();
+  }
+
+  function collapse_sidebar() {
+    sidebarwrapper.hide();
+    sidebar.css('width', ssb_width_collapsed);
+    bodywrapper.css('margin-left', bw_margin_collapsed);
+    sidebarbutton.css({
+        'margin-left': '0',
+        //'height': bodywrapper.height(),
+        'height': sidebar.height(),
+        'border-radius': '5px'
+    });
+    sidebarbutton.find('span').text('»');
+    sidebarbutton.attr('title', _('Expand sidebar'));
+    document.cookie = 'sidebar=collapsed';
+  }
+
+  function expand_sidebar() {
+    bodywrapper.css('margin-left', bw_margin_expanded);
+    sidebar.css('width', ssb_width_expanded);
+    sidebarwrapper.show();
+    sidebarbutton.css({
+        'margin-left': ssb_width_expanded-12,
+        //'height': bodywrapper.height(),
+        'height': sidebar.height(),
+        'border-radius': '0 5px 5px 0'
+    });
+    sidebarbutton.find('span').text('«');
+    sidebarbutton.attr('title', _('Collapse sidebar'));
+    //sidebarwrapper.css({'padding-top':
+    //  Math.max(window.pageYOffset - sidebarwrapper.offset().top, 10)});
+    document.cookie = 'sidebar=expanded';
+  }
+
+  function add_sidebar_button() {
+    sidebarwrapper.css({
+        'float': 'left',
+        'margin-right': '0',
+        'width': ssb_width_expanded - 28
+    });
+    // create the button
+    sidebar.append(
+      '<div id="sidebarbutton"><span>&laquo;</span></div>'
+    );
+    var sidebarbutton = $('#sidebarbutton');
+    // find the height of the viewport to center the '<<' in the page
+    var viewport_height;
+    if (window.innerHeight)
+      viewport_height = window.innerHeight;
+    else
+      viewport_height = $(window).height();
+    var sidebar_offset = sidebar.offset().top;
+
+    var sidebar_height = sidebar.height();
+    //var sidebar_height = Math.max(bodywrapper.height(), sidebar.height());
+    sidebarbutton.find('span').css({
+        'display': 'block',
+        'margin-top': sidebar_height/2 - 10
+        //'margin-top': (viewport_height - sidebar.position().top - 20) / 2
+        //'position': 'fixed',
+        //'top': Math.min(viewport_height/2, sidebar_height/2 + sidebar_offset) - 10
+    });
+
+    sidebarbutton.click(toggle_sidebar);
+    sidebarbutton.attr('title', _('Collapse sidebar'));
+    sidebarbutton.css({
+        'border-radius': '0 5px 5px 0',
+        'color': '#444444',
+        'background-color': '#CCCCCC',
+        'font-size': '1.2em',
+        'cursor': 'pointer',
+        'height': sidebar_height,
+        'padding-top': '1px',
+        'padding-left': '1px',
+        'margin-left': ssb_width_expanded - 12
+    });
+
+    sidebarbutton.hover(
+      function () {
+          $(this).css('background-color', dark_color);
+      },
+      function () {
+          $(this).css('background-color', light_color);
+      }
+    );
+  }
+
+  function set_position_from_cookie() {
+    if (!document.cookie)
+      return;
+    var items = document.cookie.split(';');
+    for(var k=0; k<items.length; k++) {
+      var key_val = items[k].split('=');
+      var key = key_val[0];
+      if (key == 'sidebar') {
+        var value = key_val[1];
+        if ((value == 'collapsed') && (!sidebar_is_collapsed()))
+          collapse_sidebar();
+        else if ((value == 'expanded') && (sidebar_is_collapsed()))
+          expand_sidebar();
+      }
+    }
+  }
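+
+  // e.g. with document.cookie == "sidebar=collapsed; foo=bar" the loop
+  // above finds the 'sidebar' key and collapses the sidebar on load.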
+
+  add_sidebar_button();
+  var sidebarbutton = $('#sidebarbutton');
+  set_position_from_cookie();
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/globaltoc.html
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/globaltoc.html b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/globaltoc.html
new file mode 100644
index 0000000..f5fbb40
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/globaltoc.html
@@ -0,0 +1,12 @@
+{#
+    basic/globaltoc.html
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Sphinx sidebar template: global table of contents.
+
+    :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+#}
+<h3>{{ _('Manual') }}</h3>
+{{ toctree() }}
+<a href="{{ pathto(master_doc) }}">Back to Welcome</a>

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexcontent.html
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexcontent.html b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexcontent.html
new file mode 100644
index 0000000..dd5e724
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexcontent.html
@@ -0,0 +1,4 @@
+{% extends "defindex.html" %}
+{% block tables %}
+
+{% endblock %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexsidebar.html
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexsidebar.html b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexsidebar.html
new file mode 100644
index 0000000..4af2296
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/indexsidebar.html
@@ -0,0 +1,16 @@
+<!--
+<h3>Other versions</h3>
+<ul>
+  <li><a href="http://psutil.readthedocs.org/en/latest/">Latest</a></li>
+  <li><a href="http://psutil.readthedocs.org/en/0.6/">0.6</a></li>
+  <li><a href="http://psutil.readthedocs.org/en/0.5/">0.5</a></li>
+</ul>
+-->
+<h3>Useful links</h3>
+<ul>
+  <li><a href="http://code.google.com/p/psutil/">Google Code Project</a></li>
+  <li><a href="http://grodola.blogspot.com/search/label/psutil">Blog</a></li>
+  <li><a href="https://pypi.python.org/pypi?:action=display&name=psutil#downloads">Download</a></li>
+  <li><a href="https://code.google.com/p/psutil/issues/list">Issues</a></li>
+  <li><a href="http://groups.google.com/group/psutil/topics">Forum</a></li>
+</ul>

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/page.html
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/page.html b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/page.html
new file mode 100644
index 0000000..e6686e9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_template/page.html
@@ -0,0 +1,66 @@
+{% extends "!page.html" %}
+{% block extrahead %}
+{{ super() }}
+{% if not embedded %}<script type="text/javascript" src="{{ pathto('_static/copybutton.js', 1) }}"></script>{% endif %}
+<script type="text/javascript">
+
+  // Store editor pop-up help state in localStorage
+  // so it does not re-pop-up itself between page loads.
+  // Do not even pretend to support IE gracefully.
+  (function($) {
+
+    $(document).ready(function() {
+        var box = $("#editor-trap");
+        var klass = "toggled";
+        var storageKey = "toggled";
+
+        function toggle() {
+            box.toggleClass(klass);
+            // Store the toggle status in localStorage as a marker string
+            window.localStorage.setItem(storageKey, box.hasClass(klass) ? "toggled" : "not-toggled");
+        }
+
+        box.click(toggle);
+
+        // Check the persistent state of the editor pop-up
+        // Note that localStorage does not necessarily support boolean values (ugh!)
+        // http://stackoverflow.com/questions/3263161/cannot-set-boolean-values-in-localstorage
+        var v = window.localStorage.getItem(storageKey);
+        if(v == "toggled" || !v) {
+          box.addClass(klass);
+        }
+
+    });
+
+  })(jQuery);
+</script>
+<script type="text/javascript">
+
+  var _gaq = _gaq || [];
+  _gaq.push(['_setAccount', 'UA-2097050-4']);
+  _gaq.push(['_trackPageview']);
+
+  (function() {
+    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+  })();
+
+</script>
+{% endblock %}
+
+{% block rootrellink %}
+    <li><a href="http://code.google.com/p/psutil/"><img src="{{ pathto('_static/logo.png', 1) }}" style="height: 30px; vertical-align: middle; padding-right: 1em;" /> Project Homepage</a>{{ reldelim1 }}</li>
+	<li><a href="{{ pathto('index') }}">{{ shorttitle }}</a>{{ reldelim1 }}</li>
+{% endblock %}
+
+
+{% block footer %}
+<div class="footer">
+    &copy; Copyright {{ copyright|e }}.
+    <br />
+    Last updated on {{ last_updated|e }}.
+    <br />
+    Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> {{ sphinx_version|e }}.
+</div>
+{% endblock %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/static/pydoctheme.css
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/static/pydoctheme.css b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/static/pydoctheme.css
new file mode 100644
index 0000000..4196e55
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/static/pydoctheme.css
@@ -0,0 +1,187 @@
+@import url("default.css");
+
+body {
+    background-color: white;
+    margin-left: 1em;
+    margin-right: 1em;
+}
+
+div.related {
+    margin-bottom: 1.2em;
+    padding: 0.5em 0;
+    border-top: 1px solid #ccc;
+    margin-top: 0.5em;
+}
+
+div.related a:hover {
+    color: #0095C4;
+}
+
+div.related:first-child {
+    border-top: 0;
+    padding-top: 0;
+    border-bottom: 1px solid #ccc;
+}
+
+div.sphinxsidebar {
+    background-color: #eeeeee;
+    border-radius: 5px;
+    line-height: 130%;
+    font-size: smaller;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+    margin-top: 1.5em;
+}
+
+div.sphinxsidebarwrapper > h3:first-child {
+    margin-top: 0.2em;
+}
+
+div.sphinxsidebarwrapper > ul > li > ul > li {
+    margin-bottom: 0.4em;
+}
+
+div.sphinxsidebar a:hover {
+    color: #0095C4;
+}
+
+div.sphinxsidebar input {
+    font-family: 'Lucida Grande','Lucida Sans','DejaVu Sans',Arial,sans-serif;
+    border: 1px solid #999999;
+    font-size: smaller;
+    border-radius: 3px;
+}
+
+div.sphinxsidebar input[type=text] {
+    max-width: 150px;
+}
+
+div.body {
+    padding: 0 0 0 1.2em;
+}
+
+div.body p {
+    line-height: 140%;
+}
+
+div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 {
+    margin: 0;
+    border: 0;
+    padding: 0.3em 0;
+}
+
+div.body hr {
+    border: 0;
+    background-color: #ccc;
+    height: 1px;
+}
+
+div.body pre {
+    border-radius: 3px;
+    border: 1px solid #ac9;
+}
+
+div.body div.admonition, div.body div.impl-detail {
+    border-radius: 3px;
+}
+
+div.body div.impl-detail > p {
+    margin: 0;
+}
+
+div.body div.seealso {
+    border: 1px solid #dddd66;
+}
+
+div.body a {
+    color: #00608f;
+}
+
+div.body a:visited {
+    color: #30306f;
+}
+
+div.body a:hover {
+    color: #00B0E4;
+}
+
+tt, pre {
+    font-family: monospace, sans-serif;
+    font-size: 96.5%;
+}
+
+div.body tt {
+    border-radius: 3px;
+}
+
+div.body tt.descname {
+    font-size: 120%;
+}
+
+div.body tt.xref, div.body a tt {
+    font-weight: normal;
+}
+
+p.deprecated {
+    border-radius: 3px;
+}
+
+table.docutils {
+    border: 1px solid #ddd;
+    min-width: 20%;
+    border-radius: 3px;
+    margin-top: 10px;
+    margin-bottom: 10px;
+}
+
+table.docutils td, table.docutils th {
+    border: 1px solid #ddd !important;
+    border-radius: 3px;
+}
+
+table p, table li {
+    text-align: left !important;
+}
+
+table.docutils th {
+    background-color: #eee;
+    padding: 0.3em 0.5em;
+}
+
+table.docutils td {
+    background-color: white;
+    padding: 0.3em 0.5em;
+}
+
+table.footnote, table.footnote td {
+    border: 0 !important;
+}
+
+div.footer {
+    line-height: 150%;
+    margin-top: -2em;
+    text-align: right;
+    width: auto;
+    margin-right: 10px;
+}
+
+div.footer a:hover {
+    color: #0095C4;
+}
+
+div.body h1,
+div.body h2,
+div.body h3 {
+    background-color: #EAEAEA;
+    border-bottom: 1px solid #CCC;
+    padding-top: 2px;
+    padding-bottom: 2px;
+    padding-left: 5px;
+    margin-top: 5px;
+    margin-bottom: 5px;
+}
+
+div.body h2 {
+    padding-left:10px;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/theme.conf
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/theme.conf b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/theme.conf
new file mode 100644
index 0000000..95b97e5
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/_themes/pydoctheme/theme.conf
@@ -0,0 +1,23 @@
+[theme]
+inherit = default
+stylesheet = pydoctheme.css
+pygments_style = sphinx
+
+[options]
+bodyfont = 'Lucida Grande', 'Lucida Sans', 'DejaVu Sans', Arial, sans-serif
+headfont = 'Lucida Grande', 'Lucida Sans', 'DejaVu Sans', Arial, sans-serif
+footerbgcolor = white
+footertextcolor = #555555
+relbarbgcolor = white
+relbartextcolor = #666666
+relbarlinkcolor = #444444
+sidebarbgcolor = white
+sidebartextcolor = #444444
+sidebarlinkcolor = #444444
+bgcolor = white
+textcolor = #222222
+linkcolor = #0090c0
+visitedlinkcolor = #00608f
+headtextcolor = #1a1a1a
+headbgcolor = white
+headlinkcolor = #aaaaaa

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/conf.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/conf.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/conf.py
new file mode 100644
index 0000000..0c6608a
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/conf.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+#
+# psutil documentation build configuration file, created by
+# sphinx-quickstart.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import datetime
+import os
+
+
+PROJECT_NAME = u"psutil"
+AUTHOR = u"Giampaolo Rodola'"
+THIS_YEAR = str(datetime.datetime.now().year)
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+def get_version():
+    INIT = os.path.abspath(os.path.join(HERE, '../psutil/__init__.py'))
+    f = open(INIT, 'r')
+    try:
+        for line in f:
+            if line.startswith('__version__'):
+                ret = eval(line.strip().split(' = ')[1])
+                assert ret.count('.') == 2, ret
+                for num in ret.split('.'):
+                    assert num.isdigit(), ret
+                return ret
+        else:
+            raise ValueError("couldn't find version string")
+    finally:
+        f.close()
+
+VERSION = get_version()
+
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.coverage',
+              'sphinx.ext.pngmath',
+              'sphinx.ext.viewcode',
+              'sphinx.ext.intersphinx']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_template']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = PROJECT_NAME
+copyright = u'2009-%s, %s' % (THIS_YEAR, AUTHOR)
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = VERSION
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+autodoc_docstring_signature = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+html_theme = 'pydoctheme'
+html_theme_options = {'collapsiblesidebar': True}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ["_themes"]
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+html_title = "{project} {version} documentation".format(**locals())
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = 'logo.png'
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = 'favicon.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {
+    'index': 'indexsidebar.html',
+    '**': ['globaltoc.html',
+           'relations.html',
+           'sourcelink.html',
+           'searchbox.html']
+}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {
+#    'index': 'indexcontent.html',
+#}
+
+# If false, no module index is generated.
+html_domain_indices = False
+
+# If false, no index is generated.
+html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = '%s-doc' % PROJECT_NAME
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+    ('index', '%s.tex' % PROJECT_NAME,
+     u'%s documentation' % PROJECT_NAME, AUTHOR),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', PROJECT_NAME, u'%s documentation' % PROJECT_NAME, [AUTHOR], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False


[21/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryReader.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryReader.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryReader.java
new file mode 100644
index 0000000..590853a
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryReader.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public interface ApplicationHistoryReader {
+
+  /**
+   * This method returns the {@link ApplicationHistoryData} for the specified
+   * {@link ApplicationId}.
+   *
+   * @param appId the {@link ApplicationId} of the application
+   * @return {@link ApplicationHistoryData} for the ApplicationId.
+   * @throws IOException
+   */
+  ApplicationHistoryData getApplication(ApplicationId appId) throws IOException;
+
+  /**
+   * This method returns all Application {@link ApplicationHistoryData}s.
+   *
+   * @return map of {@link ApplicationId} to {@link ApplicationHistoryData}s.
+   * @throws IOException
+   */
+  Map<ApplicationId, ApplicationHistoryData> getAllApplications()
+      throws IOException;
+
+  /**
+   * An application can have multiple application attempts. This method
+   * returns all {@link ApplicationAttemptHistoryData}s for the application.
+   *
+   * @param appId the {@link ApplicationId} of the application
+   *
+   * @return all {@link ApplicationAttemptHistoryData}s for the Application.
+   * @throws IOException
+   */
+  Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
+      getApplicationAttempts(ApplicationId appId) throws IOException;
+
+  /**
+   * This method returns the {@link ApplicationAttemptHistoryData} for the
+   * specified {@link ApplicationAttemptId}.
+   * 
+   * @param appAttemptId
+   *          {@link ApplicationAttemptId}
+   * @return {@link ApplicationAttemptHistoryData} for ApplicationAttemptId
+   * @throws IOException
+   */
+  ApplicationAttemptHistoryData getApplicationAttempt(
+      ApplicationAttemptId appAttemptId) throws IOException;
+
+  /**
+   * This method returns the {@link ContainerHistoryData} for the specified
+   * {@link ContainerId}.
+   * 
+   * @param containerId
+   *          {@link ContainerId}
+   * @return {@link ContainerHistoryData} for ContainerId
+   * @throws IOException
+   */
+  ContainerHistoryData getContainer(ContainerId containerId) throws IOException;
+
+  /**
+   * This method returns the {@link ContainerHistoryData} of the AM container
+   * for the specified {@link ApplicationAttemptId}.
+   * 
+   * @param appAttemptId
+   *          {@link ApplicationAttemptId}
+   * @return {@link ContainerHistoryData} for ApplicationAttemptId
+   * @throws IOException
+   */
+  ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId)
+      throws IOException;
+
+  /**
+   * This method returns a map of {@link ContainerId} to
+   * {@link ContainerHistoryData} for the specified {@link ApplicationAttemptId}.
+   * 
+   * @param appAttemptId
+   *          {@link ApplicationAttemptId}
+   * @return a map of {@link ContainerId} to {@link ContainerHistoryData} for
+   *         the ApplicationAttemptId
+   * @throws IOException
+   */
+  Map<ContainerId, ContainerHistoryData> getContainers(
+      ApplicationAttemptId appAttemptId) throws IOException;
+}

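For orientation, the reader interface above is consumed through whichever ApplicationHistoryStore implementation the server wires up. Below is a minimal, hedged sketch of iterating all applications; the reader instance is assumed to come from an already-started store, and nothing in it is part of the patch itself:

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryReader;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;

    public class HistoryReaderSketch {
      // 'reader' is assumed to be an already-initialized implementation,
      // e.g. a started FileSystemApplicationHistoryStore.
      static void dumpApplications(ApplicationHistoryReader reader)
          throws IOException {
        Map<ApplicationId, ApplicationHistoryData> apps =
            reader.getAllApplications();
        for (Map.Entry<ApplicationId, ApplicationHistoryData> entry
            : apps.entrySet()) {
          // Each value aggregates the start and finish records merged by the
          // store; getApplicationName() mirrors the setter used in that merge.
          System.out.println(entry.getKey() + " -> "
              + entry.getValue().getApplicationName());
        }
      }
    }
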
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
new file mode 100644
index 0000000..f622153
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.HBaseTimelineMetricStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.TimelineStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.LeveldbTimelineStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp;
+import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * History server that keeps track of all types of history in the cluster.
+ * Application-specific history to start with.
+ */
+public class ApplicationHistoryServer extends CompositeService {
+
+  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+  private static final Log LOG = LogFactory
+    .getLog(ApplicationHistoryServer.class);
+
+  ApplicationHistoryClientService ahsClientService;
+  ApplicationHistoryManager historyManager;
+  TimelineStore timelineStore;
+  TimelineMetricStore timelineMetricStore;
+  private WebApp webApp;
+
+  public ApplicationHistoryServer() {
+    super(ApplicationHistoryServer.class.getName());
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    historyManager = createApplicationHistory();
+    ahsClientService = createApplicationHistoryClientService(historyManager);
+    addService(ahsClientService);
+    addService((Service) historyManager);
+    timelineStore = createTimelineStore(conf);
+    timelineMetricStore = createTimelineMetricStore(conf);
+    addIfService(timelineStore);
+    addIfService(timelineMetricStore);
+    super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    DefaultMetricsSystem.initialize("ApplicationHistoryServer");
+    JvmMetrics.initSingleton("ApplicationHistoryServer", null);
+
+    startWebApp();
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    if (webApp != null) {
+      webApp.stop();
+    }
+
+    DefaultMetricsSystem.shutdown();
+    super.serviceStop();
+  }
+
+  @Private
+  @VisibleForTesting
+  public ApplicationHistoryClientService getClientService() {
+    return this.ahsClientService;
+  }
+
+  protected ApplicationHistoryClientService
+      createApplicationHistoryClientService(
+          ApplicationHistoryManager historyManager) {
+    return new ApplicationHistoryClientService(historyManager);
+  }
+
+  protected ApplicationHistoryManager createApplicationHistory() {
+    return new ApplicationHistoryManagerImpl();
+  }
+
+  protected ApplicationHistoryManager getApplicationHistory() {
+    return this.historyManager;
+  }
+
+  static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
+    Thread
+      .setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
+    StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
+      LOG);
+    ApplicationHistoryServer appHistoryServer = null;
+    try {
+      appHistoryServer = new ApplicationHistoryServer();
+      ShutdownHookManager.get().addShutdownHook(
+        new CompositeServiceShutdownHook(appHistoryServer),
+        SHUTDOWN_HOOK_PRIORITY);
+      YarnConfiguration conf = new YarnConfiguration();
+      appHistoryServer.init(conf);
+      appHistoryServer.start();
+    } catch (Throwable t) {
+      LOG.fatal("Error starting ApplicationHistoryServer", t);
+      ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
+    }
+    return appHistoryServer;
+  }
+
+  public static void main(String[] args) {
+    launchAppHistoryServer(args);
+  }
+
+  protected ApplicationHistoryManager createApplicationHistoryManager(
+      Configuration conf) {
+    return new ApplicationHistoryManagerImpl();
+  }
+
+  protected TimelineStore createTimelineStore(
+      Configuration conf) {
+    return ReflectionUtils.newInstance(conf.getClass(
+        YarnConfiguration.TIMELINE_SERVICE_STORE, LeveldbTimelineStore.class,
+        TimelineStore.class), conf);
+  }
+
+  protected TimelineMetricStore createTimelineMetricStore(Configuration conf) {
+    LOG.info("Creating metrics store.");
+    return ReflectionUtils.newInstance(HBaseTimelineMetricStore.class, conf);
+  }
+
+  protected void startWebApp() {
+    String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(getConfig());
+    LOG.info("Instantiating AHSWebApp at " + bindAddress);
+    try {
+      webApp =
+          WebApps
+            .$for("applicationhistory", ApplicationHistoryClientService.class,
+              ahsClientService, "ws")
+            .with(getConfig())
+            .withHttpSpnegoPrincipalKey(
+              YarnConfiguration.TIMELINE_SERVICE_WEBAPP_SPNEGO_USER_NAME_KEY)
+            .withHttpSpnegoKeytabKey(
+              YarnConfiguration.TIMELINE_SERVICE_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
+            .at(bindAddress)
+            .start(new AHSWebApp(historyManager, timelineStore, timelineMetricStore));
+    } catch (Exception e) {
+      String msg = "AHSWebApp failed to start.";
+      LOG.error(msg, e);
+      throw new YarnRuntimeException(msg, e);
+    }
+  }
+
+  /**
+   * @return the {@link TimelineStore} used by this server
+   */
+  @Private
+  @VisibleForTesting
+  public TimelineStore getTimelineStore() {
+    return timelineStore;
+  }
+}

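Note that createTimelineMetricStore() above is hardcoded to HBaseTimelineMetricStore but is protected, so a test or alternate deployment can substitute a different TimelineMetricStore by subclassing. A hedged sketch under that assumption follows; EmbeddedHistoryServer is illustrative only, and the supplied store instance would have to come from elsewhere:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricStore;

    public class EmbeddedHistoryServer extends ApplicationHistoryServer {
      private final TimelineMetricStore metricStore;

      public EmbeddedHistoryServer(TimelineMetricStore metricStore) {
        this.metricStore = metricStore;
      }

      @Override
      protected TimelineMetricStore createTimelineMetricStore(Configuration conf) {
        // Bypass the hardcoded HBaseTimelineMetricStore.
        return metricStore;
      }

      static void launch(TimelineMetricStore store) {
        // Standard CompositeService lifecycle, mirroring launchAppHistoryServer().
        EmbeddedHistoryServer server = new EmbeddedHistoryServer(store);
        server.init(new YarnConfiguration());
        server.start();
      }
    }
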
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java
new file mode 100644
index 0000000..c26faef
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.service.Service;
+
+/**
+ * This interface abstracts the storage of application history data. It is a
+ * {@link Service}, so that implementations can use the service life cycle to
+ * initialize and clean up the storage. Users can access the storage via the
+ * {@link ApplicationHistoryReader} and {@link ApplicationHistoryWriter}
+ * interfaces.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public interface ApplicationHistoryStore extends Service,
+    ApplicationHistoryReader, ApplicationHistoryWriter {
+}

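Because the store is itself a Service, callers drive it through the standard init/start/stop lifecycle before touching the reader or writer methods. A short sketch using the FileSystemApplicationHistoryStore that follows; the file:// URI here is a placeholder assumption, not a value from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore;

    public class StoreLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new YarnConfiguration();
        // Placeholder URI; serviceInit() creates ApplicationHistoryDataRoot here.
        conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
            "file:///tmp/app-history");
        ApplicationHistoryStore store = new FileSystemApplicationHistoryStore();
        store.init(conf);
        store.start();
        try {
          // ApplicationHistoryReader/Writer calls go here.
        } finally {
          store.stop();  // serviceStop() closes any outstanding writers
        }
      }
    }
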
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter.java
new file mode 100644
index 0000000..09ba36d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+
+/**
+ * This interface defines how application history is written, exposing methods
+ * for writing {@link ApplicationStartData}, {@link ApplicationFinishData},
+ * {@link ApplicationAttemptStartData}, {@link ApplicationAttemptFinishData},
+ * {@link ContainerStartData} and {@link ContainerFinishData}.
+ */
+@Private
+@Unstable
+public interface ApplicationHistoryWriter {
+
+  /**
+   * This method writes the information of <code>RMApp</code> that is available
+   * when it starts.
+   * 
+   * @param appStart
+   *          the record of the information of <code>RMApp</code> that is
+   *          available when it starts
+   * @throws IOException
+   */
+  void applicationStarted(ApplicationStartData appStart) throws IOException;
+
+  /**
+   * This method writes the information of <code>RMApp</code> that is available
+   * when it finishes.
+   * 
+   * @param appFinish
+   *          the record of the information of <code>RMApp</code> that is
+   *          available when it finishes
+   * @throws IOException
+   */
+  void applicationFinished(ApplicationFinishData appFinish) throws IOException;
+
+  /**
+   * This method writes the information of <code>RMAppAttempt</code> that is
+   * available when it starts.
+   * 
+   * @param appAttemptStart
+   *          the record of the information of <code>RMAppAttempt</code> that is
+   *          available when it starts
+   * @throws IOException
+   */
+  void applicationAttemptStarted(ApplicationAttemptStartData appAttemptStart)
+      throws IOException;
+
+  /**
+   * This method writes the information of <code>RMAppAttempt</code> that is
+   * available when it finishes.
+   * 
+   * @param appAttemptFinish
+   *          the record of the information of <code>RMAppAttempt</code> that is
+   *          available when it finishes
+   * @throws IOException
+   */
+  void applicationAttemptFinished(
+      ApplicationAttemptFinishData appAttemptFinish) throws IOException;
+
+  /**
+   * This method writes the information of <code>RMContainer</code> that is
+   * available when it starts.
+   * 
+   * @param containerStart
+   *          the record of the information of <code>RMContainer</code> that is
+   *          available when it starts
+   * @throws IOException
+   */
+  void containerStarted(ContainerStartData containerStart) throws IOException;
+
+  /**
+   * This method writes the information of <code>RMContainer</code> that is
+   * available when it finishes.
+   * 
+   * @param containerFinish
+   *          the record of the information of <code>RMContainer</code> that is
+   *          available when it finishes
+   * @throws IOException
+   */
+  void containerFinished(ContainerFinishData containerFinish)
+      throws IOException;
+
+}

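The writer contract is order-sensitive in the file-system implementation that follows: applicationStarted() opens the per-application history file and applicationFinished() closes it, so attempt and container records must land in between. A hedged sketch of the expected call sequence; record construction is elided, and the records are assumed to be built via their newInstance factories:

    import java.io.IOException;

    import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryWriter;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;

    public class WriteSequenceSketch {
      static void recordLifecycle(ApplicationHistoryWriter writer,
          ApplicationStartData appStart, ContainerStartData containerStart,
          ContainerFinishData containerFinish, ApplicationFinishData appFinish)
          throws IOException {
        writer.applicationStarted(appStart);      // must come first: opens the file
        writer.containerStarted(containerStart);
        writer.containerFinished(containerFinish);
        writer.applicationFinished(appFinish);    // must come last: closes the file
      }
    }
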
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
new file mode 100644
index 0000000..4c8d745
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
@@ -0,0 +1,784 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.file.tfile.TFile;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProto;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptFinishDataPBImpl;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptStartDataPBImpl;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationFinishDataPBImpl;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationStartDataPBImpl;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ContainerFinishDataPBImpl;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ContainerStartDataPBImpl;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * File system implementation of {@link ApplicationHistoryStore}. In this
+ * implementation, one application will have just one file in the file system,
+ * which contains all the history data of the application, including its
+ * attempts and containers. {@link #applicationStarted(ApplicationStartData)}
+ * is supposed to be invoked first when writing any history data of an
+ * application, and it will open the file, while
+ * {@link #applicationFinished(ApplicationFinishData)} is supposed to be the
+ * last write operation and will close the file.
+ */
+@Public
+@Unstable
+public class FileSystemApplicationHistoryStore extends AbstractService
+    implements ApplicationHistoryStore {
+
+  private static final Log LOG = LogFactory
+    .getLog(FileSystemApplicationHistoryStore.class);
+
+  private static final String ROOT_DIR_NAME = "ApplicationHistoryDataRoot";
+  private static final int MIN_BLOCK_SIZE = 256 * 1024;
+  private static final String START_DATA_SUFFIX = "_start";
+  private static final String FINISH_DATA_SUFFIX = "_finish";
+  private static final FsPermission ROOT_DIR_UMASK = FsPermission
+    .createImmutable((short) 0740);
+  private static final FsPermission HISTORY_FILE_UMASK = FsPermission
+    .createImmutable((short) 0640);
+
+  private FileSystem fs;
+  private Path rootDirPath;
+
+  private ConcurrentMap<ApplicationId, HistoryFileWriter> outstandingWriters =
+      new ConcurrentHashMap<ApplicationId, HistoryFileWriter>();
+
+  public FileSystemApplicationHistoryStore() {
+    super(FileSystemApplicationHistoryStore.class.getName());
+  }
+
+  @Override
+  public void serviceInit(Configuration conf) throws Exception {
+    Path fsWorkingPath =
+        new Path(conf.get(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI));
+    rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME);
+    try {
+      fs = fsWorkingPath.getFileSystem(conf);
+      fs.mkdirs(rootDirPath);
+      fs.setPermission(rootDirPath, ROOT_DIR_UMASK);
+    } catch (IOException e) {
+      LOG.error("Error when initializing FileSystemHistoryStorage", e);
+      throw e;
+    }
+    super.serviceInit(conf);
+  }
+
+  @Override
+  public void serviceStop() throws Exception {
+    try {
+      for (Entry<ApplicationId, HistoryFileWriter> entry : outstandingWriters
+        .entrySet()) {
+        entry.getValue().close();
+      }
+      outstandingWriters.clear();
+    } finally {
+      IOUtils.cleanup(LOG, fs);
+    }
+    super.serviceStop();
+  }
+
+  @Override
+  public ApplicationHistoryData getApplication(ApplicationId appId)
+      throws IOException {
+    HistoryFileReader hfReader = getHistoryFileReader(appId);
+    try {
+      boolean readStartData = false;
+      boolean readFinishData = false;
+      ApplicationHistoryData historyData =
+          ApplicationHistoryData.newInstance(appId, null, null, null, null,
+            Long.MIN_VALUE, Long.MIN_VALUE, Long.MAX_VALUE, null,
+            FinalApplicationStatus.UNDEFINED, null);
+      while ((!readStartData || !readFinishData) && hfReader.hasNext()) {
+        HistoryFileReader.Entry entry = hfReader.next();
+        if (entry.key.id.equals(appId.toString())) {
+          if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
+            ApplicationStartData startData =
+                parseApplicationStartData(entry.value);
+            mergeApplicationHistoryData(historyData, startData);
+            readStartData = true;
+          } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
+            ApplicationFinishData finishData =
+                parseApplicationFinishData(entry.value);
+            mergeApplicationHistoryData(historyData, finishData);
+            readFinishData = true;
+          }
+        }
+      }
+      if (!readStartData && !readFinishData) {
+        return null;
+      }
+      if (!readStartData) {
+        LOG.warn("Start information is missing for application " + appId);
+      }
+      if (!readFinishData) {
+        LOG.warn("Finish information is missing for application " + appId);
+      }
+      LOG.info("Completed reading history information of application " + appId);
+      return historyData;
+    } catch (IOException e) {
+      LOG.error("Error when reading history file of application " + appId);
+      throw e;
+    } finally {
+      hfReader.close();
+    }
+  }
+
+  @Override
+  public Map<ApplicationId, ApplicationHistoryData> getAllApplications()
+      throws IOException {
+    Map<ApplicationId, ApplicationHistoryData> historyDataMap =
+        new HashMap<ApplicationId, ApplicationHistoryData>();
+    FileStatus[] files = fs.listStatus(rootDirPath);
+    for (FileStatus file : files) {
+      ApplicationId appId =
+          ConverterUtils.toApplicationId(file.getPath().getName());
+      try {
+        ApplicationHistoryData historyData = getApplication(appId);
+        if (historyData != null) {
+          historyDataMap.put(appId, historyData);
+        }
+      } catch (IOException e) {
+        // Swallow the exception so that fetching the next
+        // ApplicationHistoryData is not disturbed
+        LOG.error("History information of application " + appId
+            + " is not included into the result due to the exception", e);
+      }
+    }
+    return historyDataMap;
+  }
+
+  @Override
+  public Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
+      getApplicationAttempts(ApplicationId appId) throws IOException {
+    Map<ApplicationAttemptId, ApplicationAttemptHistoryData> historyDataMap =
+        new HashMap<ApplicationAttemptId, ApplicationAttemptHistoryData>();
+    HistoryFileReader hfReader = getHistoryFileReader(appId);
+    try {
+      while (hfReader.hasNext()) {
+        HistoryFileReader.Entry entry = hfReader.next();
+        if (entry.key.id.startsWith(
+            ConverterUtils.APPLICATION_ATTEMPT_PREFIX)) {
+          ApplicationAttemptId appAttemptId = 
+              ConverterUtils.toApplicationAttemptId(entry.key.id);
+          if (appAttemptId.getApplicationId().equals(appId)) {
+            ApplicationAttemptHistoryData historyData = 
+                historyDataMap.get(appAttemptId);
+            if (historyData == null) {
+              historyData = ApplicationAttemptHistoryData.newInstance(
+                  appAttemptId, null, -1, null, null, null,
+                  FinalApplicationStatus.UNDEFINED, null);
+              historyDataMap.put(appAttemptId, historyData);
+            }
+            if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
+              mergeApplicationAttemptHistoryData(historyData,
+                  parseApplicationAttemptStartData(entry.value));
+            } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
+              mergeApplicationAttemptHistoryData(historyData,
+                  parseApplicationAttemptFinishData(entry.value));
+            }
+          }
+        }
+      }
+      LOG.info("Completed reading history information of all application"
+          + " attempts of application " + appId);
+    } catch (IOException e) {
+      LOG.info("Error when reading history information of some application"
+          + " attempts of application " + appId);
+    } finally {
+      hfReader.close();
+    }
+    return historyDataMap;
+  }
+
+  @Override
+  public ApplicationAttemptHistoryData getApplicationAttempt(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    HistoryFileReader hfReader =
+        getHistoryFileReader(appAttemptId.getApplicationId());
+    try {
+      boolean readStartData = false;
+      boolean readFinishData = false;
+      ApplicationAttemptHistoryData historyData =
+          ApplicationAttemptHistoryData.newInstance(appAttemptId, null, -1,
+            null, null, null, FinalApplicationStatus.UNDEFINED, null);
+      while ((!readStartData || !readFinishData) && hfReader.hasNext()) {
+        HistoryFileReader.Entry entry = hfReader.next();
+        if (entry.key.id.equals(appAttemptId.toString())) {
+          if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
+            ApplicationAttemptStartData startData =
+                parseApplicationAttemptStartData(entry.value);
+            mergeApplicationAttemptHistoryData(historyData, startData);
+            readStartData = true;
+          } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
+            ApplicationAttemptFinishData finishData =
+                parseApplicationAttemptFinishData(entry.value);
+            mergeApplicationAttemptHistoryData(historyData, finishData);
+            readFinishData = true;
+          }
+        }
+      }
+      if (!readStartData && !readFinishData) {
+        return null;
+      }
+      if (!readStartData) {
+        LOG.warn("Start information is missing for application attempt "
+            + appAttemptId);
+      }
+      if (!readFinishData) {
+        LOG.warn("Finish information is missing for application attempt "
+            + appAttemptId);
+      }
+      LOG.info("Completed reading history information of application attempt "
+          + appAttemptId);
+      return historyData;
+    } catch (IOException e) {
+      LOG.error("Error when reading history file of application attempt"
+          + appAttemptId);
+      throw e;
+    } finally {
+      hfReader.close();
+    }
+  }
+
+  @Override
+  public ContainerHistoryData getContainer(ContainerId containerId)
+      throws IOException {
+    HistoryFileReader hfReader =
+        getHistoryFileReader(containerId.getApplicationAttemptId()
+          .getApplicationId());
+    try {
+      boolean readStartData = false;
+      boolean readFinishData = false;
+      ContainerHistoryData historyData =
+          ContainerHistoryData
+            .newInstance(containerId, null, null, null, Long.MIN_VALUE,
+              Long.MAX_VALUE, null, Integer.MAX_VALUE, null);
+      while ((!readStartData || !readFinishData) && hfReader.hasNext()) {
+        HistoryFileReader.Entry entry = hfReader.next();
+        if (entry.key.id.equals(containerId.toString())) {
+          if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
+            ContainerStartData startData = parseContainerStartData(entry.value);
+            mergeContainerHistoryData(historyData, startData);
+            readStartData = true;
+          } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
+            ContainerFinishData finishData =
+                parseContainerFinishData(entry.value);
+            mergeContainerHistoryData(historyData, finishData);
+            readFinishData = true;
+          }
+        }
+      }
+      if (!readStartData && !readFinishData) {
+        return null;
+      }
+      if (!readStartData) {
+        LOG.warn("Start information is missing for container " + containerId);
+      }
+      if (!readFinishData) {
+        LOG.warn("Finish information is missing for container " + containerId);
+      }
+      LOG.info("Completed reading history information of container "
+          + containerId);
+      return historyData;
+    } catch (IOException e) {
+      LOG.error("Error when reading history file of container " + containerId);
+      throw e;
+    } finally {
+      hfReader.close();
+    }
+  }
+
+  @Override
+  public ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId)
+      throws IOException {
+    ApplicationAttemptHistoryData attemptHistoryData =
+        getApplicationAttempt(appAttemptId);
+    if (attemptHistoryData == null
+        || attemptHistoryData.getMasterContainerId() == null) {
+      return null;
+    }
+    return getContainer(attemptHistoryData.getMasterContainerId());
+  }
+
+  @Override
+  public Map<ContainerId, ContainerHistoryData> getContainers(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    Map<ContainerId, ContainerHistoryData> historyDataMap =
+        new HashMap<ContainerId, ContainerHistoryData>();
+    HistoryFileReader hfReader =
+        getHistoryFileReader(appAttemptId.getApplicationId());
+    try {
+      while (hfReader.hasNext()) {
+        HistoryFileReader.Entry entry = hfReader.next();
+        if (entry.key.id.startsWith(ConverterUtils.CONTAINER_PREFIX)) {
+          ContainerId containerId =
+              ConverterUtils.toContainerId(entry.key.id);
+          if (containerId.getApplicationAttemptId().equals(appAttemptId)) {
+            ContainerHistoryData historyData =
+                historyDataMap.get(containerId);
+            if (historyData == null) {
+              historyData = ContainerHistoryData.newInstance(
+                  containerId, null, null, null, Long.MIN_VALUE,
+                  Long.MAX_VALUE, null, Integer.MAX_VALUE, null);
+              historyDataMap.put(containerId, historyData);
+            }
+            if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
+              mergeContainerHistoryData(historyData,
+                  parseContainerStartData(entry.value));
+            } else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
+              mergeContainerHistoryData(historyData,
+                  parseContainerFinishData(entry.value));
+            }
+          }
+        }
+      }
+      LOG.info("Completed reading history information of all conatiners"
+          + " of application attempt " + appAttemptId);
+    } catch (IOException e) {
+      LOG.info("Error when reading history information of some containers"
+          + " of application attempt " + appAttemptId);
+    } finally {
+      hfReader.close();
+    }
+    return historyDataMap;
+  }
+
+  @Override
+  public void applicationStarted(ApplicationStartData appStart)
+      throws IOException {
+    HistoryFileWriter hfWriter =
+        outstandingWriters.get(appStart.getApplicationId());
+    if (hfWriter == null) {
+      Path applicationHistoryFile =
+          new Path(rootDirPath, appStart.getApplicationId().toString());
+      try {
+        hfWriter = new HistoryFileWriter(applicationHistoryFile);
+        LOG.info("Opened history file of application "
+            + appStart.getApplicationId());
+      } catch (IOException e) {
+        LOG.error("Error when openning history file of application "
+            + appStart.getApplicationId());
+        throw e;
+      }
+      outstandingWriters.put(appStart.getApplicationId(), hfWriter);
+    } else {
+      throw new IOException("History file of application "
+          + appStart.getApplicationId() + " is already opened");
+    }
+    assert appStart instanceof ApplicationStartDataPBImpl;
+    try {
+      hfWriter.writeHistoryData(new HistoryDataKey(appStart.getApplicationId()
+        .toString(), START_DATA_SUFFIX),
+        ((ApplicationStartDataPBImpl) appStart).getProto().toByteArray());
+      LOG.info("Start information of application "
+          + appStart.getApplicationId() + " is written");
+    } catch (IOException e) {
+      LOG.error("Error when writing start information of application "
+          + appStart.getApplicationId());
+      throw e;
+    }
+  }
+
+  @Override
+  public void applicationFinished(ApplicationFinishData appFinish)
+      throws IOException {
+    HistoryFileWriter hfWriter =
+        getHistoryFileWriter(appFinish.getApplicationId());
+    assert appFinish instanceof ApplicationFinishDataPBImpl;
+    try {
+      hfWriter.writeHistoryData(new HistoryDataKey(appFinish.getApplicationId()
+        .toString(), FINISH_DATA_SUFFIX),
+        ((ApplicationFinishDataPBImpl) appFinish).getProto().toByteArray());
+      LOG.info("Finish information of application "
+          + appFinish.getApplicationId() + " is written");
+    } catch (IOException e) {
+      LOG.error("Error when writing finish information of application "
+          + appFinish.getApplicationId());
+      throw e;
+    } finally {
+      hfWriter.close();
+      outstandingWriters.remove(appFinish.getApplicationId());
+    }
+  }
+
+  @Override
+  public void applicationAttemptStarted(
+      ApplicationAttemptStartData appAttemptStart) throws IOException {
+    HistoryFileWriter hfWriter =
+        getHistoryFileWriter(appAttemptStart.getApplicationAttemptId()
+          .getApplicationId());
+    assert appAttemptStart instanceof ApplicationAttemptStartDataPBImpl;
+    try {
+      hfWriter.writeHistoryData(new HistoryDataKey(appAttemptStart
+        .getApplicationAttemptId().toString(), START_DATA_SUFFIX),
+        ((ApplicationAttemptStartDataPBImpl) appAttemptStart).getProto()
+          .toByteArray());
+      LOG.info("Start information of application attempt "
+          + appAttemptStart.getApplicationAttemptId() + " is written");
+    } catch (IOException e) {
+      LOG.error("Error when writing start information of application attempt "
+          + appAttemptStart.getApplicationAttemptId());
+      throw e;
+    }
+  }
+
+  @Override
+  public void applicationAttemptFinished(
+      ApplicationAttemptFinishData appAttemptFinish) throws IOException {
+    HistoryFileWriter hfWriter =
+        getHistoryFileWriter(appAttemptFinish.getApplicationAttemptId()
+          .getApplicationId());
+    assert appAttemptFinish instanceof ApplicationAttemptFinishDataPBImpl;
+    try {
+      hfWriter.writeHistoryData(new HistoryDataKey(appAttemptFinish
+        .getApplicationAttemptId().toString(), FINISH_DATA_SUFFIX),
+        ((ApplicationAttemptFinishDataPBImpl) appAttemptFinish).getProto()
+          .toByteArray());
+      LOG.info("Finish information of application attempt "
+          + appAttemptFinish.getApplicationAttemptId() + " is written");
+    } catch (IOException e) {
+      LOG.error("Error when writing finish information of application attempt "
+          + appAttemptFinish.getApplicationAttemptId());
+      throw e;
+    }
+  }
+
+  @Override
+  public void containerStarted(ContainerStartData containerStart)
+      throws IOException {
+    HistoryFileWriter hfWriter =
+        getHistoryFileWriter(containerStart.getContainerId()
+          .getApplicationAttemptId().getApplicationId());
+    assert containerStart instanceof ContainerStartDataPBImpl;
+    try {
+      hfWriter.writeHistoryData(new HistoryDataKey(containerStart
+        .getContainerId().toString(), START_DATA_SUFFIX),
+        ((ContainerStartDataPBImpl) containerStart).getProto().toByteArray());
+      LOG.info("Start information of container "
+          + containerStart.getContainerId() + " is written");
+    } catch (IOException e) {
+      LOG.error("Error when writing start information of container "
+          + containerStart.getContainerId());
+      throw e;
+    }
+  }
+
+  @Override
+  public void containerFinished(ContainerFinishData containerFinish)
+      throws IOException {
+    HistoryFileWriter hfWriter =
+        getHistoryFileWriter(containerFinish.getContainerId()
+          .getApplicationAttemptId().getApplicationId());
+    assert containerFinish instanceof ContainerFinishDataPBImpl;
+    try {
+      hfWriter.writeHistoryData(new HistoryDataKey(containerFinish
+        .getContainerId().toString(), FINISH_DATA_SUFFIX),
+        ((ContainerFinishDataPBImpl) containerFinish).getProto().toByteArray());
+      LOG.info("Finish information of container "
+          + containerFinish.getContainerId() + " is written");
+    } catch (IOException e) {
+      LOG.error("Error when writing finish information of container "
+          + containerFinish.getContainerId());
+      throw e;
+    }
+  }
+
+  private static ApplicationStartData parseApplicationStartData(byte[] value)
+      throws InvalidProtocolBufferException {
+    return new ApplicationStartDataPBImpl(
+      ApplicationStartDataProto.parseFrom(value));
+  }
+
+  private static ApplicationFinishData parseApplicationFinishData(byte[] value)
+      throws InvalidProtocolBufferException {
+    return new ApplicationFinishDataPBImpl(
+      ApplicationFinishDataProto.parseFrom(value));
+  }
+
+  private static ApplicationAttemptStartData parseApplicationAttemptStartData(
+      byte[] value) throws InvalidProtocolBufferException {
+    return new ApplicationAttemptStartDataPBImpl(
+      ApplicationAttemptStartDataProto.parseFrom(value));
+  }
+
+  private static ApplicationAttemptFinishData
+      parseApplicationAttemptFinishData(byte[] value)
+          throws InvalidProtocolBufferException {
+    return new ApplicationAttemptFinishDataPBImpl(
+      ApplicationAttemptFinishDataProto.parseFrom(value));
+  }
+
+  private static ContainerStartData parseContainerStartData(byte[] value)
+      throws InvalidProtocolBufferException {
+    return new ContainerStartDataPBImpl(
+      ContainerStartDataProto.parseFrom(value));
+  }
+
+  private static ContainerFinishData parseContainerFinishData(byte[] value)
+      throws InvalidProtocolBufferException {
+    return new ContainerFinishDataPBImpl(
+      ContainerFinishDataProto.parseFrom(value));
+  }
+
+  private static void mergeApplicationHistoryData(
+      ApplicationHistoryData historyData, ApplicationStartData startData) {
+    historyData.setApplicationName(startData.getApplicationName());
+    historyData.setApplicationType(startData.getApplicationType());
+    historyData.setQueue(startData.getQueue());
+    historyData.setUser(startData.getUser());
+    historyData.setSubmitTime(startData.getSubmitTime());
+    historyData.setStartTime(startData.getStartTime());
+  }
+
+  private static void mergeApplicationHistoryData(
+      ApplicationHistoryData historyData, ApplicationFinishData finishData) {
+    historyData.setFinishTime(finishData.getFinishTime());
+    historyData.setDiagnosticsInfo(finishData.getDiagnosticsInfo());
+    historyData.setFinalApplicationStatus(finishData
+      .getFinalApplicationStatus());
+    historyData.setYarnApplicationState(finishData.getYarnApplicationState());
+  }
+
+  private static void mergeApplicationAttemptHistoryData(
+      ApplicationAttemptHistoryData historyData,
+      ApplicationAttemptStartData startData) {
+    historyData.setHost(startData.getHost());
+    historyData.setRPCPort(startData.getRPCPort());
+    historyData.setMasterContainerId(startData.getMasterContainerId());
+  }
+
+  private static void mergeApplicationAttemptHistoryData(
+      ApplicationAttemptHistoryData historyData,
+      ApplicationAttemptFinishData finishData) {
+    historyData.setDiagnosticsInfo(finishData.getDiagnosticsInfo());
+    historyData.setTrackingURL(finishData.getTrackingURL());
+    historyData.setFinalApplicationStatus(finishData
+      .getFinalApplicationStatus());
+    historyData.setYarnApplicationAttemptState(finishData
+      .getYarnApplicationAttemptState());
+  }
+
+  private static void mergeContainerHistoryData(
+      ContainerHistoryData historyData, ContainerStartData startData) {
+    historyData.setAllocatedResource(startData.getAllocatedResource());
+    historyData.setAssignedNode(startData.getAssignedNode());
+    historyData.setPriority(startData.getPriority());
+    historyData.setStartTime(startData.getStartTime());
+  }
+
+  private static void mergeContainerHistoryData(
+      ContainerHistoryData historyData, ContainerFinishData finishData) {
+    historyData.setFinishTime(finishData.getFinishTime());
+    historyData.setDiagnosticsInfo(finishData.getDiagnosticsInfo());
+    historyData.setContainerExitStatus(finishData.getContainerExitStatus());
+    historyData.setContainerState(finishData.getContainerState());
+  }
+
+  private HistoryFileWriter getHistoryFileWriter(ApplicationId appId)
+      throws IOException {
+    HistoryFileWriter hfWriter = outstandingWriters.get(appId);
+    if (hfWriter == null) {
+      throw new IOException("History file of application " + appId
+          + " is not opened");
+    }
+    return hfWriter;
+  }
+
+  private HistoryFileReader getHistoryFileReader(ApplicationId appId)
+      throws IOException {
+    Path applicationHistoryFile = new Path(rootDirPath, appId.toString());
+    if (!fs.exists(applicationHistoryFile)) {
+      throw new IOException("History file for application " + appId
+          + " is not found");
+    }
+    // The history file is still being written to
+    if (outstandingWriters.containsKey(appId)) {
+      throw new IOException("History file for application " + appId
+          + " is still being written");
+    }
+    return new HistoryFileReader(applicationHistoryFile);
+  }
+
+  private class HistoryFileReader {
+
+    private class Entry {
+
+      private HistoryDataKey key;
+      private byte[] value;
+
+      public Entry(HistoryDataKey key, byte[] value) {
+        this.key = key;
+        this.value = value;
+      }
+    }
+
+    private TFile.Reader reader;
+    private TFile.Reader.Scanner scanner;
+
+    public HistoryFileReader(Path historyFile) throws IOException {
+      FSDataInputStream fsdis = fs.open(historyFile);
+      reader =
+          new TFile.Reader(fsdis, fs.getFileStatus(historyFile).getLen(),
+            getConfig());
+      reset();
+    }
+
+    public boolean hasNext() {
+      return !scanner.atEnd();
+    }
+
+    public Entry next() throws IOException {
+      TFile.Reader.Scanner.Entry entry = scanner.entry();
+      DataInputStream dis = entry.getKeyStream();
+      HistoryDataKey key = new HistoryDataKey();
+      key.readFields(dis);
+      dis = entry.getValueStream();
+      byte[] value = new byte[entry.getValueLength()];
+      dis.readFully(value); // read() may stop short; readFully() fills the buffer
+      scanner.advance();
+      return new Entry(key, value);
+    }
+
+    public void reset() throws IOException {
+      IOUtils.cleanup(LOG, scanner);
+      scanner = reader.createScanner();
+    }
+
+    public void close() {
+      IOUtils.cleanup(LOG, scanner, reader);
+    }
+
+  }
+
+  private class HistoryFileWriter {
+
+    private FSDataOutputStream fsdos;
+    private TFile.Writer writer;
+
+    public HistoryFileWriter(Path historyFile) throws IOException {
+      if (fs.exists(historyFile)) {
+        fsdos = fs.append(historyFile);
+      } else {
+        fsdos = fs.create(historyFile);
+      }
+      fs.setPermission(historyFile, HISTORY_FILE_UMASK);
+      writer =
+          new TFile.Writer(fsdos, MIN_BLOCK_SIZE, getConfig().get(
+            YarnConfiguration.FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE,
+            YarnConfiguration.DEFAULT_FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE), null,
+            getConfig());
+    }
+
+    public synchronized void close() {
+      IOUtils.cleanup(LOG, writer, fsdos);
+    }
+
+    public synchronized void writeHistoryData(HistoryDataKey key, byte[] value)
+        throws IOException {
+      DataOutputStream dos = null;
+      try {
+        dos = writer.prepareAppendKey(-1);
+        key.write(dos);
+      } finally {
+        IOUtils.cleanup(LOG, dos);
+      }
+      try {
+        dos = writer.prepareAppendValue(value.length);
+        dos.write(value);
+      } finally {
+        IOUtils.cleanup(LOG, dos);
+      }
+    }
+
+  }
+
+  private static class HistoryDataKey implements Writable {
+
+    private String id;
+
+    private String suffix;
+
+    public HistoryDataKey() {
+      this(null, null);
+    }
+
+    public HistoryDataKey(String id, String suffix) {
+      this.id = id;
+      this.suffix = suffix;
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+      out.writeUTF(id);
+      out.writeUTF(suffix);
+    }
+
+    @Override
+    public void readFields(DataInput in) throws IOException {
+      id = in.readUTF();
+      suffix = in.readUTF();
+    }
+  }
+}
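
A minimal round-trip sketch against the file-system store above (illustrative
only, not part of the patch; it assumes the records factory methods introduced
elsewhere in this change and a writable default history root directory):

    import java.io.IOException;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
    import org.apache.hadoop.yarn.api.records.YarnApplicationState;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.records.*;

    public class HistoryStoreRoundTrip {
      public static void main(String[] args) throws IOException {
        FileSystemApplicationHistoryStore store =
            new FileSystemApplicationHistoryStore();
        store.init(new YarnConfiguration());
        store.start();
        ApplicationId appId =
            ApplicationId.newInstance(System.currentTimeMillis(), 1);
        // applicationStarted() opens the TFile writer; every later event
        // appends a protobuf entry keyed by entity id plus a suffix.
        store.applicationStarted(ApplicationStartData.newInstance(
            appId, "myApp", "YARN", "default", "user1",
            System.currentTimeMillis(), System.currentTimeMillis()));
        // applicationFinished() writes the *_finish entry and closes the
        // writer, after which the file becomes readable.
        store.applicationFinished(ApplicationFinishData.newInstance(
            appId, System.currentTimeMillis(), "",
            FinalApplicationStatus.SUCCEEDED, YarnApplicationState.FINISHED));
        // Reading merges the start and finish entries into one record.
        ApplicationHistoryData history = store.getApplication(appId);
        System.out.println(history.getApplicationName());
        store.stop();
      }
    }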

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/MemoryApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/MemoryApplicationHistoryStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/MemoryApplicationHistoryStore.java
new file mode 100644
index 0000000..c226ad3
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/MemoryApplicationHistoryStore.java
@@ -0,0 +1,274 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+
+/**
+ * In-memory implementation of {@link ApplicationHistoryStore}. This
+ * implementation is for test purposes only. If multiple instances are
+ * created carelessly, history data may be written to and read from
+ * different memory stores.
+ *
+ */
+@Private
+@Unstable
+public class MemoryApplicationHistoryStore extends AbstractService implements
+    ApplicationHistoryStore {
+
+  private final ConcurrentMap<ApplicationId, ApplicationHistoryData> applicationData =
+      new ConcurrentHashMap<ApplicationId, ApplicationHistoryData>();
+  private final ConcurrentMap<ApplicationId, ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData>> applicationAttemptData =
+      new ConcurrentHashMap<ApplicationId, ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData>>();
+  private final ConcurrentMap<ApplicationAttemptId, ConcurrentMap<ContainerId, ContainerHistoryData>> containerData =
+      new ConcurrentHashMap<ApplicationAttemptId, ConcurrentMap<ContainerId, ContainerHistoryData>>();
+
+  public MemoryApplicationHistoryStore() {
+    super(MemoryApplicationHistoryStore.class.getName());
+  }
+
+  @Override
+  public Map<ApplicationId, ApplicationHistoryData> getAllApplications() {
+    return new HashMap<ApplicationId, ApplicationHistoryData>(applicationData);
+  }
+
+  @Override
+  public ApplicationHistoryData getApplication(ApplicationId appId) {
+    return applicationData.get(appId);
+  }
+
+  @Override
+  public Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
+      getApplicationAttempts(ApplicationId appId) {
+    ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
+        applicationAttemptData.get(appId);
+    if (subMap == null) {
+      return Collections
+        .<ApplicationAttemptId, ApplicationAttemptHistoryData> emptyMap();
+    } else {
+      return new HashMap<ApplicationAttemptId, ApplicationAttemptHistoryData>(
+        subMap);
+    }
+  }
+
+  @Override
+  public ApplicationAttemptHistoryData getApplicationAttempt(
+      ApplicationAttemptId appAttemptId) {
+    ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
+        applicationAttemptData.get(appAttemptId.getApplicationId());
+    if (subMap == null) {
+      return null;
+    } else {
+      return subMap.get(appAttemptId);
+    }
+  }
+
+  @Override
+  public ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId) {
+    ApplicationAttemptHistoryData appAttempt =
+        getApplicationAttempt(appAttemptId);
+    if (appAttempt == null || appAttempt.getMasterContainerId() == null) {
+      return null;
+    } else {
+      return getContainer(appAttempt.getMasterContainerId());
+    }
+  }
+
+  @Override
+  public ContainerHistoryData getContainer(ContainerId containerId) {
+    Map<ContainerId, ContainerHistoryData> subMap =
+        containerData.get(containerId.getApplicationAttemptId());
+    if (subMap == null) {
+      return null;
+    } else {
+      return subMap.get(containerId);
+    }
+  }
+
+  @Override
+  public Map<ContainerId, ContainerHistoryData> getContainers(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    ConcurrentMap<ContainerId, ContainerHistoryData> subMap =
+        containerData.get(appAttemptId);
+    if (subMap == null) {
+      return Collections.<ContainerId, ContainerHistoryData> emptyMap();
+    } else {
+      return new HashMap<ContainerId, ContainerHistoryData>(subMap);
+    }
+  }
+
+  @Override
+  public void applicationStarted(ApplicationStartData appStart)
+      throws IOException {
+    ApplicationHistoryData oldData =
+        applicationData.putIfAbsent(appStart.getApplicationId(),
+          ApplicationHistoryData.newInstance(appStart.getApplicationId(),
+            appStart.getApplicationName(), appStart.getApplicationType(),
+            appStart.getQueue(), appStart.getUser(), appStart.getSubmitTime(),
+            appStart.getStartTime(), Long.MAX_VALUE, null, null, null));
+    if (oldData != null) {
+      throw new IOException("The start information of application "
+          + appStart.getApplicationId() + " is already stored.");
+    }
+  }
+
+  @Override
+  public void applicationFinished(ApplicationFinishData appFinish)
+      throws IOException {
+    ApplicationHistoryData data =
+        applicationData.get(appFinish.getApplicationId());
+    if (data == null) {
+      throw new IOException("The finish information of application "
+          + appFinish.getApplicationId() + " is stored before the start"
+          + " information.");
+    }
+    // Make the assumption that YarnApplicationState should not be null if
+    // the finish information is already recorded
+    if (data.getYarnApplicationState() != null) {
+      throw new IOException("The finish information of application "
+          + appFinish.getApplicationId() + " is already stored.");
+    }
+    data.setFinishTime(appFinish.getFinishTime());
+    data.setDiagnosticsInfo(appFinish.getDiagnosticsInfo());
+    data.setFinalApplicationStatus(appFinish.getFinalApplicationStatus());
+    data.setYarnApplicationState(appFinish.getYarnApplicationState());
+  }
+
+  @Override
+  public void applicationAttemptStarted(
+      ApplicationAttemptStartData appAttemptStart) throws IOException {
+    ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
+        getSubMap(appAttemptStart.getApplicationAttemptId().getApplicationId());
+    ApplicationAttemptHistoryData oldData =
+        subMap.putIfAbsent(appAttemptStart.getApplicationAttemptId(),
+          ApplicationAttemptHistoryData.newInstance(
+            appAttemptStart.getApplicationAttemptId(),
+            appAttemptStart.getHost(), appAttemptStart.getRPCPort(),
+            appAttemptStart.getMasterContainerId(), null, null, null, null));
+    if (oldData != null) {
+      throw new IOException("The start information of application attempt "
+          + appAttemptStart.getApplicationAttemptId() + " is already stored.");
+    }
+  }
+
+  @Override
+  public void applicationAttemptFinished(
+      ApplicationAttemptFinishData appAttemptFinish) throws IOException {
+    ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
+        getSubMap(appAttemptFinish.getApplicationAttemptId().getApplicationId());
+    ApplicationAttemptHistoryData data =
+        subMap.get(appAttemptFinish.getApplicationAttemptId());
+    if (data == null) {
+      throw new IOException("The finish information of application attempt "
+          + appAttemptFinish.getApplicationAttemptId() + " is stored before"
+          + " the start information.");
+    }
+    // Make the assumption that YarnApplicationAttemptState should not be null
+    // if the finish information is already recorded
+    if (data.getYarnApplicationAttemptState() != null) {
+      throw new IOException("The finish information of application attempt "
+          + appAttemptFinish.getApplicationAttemptId() + " is already stored.");
+    }
+    data.setTrackingURL(appAttemptFinish.getTrackingURL());
+    data.setDiagnosticsInfo(appAttemptFinish.getDiagnosticsInfo());
+    data
+      .setFinalApplicationStatus(appAttemptFinish.getFinalApplicationStatus());
+    data.setYarnApplicationAttemptState(appAttemptFinish
+      .getYarnApplicationAttemptState());
+  }
+
+  private ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData>
+      getSubMap(ApplicationId appId) {
+    applicationAttemptData
+      .putIfAbsent(
+        appId,
+        new ConcurrentHashMap<ApplicationAttemptId, ApplicationAttemptHistoryData>());
+    return applicationAttemptData.get(appId);
+  }
+
+  @Override
+  public void containerStarted(ContainerStartData containerStart)
+      throws IOException {
+    ConcurrentMap<ContainerId, ContainerHistoryData> subMap =
+        getSubMap(containerStart.getContainerId().getApplicationAttemptId());
+    ContainerHistoryData oldData =
+        subMap.putIfAbsent(containerStart.getContainerId(),
+          ContainerHistoryData.newInstance(containerStart.getContainerId(),
+            containerStart.getAllocatedResource(),
+            containerStart.getAssignedNode(), containerStart.getPriority(),
+            containerStart.getStartTime(), Long.MAX_VALUE, null,
+            Integer.MAX_VALUE, null));
+    if (oldData != null) {
+      throw new IOException("The start information of container "
+          + containerStart.getContainerId() + " is already stored.");
+    }
+  }
+
+  @Override
+  public void containerFinished(ContainerFinishData containerFinish)
+      throws IOException {
+    ConcurrentMap<ContainerId, ContainerHistoryData> subMap =
+        getSubMap(containerFinish.getContainerId().getApplicationAttemptId());
+    ContainerHistoryData data = subMap.get(containerFinish.getContainerId());
+    if (data == null) {
+      throw new IOException("The finish information of container "
+          + containerFinish.getContainerId() + " is stored before"
+          + " the start information.");
+    }
+    // Make the assumption that ContainerState should not be null if
+    // the finish information is already recorded
+    if (data.getContainerState() != null) {
+      throw new IOException("The finish information of container "
+          + containerFinish.getContainerId() + " is already stored.");
+    }
+    data.setFinishTime(containerFinish.getFinishTime());
+    data.setDiagnosticsInfo(containerFinish.getDiagnosticsInfo());
+    data.setContainerExitStatus(containerFinish.getContainerExitStatus());
+    data.setContainerState(containerFinish.getContainerState());
+  }
+
+  private ConcurrentMap<ContainerId, ContainerHistoryData> getSubMap(
+      ApplicationAttemptId appAttemptId) {
+    containerData.putIfAbsent(appAttemptId,
+      new ConcurrentHashMap<ContainerId, ContainerHistoryData>());
+    return containerData.get(appAttemptId);
+  }
+
+}
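
A quick usage sketch for the in-memory store (illustrative only, not part of
the patch): events must arrive start-before-finish, and each event is
accepted exactly once.

    import java.io.IOException;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
    import org.apache.hadoop.yarn.api.records.YarnApplicationState;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.records.*;

    public class MemoryStoreOrdering {
      public static void main(String[] args) throws IOException {
        MemoryApplicationHistoryStore store = new MemoryApplicationHistoryStore();
        store.init(new YarnConfiguration());
        store.start();
        ApplicationId appId = ApplicationId.newInstance(0, 1);
        ApplicationFinishData finish = ApplicationFinishData.newInstance(
            appId, 1L, "", FinalApplicationStatus.SUCCEEDED,
            YarnApplicationState.FINISHED);
        try {
          store.applicationFinished(finish); // rejected: no start recorded yet
        } catch (IOException expected) {
          System.out.println(expected.getMessage());
        }
        store.applicationStarted(ApplicationStartData.newInstance(
            appId, "app", "YARN", "default", "user", 0L, 0L));
        store.applicationFinished(finish);   // accepted exactly once
        store.stop();
      }
    }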

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/NullApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/NullApplicationHistoryStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/NullApplicationHistoryStore.java
new file mode 100644
index 0000000..3660c10
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/NullApplicationHistoryStore.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+
+/**
+ * Dummy implementation of {@link ApplicationHistoryStore}. If this
+ * implementation is used, no history data will be persisted.
+ * 
+ */
+@Unstable
+@Private
+public class NullApplicationHistoryStore extends AbstractService implements
+    ApplicationHistoryStore {
+
+  public NullApplicationHistoryStore() {
+    super(NullApplicationHistoryStore.class.getName());
+  }
+
+  @Override
+  public void applicationStarted(ApplicationStartData appStart)
+      throws IOException {
+  }
+
+  @Override
+  public void applicationFinished(ApplicationFinishData appFinish)
+      throws IOException {
+  }
+
+  @Override
+  public void applicationAttemptStarted(
+      ApplicationAttemptStartData appAttemptStart) throws IOException {
+  }
+
+  @Override
+  public void applicationAttemptFinished(
+      ApplicationAttemptFinishData appAttemptFinish) throws IOException {
+  }
+
+  @Override
+  public void containerStarted(ContainerStartData containerStart)
+      throws IOException {
+  }
+
+  @Override
+  public void containerFinished(ContainerFinishData containerFinish)
+      throws IOException {
+  }
+
+  @Override
+  public ApplicationHistoryData getApplication(ApplicationId appId)
+      throws IOException {
+    return null;
+  }
+
+  @Override
+  public Map<ApplicationId, ApplicationHistoryData> getAllApplications()
+      throws IOException {
+    return Collections.emptyMap();
+  }
+
+  @Override
+  public Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
+      getApplicationAttempts(ApplicationId appId) throws IOException {
+    return Collections.emptyMap();
+  }
+
+  @Override
+  public ApplicationAttemptHistoryData getApplicationAttempt(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    return null;
+  }
+
+  @Override
+  public ContainerHistoryData getContainer(ContainerId containerId)
+      throws IOException {
+    return null;
+  }
+
+  @Override
+  public ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId)
+      throws IOException {
+    return null;
+  }
+
+  @Override
+  public Map<ContainerId, ContainerHistoryData> getContainers(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    return Collections.emptyMap();
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregator.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregator.java
new file mode 100644
index 0000000..e702fc0
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractTimelineAggregator.java
@@ -0,0 +1,294 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.annotate.JsonSubTypes;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Date;
+
+public abstract class AbstractTimelineAggregator implements Runnable {
+  protected final PhoenixHBaseAccessor hBaseAccessor;
+  protected final String CHECKPOINT_LOCATION;
+  private final Log LOG;
+  static final long checkpointDelay = 120000;
+  static final Integer RESULTSET_FETCH_SIZE = 5000;
+  private static final ObjectMapper mapper;
+
+  static {
+    //SimpleModule simpleModule = new SimpleModule("MetricAggregator", new Version(1, 0, 0, null));
+    //simpleModule.addSerializer(new MetricAggregateSerializer());
+    mapper = new ObjectMapper();
+    //mapper.registerModule(simpleModule);
+  }
+
+  public AbstractTimelineAggregator(PhoenixHBaseAccessor hBaseAccessor,
+                                    String checkpointLocation) {
+    this.hBaseAccessor = hBaseAccessor;
+    this.CHECKPOINT_LOCATION = checkpointLocation;
+    this.LOG = LogFactory.getLog(this.getClass());
+  }
+
+  @Override
+  public void run() {
+    LOG.info("Started Timeline aggregator thread @ " + new Date());
+    long sleepInterval = getSleepInterval();
+
+    while (true) {
+      long currentTime = System.currentTimeMillis();
+      long lastCheckPointTime = -1;
+
+      try {
+        lastCheckPointTime = readCheckPoint();
+        if (isLastCheckPointTooOld(lastCheckPointTime)) {
+          LOG.warn("Last Checkpoint is too old, discarding last checkpoint. " +
+            "lastCheckPointTime = " + lastCheckPointTime);
+          lastCheckPointTime = -1;
+        }
+        if (lastCheckPointTime == -1) {
+          // Assuming first run, save checkpoint and sleep.
+          // Set checkpoint to 2 minutes in the past to allow the
+          // agents/collectors to catch up
+          saveCheckPoint(currentTime - checkpointDelay);
+        }
+      } catch (IOException io) {
+        LOG.warn("Unable to write last checkpoint time. Resuming sleep.", io);
+      }
+
+      if (lastCheckPointTime != -1) {
+        LOG.info("Last check point time: " + lastCheckPointTime + ", " +
+          "lagBy: " + ((System.currentTimeMillis() - lastCheckPointTime)) / 1000);
+        boolean success = doWork(lastCheckPointTime, lastCheckPointTime + sleepInterval);
+        if (success) {
+          try {
+            saveCheckPoint(lastCheckPointTime + sleepInterval);
+          } catch (IOException io) {
+            LOG.warn("Error saving checkpoint, restarting aggregation at " +
+              "previous checkpoint.");
+          }
+        }
+      }
+
+      try {
+        Thread.sleep(sleepInterval);
+      } catch (InterruptedException e) {
+        LOG.info("Sleep interrupted, continuing with aggregation.");
+      }
+    }
+  }
+
+  private boolean isLastCheckPointTooOld(long checkpoint) {
+    return checkpoint != -1 &&
+      ((System.currentTimeMillis() - checkpoint) > getCheckpointCutOffInterval());
+  }
+
+  private long readCheckPoint() {
+    try {
+      File checkpoint = new File(CHECKPOINT_LOCATION);
+      if (checkpoint.exists()) {
+        String contents = FileUtils.readFileToString(checkpoint);
+        if (contents != null && !contents.isEmpty()) {
+          return Long.parseLong(contents.trim());
+        }
+      }
+    } catch (IOException io) {
+      LOG.debug("Unable to read checkpoint file.", io);
+    }
+    return -1;
+  }
+
+  private void saveCheckPoint(long checkpointTime) throws IOException {
+    File checkpoint = new File(CHECKPOINT_LOCATION);
+    if (!checkpoint.exists()) {
+      boolean done = checkpoint.createNewFile();
+      if (!done) {
+        throw new IOException("Could not create checkpoint at location, " +
+          CHECKPOINT_LOCATION);
+      }
+    }
+    FileUtils.writeStringToFile(checkpoint, String.valueOf(checkpointTime));
+  }
+
+  // TODO: Abstract out doWork implementation for cluster and host levels
+  protected abstract boolean doWork(long startTime, long endTime);
+
+  protected abstract Long getSleepInterval();
+
+  protected abstract Long getCheckpointCutOffInterval();
+
+  @JsonSubTypes({ @JsonSubTypes.Type(value = MetricClusterAggregate.class),
+                @JsonSubTypes.Type(value = MetricHostAggregate.class) })
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public static class MetricAggregate {
+    protected Double sum = 0.0;
+    protected Double deviation;
+    protected Double max = -Double.MAX_VALUE; // Double.MIN_VALUE is positive, not the most negative value
+    protected Double min = Double.MAX_VALUE;
+
+    public MetricAggregate() {}
+
+    protected MetricAggregate(Double sum, Double deviation, Double max, Double min) {
+      this.sum = sum;
+      this.deviation = deviation;
+      this.max = max;
+      this.min = min;
+    }
+
+    void updateSum(Double sum) {
+      this.sum += sum;
+    }
+
+    void updateMax(Double max) {
+      if (max > this.max) {
+        this.max = max;
+      }
+    }
+
+    void updateMin(Double min) {
+      if (min < this.min) {
+        this.min = min;
+      }
+    }
+
+    @JsonProperty("sum")
+    Double getSum() {
+      return sum;
+    }
+
+    @JsonProperty("deviation")
+    Double getDeviation() {
+      return deviation;
+    }
+
+    @JsonProperty("max")
+    Double getMax() {
+      return max;
+    }
+
+    @JsonProperty("min")
+    Double getMin() {
+      return min;
+    }
+
+    public void setSum(Double sum) {
+      this.sum = sum;
+    }
+
+    public void setDeviation(Double deviation) {
+      this.deviation = deviation;
+    }
+
+    public void setMax(Double max) {
+      this.max = max;
+    }
+
+    public void setMin(Double min) {
+      this.min = min;
+    }
+
+    public String toJSON() throws IOException {
+      return mapper.writeValueAsString(this);
+    }
+  }
+
+  public static class MetricClusterAggregate extends MetricAggregate {
+    private int numberOfHosts;
+
+    @JsonCreator
+    public MetricClusterAggregate() {}
+
+    MetricClusterAggregate(Double sum, int numberOfHosts, Double deviation,
+                           Double max, Double min) {
+      super(sum, deviation, max, min);
+      this.numberOfHosts = numberOfHosts;
+    }
+
+    @JsonProperty("numberOfHosts")
+    int getNumberOfHosts() {
+      return numberOfHosts;
+    }
+
+    void updateNumberOfHosts(int count) {
+      this.numberOfHosts += count;
+    }
+
+    public void setNumberOfHosts(int numberOfHosts) {
+      this.numberOfHosts = numberOfHosts;
+    }
+
+    @Override
+    public String toString() {
+      return "MetricAggregate{" +
+        "sum=" + sum +
+        ", numberOfHosts=" + numberOfHosts +
+        ", deviation=" + deviation +
+        ", max=" + max +
+        ", min=" + min +
+        '}';
+    }
+  }
+
+  /**
+   * Represents an aggregate of minute-level values, used for
+   * resolutions greater than a minute.
+   */
+  public static class MetricHostAggregate extends MetricAggregate {
+
+    @JsonCreator
+    public MetricHostAggregate() {
+      super(0.0, 0.0, -Double.MAX_VALUE, Double.MAX_VALUE);
+    }
+
+    /**
+     * Find and update min, max and avg for a minute
+     */
+    void updateAggregates(MetricHostAggregate hostAggregate) {
+      updateMax(hostAggregate.getMax());
+      updateMin(hostAggregate.getMin());
+      updateSum(hostAggregate.getSum());
+    }
+
+    /**
+     * Reuse sum as a running average for the host over the hour.
+     */
+    @Override
+    void updateSum(Double sum) {
+      this.sum = (this.sum + sum) / 2;
+    }
+
+    @Override
+    public String toString() {
+      return "MetricHostAggregate{" +
+        "sum=" + sum +
+        ", deviation=" + deviation +
+        ", max=" + max +
+        ", min=" + min +
+        '}';
+    }
+  }
+}
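
To make the checkpoint contract above concrete, a hypothetical minimal
subclass (the class name and intervals are invented for illustration;
PhoenixHBaseAccessor is the accessor type from this patch) only has to
supply doWork() and the two interval getters, while run() drives the
read-checkpoint / aggregate / sleep loop:

    public class HourlyHostAggregator extends AbstractTimelineAggregator {

      public HourlyHostAggregator(PhoenixHBaseAccessor hBaseAccessor,
                                  String checkpointLocation) {
        super(hBaseAccessor, checkpointLocation);
      }

      @Override
      protected boolean doWork(long startTime, long endTime) {
        // Aggregate metrics for [startTime, endTime); returning true lets
        // run() advance the checkpoint, false retries the same window.
        return true;
      }

      @Override
      protected Long getSleepInterval() {
        return 3600000L; // one hour between aggregation passes
      }

      @Override
      protected Long getCheckpointCutOffInterval() {
        return 7200000L; // checkpoints older than two hours are discarded
      }
    }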


[08/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py
new file mode 100644
index 0000000..3d2f27c
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common objects shared by all _ps* modules."""
+
+from __future__ import division
+import errno
+import os
+import socket
+import stat
+import sys
+import warnings
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading
+
+from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
+
+from psutil._compat import namedtuple, wraps
+
+# --- constants
+
+AF_INET6 = getattr(socket, 'AF_INET6', None)
+AF_UNIX = getattr(socket, 'AF_UNIX', None)
+
+STATUS_RUNNING = "running"
+STATUS_SLEEPING = "sleeping"
+STATUS_DISK_SLEEP = "disk-sleep"
+STATUS_STOPPED = "stopped"
+STATUS_TRACING_STOP = "tracing-stop"
+STATUS_ZOMBIE = "zombie"
+STATUS_DEAD = "dead"
+STATUS_WAKE_KILL = "wake-kill"
+STATUS_WAKING = "waking"
+STATUS_IDLE = "idle"  # BSD
+STATUS_LOCKED = "locked"  # BSD
+STATUS_WAITING = "waiting"  # BSD
+
+CONN_ESTABLISHED = "ESTABLISHED"
+CONN_SYN_SENT = "SYN_SENT"
+CONN_SYN_RECV = "SYN_RECV"
+CONN_FIN_WAIT1 = "FIN_WAIT1"
+CONN_FIN_WAIT2 = "FIN_WAIT2"
+CONN_TIME_WAIT = "TIME_WAIT"
+CONN_CLOSE = "CLOSE"
+CONN_CLOSE_WAIT = "CLOSE_WAIT"
+CONN_LAST_ACK = "LAST_ACK"
+CONN_LISTEN = "LISTEN"
+CONN_CLOSING = "CLOSING"
+CONN_NONE = "NONE"
+
+
+# --- functions
+
+def usage_percent(used, total, _round=None):
+    """Calculate percentage usage of 'used' against 'total'."""
+    try:
+        ret = (used / total) * 100
+    except ZeroDivisionError:
+        ret = 0
+    if _round is not None:
+        return round(ret, _round)
+    else:
+        return ret
+
+
+def memoize(fun):
+    """A simple memoize decorator for functions supporting (hashable)
+    positional arguments.
+    It also provides a cache_clear() function for clearing the cache:
+
+    >>> @memoize
+    ... def foo():
+    ...     return 1
+    ...
+    >>> foo()
+    1
+    >>> foo.cache_clear()
+    >>>
+    """
+    @wraps(fun)
+    def wrapper(*args, **kwargs):
+        key = (args, frozenset(sorted(kwargs.items())))
+        lock.acquire()
+        try:
+            try:
+                return cache[key]
+            except KeyError:
+                ret = cache[key] = fun(*args, **kwargs)
+        finally:
+            lock.release()
+        return ret
+
+    def cache_clear():
+        """Clear cache."""
+        lock.acquire()
+        try:
+            cache.clear()
+        finally:
+            lock.release()
+
+    lock = threading.RLock()
+    cache = {}
+    wrapper.cache_clear = cache_clear
+    return wrapper
+
+
+# http://code.activestate.com/recipes/577819-deprecated-decorator/
+def deprecated(replacement=None):
+    """A decorator which can be used to mark functions as deprecated."""
+    def outer(fun):
+        msg = "psutil.%s is deprecated" % fun.__name__
+        if replacement is not None:
+            msg += "; use %s instead" % replacement
+        if fun.__doc__ is None:
+            fun.__doc__ = msg
+
+        @wraps(fun)
+        def inner(*args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return fun(*args, **kwargs)
+
+        return inner
+    return outer
+
+
+def deprecated_method(replacement):
+    """A decorator which can be used to mark a method as deprecated
+    'replcement' is the method name which will be called instead.
+    """
+    def outer(fun):
+        msg = "%s() is deprecated; use %s() instead" % (
+            fun.__name__, replacement)
+        if fun.__doc__ is None:
+            fun.__doc__ = msg
+
+        @wraps(fun)
+        def inner(self, *args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return getattr(self, replacement)(*args, **kwargs)
+        return inner
+    return outer
+
+
+def isfile_strict(path):
+    """Same as os.path.isfile() but does not swallow EACCES / EPERM
+    exceptions, see:
+    http://mail.python.org/pipermail/python-dev/2012-June/120787.html
+    """
+    try:
+        st = os.stat(path)
+    except OSError:
+        err = sys.exc_info()[1]
+        if err.errno in (errno.EPERM, errno.EACCES):
+            raise
+        return False
+    else:
+        return stat.S_ISREG(st.st_mode)
+
+
+# --- Process.connections() 'kind' parameter mapping
+
+conn_tmap = {
+    "all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
+    "tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]),
+    "tcp4": ([AF_INET], [SOCK_STREAM]),
+    "udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]),
+    "udp4": ([AF_INET], [SOCK_DGRAM]),
+    "inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
+    "inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),
+    "inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
+}
+
+if AF_INET6 is not None:
+    conn_tmap.update({
+        "tcp6": ([AF_INET6], [SOCK_STREAM]),
+        "udp6": ([AF_INET6], [SOCK_DGRAM]),
+    })
+
+if AF_UNIX is not None:
+    conn_tmap.update({
+        "unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
+    })
+
+del AF_INET, AF_INET6, AF_UNIX, SOCK_STREAM, SOCK_DGRAM, socket
+
+
+# --- namedtuples for psutil.* system-related functions
+
+# psutil.swap_memory()
+sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',
+                             'sout'])
+# psutil.disk_usage()
+sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent'])
+# psutil.disk_io_counters()
+sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+                                 'read_bytes', 'write_bytes',
+                                 'read_time', 'write_time'])
+# psutil.disk_partitions()
+sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts'])
+# psutil.net_io_counters()
+snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',
+                               'packets_sent', 'packets_recv',
+                               'errin', 'errout',
+                               'dropin', 'dropout'])
+# psutil.users()
+suser = namedtuple('suser', ['name', 'terminal', 'host', 'started'])
+# psutil.net_connections()
+sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',
+                             'status', 'pid'])
+
+
+# --- namedtuples for psutil.Process methods
+
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms'])
+# psutil.Process.cpu_times()
+pcputimes = namedtuple('pcputimes', ['user', 'system'])
+# psutil.Process.open_files()
+popenfile = namedtuple('popenfile', ['path', 'fd'])
+# psutil.Process.threads()
+pthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])
+# psutil.Process.uids()
+puids = namedtuple('puids', ['real', 'effective', 'saved'])
+# psutil.Process.gids()
+pgids = namedtuple('pgids', ['real', 'effective', 'saved'])
+# psutil.Process.io_counters()
+pio = namedtuple('pio', ['read_count', 'write_count',
+                         'read_bytes', 'write_bytes'])
+# psutil.Process.ionice()
+pionice = namedtuple('pionice', ['ioclass', 'value'])
+# psutil.Process.ctx_switches()
+pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])
+
+
+# --- misc
+
+# backward compatibility layer for Process.connections() ntuple
+class pconn(
+    namedtuple('pconn',
+               ['fd', 'family', 'type', 'laddr', 'raddr', 'status'])):
+    __slots__ = ()
+
+    @property
+    def local_address(self):
+        warnings.warn("'local_address' field is deprecated; use 'laddr'"
+                      "instead", category=DeprecationWarning, stacklevel=2)
+        return self.laddr
+
+    @property
+    def remote_address(self):
+        warnings.warn("'remote_address' field is deprecated; use 'raddr'"
+                      "instead", category=DeprecationWarning, stacklevel=2)
+        return self.raddr
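
The conn_tmap dictionary above backs the 'kind' argument of
Process.connections(); a hedged sketch of how a platform module can consume
it (the helper name is invented for illustration):

    from psutil._common import conn_tmap

    def filter_connections(conns, kind="inet"):
        """Keep only the connections whose (family, type) pair is
        allowed for the given 'kind'."""
        if kind not in conn_tmap:
            raise ValueError("invalid kind %r; choose one of %s"
                             % (kind, ', '.join(conn_tmap)))
        families, types = conn_tmap[kind]
        return [c for c in conns
                if c.family in families and c.type in types]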

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py
new file mode 100644
index 0000000..b6ac933
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py
@@ -0,0 +1,433 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module which provides compatibility with older Python versions."""
+
+__all__ = ["PY3", "int", "long", "xrange", "exec_", "callable", "namedtuple",
+           "property", "wraps", "defaultdict", "update_wrapper", "lru_cache"]
+
+import sys
+try:
+    import __builtin__
+except ImportError:
+    import builtins as __builtin__  # py3
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    int = int
+    long = int
+    xrange = range
+    unicode = str
+    exec_ = getattr(__builtin__, "exec")
+    print_ = getattr(__builtin__, "print")
+
+    def u(s):
+        return s
+
+    def b(s):
+        return s.encode("latin-1")
+else:
+    int = int
+    long = long
+    xrange = xrange
+    unicode = unicode
+
+    def u(s):
+        return unicode(s, "unicode_escape")
+
+    def b(s):
+        return s
+
+    def exec_(code, globs=None, locs=None):
+        if globs is None:
+            frame = sys._getframe(1)
+            globs = frame.f_globals
+            if locs is None:
+                locs = frame.f_locals
+            del frame
+        elif locs is None:
+            locs = globs
+        exec("""exec code in globs, locs""")
+
+    def print_(s):
+        sys.stdout.write(s + '\n')
+        sys.stdout.flush()
+
+
+# removed in 3.0, reintroduced in 3.2
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+# --- stdlib additions
+
+# py 2.6 collections.namedtuple
+# Taken from: http://code.activestate.com/recipes/500261/
+# Credits: Raymond Hettinger
+try:
+    from collections import namedtuple
+except ImportError:
+    from operator import itemgetter as _itemgetter
+    from keyword import iskeyword as _iskeyword
+    import sys as _sys
+
+    def namedtuple(typename, field_names, verbose=False, rename=False):
+        """A collections.namedtuple implementation, see:
+        http://docs.python.org/library/collections.html#namedtuple
+        """
+        if isinstance(field_names, basestring):
+            field_names = field_names.replace(',', ' ').split()
+        field_names = tuple(map(str, field_names))
+        if rename:
+            names = list(field_names)
+            seen = set()
+            for i, name in enumerate(names):
+                if ((not min(c.isalnum() or c == '_' for c in name)
+                        or _iskeyword(name)
+                        or not name or name[0].isdigit()
+                        or name.startswith('_')
+                        or name in seen)):
+                    names[i] = '_%d' % i
+                seen.add(name)
+            field_names = tuple(names)
+        for name in (typename,) + field_names:
+            if not min(c.isalnum() or c == '_' for c in name):
+                raise ValueError('Type names and field names can only contain '
+                                 'alphanumeric characters and underscores: %r'
+                                 % name)
+            if _iskeyword(name):
+                raise ValueError('Type names and field names cannot be a '
+                                 'keyword: %r' % name)
+            if name[0].isdigit():
+                raise ValueError('Type names and field names cannot start '
+                                 'with a number: %r' % name)
+        seen_names = set()
+        for name in field_names:
+            if name.startswith('_') and not rename:
+                raise ValueError(
+                    'Field names cannot start with an underscore: %r' % name)
+            if name in seen_names:
+                raise ValueError('Encountered duplicate field name: %r' % name)
+            seen_names.add(name)
+
+        numfields = len(field_names)
+        argtxt = repr(field_names).replace("'", "")[1:-1]
+        reprtxt = ', '.join('%s=%%r' % name for name in field_names)
+        template = '''class %(typename)s(tuple):
+        '%(typename)s(%(argtxt)s)' \n
+        __slots__ = () \n
+        _fields = %(field_names)r \n
+        def __new__(_cls, %(argtxt)s):
+            return _tuple.__new__(_cls, (%(argtxt)s)) \n
+        @classmethod
+        def _make(cls, iterable, new=tuple.__new__, len=len):
+            'Make a new %(typename)s object from a sequence or iterable'
+            result = new(cls, iterable)
+            if len(result) != %(numfields)d:
+                raise TypeError(
+                    'Expected %(numfields)d arguments, got %%d' %% len(result))
+            return result \n
+        def __repr__(self):
+            return '%(typename)s(%(reprtxt)s)' %% self \n
+        def _asdict(self):
+            'Return a new dict which maps field names to their values'
+            return dict(zip(self._fields, self)) \n
+        def _replace(_self, **kwds):
+            result = _self._make(map(kwds.pop, %(field_names)r, _self))
+            if kwds:
+                raise ValueError(
+                    'Got unexpected field names: %%r' %% kwds.keys())
+            return result \n
+        def __getnewargs__(self):
+            return tuple(self) \n\n''' % locals()
+        for i, name in enumerate(field_names):
+            template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
+        if verbose:
+            sys.stdout.write(template + '\n')
+            sys.stdout.flush()
+
+        namespace = dict(
+            _itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
+            _property=property, _tuple=tuple)
+        try:
+            exec_(template, namespace)
+        except SyntaxError:
+            e = sys.exc_info()[1]
+            raise SyntaxError(e.message + ':\n' + template)
+        result = namespace[typename]
+        try:
+            result.__module__ = _sys._getframe(
+                1).f_globals.get('__name__', '__main__')
+        except (AttributeError, ValueError):
+            pass
+
+        return result
+
+
+# hack to support property getter/setter/deleter on python < 2.6
+# http://docs.python.org/library/functions.html?highlight=property#property
+if hasattr(property, 'setter'):
+    property = property
+else:
+    class property(__builtin__.property):
+        __metaclass__ = type
+
+        def __init__(self, fget, *args, **kwargs):
+            super(property, self).__init__(fget, *args, **kwargs)
+            self.__doc__ = fget.__doc__
+
+        def getter(self, method):
+            return property(method, self.fset, self.fdel)
+
+        def setter(self, method):
+            return property(self.fget, method, self.fdel)
+
+        def deleter(self, method):
+            return property(self.fget, self.fset, method)
+
+
+# py 2.5 collections.defaultdict
+# Taken from:
+# http://code.activestate.com/recipes/523034-emulate-collectionsdefaultdict/
+# Credits: Jason Kirtland
+try:
+    from collections import defaultdict
+except ImportError:
+    class defaultdict(dict):
+        """Dict subclass that calls a factory function to supply
+        missing values:
+        http://docs.python.org/library/collections.html#collections.defaultdict
+        """
+
+        def __init__(self, default_factory=None, *a, **kw):
+            if ((default_factory is not None and
+                    not hasattr(default_factory, '__call__'))):
+                raise TypeError('first argument must be callable')
+            dict.__init__(self, *a, **kw)
+            self.default_factory = default_factory
+
+        def __getitem__(self, key):
+            try:
+                return dict.__getitem__(self, key)
+            except KeyError:
+                return self.__missing__(key)
+
+        def __missing__(self, key):
+            if self.default_factory is None:
+                raise KeyError(key)
+            self[key] = value = self.default_factory()
+            return value
+
+        def __reduce__(self):
+            if self.default_factory is None:
+                args = tuple()
+            else:
+                args = self.default_factory,
+            return type(self), args, None, None, self.items()
+
+        def copy(self):
+            return self.__copy__()
+
+        def __copy__(self):
+            return type(self)(self.default_factory, self)
+
+        def __deepcopy__(self, memo):
+            import copy
+            return type(self)(self.default_factory,
+                              copy.deepcopy(self.items()))
+
+        def __repr__(self):
+            return 'defaultdict(%s, %s)' % (self.default_factory,
+                                            dict.__repr__(self))
+
+
+# py 2.5 functools.wraps
+try:
+    from functools import wraps
+except ImportError:
+    def wraps(original):
+        def inner(fn):
+            for attribute in ['__module__', '__name__', '__doc__']:
+                setattr(fn, attribute, getattr(original, attribute))
+            for attribute in ['__dict__']:
+                if hasattr(fn, attribute):
+                    getattr(fn, attribute).update(getattr(original, attribute))
+                else:
+                    setattr(fn, attribute,
+                            getattr(original, attribute).copy())
+            return fn
+        return inner
+
+
+# py 2.5 functools.update_wrapper
+try:
+    from functools import update_wrapper
+except ImportError:
+    WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
+    WRAPPER_UPDATES = ('__dict__',)
+
+    def update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS,
+                       updated=WRAPPER_UPDATES):
+        """Update a wrapper function to look like the wrapped function, see:
+        http://docs.python.org/library/functools.html#functools.update_wrapper
+        """
+        for attr in assigned:
+            setattr(wrapper, attr, getattr(wrapped, attr))
+        for attr in updated:
+            getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+        return wrapper
+
+
+# py 3.2 functools.lru_cache
+# Taken from: http://code.activestate.com/recipes/578078
+# Credit: Raymond Hettinger
+try:
+    from functools import lru_cache
+except ImportError:
+    try:
+        from threading import RLock
+    except ImportError:
+        from dummy_threading import RLock
+
+    _CacheInfo = namedtuple("CacheInfo",
+                            ["hits", "misses", "maxsize", "currsize"])
+
+    class _HashedSeq(list):
+        __slots__ = 'hashvalue'
+
+        def __init__(self, tup, hash=hash):
+            self[:] = tup
+            self.hashvalue = hash(tup)
+
+        def __hash__(self):
+            return self.hashvalue
+
+    def _make_key(args, kwds, typed,
+                  kwd_mark=(object(), ),
+                  fasttypes=set((int, str, frozenset, type(None))),
+                  sorted=sorted, tuple=tuple, type=type, len=len):
+        key = args
+        if kwds:
+            sorted_items = sorted(kwds.items())
+            key += kwd_mark
+            for item in sorted_items:
+                key += item
+        if typed:
+            key += tuple(type(v) for v in args)
+            if kwds:
+                key += tuple(type(v) for k, v in sorted_items)
+        elif len(key) == 1 and type(key[0]) in fasttypes:
+            return key[0]
+        return _HashedSeq(key)
+
+    def lru_cache(maxsize=100, typed=False):
+        """Least-recently-used cache decorator, see:
+        http://docs.python.org/3/library/functools.html#functools.lru_cache
+        """
+        def decorating_function(user_function):
+            cache = dict()
+            stats = [0, 0]
+            HITS, MISSES = 0, 1
+            make_key = _make_key
+            cache_get = cache.get
+            _len = len
+            lock = RLock()
+            root = []
+            root[:] = [root, root, None, None]
+            nonlocal_root = [root]
+            PREV, NEXT, KEY, RESULT = 0, 1, 2, 3
+            if maxsize == 0:
+                def wrapper(*args, **kwds):
+                    result = user_function(*args, **kwds)
+                    stats[MISSES] += 1
+                    return result
+            elif maxsize is None:
+                def wrapper(*args, **kwds):
+                    key = make_key(args, kwds, typed)
+                    result = cache_get(key, root)
+                    if result is not root:
+                        stats[HITS] += 1
+                        return result
+                    result = user_function(*args, **kwds)
+                    cache[key] = result
+                    stats[MISSES] += 1
+                    return result
+            else:
+                def wrapper(*args, **kwds):
+                    if kwds or typed:
+                        key = make_key(args, kwds, typed)
+                    else:
+                        key = args
+                    lock.acquire()
+                    try:
+                        link = cache_get(key)
+                        if link is not None:
+                            root, = nonlocal_root
+                            link_prev, link_next, key, result = link
+                            link_prev[NEXT] = link_next
+                            link_next[PREV] = link_prev
+                            last = root[PREV]
+                            last[NEXT] = root[PREV] = link
+                            link[PREV] = last
+                            link[NEXT] = root
+                            stats[HITS] += 1
+                            return result
+                    finally:
+                        lock.release()
+                    result = user_function(*args, **kwds)
+                    lock.acquire()
+                    try:
+                        root, = nonlocal_root
+                        if key in cache:
+                            pass
+                        elif _len(cache) >= maxsize:
+                            oldroot = root
+                            oldroot[KEY] = key
+                            oldroot[RESULT] = result
+                            root = nonlocal_root[0] = oldroot[NEXT]
+                            oldkey = root[KEY]
+                            root[KEY] = root[RESULT] = None
+                            del cache[oldkey]
+                            cache[key] = oldroot
+                        else:
+                            last = root[PREV]
+                            link = [last, root, key, result]
+                            last[NEXT] = root[PREV] = cache[key] = link
+                        stats[MISSES] += 1
+                    finally:
+                        lock.release()
+                    return result
+
+            def cache_info():
+                """Report cache statistics"""
+                lock.acquire()
+                try:
+                    return _CacheInfo(stats[HITS], stats[MISSES], maxsize,
+                                      len(cache))
+                finally:
+                    lock.release()
+
+            def cache_clear():
+                """Clear the cache and cache statistics"""
+                lock.acquire()
+                try:
+                    cache.clear()
+                    root = nonlocal_root[0]
+                    root[:] = [root, root, None, None]
+                    stats[:] = [0, 0]
+                finally:
+                    lock.release()
+
+            wrapper.__wrapped__ = user_function
+            wrapper.cache_info = cache_info
+            wrapper.cache_clear = cache_clear
+            return update_wrapper(wrapper, user_function)
+
+        return decorating_function
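+
+    # Illustrative usage sketch (not part of upstream psutil): the
+    # backport mirrors functools.lru_cache from Python 3, so a decorated
+    # function gains cache_info() and cache_clear() attributes:
+    #
+    #     @lru_cache(maxsize=32)
+    #     def squared(x):
+    #         return x * x
+    #
+    #     squared(3); squared(3)
+    #     squared.cache_info()  # hits=1, misses=1, maxsize=32, currsize=1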

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py
new file mode 100644
index 0000000..5663736
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py
@@ -0,0 +1,389 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""FreeBSD platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import conn_tmap, usage_percent
+from psutil._compat import namedtuple, wraps
+import _psutil_bsd as cext
+import _psutil_posix
+
+
+__extra__all__ = []
+
+# --- constants
+
+PROC_STATUSES = {
+    cext.SSTOP: _common.STATUS_STOPPED,
+    cext.SSLEEP: _common.STATUS_SLEEPING,
+    cext.SRUN: _common.STATUS_RUNNING,
+    cext.SIDL: _common.STATUS_IDLE,
+    cext.SWAIT: _common.STATUS_WAITING,
+    cext.SLOCK: _common.STATUS_LOCKED,
+    cext.SZOMB: _common.STATUS_ZOMBIE,
+}
+
+TCP_STATUSES = {
+    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.TCPS_CLOSED: _common.CONN_CLOSE,
+    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.TCPS_LISTEN: _common.CONN_LISTEN,
+    cext.TCPS_CLOSING: _common.CONN_CLOSING,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PAGESIZE = os.sysconf("SC_PAGE_SIZE")
+
+# extend base mem ntuple with BSD-specific memory metrics
+svmem = namedtuple(
+    'svmem', ['total', 'available', 'percent', 'used', 'free',
+              'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])
+scputimes = namedtuple(
+    'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])
+pextmem = namedtuple('pextmem', ['rss', 'vms', 'text', 'data', 'stack'])
+pmmap_grouped = namedtuple(
+    'pmmap_grouped', 'path rss private ref_count shadow_count')
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms path rss private ref_count shadow_count')
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+def virtual_memory():
+    """System virtual memory as a namedutple."""
+    mem = cext.virtual_mem()
+    total, free, active, inactive, wired, cached, buffers, shared = mem
+    avail = inactive + cached + free
+    used = active + wired + cached
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free,
+                 active, inactive, buffers, cached, shared, wired)
+
+
+def swap_memory():
+    """System swap memory as (total, used, free, sin, sout) namedtuple."""
+    total, used, free, sin, sout = [x * PAGESIZE for x in cext.swap_mem()]
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent, sin, sout)
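+
+
+# A quick sanity check of the percent math used above: usage_percent()
+# is a small helper from psutil._common which, as called here, computes
+# used / total * 100 rounded to one decimal; e.g. (illustrative):
+#
+#     usage_percent(512, 2048, _round=1)  # -> 25.0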
+
+
+def cpu_times():
+    """Return system per-CPU times as a named tuple"""
+    user, nice, system, idle, irq = cext.cpu_times()
+    return scputimes(user, nice, system, idle, irq)
+
+
+if hasattr(cext, "per_cpu_times"):
+    def per_cpu_times():
+        """Return system CPU times as a named tuple"""
+        ret = []
+        for cpu_t in cext.per_cpu_times():
+            user, nice, system, idle, irq = cpu_t
+            item = scputimes(user, nice, system, idle, irq)
+            ret.append(item)
+        return ret
+else:
+    # XXX
+    # Ok, this is very dirty.
+    # On FreeBSD < 8 we cannot gather per-cpu information, see:
+    # http://code.google.com/p/psutil/issues/detail?id=226
+    # If the number of CPUs is > 1, on the first call we return single
+    # CPU times to avoid a crash at psutil import time.
+    # Subsequent calls will fail with NotImplementedError.
+    def per_cpu_times():
+        if cpu_count_logical() == 1:
+            return [cpu_times()]
+        if per_cpu_times.__called__:
+            raise NotImplementedError("supported only starting from FreeBSD 8")
+        per_cpu_times.__called__ = True
+        return [cpu_times()]
+
+    per_cpu_times.__called__ = False
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    return cext.cpu_count_logical()
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    # From the C module we'll get an XML string similar to this:
+    # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
+    # We may get None in case "sysctl kern.sched.topology_spec"
+    # is not supported on this BSD version, in which case we'll mimic
+    # os.cpu_count() and return None.
+    s = cext.cpu_count_phys()
+    if s is not None:
+        # get rid of padding chars appended at the end of the string
+        index = s.rfind("</groups>")
+        if index != -1:
+            s = s[:index + 9]
+            if sys.version_info >= (2, 5):
+                import xml.etree.ElementTree as ET
+                root = ET.fromstring(s)
+                return len(root.findall('group/children/group/cpu')) or None
+            else:
+                s = s[s.find('<children>'):]
+                return s.count("<cpu") or None
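+
+    # Illustrative sketch of the XML parsed above; for a (hypothetical)
+    # kern.sched.topology_spec string such as
+    #
+    #     <groups>
+    #      <group level="1">
+    #       <children>
+    #        <group level="2"><cpu count="2">0, 1</cpu></group>
+    #        <group level="2"><cpu count="2">2, 3</cpu></group>
+    #       </children>
+    #      </group>
+    #     </groups>
+    #
+    # findall('group/children/group/cpu') matches the two inner <cpu>
+    # nodes, so this function would return 2.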
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def disk_partitions(all=False):
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            if not os.path.isabs(device) or not os.path.exists(device):
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+def users():
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp = item
+        if tty == '~':
+            continue  # reboot or shutdown
+        nt = _common.suser(user, tty or None, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def net_connections(kind):
+    if kind not in _common.conn_tmap:
+        raise ValueError("invalid %r kind argument; choose between %s"
+                        % (kind, ', '.join([repr(x) for x in conn_tmap])))
+    families, types = conn_tmap[kind]
+    ret = []
+    rawlist = cext.net_connections()
+    for item in rawlist:
+        fd, fam, type, laddr, raddr, status, pid = item
+        # TODO: apply filter at C level
+        if fam in families and type in types:
+            status = TCP_STATUSES[status]
+            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
+            ret.append(nt)
+    return ret
+
+
+pids = cext.pids
+pid_exists = _psposix.pid_exists
+disk_usage = _psposix.disk_usage
+net_io_counters = cext.net_io_counters
+disk_io_counters = cext.disk_io_counters
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError exceptions into
+    NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except OSError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            err = sys.exc_info()[1]
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        return cext.proc_name(self.pid)
+
+    @wrap_exceptions
+    def exe(self):
+        return cext.proc_exe(self.pid)
+
+    @wrap_exceptions
+    def cmdline(self):
+        return cext.proc_cmdline(self.pid)
+
+    @wrap_exceptions
+    def terminal(self):
+        tty_nr = cext.proc_tty_nr(self.pid)
+        tmap = _psposix._get_terminal_map()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
+    @wrap_exceptions
+    def ppid(self):
+        return cext.proc_ppid(self.pid)
+
+    @wrap_exceptions
+    def uids(self):
+        real, effective, saved = cext.proc_uids(self.pid)
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        real, effective, saved = cext.proc_gids(self.pid)
+        return _common.pgids(real, effective, saved)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        user, system = cext.proc_cpu_times(self.pid)
+        return _common.pcputimes(user, system)
+
+    @wrap_exceptions
+    def memory_info(self):
+        rss, vms = cext.proc_memory_info(self.pid)[:2]
+        return _common.pmem(rss, vms)
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        return pextmem(*cext.proc_memory_info(self.pid))
+
+    @wrap_exceptions
+    def create_time(self):
+        return cext.proc_create_time(self.pid)
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_num_threads(self.pid)
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
+
+    @wrap_exceptions
+    def threads(self):
+        rawlist = cext.proc_threads(self.pid)
+        retlist = []
+        for thread_id, utime, stime in rawlist:
+            ntuple = _common.pthread(thread_id, utime, stime)
+            retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        if kind not in conn_tmap:
+            raise ValueError("invalid %r kind argument; choose between %s"
+                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
+        families, types = conn_tmap[kind]
+        rawlist = cext.proc_connections(self.pid, families, types)
+        ret = []
+        for item in rawlist:
+            fd, fam, type, laddr, raddr, status = item
+            status = TCP_STATUSES[status]
+            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
+            ret.append(nt)
+        return ret
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def nice_get(self):
+        return _psutil_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def status(self):
+        code = cext.proc_status(self.pid)
+        if code in PROC_STATUSES:
+            return PROC_STATUSES[code]
+        # XXX is this legit? will we even ever get here?
+        return "?"
+
+    @wrap_exceptions
+    def io_counters(self):
+        rc, wc, rb, wb = cext.proc_io_counters(self.pid)
+        return _common.pio(rc, wc, rb, wb)
+
+    nt_mmap_grouped = namedtuple(
+        'mmap', 'path rss private ref_count shadow_count')
+    nt_mmap_ext = namedtuple(
+        'mmap', 'addr perms path rss private ref_count shadow_count')
+
+    # FreeBSD < 8 does not support functions based on kinfo_getfile()
+    # and kinfo_getvmmap()
+    if hasattr(cext, 'proc_open_files'):
+
+        @wrap_exceptions
+        def open_files(self):
+            """Return files opened by process as a list of namedtuples."""
+            rawlist = cext.proc_open_files(self.pid)
+            return [_common.popenfile(path, fd) for path, fd in rawlist]
+
+        @wrap_exceptions
+        def cwd(self):
+            """Return process current working directory."""
+            # sometimes we get an empty string, in which case we turn
+            # it into None
+            return cext.proc_cwd(self.pid) or None
+
+        @wrap_exceptions
+        def memory_maps(self):
+            return cext.proc_memory_maps(self.pid)
+
+        @wrap_exceptions
+        def num_fds(self):
+            """Return the number of file descriptors opened by this process."""
+            return cext.proc_num_fds(self.pid)
+
+    else:
+        def _not_implemented(self):
+            raise NotImplementedError("supported only starting from FreeBSD 8")
+
+        open_files = _not_implemented
+        cwd = _not_implemented
+        memory_maps = _not_implemented
+        num_fds = _not_implemented

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py
new file mode 100644
index 0000000..d20b267
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py
@@ -0,0 +1,1225 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Linux platform implementation."""
+
+from __future__ import division
+
+import base64
+import errno
+import os
+import re
+import socket
+import struct
+import sys
+import warnings
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import (isfile_strict, usage_percent, deprecated)
+from psutil._compat import PY3, xrange, namedtuple, wraps, b, defaultdict
+import _psutil_linux as cext
+import _psutil_posix
+
+
+__extra__all__ = [
+    # io prio constants
+    "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
+    "IOPRIO_CLASS_IDLE",
+    # connection status constants
+    "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
+    "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
+    "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
+    # other
+    "phymem_buffers", "cached_phymem"]
+
+
+# --- constants
+
+HAS_PRLIMIT = hasattr(cext, "linux_prlimit")
+
+# RLIMIT_* constants, not guaranteed to be present on all kernels
+if HAS_PRLIMIT:
+    for name in dir(cext):
+        if name.startswith('RLIM'):
+            __extra__all__.append(name)
+
+# Number of clock ticks per second
+CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
+PAGESIZE = os.sysconf("SC_PAGE_SIZE")
+BOOT_TIME = None  # set later
+DEFAULT_ENCODING = sys.getdefaultencoding()
+
+# ioprio_* constants http://linux.die.net/man/2/ioprio_get
+IOPRIO_CLASS_NONE = 0
+IOPRIO_CLASS_RT = 1
+IOPRIO_CLASS_BE = 2
+IOPRIO_CLASS_IDLE = 3
+
+# taken from /fs/proc/array.c
+PROC_STATUSES = {
+    "R": _common.STATUS_RUNNING,
+    "S": _common.STATUS_SLEEPING,
+    "D": _common.STATUS_DISK_SLEEP,
+    "T": _common.STATUS_STOPPED,
+    "t": _common.STATUS_TRACING_STOP,
+    "Z": _common.STATUS_ZOMBIE,
+    "X": _common.STATUS_DEAD,
+    "x": _common.STATUS_DEAD,
+    "K": _common.STATUS_WAKE_KILL,
+    "W": _common.STATUS_WAKING
+}
+
+# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
+TCP_STATUSES = {
+    "01": _common.CONN_ESTABLISHED,
+    "02": _common.CONN_SYN_SENT,
+    "03": _common.CONN_SYN_RECV,
+    "04": _common.CONN_FIN_WAIT1,
+    "05": _common.CONN_FIN_WAIT2,
+    "06": _common.CONN_TIME_WAIT,
+    "07": _common.CONN_CLOSE,
+    "08": _common.CONN_CLOSE_WAIT,
+    "09": _common.CONN_LAST_ACK,
+    "0A": _common.CONN_LISTEN,
+    "0B": _common.CONN_CLOSING
+}
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+# --- named tuples
+
+def _get_cputimes_fields():
+    """Return a namedtuple of variable fields depending on the
+    CPU times available on this Linux kernel version which may be:
+    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
+     [guest_nice]]])
+    """
+    f = open('/proc/stat', 'rb')
+    try:
+        values = f.readline().split()[1:]
+    finally:
+        f.close()
+    fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
+    vlen = len(values)
+    if vlen >= 8:
+        # Linux >= 2.6.11
+        fields.append('steal')
+    if vlen >= 9:
+        # Linux >= 2.6.24
+        fields.append('guest')
+    if vlen >= 10:
+        # Linux >= 3.2.0
+        fields.append('guest_nice')
+    return fields
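+
+
+# For reference, a (hypothetical) first line of /proc/stat on a
+# Linux >= 3.2.0 kernel looks like:
+#
+#     cpu  4705 356 584 3699176 23060 17 13 25 0 0
+#
+# ten values after the "cpu" label, hence all ten field names above up
+# to and including 'guest_nice'.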
+
+
+scputimes = namedtuple('scputimes', _get_cputimes_fields())
+
+svmem = namedtuple(
+    'svmem', ['total', 'available', 'percent', 'used', 'free',
+              'active', 'inactive', 'buffers', 'cached'])
+
+pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')
+
+pmmap_grouped = namedtuple(
+    'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',
+                      'shared_dirty', 'private_clean', 'private_dirty',
+                      'referenced', 'anonymous', 'swap'])
+
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+
+# --- system memory
+
+def virtual_memory():
+    total, free, buffers, shared, _, _ = cext.linux_sysinfo()
+    cached = active = inactive = None
+    f = open('/proc/meminfo', 'rb')
+    CACHED, ACTIVE, INACTIVE = b("Cached:"), b("Active:"), b("Inactive:")
+    try:
+        for line in f:
+            if line.startswith(CACHED):
+                cached = int(line.split()[1]) * 1024
+            elif line.startswith(ACTIVE):
+                active = int(line.split()[1]) * 1024
+            elif line.startswith(INACTIVE):
+                inactive = int(line.split()[1]) * 1024
+            if (cached is not None
+                    and active is not None
+                    and inactive is not None):
+                break
+        else:
+            # we might get here when dealing with exotic Linux flavors, see:
+            # http://code.google.com/p/psutil/issues/detail?id=313
+            msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
+                  "be determined and were set to 0"
+            warnings.warn(msg, RuntimeWarning)
+            cached = active = inactive = 0
+    finally:
+        f.close()
+    avail = free + buffers + cached
+    used = total - free
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free,
+                 active, inactive, buffers, cached)
+
+
+def swap_memory():
+    _, _, _, _, total, free = cext.linux_sysinfo()
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    # get pgin/pgouts
+    f = open("/proc/vmstat", "rb")
+    SIN, SOUT = b('pswpin'), b('pswpout')
+    sin = sout = None
+    try:
+        for line in f:
+            # values are expressed in pages of 4 kB each; we want bytes
+            if line.startswith(SIN):
+                sin = int(line.split(b(' '))[1]) * 4 * 1024
+            elif line.startswith(SOUT):
+                sout = int(line.split(b(' '))[1]) * 4 * 1024
+            if sin is not None and sout is not None:
+                break
+        else:
+            # we might get here when dealing with exotic Linux flavors, see:
+            # http://code.google.com/p/psutil/issues/detail?id=313
+            msg = "'sin' and 'sout' swap memory stats couldn't " \
+                  "be determined and were set to 0"
+            warnings.warn(msg, RuntimeWarning)
+            sin = sout = 0
+    finally:
+        f.close()
+    return _common.sswap(total, used, free, percent, sin, sout)
+
+
+@deprecated(replacement='psutil.virtual_memory().cached')
+def cached_phymem():
+    return virtual_memory().cached
+
+
+@deprecated(replacement='psutil.virtual_memory().buffers')
+def phymem_buffers():
+    return virtual_memory().buffers
+
+
+# --- CPUs
+
+def cpu_times():
+    """Return a named tuple representing the following system-wide
+    CPU times:
+    (user, nice, system, idle, iowait, irq, softirq [steal, [guest,
+     [guest_nice]]])
+    Last 3 fields may not be available on all Linux kernel versions.
+    """
+    f = open('/proc/stat', 'rb')
+    try:
+        values = f.readline().split()
+    finally:
+        f.close()
+    fields = values[1:len(scputimes._fields) + 1]
+    fields = [float(x) / CLOCK_TICKS for x in fields]
+    return scputimes(*fields)
+
+
+def per_cpu_times():
+    """Return a list of namedtuple representing the CPU times
+    for every CPU available on the system.
+    """
+    cpus = []
+    f = open('/proc/stat', 'rb')
+    try:
+        # get rid of the first line which refers to system wide CPU stats
+        f.readline()
+        CPU = b('cpu')
+        for line in f:
+            if line.startswith(CPU):
+                values = line.split()
+                fields = values[1:len(scputimes._fields) + 1]
+                fields = [float(x) / CLOCK_TICKS for x in fields]
+                entry = scputimes(*fields)
+                cpus.append(entry)
+        return cpus
+    finally:
+        f.close()
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    try:
+        return os.sysconf("SC_NPROCESSORS_ONLN")
+    except ValueError:
+        # as a second fallback we try to parse /proc/cpuinfo
+        num = 0
+        f = open('/proc/cpuinfo', 'rb')
+        try:
+            lines = f.readlines()
+        finally:
+            f.close()
+        PROCESSOR = b('processor')
+        for line in lines:
+            if line.lower().startswith(PROCESSOR):
+                num += 1
+
+    # unknown format (e.g. armel/sparc architectures), see:
+    # http://code.google.com/p/psutil/issues/detail?id=200
+    # try to parse /proc/stat as a last resort
+    if num == 0:
+        f = open('/proc/stat', 'rt')
+        try:
+            lines = f.readlines()
+        finally:
+            f.close()
+        search = re.compile(r'cpu\d')
+        for line in lines:
+            line = line.split(' ')[0]
+            if search.match(line):
+                num += 1
+
+    if num == 0:
+        # mimic os.cpu_count()
+        return None
+    return num
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    f = open('/proc/cpuinfo', 'rb')
+    try:
+        lines = f.readlines()
+    finally:
+        f.close()
+    found = set()
+    PHYSICAL_ID = b('physical id')
+    for line in lines:
+        if line.lower().startswith(PHYSICAL_ID):
+            found.add(line.strip())
+    if found:
+        return len(found)
+    else:
+        return None  # mimic os.cpu_count()
+
+
+# --- other system functions
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp, user_process = item
+        # note: the underlying C function includes entries about
+        # system boot, run level and others.  We might want
+        # to use them in the future.
+        if not user_process:
+            continue
+        if hostname == ':0.0':
+            hostname = 'localhost'
+        nt = _common.suser(user, tty or None, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def boot_time():
+    """Return the system boot time expressed in seconds since the epoch."""
+    global BOOT_TIME
+    f = open('/proc/stat', 'rb')
+    try:
+        BTIME = b('btime')
+        for line in f:
+            if line.startswith(BTIME):
+                ret = float(line.strip().split()[1])
+                BOOT_TIME = ret
+                return ret
+        raise RuntimeError("line 'btime' not found")
+    finally:
+        f.close()
+
+
+# --- processes
+
+def pids():
+    """Returns a list of PIDs currently running on the system."""
+    return [int(x) for x in os.listdir(b('/proc')) if x.isdigit()]
+
+
+def pid_exists(pid):
+    """Check For the existence of a unix pid."""
+    return _psposix.pid_exists(pid)
+
+
+# --- network
+
+class Connections:
+    """A wrapper on top of /proc/net/* files, retrieving per-process
+    and system-wide open connections (TCP, UDP, UNIX) similarly to
+    "netstat -an".
+
+    Note: in case of UNIX sockets we're only able to determine the
+    local endpoint/path, not the one it's connected to.
+    According to [1] it would be possible, but not easy.
+
+    [1] http://serverfault.com/a/417946
+    """
+
+    def __init__(self):
+        tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
+        tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
+        udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
+        udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
+        unix = ("unix", socket.AF_UNIX, None)
+        self.tmap = {
+            "all": (tcp4, tcp6, udp4, udp6, unix),
+            "tcp": (tcp4, tcp6),
+            "tcp4": (tcp4,),
+            "tcp6": (tcp6,),
+            "udp": (udp4, udp6),
+            "udp4": (udp4,),
+            "udp6": (udp6,),
+            "unix": (unix,),
+            "inet": (tcp4, tcp6, udp4, udp6),
+            "inet4": (tcp4, udp4),
+            "inet6": (tcp6, udp6),
+        }
+
+    def get_proc_inodes(self, pid):
+        inodes = defaultdict(list)
+        for fd in os.listdir("/proc/%s/fd" % pid):
+            try:
+                inode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
+            except OSError:
+                # the fd or the whole process disappeared in the
+                # meantime; just skip it
+                continue
+            else:
+                if inode.startswith('socket:['):
+                    # the process is using a socket
+                    inode = inode[8:][:-1]
+                    inodes[inode].append((pid, int(fd)))
+        return inodes
+
+    def get_all_inodes(self):
+        inodes = {}
+        for pid in pids():
+            try:
+                inodes.update(self.get_proc_inodes(pid))
+            except OSError:
+                # os.listdir() will raise a lot of access denied
+                # exceptions for an unprivileged user; that's fine,
+                # as we'll just end up returning a connection with PID
+                # and fd set to None anyway.
+                # Both netstat -an and lsof do the same, so it's
+                # unlikely we can do any better.
+                # ENOENT just means a PID disappeared on us.
+                err = sys.exc_info()[1]
+                if err.errno not in (errno.ENOENT, errno.EPERM, errno.EACCES):
+                    raise
+        return inodes
+
+    def decode_address(self, addr, family):
+        """Accept an "ip:port" address as displayed in /proc/net/*
+        and convert it into a human readable form, like:
+
+        "0500000A:0016" -> ("10.0.0.5", 22)
+        "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
+
+        The IP address portion is a four-byte hexadecimal number in
+        the host's byte order; on little endian hosts the least
+        significant byte comes first, so the bytes have to be reversed
+        to obtain the IP address.
+        The port is represented as a two-byte hexadecimal number.
+
+        Reference:
+        http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
+        """
+        ip, port = addr.split(':')
+        port = int(port, 16)
+        if PY3:
+            ip = ip.encode('ascii')
+        # this usually refers to a local socket in listen mode with
+        # no end-points connected
+        if not port:
+            return ()
+        if family == socket.AF_INET:
+            # see: http://code.google.com/p/psutil/issues/detail?id=201
+            if sys.byteorder == 'little':
+                ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
+            else:
+                ip = socket.inet_ntop(family, base64.b16decode(ip))
+        else:  # IPv6
+            # old version - let's keep it, just in case...
+            # ip = ip.decode('hex')
+            # return socket.inet_ntop(socket.AF_INET6,
+            #          ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
+            ip = base64.b16decode(ip)
+            # see: http://code.google.com/p/psutil/issues/detail?id=201
+            if sys.byteorder == 'little':
+                ip = socket.inet_ntop(
+                    socket.AF_INET6,
+                    struct.pack('>4I', *struct.unpack('<4I', ip)))
+            else:
+                ip = socket.inet_ntop(
+                    socket.AF_INET6,
+                    struct.pack('<4I', *struct.unpack('<4I', ip)))
+        return (ip, port)
+
+    def process_inet(self, file, family, type_, inodes, filter_pid=None):
+        """Parse /proc/net/tcp* and /proc/net/udp* files."""
+        if file.endswith('6') and not os.path.exists(file):
+            # IPv6 not supported
+            return
+        f = open(file, 'rt')
+        try:
+            f.readline()  # skip the first line
+            for line in f:
+                _, laddr, raddr, status, _, _, _, _, _, inode = \
+                    line.split()[:10]
+                if inode in inodes:
+                    # We assume inet sockets are unique, so we error
+                    # out if there are multiple references to the
+                    # same inode. We won't do this for UNIX sockets.
+                    if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
+                        raise ValueError("ambiguous inode with multiple "
+                                         "PID references")
+                    pid, fd = inodes[inode][0]
+                else:
+                    pid, fd = None, -1
+                if filter_pid is not None and filter_pid != pid:
+                    continue
+                else:
+                    if type_ == socket.SOCK_STREAM:
+                        status = TCP_STATUSES[status]
+                    else:
+                        status = _common.CONN_NONE
+                    laddr = self.decode_address(laddr, family)
+                    raddr = self.decode_address(raddr, family)
+                    yield (fd, family, type_, laddr, raddr, status, pid)
+        finally:
+            f.close()
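+
+    # For reference, a (hypothetical) data line of /proc/net/tcp looks
+    # like:
+    #
+    #   0: 0100007F:0CEA 00000000:0000 0A 00000000:00000000 00:00000000
+    #      00000000 1000 0 12345 ...
+    #
+    # the slice above picks laddr ("0100007F:0CEA", i.e. 127.0.0.1:3306
+    # on a little endian host), raddr, the hex status ("0A" maps to
+    # CONN_LISTEN via TCP_STATUSES) and the inode ("12345") used to map
+    # the socket back to a (pid, fd) pair.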
+
+    def process_unix(self, file, family, inodes, filter_pid=None):
+        """Parse /proc/net/unix files."""
+        f = open(file, 'rt')
+        try:
+            f.readline()  # skip the first line
+            for line in f:
+                tokens = line.split()
+                _, _, _, _, type_, _, inode = tokens[0:7]
+                if inode in inodes:
+                    # With UNIX sockets we can have a single inode
+                    # referencing many file descriptors.
+                    pairs = inodes[inode]
+                else:
+                    pairs = [(None, -1)]
+                for pid, fd in pairs:
+                    if filter_pid is not None and filter_pid != pid:
+                        continue
+                    else:
+                        if len(tokens) == 8:
+                            path = tokens[-1]
+                        else:
+                            path = ""
+                        type_ = int(type_)
+                        raddr = None
+                        status = _common.CONN_NONE
+                        yield (fd, family, type_, path, raddr, status, pid)
+        finally:
+            f.close()
+
+    def retrieve(self, kind, pid=None):
+        if kind not in self.tmap:
+            raise ValueError("invalid %r kind argument; choose between %s"
+                             % (kind, ', '.join([repr(x) for x in self.tmap])))
+        if pid is not None:
+            inodes = self.get_proc_inodes(pid)
+            if not inodes:
+                # no connections for this process
+                return []
+        else:
+            inodes = self.get_all_inodes()
+        ret = []
+        for f, family, type_ in self.tmap[kind]:
+            if family in (socket.AF_INET, socket.AF_INET6):
+                ls = self.process_inet(
+                    "/proc/net/%s" % f, family, type_, inodes, filter_pid=pid)
+            else:
+                ls = self.process_unix(
+                    "/proc/net/%s" % f, family, inodes, filter_pid=pid)
+            for fd, family, type_, laddr, raddr, status, bound_pid in ls:
+                if pid:
+                    conn = _common.pconn(fd, family, type_, laddr, raddr,
+                                         status)
+                else:
+                    conn = _common.sconn(fd, family, type_, laddr, raddr,
+                                         status, bound_pid)
+                ret.append(conn)
+        return ret
+
+
+_connections = Connections()
+
+
+def net_connections(kind='inet'):
+    """Return system-wide open connections."""
+    return _connections.retrieve(kind)
+
+
+def net_io_counters():
+    """Return network I/O statistics for every network interface
+    installed on the system as a dict of raw tuples.
+    """
+    f = open("/proc/net/dev", "rt")
+    try:
+        lines = f.readlines()
+    finally:
+        f.close()
+
+    retdict = {}
+    for line in lines[2:]:
+        colon = line.rfind(':')
+        assert colon > 0, repr(line)
+        name = line[:colon].strip()
+        fields = line[colon + 1:].strip().split()
+        bytes_recv = int(fields[0])
+        packets_recv = int(fields[1])
+        errin = int(fields[2])
+        dropin = int(fields[3])
+        bytes_sent = int(fields[8])
+        packets_sent = int(fields[9])
+        errout = int(fields[10])
+        dropout = int(fields[11])
+        retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
+                         errin, errout, dropin, dropout)
+    return retdict
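+
+
+# For reference, a (hypothetical) data line of /proc/net/dev looks like:
+#
+#   eth0: 6514269 26559 2 3 0 0 0 0 1228265 13630 4 5 0 0 0 0
+#
+# everything left of the colon is the interface name; fields 0-3 after
+# it are bytes/packets/errs/drop for the receive direction and fields
+# 8-11 the same counters for the transmit direction, matching the tuple
+# built above.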
+
+
+# --- disks
+
+def disk_io_counters():
+    """Return disk I/O statistics for every disk installed on the
+    system as a dict of raw tuples.
+    """
+    # man iostat states that sectors are equivalent to blocks and have
+    # a size of 512 bytes since the 2.4 kernels. This value is needed
+    # to calculate the amount of disk I/O in bytes.
+    SECTOR_SIZE = 512
+
+    # determine partitions we want to look for
+    partitions = []
+    f = open("/proc/partitions", "rt")
+    try:
+        lines = f.readlines()[2:]
+    finally:
+        f.close()
+    for line in reversed(lines):
+        _, _, _, name = line.split()
+        if name[-1].isdigit():
+            # we're dealing with a partition (e.g. 'sda1'); 'sda' will
+            # also be around but we want to omit it
+            partitions.append(name)
+        else:
+            if not partitions or not partitions[-1].startswith(name):
+                # we're dealing with a disk entity for which no
+                # partitions have been defined (e.g. 'sda' but
+                # 'sda1' was not around), see:
+                # http://code.google.com/p/psutil/issues/detail?id=338
+                partitions.append(name)
+    #
+    retdict = {}
+    f = open("/proc/diskstats", "rt")
+    try:
+        lines = f.readlines()
+    finally:
+        f.close()
+    for line in lines:
+        # http://www.mjmwired.net/kernel/Documentation/iostats.txt
+        _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
+            line.split()[:11]
+        if name in partitions:
+            rbytes = int(rbytes) * SECTOR_SIZE
+            wbytes = int(wbytes) * SECTOR_SIZE
+            reads = int(reads)
+            writes = int(writes)
+            rtime = int(rtime)
+            wtime = int(wtime)
+            retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
+    return retdict
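+
+
+# For reference, a (hypothetical) /proc/diskstats line looks like:
+#
+#   8 0 sda 5353 2445 244968 11920 3290 2350 59048 13992 0 7380 25900
+#
+# the slice above picks the name ('sda'), reads (5353), sectors read
+# (244968, multiplied by the 512 byte sector size to get bytes), time
+# spent reading in ms (11920), and the corresponding write counters.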
+
+
+def disk_partitions(all=False):
+    """Return mounted disk partitions as a list of nameduples"""
+    phydevs = []
+    f = open("/proc/filesystems", "r")
+    try:
+        for line in f:
+            if not line.startswith("nodev"):
+                phydevs.append(line.strip())
+    finally:
+        f.close()
+
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            if device == '' or fstype not in phydevs:
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+disk_usage = _psposix.disk_usage
+
+
+# --- decorators
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError and IOError exceptions
+    into NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except EnvironmentError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            # ENOENT (no such file or directory) gets raised on open().
+            # ESRCH (no such process) can get raised on read() if
+            # process is gone in meantime.
+            err = sys.exc_info()[1]
+            if err.errno in (errno.ENOENT, errno.ESRCH):
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Linux process implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        fname = "/proc/%s/stat" % self.pid
+        if PY3:
+            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
+        else:
+            f = open(fname, "rt")
+        try:
+            name = f.read().split(' ')[1].replace('(', '').replace(')', '')
+        finally:
+            f.close()
+        # XXX - gets changed later and probably needs refactoring
+        return name
+
+    def exe(self):
+        try:
+            exe = os.readlink("/proc/%s/exe" % self.pid)
+        except (OSError, IOError):
+            err = sys.exc_info()[1]
+            if err.errno == errno.ENOENT:
+                # no such file error; might be raised also if the
+                # path actually exists for system processes with
+                # low pids (about 0-20)
+                if os.path.lexists("/proc/%s" % self.pid):
+                    return ""
+                else:
+                    # ok, it is a process which has gone away
+                    raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+
+        # readlink() might return paths containing null bytes ('\x00').
+        # Certain names have ' (deleted)' appended. Usually this is
+        # bogus as the file actually exists. Either way that's not
+        # important as we don't want to discriminate executables which
+        # have been deleted.
+        exe = exe.split('\x00')[0]
+        if exe.endswith(' (deleted)') and not os.path.exists(exe):
+            exe = exe[:-10]
+        return exe
+
+    @wrap_exceptions
+    def cmdline(self):
+        fname = "/proc/%s/cmdline" % self.pid
+        if PY3:
+            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
+        else:
+            f = open(fname, "rt")
+        try:
+            # return the args as a list
+            return [x for x in f.read().split('\x00') if x]
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def terminal(self):
+        tmap = _psposix._get_terminal_map()
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            tty_nr = int(f.read().split(b(' '))[6])
+        finally:
+            f.close()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
+    if os.path.exists('/proc/%s/io' % os.getpid()):
+        @wrap_exceptions
+        def io_counters(self):
+            fname = "/proc/%s/io" % self.pid
+            f = open(fname, 'rb')
+            SYSCR, SYSCW = b("syscr"), b("syscw")
+            READ_BYTES, WRITE_BYTES = b("read_bytes"), b("write_bytes")
+            try:
+                rcount = wcount = rbytes = wbytes = None
+                for line in f:
+                    if rcount is None and line.startswith(SYSCR):
+                        rcount = int(line.split()[1])
+                    elif wcount is None and line.startswith(SYSCW):
+                        wcount = int(line.split()[1])
+                    elif rbytes is None and line.startswith(READ_BYTES):
+                        rbytes = int(line.split()[1])
+                    elif wbytes is None and line.startswith(WRITE_BYTES):
+                        wbytes = int(line.split()[1])
+                for x in (rcount, wcount, rbytes, wbytes):
+                    if x is None:
+                        raise NotImplementedError(
+                            "couldn't read all necessary info from %r" % fname)
+                return _common.pio(rcount, wcount, rbytes, wbytes)
+            finally:
+                f.close()
+    else:
+        def io_counters(self):
+            raise NotImplementedError("couldn't find /proc/%s/io (kernel "
+                                      "too old?)" % self.pid)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            st = f.read().strip()
+        finally:
+            f.close()
+        # ignore the first two values ("pid (exe)")
+        st = st[st.find(b(')')) + 2:]
+        values = st.split(b(' '))
+        utime = float(values[11]) / CLOCK_TICKS
+        stime = float(values[12]) / CLOCK_TICKS
+        return _common.pcputimes(utime, stime)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def create_time(self):
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            st = f.read().strip()
+        finally:
+            f.close()
+        # ignore the first two values ("pid (exe)")
+        st = st[st.rfind(b(')')) + 2:]
+        values = st.split(b(' '))
+        # According to proc(5), starttime (here values[19], after the
+        # first two fields were stripped) is expressed in jiffies
+        # (clock ticks). We divide it by CLOCK_TICKS and add the boot
+        # time to obtain seconds since the epoch, in UTC.
+        # Also use the cached BOOT_TIME value if available.
+        bt = BOOT_TIME or boot_time()
+        return (float(values[19]) / CLOCK_TICKS) + bt
+
+    @wrap_exceptions
+    def memory_info(self):
+        f = open("/proc/%s/statm" % self.pid, 'rb')
+        try:
+            vms, rss = f.readline().split()[:2]
+            return _common.pmem(int(rss) * PAGESIZE,
+                                int(vms) * PAGESIZE)
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        #  ============================================================
+        # | FIELD  | DESCRIPTION                         | AKA  | TOP  |
+        #  ============================================================
+        # | rss    | resident set size                   |      | RES  |
+        # | vms    | total program size                  | size | VIRT |
+        # | shared | shared pages (from shared mappings) |      | SHR  |
+        # | text   | text ('code')                       | trs  | CODE |
+        # | lib    | library (unused in Linux 2.6)       | lrs  |      |
+        # | data   | data + stack                        | drs  | DATA |
+        # | dirty  | dirty pages (unused in Linux 2.6)   | dt   |      |
+        #  ============================================================
+        f = open("/proc/%s/statm" % self.pid, "rb")
+        try:
+            vms, rss, shared, text, lib, data, dirty = \
+                [int(x) * PAGESIZE for x in f.readline().split()[:7]]
+        finally:
+            f.close()
+        return pextmem(rss, vms, shared, text, lib, data, dirty)
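+
+    # For reference, a (hypothetical) /proc/[pid]/statm line such as
+    # "11968 1435 891 274 0 1236 0" lists vms, rss, shared, text, lib,
+    # data and dirty expressed in pages; each value is multiplied by
+    # PAGESIZE above in order to obtain bytes.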
+
+    if os.path.exists('/proc/%s/smaps' % os.getpid()):
+        def memory_maps(self):
+            """Return process's mapped memory regions as a list of nameduples.
+            Fields are explained in 'man proc'; here is an updated (Apr 2012)
+            version: http://goo.gl/fmebo
+            """
+            f = None
+            try:
+                f = open("/proc/%s/smaps" % self.pid, "rt")
+                first_line = f.readline()
+                current_block = [first_line]
+
+                def get_blocks():
+                    data = {}
+                    for line in f:
+                        fields = line.split(None, 5)
+                        if not fields[0].endswith(':'):
+                            # new block section
+                            yield (current_block.pop(), data)
+                            current_block.append(line)
+                        else:
+                            try:
+                                data[fields[0]] = int(fields[1]) * 1024
+                            except ValueError:
+                                if fields[0].startswith('VmFlags:'):
+                                    # see issue #369
+                                    continue
+                                else:
+                                    raise ValueError("don't know how to inte"
+                                                     "rpret line %r" % line)
+                    yield (current_block.pop(), data)
+
+                if first_line:  # smaps file can be empty
+                    for header, data in get_blocks():
+                        hfields = header.split(None, 5)
+                        try:
+                            addr, perms, offset, dev, inode, path = hfields
+                        except ValueError:
+                            addr, perms, offset, dev, inode, path = \
+                                hfields + ['']
+                        if not path:
+                            path = '[anon]'
+                        else:
+                            path = path.strip()
+                        yield (addr, perms, path,
+                               data['Rss:'],
+                               data.get('Size:', 0),
+                               data.get('Pss:', 0),
+                               data.get('Shared_Clean:', 0),
+                               data.get('Shared_Dirty:', 0),
+                               data.get('Private_Clean:', 0),
+                               data.get('Private_Dirty:', 0),
+                               data.get('Referenced:', 0),
+                               data.get('Anonymous:', 0),
+                               data.get('Swap:', 0))
+                f.close()
+            except EnvironmentError:
+                # XXX - Can't use wrap_exceptions decorator as we're
+                # returning a generator;  this probably needs some
+                # refactoring in order to avoid this code duplication.
+                if f is not None:
+                    f.close()
+                err = sys.exc_info()[1]
+                if err.errno in (errno.ENOENT, errno.ESRCH):
+                    raise NoSuchProcess(self.pid, self._name)
+                if err.errno in (errno.EPERM, errno.EACCES):
+                    raise AccessDenied(self.pid, self._name)
+                raise
+            except:
+                if f is not None:
+                    f.close()
+                raise
+            f.close()
+
+    else:
+        def memory_maps(self):
+            msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or "  \
+                  "CONFIG_MMU kernel configuration option is not enabled" \
+                  % self.pid
+            raise NotImplementedError(msg)
+
+    @wrap_exceptions
+    def cwd(self):
+        # readlink() might return paths containing null bytes causing
+        # problems when used with other fs-related functions (os.*,
+        # open(), ...)
+        path = os.readlink("/proc/%s/cwd" % self.pid)
+        return path.replace('\x00', '')
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        vol = unvol = None
+        f = open("/proc/%s/status" % self.pid, "rb")
+        VOLUNTARY = b("voluntary_ctxt_switches")
+        NON_VOLUNTARY = b("nonvoluntary_ctxt_switches")
+        try:
+            for line in f:
+                if line.startswith(VOLUNTARY):
+                    vol = int(line.split()[1])
+                elif line.startswith(NON_VOLUNTARY):
+                    unvol = int(line.split()[1])
+                if vol is not None and unvol is not None:
+                    return _common.pctxsw(vol, unvol)
+            raise NotImplementedError(
+                "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
+                "fields were not found in /proc/%s/status; the kernel is "
+                "probably older than 2.6.23" % self.pid)
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def num_threads(self):
+        f = open("/proc/%s/status" % self.pid, "rb")
+        try:
+            THREADS = b("Threads:")
+            for line in f:
+                if line.startswith(THREADS):
+                    return int(line.split()[1])
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def threads(self):
+        thread_ids = os.listdir("/proc/%s/task" % self.pid)
+        thread_ids.sort()
+        retlist = []
+        hit_enoent = False
+        for thread_id in thread_ids:
+            try:
+                f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id), 'rb')
+            except EnvironmentError:
+                err = sys.exc_info()[1]
+                if err.errno == errno.ENOENT:
+                    # no such file or directory; it means thread
+                    # disappeared on us
+                    hit_enoent = True
+                    continue
+                raise
+            try:
+                st = f.read().strip()
+            finally:
+                f.close()
+            # ignore the first two values ("pid (exe)")
+            st = st[st.find(b(')')) + 2:]
+            values = st.split(b(' '))
+            utime = float(values[11]) / CLOCK_TICKS
+            stime = float(values[12]) / CLOCK_TICKS
+            ntuple = _common.pthread(int(thread_id), utime, stime)
+            retlist.append(ntuple)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    @wrap_exceptions
+    def nice_get(self):
+        #f = open('/proc/%s/stat' % self.pid, 'r')
+        # try:
+        #   data = f.read()
+        #   return int(data.split()[18])
+        # finally:
+        #   f.close()
+
+        # Use C implementation
+        return _psutil_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def cpu_affinity_get(self):
+        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
+        bitmask = cext.proc_cpu_affinity_get(self.pid)
+        return from_bitmask(bitmask)
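+
+    # Worked example: an affinity bitmask of 0b1001 (i.e. 9) means the
+    # process is bound to CPUs 0 and 3, so from_bitmask(9) -> [0, 3].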
+
+    @wrap_exceptions
+    def cpu_affinity_set(self, cpus):
+        try:
+            cext.proc_cpu_affinity_set(self.pid, cpus)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.EINVAL:
+                allcpus = tuple(range(len(per_cpu_times())))
+                for cpu in cpus:
+                    if cpu not in allcpus:
+                        raise ValueError("invalid CPU #%i (choose between %s)"
+                                         % (cpu, allcpus))
+            raise
+
+    # only starting from kernel 2.6.13
+    if hasattr(cext, "proc_ioprio_get"):
+
+        @wrap_exceptions
+        def ionice_get(self):
+            ioclass, value = cext.proc_ioprio_get(self.pid)
+            return _common.pionice(ioclass, value)
+
+        @wrap_exceptions
+        def ionice_set(self, ioclass, value):
+            if ioclass in (IOPRIO_CLASS_NONE, None):
+                if value:
+                    msg = "can't specify value with IOPRIO_CLASS_NONE"
+                    raise ValueError(msg)
+                ioclass = IOPRIO_CLASS_NONE
+                value = 0
+            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
+                if value is None:
+                    value = 4
+            elif ioclass == IOPRIO_CLASS_IDLE:
+                if value:
+                    msg = "can't specify value with IOPRIO_CLASS_IDLE"
+                    raise ValueError(msg)
+                value = 0
+            else:
+                value = 0
+            if not 0 <= value <= 8:
+                raise ValueError(
+                    "value argument range expected is between 0 and 8")
+            return cext.proc_ioprio_set(self.pid, ioclass, value)
+
+    if HAS_PRLIMIT:
+        @wrap_exceptions
+        def rlimit(self, resource, limits=None):
+            # if pid is 0 prlimit() applies to the calling process and
+            # we don't want that
+            if self.pid == 0:
+                raise ValueError("can't use prlimit() against PID 0 process")
+            if limits is None:
+                # get
+                return cext.linux_prlimit(self.pid, resource)
+            else:
+                # set
+                if len(limits) != 2:
+                    raise ValueError(
+                        "second argument must be a (soft, hard) tuple")
+                soft, hard = limits
+                cext.linux_prlimit(self.pid, resource, soft, hard)
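+
+        # Illustrative call patterns (RLIMIT_NOFILE is one of the RLIM*
+        # constants re-exported from the C extension when available):
+        #
+        #     p.rlimit(psutil.RLIMIT_NOFILE)              # get (soft, hard)
+        #     p.rlimit(psutil.RLIMIT_NOFILE, (128, 128))  # set both limits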
+
+    @wrap_exceptions
+    def status(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            STATE = b("State:")
+            for line in f:
+                if line.startswith(STATE):
+                    letter = line.split()[1]
+                    if PY3:
+                        letter = letter.decode()
+                    # XXX is '?' legit? (we're not supposed to return
+                    # it anyway)
+                    return PROC_STATUSES.get(letter, '?')
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def open_files(self):
+        retlist = []
+        files = os.listdir("/proc/%s/fd" % self.pid)
+        hit_enoent = False
+        for fd in files:
+            file = "/proc/%s/fd/%s" % (self.pid, fd)
+            if os.path.islink(file):
+                try:
+                    file = os.readlink(file)
+                except OSError:
+                    # ENOENT == file which is gone in the meantime
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+                else:
+                    # If file is not an absolute path there's no way
+                    # to tell whether it's a regular file or not,
+                    # so we skip it. A regular file is always supposed
+                    # to be absolutized though.
+                    if file.startswith('/') and isfile_strict(file):
+                        ntuple = _common.popenfile(file, int(fd))
+                        retlist.append(ntuple)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        ret = _connections.retrieve(kind, self.pid)
+        # raise NSP if the process disappeared on us
+        os.stat('/proc/%s' % self.pid)
+        return ret
+
+    @wrap_exceptions
+    def num_fds(self):
+        return len(os.listdir("/proc/%s/fd" % self.pid))
+
+    @wrap_exceptions
+    def ppid(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            PPID = b("PPid:")
+            for line in f:
+                if line.startswith(PPID):
+                    # PPid: nnnn
+                    return int(line.split()[1])
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def uids(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            UID = b('Uid:')
+            for line in f:
+                if line.startswith(UID):
+                    _, real, effective, saved, fs = line.split()
+                    return _common.puids(int(real), int(effective), int(saved))
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def gids(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            GID = b('Gid:')
+            for line in f:
+                if line.startswith(GID):
+                    _, real, effective, saved, fs = line.split()
+                    return _common.pgids(int(real), int(effective), int(saved))
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()

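Editor's note: the status(), ppid(), uids() and gids() methods above all follow the same pattern: scan /proc/<pid>/status line by line and pick out one labelled field. A minimal standalone sketch of that pattern, with a hypothetical helper name (field labels per proc(5)):

    import os

    def read_status_field(pid, label):
        # each line of /proc/<pid>/status looks like "Label:\tvalue ..."
        f = open("/proc/%s/status" % pid, "r")
        try:
            for line in f:
                if line.startswith(label + ":"):
                    # drop the label; return the whitespace-split values
                    return line.split()[1:]
            raise ValueError("%s line not found" % label)
        finally:
            f.close()

    # real, effective, saved and filesystem UIDs of this process, e.g.
    # read_status_field(os.getpid(), "Uid") -> ['1000', '1000', '1000', '1000']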

[15/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
new file mode 100644
index 0000000..7ca5a03
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import javax.ws.rs.core.MediaType;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.api.client.UniformInterfaceException;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+public class TestAHSWebServices extends JerseyTest {
+
+  private static ApplicationHistoryManager ahManager;
+
+  private Injector injector = Guice.createInjector(new ServletModule() {
+
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(AHSWebServices.class);
+      bind(GenericExceptionHandler.class);
+      try {
+        ahManager = mockApplicationHistoryManager();
+      } catch (Exception e) {
+        Assert.fail();
+      }
+      bind(ApplicationContext.class).toInstance(ahManager);
+      serve("/*").with(GuiceContainer.class);
+    }
+  });
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  private ApplicationHistoryManager mockApplicationHistoryManager()
+      throws Exception {
+    ApplicationHistoryStore store = new MemoryApplicationHistoryStore();
+    TestAHSWebApp testAHSWebApp = new TestAHSWebApp();
+    testAHSWebApp.setApplicationHistoryStore(store);
+    ApplicationHistoryManager ahManager =
+        testAHSWebApp.mockApplicationHistoryManager(5, 5, 5);
+    return ahManager;
+  }
+
+  public TestAHSWebServices() {
+    super(new WebAppDescriptor.Builder(
+      "org.apache.hadoop.yarn.server.applicationhistoryservice.webapp")
+      .contextListenerClass(GuiceServletConfig.class)
+      .filterClass(com.google.inject.servlet.GuiceFilter.class)
+      .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  @Test
+  public void testInvalidUri() throws JSONException, Exception {
+    WebResource r = resource();
+    String responseStr = "";
+    try {
+      responseStr =
+          r.path("ws").path("v1").path("applicationhistory").path("bogus")
+            .accept(MediaType.APPLICATION_JSON).get(String.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+
+      WebServicesTestUtils.checkStringMatch(
+        "error string exists and shouldn't", "", responseStr);
+    }
+  }
+
+  @Test
+  public void testInvalidUri2() throws JSONException, Exception {
+    WebResource r = resource();
+    String responseStr = "";
+    try {
+      responseStr = r.accept(MediaType.APPLICATION_JSON).get(String.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+      WebServicesTestUtils.checkStringMatch(
+        "error string exists and shouldn't", "", responseStr);
+    }
+  }
+
+  @Test
+  public void testInvalidAccept() throws JSONException, Exception {
+    WebResource r = resource();
+    String responseStr = "";
+    try {
+      responseStr =
+          r.path("ws").path("v1").path("applicationhistory")
+            .accept(MediaType.TEXT_PLAIN).get(String.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.INTERNAL_SERVER_ERROR,
+        response.getClientResponseStatus());
+      WebServicesTestUtils.checkStringMatch(
+        "error string exists and shouldn't", "", responseStr);
+    }
+  }
+
+  @Test
+  public void testAppsQuery() throws Exception {
+    WebResource r = resource();
+    ClientResponse response =
+        r.path("ws").path("v1").path("applicationhistory").path("apps")
+          .queryParam("state", YarnApplicationState.FINISHED.toString())
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject apps = json.getJSONObject("apps");
+    assertEquals("incorrect number of elements", 1, apps.length());
+    JSONArray array = apps.getJSONArray("app");
+    assertEquals("incorrect number of elements", 5, array.length());
+  }
+
+  @Test
+  public void testSingleApp() throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    WebResource r = resource();
+    ClientResponse response =
+        r.path("ws").path("v1").path("applicationhistory").path("apps")
+          .path(appId.toString()).accept(MediaType.APPLICATION_JSON)
+          .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject app = json.getJSONObject("app");
+    assertEquals(appId.toString(), app.getString("appId"));
+    assertEquals(appId.toString(), app.get("name"));
+    assertEquals(appId.toString(), app.get("diagnosticsInfo"));
+    assertEquals("test queue", app.get("queue"));
+    assertEquals("test user", app.get("user"));
+    assertEquals("test type", app.get("type"));
+    assertEquals(FinalApplicationStatus.UNDEFINED.toString(),
+      app.get("finalAppStatus"));
+    assertEquals(YarnApplicationState.FINISHED.toString(), app.get("appState"));
+  }
+
+  @Test
+  public void testMultipleAttempts() throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    WebResource r = resource();
+    ClientResponse response =
+        r.path("ws").path("v1").path("applicationhistory").path("apps")
+          .path(appId.toString()).path("appattempts")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject appAttempts = json.getJSONObject("appAttempts");
+    assertEquals("incorrect number of elements", 1, appAttempts.length());
+    JSONArray array = appAttempts.getJSONArray("appAttempt");
+    assertEquals("incorrect number of elements", 5, array.length());
+  }
+
+  @Test
+  public void testSingleAttempt() throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    WebResource r = resource();
+    ClientResponse response =
+        r.path("ws").path("v1").path("applicationhistory").path("apps")
+          .path(appId.toString()).path("appattempts")
+          .path(appAttemptId.toString()).accept(MediaType.APPLICATION_JSON)
+          .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject appAttempt = json.getJSONObject("appAttempt");
+    assertEquals(appAttemptId.toString(), appAttempt.getString("appAttemptId"));
+    assertEquals(appAttemptId.toString(), appAttempt.getString("host"));
+    assertEquals(appAttemptId.toString(),
+      appAttempt.getString("diagnosticsInfo"));
+    assertEquals("test tracking url", appAttempt.getString("trackingUrl"));
+    assertEquals(YarnApplicationAttemptState.FINISHED.toString(),
+      appAttempt.get("appAttemptState"));
+  }
+
+  @Test
+  public void testMultipleContainers() throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    WebResource r = resource();
+    ClientResponse response =
+        r.path("ws").path("v1").path("applicationhistory").path("apps")
+          .path(appId.toString()).path("appattempts")
+          .path(appAttemptId.toString()).path("containers")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject containers = json.getJSONObject("containers");
+    assertEquals("incorrect number of elements", 1, containers.length());
+    JSONArray array = containers.getJSONArray("container");
+    assertEquals("incorrect number of elements", 5, array.length());
+  }
+
+  @Test
+  public void testSingleContainer() throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    WebResource r = resource();
+    ClientResponse response =
+        r.path("ws").path("v1").path("applicationhistory").path("apps")
+          .path(appId.toString()).path("appattempts")
+          .path(appAttemptId.toString()).path("containers")
+          .path(containerId.toString()).accept(MediaType.APPLICATION_JSON)
+          .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject container = json.getJSONObject("container");
+    assertEquals(containerId.toString(), container.getString("containerId"));
+    assertEquals(containerId.toString(), container.getString("diagnosticsInfo"));
+    assertEquals("0", container.getString("allocatedMB"));
+    assertEquals("0", container.getString("allocatedVCores"));
+    assertEquals(NodeId.newInstance("localhost", 0).toString(),
+      container.getString("assignedNodeId"));
+    assertEquals(Priority.newInstance(containerId.getId()).toString(),
+      container.getString("priority"));
+    Configuration conf = new YarnConfiguration();
+    assertEquals(WebAppUtils.getHttpSchemePrefix(conf) +
+        WebAppUtils.getAHSWebAppURLWithoutScheme(conf) +
+        "/applicationhistory/logs/localhost:0/container_0_0001_01_000001/" +
+        "container_0_0001_01_000001/test user",
+        container.getString("logUrl"));
+    assertEquals(ContainerState.COMPLETE.toString(),
+      container.getString("containerState"));
+  }
+
+}

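Editor's note: the REST surface these tests exercise can be hit directly once an application history server is running. A rough Python 2 sketch; the host and port are placeholders, not taken from the patch:

    import json
    import urllib2

    AHS = "http://localhost:8188/ws/v1/applicationhistory"  # placeholder address

    # same query shape as testAppsQuery: finished apps, JSON response
    req = urllib2.Request(AHS + "/apps?state=FINISHED",
                          headers={"Accept": "application/json"})
    apps = json.load(urllib2.urlopen(req))["apps"]["app"]
    for app in apps:
        print app["appId"], app["appState"]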
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestTimelineWebServices.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestTimelineWebServices.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestTimelineWebServices.java
new file mode 100644
index 0000000..2b93190
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestTimelineWebServices.java
@@ -0,0 +1,391 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.junit.Assert.assertEquals;
+
+import javax.ws.rs.core.MediaType;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TestTimelineMetricStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.TimelineStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.TestMemoryTimelineStore;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+import org.junit.Test;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+
+public class TestTimelineWebServices extends JerseyTest {
+
+  private static TimelineStore store;
+  private static TimelineMetricStore metricStore;
+  private long beforeTime;
+
+  private Injector injector = Guice.createInjector(new ServletModule() {
+
+    @Override
+    protected void configureServlets() {
+      bind(YarnJacksonJaxbJsonProvider.class);
+      bind(TimelineWebServices.class);
+      bind(GenericExceptionHandler.class);
+      try{
+        store = mockTimelineStore();
+        metricStore = new TestTimelineMetricStore();
+      } catch (Exception e) {
+        Assert.fail();
+      }
+      bind(TimelineStore.class).toInstance(store);
+      bind(TimelineMetricStore.class).toInstance(metricStore);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+  });
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  private TimelineStore mockTimelineStore()
+      throws Exception {
+    beforeTime = System.currentTimeMillis() - 1;
+    TestMemoryTimelineStore store = new TestMemoryTimelineStore();
+    store.setup();
+    return store.getTimelineStore();
+  }
+
+  public TestTimelineWebServices() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.applicationhistoryservice.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter")
+        .servletPath("/")
+        .clientConfig(new DefaultClientConfig(YarnJacksonJaxbJsonProvider.class))
+        .build());
+  }
+
+  @Test
+  public void testAbout() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    TimelineWebServices.AboutInfo about =
+        response.getEntity(TimelineWebServices.AboutInfo.class);
+    Assert.assertNotNull(about);
+    Assert.assertEquals("Timeline API", about.getAbout());
+  }
+
+  private static void verifyEntities(TimelineEntities entities) {
+    Assert.assertNotNull(entities);
+    Assert.assertEquals(2, entities.getEntities().size());
+    TimelineEntity entity1 = entities.getEntities().get(0);
+    Assert.assertNotNull(entity1);
+    Assert.assertEquals("id_1", entity1.getEntityId());
+    Assert.assertEquals("type_1", entity1.getEntityType());
+    Assert.assertEquals(123l, entity1.getStartTime().longValue());
+    Assert.assertEquals(2, entity1.getEvents().size());
+    Assert.assertEquals(4, entity1.getPrimaryFilters().size());
+    Assert.assertEquals(4, entity1.getOtherInfo().size());
+    TimelineEntity entity2 = entities.getEntities().get(1);
+    Assert.assertNotNull(entity2);
+    Assert.assertEquals("id_2", entity2.getEntityId());
+    Assert.assertEquals("type_1", entity2.getEntityType());
+    Assert.assertEquals(123l, entity2.getStartTime().longValue());
+    Assert.assertEquals(2, entity2.getEvents().size());
+    Assert.assertEquals(4, entity2.getPrimaryFilters().size());
+    Assert.assertEquals(4, entity2.getOtherInfo().size());
+  }
+
+  private static void verifyMetrics(TimelineMetrics metrics) {
+    Assert.assertNotNull(metrics);
+    Assert.assertEquals("cpu_user", metrics.getMetrics().get(0).getMetricName());
+    Assert.assertEquals(3, metrics.getMetrics().get(0).getMetricValues().size());
+    Assert.assertEquals("mem_free", metrics.getMetrics().get(1).getMetricName());
+    Assert.assertEquals(3, metrics.getMetrics().get(1).getMetricValues().size());
+  }
+
+  @Test
+  public void testGetEntities() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    verifyEntities(response.getEntity(TimelineEntities.class));
+  }
+
+  @Test
+  public void testFromId() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").queryParam("fromId", "id_2")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    assertEquals(1, response.getEntity(TimelineEntities.class).getEntities()
+        .size());
+
+    response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").queryParam("fromId", "id_1")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    assertEquals(2, response.getEntity(TimelineEntities.class).getEntities()
+        .size());
+  }
+
+  @Test
+  public void testFromTs() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").queryParam("fromTs", Long.toString(beforeTime))
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    assertEquals(0, response.getEntity(TimelineEntities.class).getEntities()
+        .size());
+
+    response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").queryParam("fromTs", Long.toString(
+            System.currentTimeMillis()))
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    assertEquals(2, response.getEntity(TimelineEntities.class).getEntities()
+        .size());
+  }
+
+  @Test
+  public void testPrimaryFilterString() {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").queryParam("primaryFilter", "user:username")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    verifyEntities(response.getEntity(TimelineEntities.class));
+  }
+
+  @Test
+  public void testPrimaryFilterInteger() {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").queryParam("primaryFilter",
+            "appname:" + Integer.toString(Integer.MAX_VALUE))
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    verifyEntities(response.getEntity(TimelineEntities.class));
+  }
+
+  @Test
+  public void testPrimaryFilterLong() {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").queryParam("primaryFilter",
+            "long:" + Long.toString((long)Integer.MAX_VALUE + 1l))
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    verifyEntities(response.getEntity(TimelineEntities.class));
+  }
+
+  @Test
+  public void testPrimaryFilterNumericString() {
+    // without quotes, 123abc is interpreted as the number 123,
+    // which finds no entities
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").queryParam("primaryFilter", "other:123abc")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    assertEquals(0, response.getEntity(TimelineEntities.class).getEntities()
+        .size());
+  }
+
+  @Test
+  public void testPrimaryFilterNumericStringWithQuotes() {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").queryParam("primaryFilter", "other:\"123abc\"")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    verifyEntities(response.getEntity(TimelineEntities.class));
+  }
+
+  @Test
+  public void testSecondaryFilters() {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1")
+        .queryParam("secondaryFilter",
+            "user:username,appname:" + Integer.toString(Integer.MAX_VALUE))
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    verifyEntities(response.getEntity(TimelineEntities.class));
+  }
+
+  @Test
+  public void testGetEntity() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").path("id_1")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    TimelineEntity entity = response.getEntity(TimelineEntity.class);
+    Assert.assertNotNull(entity);
+    Assert.assertEquals("id_1", entity.getEntityId());
+    Assert.assertEquals("type_1", entity.getEntityType());
+    Assert.assertEquals(123l, entity.getStartTime().longValue());
+    Assert.assertEquals(2, entity.getEvents().size());
+    Assert.assertEquals(4, entity.getPrimaryFilters().size());
+    Assert.assertEquals(4, entity.getOtherInfo().size());
+  }
+
+  @Test
+  public void testGetEntityFields1() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").path("id_1").queryParam("fields", "events,otherinfo")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    TimelineEntity entity = response.getEntity(TimelineEntity.class);
+    Assert.assertNotNull(entity);
+    Assert.assertEquals("id_1", entity.getEntityId());
+    Assert.assertEquals("type_1", entity.getEntityType());
+    Assert.assertEquals(123l, entity.getStartTime().longValue());
+    Assert.assertEquals(2, entity.getEvents().size());
+    Assert.assertEquals(0, entity.getPrimaryFilters().size());
+    Assert.assertEquals(4, entity.getOtherInfo().size());
+  }
+
+  @Test
+  public void testGetEntityFields2() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").path("id_1").queryParam("fields", "lasteventonly," +
+            "primaryfilters,relatedentities")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    TimelineEntity entity = response.getEntity(TimelineEntity.class);
+    Assert.assertNotNull(entity);
+    Assert.assertEquals("id_1", entity.getEntityId());
+    Assert.assertEquals("type_1", entity.getEntityType());
+    Assert.assertEquals(123l, entity.getStartTime().longValue());
+    Assert.assertEquals(1, entity.getEvents().size());
+    Assert.assertEquals(4, entity.getPrimaryFilters().size());
+    Assert.assertEquals(0, entity.getOtherInfo().size());
+  }
+
+  @Test
+  public void testGetEvents() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .path("type_1").path("events")
+        .queryParam("entityId", "id_1")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    TimelineEvents events = response.getEntity(TimelineEvents.class);
+    Assert.assertNotNull(events);
+    Assert.assertEquals(1, events.getAllEvents().size());
+    TimelineEvents.EventsOfOneEntity partEvents = events.getAllEvents().get(0);
+    Assert.assertEquals(2, partEvents.getEvents().size());
+    TimelineEvent event1 = partEvents.getEvents().get(0);
+    Assert.assertEquals(456l, event1.getTimestamp());
+    Assert.assertEquals("end_event", event1.getEventType());
+    Assert.assertEquals(1, event1.getEventInfo().size());
+    TimelineEvent event2 = partEvents.getEvents().get(1);
+    Assert.assertEquals(123l, event2.getTimestamp());
+    Assert.assertEquals("start_event", event2.getEventType());
+    Assert.assertEquals(0, event2.getEventInfo().size());
+  }
+
+  @Test
+  public void testPostEntities() throws Exception {
+    TimelineEntities entities = new TimelineEntities();
+    TimelineEntity entity = new TimelineEntity();
+    entity.setEntityId("test id");
+    entity.setEntityType("test type");
+    entity.setStartTime(System.currentTimeMillis());
+    entities.addEntity(entity);
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+        .accept(MediaType.APPLICATION_JSON)
+        .type(MediaType.APPLICATION_JSON)
+        .post(ClientResponse.class, entities);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    TimelinePutResponse putResponse = response.getEntity(TimelinePutResponse.class);
+    Assert.assertNotNull(putResponse);
+    Assert.assertEquals(0, putResponse.getErrors().size());
+    // verify the entity exists in the store
+    response = r.path("ws").path("v1").path("timeline")
+        .path("test type").path("test id")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    entity = response.getEntity(TimelineEntity.class);
+    Assert.assertNotNull(entity);
+    Assert.assertEquals("test id", entity.getEntityId());
+    Assert.assertEquals("test type", entity.getEntityType());
+  }
+
+  @Test
+  public void testGetMetrics() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("timeline")
+      .path("metrics").queryParam("metricNames", "cpu_user")
+      .accept(MediaType.APPLICATION_JSON)
+      .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    verifyMetrics(response.getEntity(TimelineMetrics.class));
+  }
+}

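Editor's note: the timeline endpoints can be queried the same way; note the quoting rule that testPrimaryFilterNumericString documents. A hedged Python 2 sketch (address is a placeholder):

    import json
    import urllib
    import urllib2

    TIMELINE = "http://localhost:8188/ws/v1/timeline"  # placeholder address

    # a numeric-looking string value must be quoted, otherwise "123abc"
    # is parsed as the number 123 and matches nothing
    query = urllib.urlencode({"primaryFilter": 'other:"123abc"'})
    req = urllib2.Request("%s/type_1?%s" % (TIMELINE, query),
                          headers={"Accept": "application/json"})
    entities = json.load(urllib2.urlopen(req)).get("entities", [])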
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/pom.xml b/ambari-metrics/ambari-metrics-host-monitoring/pom.xml
new file mode 100644
index 0000000..20c6642
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/pom.xml
@@ -0,0 +1,152 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <parent>
+    <artifactId>ambari-metrics</artifactId>
+    <groupId>org.apache.ambari</groupId>
+    <version>1.3.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <packaging>pom</packaging>
+  <version>1.3.0-SNAPSHOT</version>
+  <artifactId>ambari-metrics-host-monitoring</artifactId>
+  <properties>
+    <resmonitor.install.dir>
+      /usr/lib/python2.6/site-packages/resource_monitoring
+    </resmonitor.install.dir>
+  </properties>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.8</version>
+        <executions>
+          <execution>
+            <id>parse-version</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>parse-version</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>regex-property</id>
+            <goals>
+              <goal>regex-property</goal>
+            </goals>
+            <configuration>
+              <name>ambariVersion</name>
+              <value>${project.version}</value>
+              <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*</regex>
+              <replacement>$1.$2.$3</replacement>
+              <failIfNoMatch>false</failIfNoMatch>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <tarLongFileMode>gnu</tarLongFileMode>
+          <descriptors>
+            <descriptor>src/packages/tarball/all.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2</version>
+        <executions>
+          <execution>
+            <configuration>
+              <executable>python</executable>
+              <workingDirectory>
+                ${basedir}/src/main/python/resource_monitoring/psutil
+              </workingDirectory>
+              <arguments>
+                <argument>build.py</argument>
+              </arguments>
+            </configuration>
+            <id>generate-psutil-binaries</id>
+            <phase>package</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>rpm-maven-plugin</artifactId>
+        <version>2.0.1</version>
+        <executions>
+          <execution>
+            <!-- unbinds rpm creation from maven lifecycle -->
+            <phase>none</phase>
+            <goals>
+              <goal>rpm</goal>
+            </goals>
+          </execution>
+        </executions>
+
+        <configuration>
+          <group>Development</group>
+          <needarch>x86_64</needarch>
+          <autoRequires>false</autoRequires>
+          <mappings>
+            <mapping>
+              <directory>${resmonitor.install.dir}</directory>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>
+                    ${project.build.directory}/${project.artifactId}-${project.version}/resource_monitoring/external/build/*
+                  </location>
+                  <location>
+                    ${project.build.directory}/${project.artifactId}-${project.version}/resource_monitoring/core
+                  </location>
+                  <location>
+                    ${project.build.directory}/${project.artifactId}-${project.version}/resource_monitoring/__init__.py
+                  </location>
+                </source>
+              </sources>
+            </mapping>
+          </mappings>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file

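Editor's note: the regex-property execution above strips the qualifier off the project version; the substitution it performs is equivalent to this quick Python check:

    import re

    # mirrors the <regex>/<replacement> pair in the build-helper configuration
    regex = r"^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*"
    print re.sub(regex, r"\1.\2.\3", "1.3.0-SNAPSHOT")  # prints 1.3.0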
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/__init__.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/__init__.py
new file mode 100644
index 0000000..38daf22
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from resource_monitoring.core import *

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/__init__.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/__init__.py
new file mode 100644
index 0000000..1629c9f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/__init__.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os, sys
+path = os.path.abspath(__file__)
+path = os.path.join(os.path.dirname(os.path.dirname(path)), "psutil/build/")
+
+for dir_name in os.walk(path).next()[1]:
+  if 'lib' in dir_name:
+    sys.path.append(os.path.join(path, dir_name))
+
+try:
+  import psutil
+except ImportError:
+  print 'psutil binaries need to be built by running psutil/build.py ' \
+        'manually or by running "mvn clean package".'

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py
new file mode 100644
index 0000000..8c32661
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+import json
+from threading import RLock
+
+logger = logging.getLogger()
+
+class ApplicationMetricMap:
+  """
+  A data structure to buffer metrics in memory.
+  The in-memory dict stores metrics as shown below:
+  { application_id : { metric_id : { timestamp :  metric_value } } }
+  application_id => uniquely identify the metrics for an application / host.
+  metric_id      => identify the metric
+  timestamp      => collection time
+  metric_value   => numeric value
+  """
+
+  app_metric_map = {}
+
+  def __init__(self, hostname, ip_address):
+    self.hostname = hostname
+    self.ip_address = ip_address
+    self.lock = RLock()
+  pass
+
+  def acquire_lock(self):
+    self.lock.acquire()
+
+  def release_lock(self):
+    self.lock.release()
+
+  def put_metric(self, application_id, metric_id, timestamp, value):
+    metric_map = self.app_metric_map.get(application_id)
+    if not metric_map:
+      metric_map = { metric_id : { timestamp : value } }
+      self.app_metric_map[ application_id ] = metric_map
+    else:
+      metric_id_map = metric_map.get(metric_id)
+      if not metric_id_map:
+        metric_id_map = { timestamp : value }
+        metric_map[ metric_id ] = metric_id_map
+      else:
+        metric_map[ metric_id ].update( { timestamp : value } )
+      pass
+    pass
+  pass
+
+  def delete_application_metrics(self, app_id):
+    del self.app_metric_map[ app_id ]
+  pass
+
+  def flatten(self, application_id = None):
+    """
+    Return flatten dict to caller in json format.
+    Json format:
+    {"metrics":[{"hostname":"a","metricname":"b","appid":"c",
+    "instanceid":"d","starttime":"e","metrics":{"t":"v"}}]}
+    """
+
+    timeline_metrics = { "metrics" : [] }
+    local_metric_map = {}
+
+    if application_id:
+      if self.app_metric_map.has_key(application_id):
+        local_metric_map = { application_id : self.app_metric_map[application_id] }
+      else:
+        logger.info("application_id {0} not present in the map.".format(application_id))
+    else:
+      local_metric_map = self.app_metric_map.copy()
+    pass
+
+    for appId, metrics in local_metric_map.iteritems():
+      for metricId, metricData in dict(metrics).iteritems():
+        # Create a timeline metric object
+        timeline_metric = {
+          "hostname" : self.hostname,
+          "metricname" : metricId,
+          "appid" : self.get_app_id(appId),
+          "instanceid" : self.get_instance_id(appId),
+          "starttime" : self.get_start_time(appId, metricId),
+          "metrics" : metricData
+        }
+        timeline_metrics[ "metrics" ].append( timeline_metric )
+      pass
+    pass
+
+    return json.dumps(timeline_metrics)
+  pass
+
+  def get_start_time(self, app_id, metric_id):
+    if self.app_metric_map.has_key(app_id):
+      if self.app_metric_map.get(app_id).has_key(metric_id):
+        # dict ordering is arbitrary; the start time is the earliest timestamp
+        return min(self.app_metric_map[app_id][metric_id])
+  pass
+
+  def format_app_id(self, app_id, instance_id = None):
+    return app_id + "_" + instance_id if instance_id else app_id
+  pass
+
+  def get_app_id(self, app_id):
+    return app_id.split("_")[0]
+  pass
+
+  def get_instance_id(self, app_id):
+    parts = app_id.split("_")
+    return parts[1] if len(parts) > 1 else ''
+  pass

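Editor's note: a short usage sketch of the map described in the docstrings above; the host name, IP and timestamps are made-up values:

    import json

    metric_map = ApplicationMetricMap("host1.example.com", "10.0.0.1")
    metric_map.put_metric("HOST", "cpu_user", 1411412400000, 12.5)
    metric_map.put_metric("HOST", "cpu_user", 1411412415000, 13.1)

    payload = json.loads(metric_map.flatten())
    # payload["metrics"][0]["metricname"] == "cpu_user"
    # payload["metrics"][0]["metrics"] holds the {timestamp: value} pairs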
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
new file mode 100644
index 0000000..7726925
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import ConfigParser
+import StringIO
+import json
+import os
+
+config = ConfigParser.RawConfigParser()
+CONFIG_FILE_PATH = "/etc/metric-monitor/conf/metric_monitor.ini"
+METRIC_FILE_PATH = "/etc/metric-monitor/conf/metric_groups.conf"
+
+config_content = """
+[default]
+debug_level = INFO
+metrics_server = host:port
+enable_time_threshold = false
+enable_value_threshold = false
+
+[emitter]
+send_interval = 60
+
+[collector]
+collector_sleep_interval = 5
+max_queue_size = 5000
+"""
+
+metric_group_info = """
+{
+   "host_metric_groups": {
+      "cpu_info": {
+         "collect_every": "15",
+         "metrics": [
+            {
+               "name": "cpu_user",
+               "value_threshold": "1.0"
+            }
+         ]
+      },
+      "disk_info": {
+         "collect_every": "30",
+         "metrics": [
+            {
+               "name": "disk_free",
+               "value_threshold": "5.0"
+            }
+         ]
+      },
+      "network_info": {
+         "collect_every": "20",
+         "metrics": [
+            {
+               "name": "bytes_out",
+               "value_threshold": "128"
+            }
+         ]
+      }
+   },
+   "process_metric_groups": {
+      "": {
+         "collect_every": "15",
+         "metrics": []
+      }
+   }
+}
+"""
+
+class Configuration:
+
+  def __init__(self):
+    global config_content
+    self.config = ConfigParser.RawConfigParser()
+    if os.path.exists(CONFIG_FILE_PATH):
+      self.config.read(CONFIG_FILE_PATH)
+    else:
+      self.config.readfp(StringIO.StringIO(config_content))
+    pass
+    if os.path.exists(METRIC_FILE_PATH):
+      self.metric_groups = json.load(open(METRIC_FILE_PATH))
+    else:
+      print 'No metric configs found at {0}, using builtin defaults.'.format(METRIC_FILE_PATH)
+      # fall back to the embedded defaults so getMetricGroupConfig() always works
+      self.metric_groups = json.loads(metric_group_info)
+    pass
+
+  def getConfig(self):
+    return self.config
+
+  def getMetricGroupConfig(self):
+    return self.metric_groups
+
+  def get(self, section, key, default=None):
+    try:
+      value = self.config.get(section, key)
+    except:
+      return default
+    return value
+
+  def get_send_interval(self):
+    return int(self.get("emitter", "send_interval", 60))
+
+  def get_collector_sleep_interval(self):
+    return int(self.get("collector", "collector_sleep_interval", 5))
+
+  def get_server_address(self):
+    return self.get("default", "metrics_server")
+
+  def get_log_level(self):
+    return self.get("default", "debug_level", "INFO")
+
+  def get_max_queue_size(self):
+    return int(self.get("collector", "max_queue_size", 5000))
\ No newline at end of file

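Editor's note: with the embedded-defaults fallback above, the reader can be exercised with no files under /etc/metric-monitor/conf at all; a quick sanity check, assuming that situation:

    from config_reader import Configuration

    config = Configuration()
    print config.get_server_address()    # "host:port" from the [default] section
    print config.get_send_interval()     # 60, from the [emitter] section
    print sorted(config.getMetricGroupConfig()["host_metric_groups"])
    # ['cpu_info', 'disk_info', 'network_info']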
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
new file mode 100644
index 0000000..3d1f487
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+import signal
+import threading
+import time
+import sys
+from Queue import Queue
+from threading import Timer
+from application_metric_map import ApplicationMetricMap
+from config_reader import Configuration
+from event_definition import HostMetricCollectEvent, ProcessMetricCollectEvent
+from metric_collector import MetricsCollector
+from emitter import Emitter
+from host_info import HostInfo
+
+logger = logging.getLogger()
+
+class Controller(threading.Thread):
+
+  def __init__(self, config):
+    # Process initialization code
+    threading.Thread.__init__(self)
+    logger.debug('Initializing Controller thread.')
+    self.lock = threading.Lock()
+    self.config = config
+    self.metrics_config = config.getMetricGroupConfig()
+    self.events_cache = []
+    hostinfo = HostInfo()
+    self.application_metric_map = ApplicationMetricMap(hostinfo.get_hostname(),
+                                                       hostinfo.get_ip_address())
+    self.event_queue = Queue(config.get_max_queue_size())
+    self.metric_collector = MetricsCollector(self.event_queue, self.application_metric_map)
+    self.server_url = config.get_server_address()
+    self.sleep_interval = config.get_collector_sleep_interval()
+    self.initialize_events_cache()
+    self.emitter = Emitter(self.config, self.application_metric_map)
+
+  def run(self):
+    logger.info('Running Controller thread: %s' % threading.currentThread().getName())
+    # Wake every 5 seconds to push events to the queue
+    while True:
+      if (self.event_queue.full()):
+        logger.warn('Event queue full! Suspending further collections.')
+      else:
+        self.enqueue_events()
+      pass
+      time.sleep(self.sleep_interval)
+    pass
+
+  # TODO: Optimize to not use Timer class and use the Queue instead
+  def enqueue_events(self):
+    # Queue events for up to a minute
+    for event in self.events_cache:
+      # Pass the callable and its argument separately so the Timer defers
+      # the call instead of invoking process_event immediately
+      t = Timer(event.get_collect_interval(), self.metric_collector.process_event, [event])
+      t.start()
+    pass
+
+  def initialize_events_cache(self):
+    self.events_cache = []
+    try:
+      host_metrics_groups = self.metrics_config['host_metric_groups']
+      process_metrics_groups = self.metrics_config['process_metric_groups']
+    except KeyError, ke:
+      logger.warn('Error loading metric groups.')
+      raise ke
+    pass
+
+    if host_metrics_groups:
+      for name, properties in host_metrics_groups.iteritems():
+        event = HostMetricCollectEvent(properties, name)
+        logger.info('Adding event to cache, {0} : {1}'.format(name, properties))
+        self.events_cache.append(event)
+      pass
+    pass
+
+    if process_metrics_groups:
+      for name, properties in process_metrics_groups.iteritems():
+        event = ProcessMetricCollectEvent(properties, name)
+        logger.info('Process metric group defined, {0} : {1}'.format(name, properties))
+        # Process-level collection is not implemented yet, so these events
+        # are not added to the cache.
+        #self.events_cache.append(event)
+      pass
+    pass
+
+  pass
+
+  def start_emitter(self):
+    self.emitter.start()
+
+def main(argv=None):
+  # Allow Ctrl-C
+  signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+  config = Configuration()
+  collector = Controller(config)
+
+  logger.setLevel(config.get_log_level())
+  formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - %(message)s")
+  stream_handler = logging.StreamHandler(sys.stdout)
+  stream_handler.setFormatter(formatter)
+  logger.addHandler(stream_handler)
+  logger.info('Starting Server RPC Thread: {0}'.format(sys.argv))
+
+  collector.start()
+  collector.start_emitter()
+
+if __name__ == '__main__':
+  main()
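
Note on the Timer-based scheduling in Controller.enqueue_events() above: the
callable and its argument must be handed to Timer separately, otherwise the
collection runs at scheduling time instead of on the timer thread. A minimal
standalone sketch of the pattern (the event name and handler are
illustrative, not the Ambari classes):

    import time
    from threading import Timer

    def process_event(event_name):
        # Runs on the Timer's own thread once the interval elapses
        print 'collected %s at %f' % (event_name, time.time())

    # Correct: function and arguments are passed separately
    t = Timer(5, process_event, ['cpu'])
    t.start()

    # Incorrect: process_event('cpu') would execute immediately and Timer
    # would receive its return value (None) instead of a callable.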

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
new file mode 100644
index 0000000..c907445
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+import threading
+import time
+import urllib2
+
+logger = logging.getLogger()
+COLLECTOR_URL = "http://{0}/ws/v1/timeline/metrics"
+RETRY_SLEEP_INTERVAL = 5
+MAX_RETRY_COUNT = 3
+
+class Emitter(threading.Thread):
+  """
+  Wake up every send interval seconds and empty the application metric map.
+  """
+
+  def __init__(self, config, application_metric_map):
+    threading.Thread.__init__(self)
+    logger.debug('Initializing Emitter thread.')
+    self.lock = threading.Lock()
+    self.collector_address = config.get_server_address()
+    self.send_interval = config.get_send_interval()
+    self.application_metric_map = application_metric_map
+
+  def run(self):
+    logger.info('Running Emitter thread: %s' % threading.currentThread().getName())
+    while True:
+      try:
+        retry_count = 0
+        while retry_count < MAX_RETRY_COUNT:
+          self.application_metric_map.acquire_lock()
+          json_data = self.application_metric_map.flatten()
+          if json_data is None:
+            logger.info("Nothing to emit, resume waiting.")
+            # Release the lock before leaving the retry loop; breaking while
+            # holding it would block the collector threads forever.
+            self.application_metric_map.release_lock()
+            break
+          pass
+          response = self.push_metrics(json_data)
+
+          # urllib2 responses report the HTTP status as an int via getcode()
+          if response and response.getcode() == 201:
+            retry_count = MAX_RETRY_COUNT
+            self.application_metric_map.clear()
+            self.application_metric_map.release_lock()
+          else:
+            logger.warn("Error sending metrics to server. Retrying after {0} "
+                        "...".format(RETRY_SLEEP_INTERVAL))
+            self.application_metric_map.release_lock()
+            retry_count += 1
+            time.sleep(RETRY_SLEEP_INTERVAL)
+          pass
+        pass
+
+        time.sleep(self.send_interval)
+      except Exception, e:
+        logger.warn('Unable to emit events. %s' % str(e))
+        time.sleep(RETRY_SLEEP_INTERVAL)
+        logger.info('Retrying emit after %s seconds.' % RETRY_SLEEP_INTERVAL)
+    pass
+
+  def push_metrics(self, data):
+    headers = {"Content-Type" : "application/json", "Accept" : "*/*"}
+    server = COLLECTOR_URL.format(self.collector_address.strip())
+    logger.info("server: %s" % server)
+    req = urllib2.Request(server, data, headers)
+    # Leave headroom below the send interval so a hung request times out
+    # before the next emit cycle (assumes send_interval > 10 seconds).
+    response = urllib2.urlopen(req, timeout=int(self.send_interval - 10))
+    if response:
+      logger.debug("POST response from server: status = {0}, code = {1}".format(
+        response.status, response.reason))
+      logger.debug(response.read())
+    pass
+    return response
+
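
Note on the HTTP handling in Emitter.push_metrics() above: urllib2 response
objects expose the status as an integer via getcode(), not as an
httplib-style .status attribute. A minimal sketch of the POST-and-check
pattern, with a placeholder URL:

    import json
    import urllib2

    def post_json(url, payload, timeout=10):
        headers = {"Content-Type": "application/json", "Accept": "*/*"}
        req = urllib2.Request(url, json.dumps(payload), headers)
        response = urllib2.urlopen(req, timeout=timeout)
        # getcode() returns the numeric HTTP status, e.g. 201 Created
        return response.getcode() == 201

    # Example against a hypothetical local collector:
    # ok = post_json('http://localhost:8188/ws/v1/timeline/metrics',
    #                {'metrics': []})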

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/event_definition.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/event_definition.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/event_definition.py
new file mode 100644
index 0000000..f356b03
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/event_definition.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+
+DEFAULT_COLLECT_INTERVAL = 10
+
+logger = logging.getLogger()
+
+class Event:
+  def __init__(self):
+    self._classname = self.__class__.__name__
+
+  def get_classname(self):
+    return self._classname
+
+  def get_collect_interval(self):
+    return DEFAULT_COLLECT_INTERVAL
+
+
+class EmitEvent(Event):
+
+  def __init__(self, application_metric_map, config):
+    Event.__init__(self)
+    self.collector_address = config.get_server_address()
+    self.application_metric_map = application_metric_map
+
+  def get_emit_payload(self):
+    return self.application_metric_map.flatten()
+
+
+class HostMetricCollectEvent(Event):
+
+  def __init__(self, group_config, group_name):
+    Event.__init__(self)
+    self.group_config = group_config
+    self.group_name = group_name
+    try:
+      self.group_interval = group_config['collect_every']
+      self.metrics = group_config['metrics']
+    except KeyError, ex:
+      logger.warn('Unable to create event from metric group. {0}'.format(
+        group_config))
+      raise ex
+
+  def get_metric_value_thresholds(self):
+    metric_value_thresholds = {}
+
+    for metric in self.metrics:
+      try:
+        metric_value_thresholds[metric['name']] = metric['value_threshold']
+      except (KeyError, TypeError):
+        logger.warn('Error parsing metric configuration. {0}'.format(metric))
+    pass
+
+    return metric_value_thresholds
+
+  def get_group_name(self):
+    return self.group_name
+
+  def get_collect_interval(self):
+    return int(self.group_interval if self.group_interval else DEFAULT_COLLECT_INTERVAL)
+
+class ProcessMetricCollectEvent(Event):
+
+  def __init__(self, group_config, group_name):
+    # Placeholder until process-level collection is implemented. Inherit
+    # from Event so get_classname() based dispatch keeps working.
+    Event.__init__(self)
+    self.group_config = group_config
+    self.group_name = group_name
\ No newline at end of file
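
Note on the class-name dispatch these event types support: the collector
(metric_collector.py below) routes events by comparing get_classname() to
SomeEvent.__name__, which only works when every event type inherits from
Event. A self-contained sketch of the idiom with hypothetical event classes:

    class Event(object):
        def __init__(self):
            self._classname = self.__class__.__name__

        def get_classname(self):
            return self._classname

    class HostEvent(Event):
        pass

    class ProcessEvent(Event):
        pass

    def route(event):
        # String-based dispatch, mirroring MetricsCollector.process_event
        if event.get_classname() == HostEvent.__name__:
            return 'host'
        elif event.get_classname() == ProcessEvent.__name__:
            return 'process'
        return 'unknown'

    assert route(HostEvent()) == 'host'
    assert route(ProcessEvent()) == 'process'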

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
new file mode 100644
index 0000000..4b39119
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/host_info.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+import psutil
+import os
+from collections import namedtuple
+import platform
+import socket
+
+logger = logging.getLogger()
+
+def bytes2human(n):
+  # Despite the generic name, this converts a byte count into gigabytes
+  # formatted with two decimal places.
+  bytes = float(n)
+  gigabytes = bytes / 1073741824
+  return '%.2f' % gigabytes
+pass
+
+
+class HostInfo():
+
+  def get_cpu_times(self):
+    """
+    Return cpu stats at current time
+    """
+    cpu_times = psutil.cpu_times()
+    load_avg = os.getloadavg()
+
+    return {
+      'cpu_user' : cpu_times.user if hasattr(cpu_times, 'user') else '',
+      'cpu_system' : cpu_times.system if hasattr(cpu_times, 'system') else '',
+      'cpu_idle' : cpu_times.idle if hasattr(cpu_times, 'idle') else '',
+      'cpu_nice' : cpu_times.nice if hasattr(cpu_times, 'nice') else '',
+      'cpu_wio' : cpu_times.iowait if hasattr(cpu_times, 'iowait') else '',
+      'cpu_intr' : cpu_times.irq if hasattr(cpu_times, 'irq') else '',
+      'cpu_sintr' : cpu_times.softirq if hasattr(cpu_times, 'softirq') else '',
+      'load_one' : load_avg[0],
+      'load_five' : load_avg[1],
+      'load_fifteen' : load_avg[2]
+    }
+  pass
+
+  def get_mem_info(self):
+    """
+    Return memory statistics at current time
+    """
+
+    mem_stats = psutil.virtual_memory()
+    swap_stats = psutil.swap_memory()
+    disk_usage = self.get_combined_disk_usage()
+
+    return {
+      'mem_free' : mem_stats.free if hasattr(mem_stats, 'free') else '',
+      'mem_shared' : mem_stats.shared if hasattr(mem_stats, 'shared') else '',
+      'mem_buffered' : mem_stats.buffers if hasattr(mem_stats, 'buffers') else '',
+      'mem_cached' : mem_stats.cached if hasattr(mem_stats, 'cached') else '',
+      'swap_free' : swap_stats.free if hasattr(swap_stats, 'free') else '',
+      'disk_free' : disk_usage.get("disk_free"),
+      'part_max_used' : disk_usage.get("max_part_used")[0],
+      'disk_total' : disk_usage.get("disk_total")
+    }
+  pass
+
+  def get_network_info(self):
+    """
+    Return network counters
+    """
+    net_stats = psutil.net_io_counters()
+
+    return {
+      'bytes_out' : net_stats.bytes_sent,
+      'bytes_in' : net_stats.bytes_recv,
+      'pkts_out' : net_stats.packets_sent,
+      'pkts_in' : net_stats.packets_recv
+    }
+  pass
+
+  # Combined usage across all partitions in one pass; faster than the
+  # per-partition report in get_disk_usage below.
+  def get_combined_disk_usage(self):
+    disk_usage = namedtuple('disk_usage', [ 'total', 'used', 'free',
+                                            'percent', 'part_max_used' ])
+    combined_disk_total = 0
+    combined_disk_used = 0
+    combined_disk_free = 0
+    combined_disk_percent = 0
+    max_percent_usage = ('', 0)
+
+    for part in psutil.disk_partitions(all=False):
+      if os.name == 'nt':
+        if 'cdrom' in part.opts or part.fstype == '':
+          # skip cd-rom drives with no disk in it; they may raise
+          # ENOENT, pop-up a Windows GUI error for a non-ready
+          # partition or just hang.
+          continue
+        pass
+      pass
+      usage = psutil.disk_usage(part.mountpoint)
+
+      combined_disk_total += usage.total if hasattr(usage, 'total') else 0
+      combined_disk_used += usage.used if hasattr(usage, 'used') else 0
+      combined_disk_free += usage.free if hasattr(usage, 'free') else 0
+      combined_disk_percent += usage.percent if hasattr(usage, 'percent') else 0
+
+      if hasattr(usage, 'percent') and max_percent_usage[1] < int(usage.percent):
+        max_percent_usage = (part.mountpoint, usage.percent)
+      pass
+    pass
+
+    return { "disk_total" : bytes2human(combined_disk_total),
+             "disk_used"  : bytes2human(combined_disk_used),
+             "disk_free"  : bytes2human(combined_disk_free),
+             "disk_percent" : bytes2human(combined_disk_percent),
+             "max_part_used" : max_percent_usage }
+  pass
+
+  def get_host_static_info(self):
+
+    boot_time = psutil.boot_time()
+    cpu_count_logical = psutil.cpu_count()
+    swap_stats = psutil.swap_memory()
+    mem_info = psutil.virtual_memory()
+
+    return {
+      'cpu_num' : cpu_count_logical,
+      'cpu_speed' : '',
+      'swap_total' : swap_stats.total,
+      'boottime' : boot_time,
+      'machine_type' : platform.processor(),
+      'os_name' : platform.system(),
+      'os_release' : platform.release(),
+      'location' : '',
+      'mem_total' : mem_info.total
+    }
+
+  def get_disk_usage(self):
+    disk_usage = {}
+
+    for part in psutil.disk_partitions(all=False):
+      if os.name == 'nt':
+        if 'cdrom' in part.opts or part.fstype == '':
+          # skip cd-rom drives with no disk in it; they may raise
+          # ENOENT, pop-up a Windows GUI error for a non-ready
+          # partition or just hang.
+          continue
+        pass
+      pass
+      usage = psutil.disk_usage(part.mountpoint)
+      disk_usage.update(
+        { part.device :
+          {
+              "total" : bytes2human(usage.total),
+              "user" : bytes2human(usage.used),
+              "free" : bytes2human(usage.free),
+              "percent" : int(usage.percent),
+              "fstype" : part.fstype,
+              "mount" : part.mountpoint
+          }
+        }
+      )
+    pass
+    return disk_usage
+  pass
+
+  def get_hostname(self):
+    return socket.getfqdn()
+
+  def get_ip_address(self):
+    return socket.gethostbyname(socket.getfqdn())
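
Note on the hasattr() guards in HostInfo above: psutil exposes different
counters on different platforms (iowait and buffers are Linux-only, for
example), so each field is read defensively. The same pattern in isolation,
assuming the bundled psutil is importable:

    import psutil

    def safe_fields(stats, names, default=''):
        # Keep only the attributes this platform actually provides
        return dict((name, getattr(stats, name, default)) for name in names)

    cpu = safe_fields(psutil.cpu_times(),
                      ['user', 'system', 'idle', 'nice', 'iowait'])
    mem = safe_fields(psutil.virtual_memory(),
                      ['total', 'free', 'buffers', 'cached'])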

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py
new file mode 100644
index 0000000..d4b96dc
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+from time import time
+from host_info import HostInfo
+from event_definition import HostMetricCollectEvent, ProcessMetricCollectEvent
+
+logger = logging.getLogger()
+
+DEFAULT_HOST_APP_ID = '_HOST'
+
+class MetricsCollector():
+  """
+  The main Reader thread that dequeues events from the event queue and
+  submits a metric record to the emit buffer. Implementation of dequeue is
+  not required if Timer class is used for metric groups.
+  """
+
+  def __init__(self, emit_queue, application_metric_map):
+    self.emit_queue = emit_queue
+    self.application_metric_map = application_metric_map
+    self.host_info = HostInfo()
+  pass
+
+  def process_event(self, event):
+    if event.get_classname() == HostMetricCollectEvent.__name__:
+      self.process_host_collection_event(event)
+    elif event.get_classname() == ProcessMetricCollectEvent.__name__:
+      self.process_process_collection_event(event)
+    else:
+      logger.warn('Unknown event in queue')
+    pass
+
+  def process_host_collection_event(self, event):
+    # Timestamp for this collection in epoch milliseconds
+    startTime = int(round(time() * 1000))
+    metrics = None
+
+    if 'cpu' in event.get_group_name():
+      metrics = self.host_info.get_cpu_times()
+
+    elif 'disk' in event.get_group_name():
+      metrics = self.host_info.get_combined_disk_usage()
+
+    elif 'network' in event.get_group_name():
+      metrics = self.host_info.get_network_info()
+
+    elif 'mem' in event.get_group_name():
+      metrics = self.host_info.get_mem_info()
+
+    elif 'all' in event.get_group_name():
+      metrics = {}
+      metrics.update(self.host_info.get_cpu_times())
+      metrics.update(self.host_info.get_combined_disk_usage())
+      metrics.update(self.host_info.get_network_info())
+      metrics.update(self.host_info.get_mem_info())
+
+    else:
+      logger.warn('Unknown metric group.')
+    pass
+
+    if metrics:
+      self.application_metric_map.acquire_lock()
+      for metric_name, value in metrics.iteritems():
+        self.application_metric_map.put_metric(DEFAULT_HOST_APP_ID, metric_name, startTime, value)
+      pass
+      self.application_metric_map.release_lock()
+    pass
+
+  def process_process_collection_event(self, event):
+    """
+    Collect Process level metrics and update the application metric map
+    """
+    pass
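
Note on the 'all' metric group above: the per-subsystem dicts are merged with
successive dict.update() calls, so a metric name that appears in more than
one group is overwritten by the later one. A compact sketch of that merge
semantics:

    def merge_metrics(*metric_dicts):
        # Later dicts win on duplicate metric names, mirroring the chain
        # of update() calls in process_host_collection_event
        merged = {}
        for d in metric_dicts:
            merged.update(d)
        return merged

    merged = merge_metrics({'cpu_user': 1.0}, {'mem_free': 2048},
                           {'cpu_user': 1.5})
    assert merged['cpu_user'] == 1.5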

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
new file mode 100644
index 0000000..0a54d6b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from core.controller import Controller, Configuration
+import logging
+import signal
+import sys
+
+logger = logging.getLogger()
+
+def main(argv=None):
+  # Allow Ctrl-C
+  signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+  config = Configuration()
+  collector = Controller(config)
+
+  logger.setLevel(config.get_log_level())
+  formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - %(message)s")
+  stream_handler = logging.StreamHandler()
+  stream_handler.setFormatter(formatter)
+  logger.addHandler(stream_handler)
+  logger.info('Starting Server RPC Thread: %s' % ' '.join(sys.argv))
+
+  collector.start()
+  collector.start_emitter()
+
+
+if __name__ == '__main__':
+  main()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/LICENSE
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/LICENSE b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/LICENSE
new file mode 100644
index 0000000..e91b135
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/LICENSE
@@ -0,0 +1,27 @@
+psutil is distributed under BSD license reproduced below.
+
+Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola'
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of the psutil authors nor the names of its contributors
+   may be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/MANIFEST.in
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/MANIFEST.in b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/MANIFEST.in
new file mode 100644
index 0000000..d930257
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/MANIFEST.in
@@ -0,0 +1,14 @@
+include CREDITS
+include HISTORY
+include LICENSE
+include make.bat
+include Makefile
+include MANIFEST.in
+include README
+include setup.py
+include TODO
+recursive-include docs *
+recursive-exclude docs/_build *
+recursive-include examples *.py
+recursive-include psutil *.py *.c *.h
+recursive-include test *.py README
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/Makefile
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/Makefile b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/Makefile
new file mode 100644
index 0000000..b812527
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/Makefile
@@ -0,0 +1,77 @@
+# Shortcuts for various tasks (UNIX only).
+# To use a specific Python version run:
+# $ make install PYTHON=python3.3
+
+# You can set these variables from the command line.
+PYTHON    = python
+TSCRIPT   = test/test_psutil.py
+
+all: test
+
+clean:
+	rm -f `find . -type f -name \*.py[co]`
+	rm -f `find . -type f -name \*.so`
+	rm -f `find . -type f -name .\*~`
+	rm -f `find . -type f -name \*.orig`
+	rm -f `find . -type f -name \*.bak`
+	rm -f `find . -type f -name \*.rej`
+	rm -rf `find . -type d -name __pycache__`
+	rm -rf *.egg-info
+	rm -rf *\$testfile*
+	rm -rf build
+	rm -rf dist
+	rm -rf docs/_build
+
+build: clean
+	$(PYTHON) setup.py build
+
+install: build
+	if test $(PYTHON) = python2.4; then \
+		$(PYTHON) setup.py install; \
+	elif test $(PYTHON) = python2.5; then \
+		$(PYTHON) setup.py install; \
+	else \
+		$(PYTHON) setup.py install --user; \
+	fi
+
+uninstall:
+	if test $(PYTHON) = python2.4; then \
+		pip-2.4 uninstall -y -v psutil; \
+	else \
+		cd ..; $(PYTHON) -m pip uninstall -y -v psutil; \
+	fi
+
+test: install
+	$(PYTHON) $(TSCRIPT)
+
+test-process: install
+	$(PYTHON) -m unittest -v test.test_psutil.TestProcess
+
+test-system: install
+	$(PYTHON) -m unittest -v test.test_psutil.TestSystemAPIs
+
+# Run a specific test by name; e.g. "make test-by-name disk_" will run
+# all test methods containing "disk_" in their name.
+# Requires "pip install nose".
+test-by-name:
+	@$(PYTHON) -m nose test/test_psutil.py --nocapture -v -m $(filter-out $@,$(MAKECMDGOALS))
+
+memtest: install
+	$(PYTHON) test/test_memory_leaks.py
+
+pep8:
+	@hg locate '*py' | xargs pep8
+
+pyflakes:
+	@export PYFLAKES_NODOCTEST=1 && \
+		hg locate '*py' | xargs pyflakes
+
+# Upload source tarball on https://pypi.python.org/pypi/psutil.
+upload-src: clean
+	$(PYTHON) setup.py sdist upload
+
+# Build and upload doc on https://pythonhosted.org/psutil/.
+# Requires "pip install sphinx-pypi-upload".
+upload-doc:
+	cd docs; make html
+	$(PYTHON) setup.py upload_sphinx --upload-dir=docs/_build/html


[14/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/README
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/README b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/README
new file mode 100644
index 0000000..382e5e8
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/README
@@ -0,0 +1,270 @@
+.. image:: https://pypip.in/d/psutil/badge.png
+    :target: https://crate.io/packages/psutil/
+    :alt: Download this month
+
+.. image:: https://pypip.in/v/psutil/badge.png
+    :target: https://pypi.python.org/pypi/psutil/
+    :alt: Latest version
+
+.. image:: https://pypip.in/license/psutil/badge.png
+    :target: https://pypi.python.org/pypi/psutil/
+    :alt: License
+
+===========
+Quick links
+===========
+
+* `Home page <http://code.google.com/p/psutil>`_
+* `Download <https://pypi.python.org/pypi?:action=display&name=psutil#downloads>`_
+* `Blog <http://grodola.blogspot.com/search/label/psutil>`_
+* `Documentation <http://pythonhosted.org/psutil/>`_
+* `Forum <http://groups.google.com/group/psutil/topics>`_
+* `What's new <https://psutil.googlecode.com/hg/HISTORY>`_
+
+=======
+Summary
+=======
+
+psutil (python system and process utilities) is a cross-platform library for
+retrieving information on **running processes** and **system utilization**
+(CPU, memory, disks, network) in Python. It is useful mainly for **system
+monitoring**, **profiling and limiting process resources** and **management of
+running processes**. It implements many functionalities offered by command line
+tools such as: ps, top, lsof, netstat, ifconfig, who, df, kill, free, nice,
+ionice, iostat, iotop, uptime, pidof, tty, taskset, pmap. It currently supports
+**Linux, Windows, OSX, FreeBSD** and **Sun Solaris**, both **32-bit** and
+**64-bit** architectures, with Python versions from **2.4 to 3.4**. PyPy is
+also known to work.
+
+==============
+Example usages
+==============
+
+CPU
+===
+
+.. code-block:: python
+
+    >>> import psutil
+    >>> psutil.cpu_times()
+    scputimes(user=3961.46, nice=169.729, system=2150.659, idle=16900.540, iowait=629.59, irq=0.0, softirq=19.42, steal=0.0, guest=0, nice=0.0)
+    >>>
+    >>> for x in range(3):
+    ...     psutil.cpu_percent(interval=1)
+    ...
+    4.0
+    5.9
+    3.8
+    >>>
+    >>> for x in range(3):
+    ...     psutil.cpu_percent(interval=1, percpu=True)
+    ...
+    [4.0, 6.9, 3.7, 9.2]
+    [7.0, 8.5, 2.4, 2.1]
+    [1.2, 9.0, 9.9, 7.2]
+    >>>
+    >>>
+    >>> for x in range(3):
+    ...     psutil.cpu_times_percent(interval=1, percpu=False)
+    ...
+    scputimes(user=1.5, nice=0.0, system=0.5, idle=96.5, iowait=1.5, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+    scputimes(user=1.0, nice=0.0, system=0.0, idle=99.0, iowait=0.0, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+    scputimes(user=2.0, nice=0.0, system=0.0, idle=98.0, iowait=0.0, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+    >>>
+    >>> psutil.cpu_count()
+    4
+    >>> psutil.cpu_count(logical=False)
+    2
+    >>>
+
+Memory
+======
+
+.. code-block:: python
+
+    >>> psutil.virtual_memory()
+    svmem(total=8374149120L, available=2081050624L, percent=75.1, used=8074080256L, free=300068864L, active=3294920704, inactive=1361616896, buffers=529895424L, cached=1251086336)
+    >>> psutil.swap_memory()
+    sswap(total=2097147904L, used=296128512L, free=1801019392L, percent=14.1, sin=304193536, sout=677842944)
+    >>>
+
+Disks
+=====
+
+.. code-block:: python
+
+    >>> psutil.disk_partitions()
+    [sdiskpart(device='/dev/sda1', mountpoint='/', fstype='ext4', opts='rw,nosuid'),
+     sdiskpart(device='/dev/sda2', mountpoint='/home', fstype='ext4', opts='rw')]
+    >>>
+    >>> psutil.disk_usage('/')
+    sdiskusage(total=21378641920, used=4809781248, free=15482871808, percent=22.5)
+    >>>
+    >>> psutil.disk_io_counters(perdisk=False)
+    sdiskio(read_count=719566, write_count=1082197, read_bytes=18626220032, write_bytes=24081764352, read_time=5023392, write_time=63199568)
+    >>>
+
+Network
+=======
+
+.. code-block:: python
+
+    >>> psutil.net_io_counters(pernic=True)
+    {'eth0': netio(bytes_sent=485291293, bytes_recv=6004858642, packets_sent=3251564, packets_recv=4787798, errin=0, errout=0, dropin=0, dropout=0),
+     'lo': netio(bytes_sent=2838627, bytes_recv=2838627, packets_sent=30567, packets_recv=30567, errin=0, errout=0, dropin=0, dropout=0)}
+    >>>
+    >>> psutil.net_connections()
+    [pconn(fd=115, family=2, type=1, laddr=('10.0.0.1', 48776), raddr=('93.186.135.91', 80), status='ESTABLISHED', pid=1254),
+     pconn(fd=117, family=2, type=1, laddr=('10.0.0.1', 43761), raddr=('72.14.234.100', 80), status='CLOSING', pid=2987),
+     pconn(fd=-1, family=2, type=1, laddr=('10.0.0.1', 60759), raddr=('72.14.234.104', 80), status='ESTABLISHED', pid=None),
+     pconn(fd=-1, family=2, type=1, laddr=('10.0.0.1', 51314), raddr=('72.14.234.83', 443), status='SYN_SENT', pid=None)
+     ...]
+
+Other system info
+=================
+
+.. code-block:: python
+
+    >>> psutil.users()
+    [user(name='giampaolo', terminal='pts/2', host='localhost', started=1340737536.0),
+     user(name='giampaolo', terminal='pts/3', host='localhost', started=1340737792.0)]
+    >>>
+    >>> psutil.boot_time()
+    1365519115.0
+    >>>
+
+Process management
+==================
+
+.. code-block:: python
+
+    >>> import psutil
+    >>> psutil.pids()
+    [1, 2, 3, 4, 5, 6, 7, 46, 48, 50, 51, 178, 182, 222, 223, 224,
+     268, 1215, 1216, 1220, 1221, 1243, 1244, 1301, 1601, 2237, 2355,
+     2637, 2774, 3932, 4176, 4177, 4185, 4187, 4189, 4225, 4243, 4245,
+     4263, 4282, 4306, 4311, 4312, 4313, 4314, 4337, 4339, 4357, 4358,
+     4363, 4383, 4395, 4408, 4433, 4443, 4445, 4446, 5167, 5234, 5235,
+     5252, 5318, 5424, 5644, 6987, 7054, 7055, 7071]
+    >>>
+    >>> p = psutil.Process(7055)
+    >>> p.name()
+    'python'
+    >>> p.exe()
+    '/usr/bin/python'
+    >>> p.cwd()
+    '/home/giampaolo'
+    >>> p.cmdline()
+    ['/usr/bin/python', 'main.py']
+    >>>
+    >>> p.status()
+    'running'
+    >>> p.username()
+    'giampaolo'
+    >>> p.create_time()
+    1267551141.5019531
+    >>> p.terminal()
+    '/dev/pts/0'
+    >>>
+    >>> p.uids()
+    puids(real=1000, effective=1000, saved=1000)
+    >>> p.gids()
+    pgids(real=1000, effective=1000, saved=1000)
+    >>>
+    >>> p.cpu_times()
+    pcputimes(user=1.02, system=0.31)
+    >>> p.cpu_percent(interval=1.0)
+    12.1
+    >>> p.cpu_affinity()
+    [0, 1, 2, 3]
+    >>> p.set_cpu_affinity([0])
+    >>>
+    >>> p.memory_percent()
+    0.63423
+    >>>
+    >>> p.memory_info()
+    pmem(rss=7471104, vms=68513792)
+    >>> p.ext_memory_info()
+    extmem(rss=9662464, vms=49192960, shared=3612672, text=2564096, lib=0, data=5754880, dirty=0)
+    >>> p.memory_maps()
+    [pmmap_grouped(path='/lib/x86_64-linux-gnu/libutil-2.15.so', rss=16384, anonymous=8192, swap=0),
+     pmmap_grouped(path='/lib/x86_64-linux-gnu/libc-2.15.so', rss=6384, anonymous=15, swap=0),
+     pmmap_grouped(path='/lib/x86_64-linux-gnu/libcrypto.so.1.0.0', rss=34124, anonymous=1245, swap=0),
+     pmmap_grouped(path='[heap]', rss=54653, anonymous=8192, swap=0),
+     pmmap_grouped(path='[stack]', rss=1542, anonymous=166, swap=0),
+     ...]
+    >>>
+    >>> p.io_counters()
+    pio(read_count=478001, write_count=59371, read_bytes=700416, write_bytes=69632)
+    >>>
+    >>> p.open_files()
+    [popenfile(path='/home/giampaolo/svn/psutil/somefile', fd=3)]
+    >>>
+    >>> p.connections()
+    [pconn(fd=115, family=2, type=1, laddr=('10.0.0.1', 48776), raddr=('93.186.135.91', 80), status='ESTABLISHED'),
+     pconn(fd=117, family=2, type=1, laddr=('10.0.0.1', 43761), raddr=('72.14.234.100', 80), status='CLOSING'),
+     pconn(fd=119, family=2, type=1, laddr=('10.0.0.1', 60759), raddr=('72.14.234.104', 80), status='ESTABLISHED'),
+     pconn(fd=123, family=2, type=1, laddr=('10.0.0.1', 51314), raddr=('72.14.234.83', 443), status='SYN_SENT')]
+    >>>
+    >>> p.num_threads()
+    4
+    >>> p.num_fds()
+    8
+    >>> p.threads()
+    [pthread(id=5234, user_time=22.5, system_time=9.2891),
+     pthread(id=5235, user_time=0.0, system_time=0.0),
+     pthread(id=5236, user_time=0.0, system_time=0.0),
+     pthread(id=5237, user_time=0.0707, system_time=1.1)]
+    >>>
+    >>> p.num_ctx_switches()
+    pctxsw(voluntary=78, involuntary=19)
+    >>>
+    >>> p.nice()
+    0
+    >>> p.set_nice(10)
+    >>>
+    >>> p.set_ionice(psutil.IOPRIO_CLASS_IDLE)  # IO priority (Win and Linux only)
+    >>> p.ionice()
+    pionice(ioclass=3, value=0)
+    >>>
+    >>> p.set_rlimit(psutil.RLIMIT_NOFILE, (5, 5))  # resource limits (Linux only)
+    >>> p.rlimit(psutil.RLIMIT_NOFILE)
+    (5, 5)
+    >>>
+    >>> p.suspend()
+    >>> p.resume()
+    >>>
+    >>> p.terminate()
+    >>> p.wait(timeout=3)
+    0
+    >>>
+    >>> psutil.test()
+    USER         PID %CPU %MEM     VSZ     RSS TTY        START    TIME  COMMAND
+    root           1  0.0  0.0   24584    2240            Jun17   00:00  init
+    root           2  0.0  0.0       0       0            Jun17   00:00  kthreadd
+    root           3  0.0  0.0       0       0            Jun17   00:05  ksoftirqd/0
+    ...
+    giampaolo  31475  0.0  0.0   20760    3024 /dev/pts/0 Jun19   00:00  python2.4
+    giampaolo  31721  0.0  2.2  773060  181896            00:04   10:30  chrome
+    root       31763  0.0  0.0       0       0            00:05   00:00  kworker/0:1
+    >>>
+
+Further process APIs
+====================
+
+.. code-block:: python
+
+    >>> for p in psutil.process_iter():
+    ...     print(p)
+    ...
+    psutil.Process(pid=1, name='init')
+    psutil.Process(pid=2, name='kthreadd')
+    psutil.Process(pid=3, name='ksoftirqd/0')
+    ...
+    >>>
+    >>> def on_terminate(proc):
+    ...     print("process {} terminated".format(proc))
+    ...
+    >>> # waits for multiple processes to terminate
+    >>> gone, alive = psutil.wait_procs(procs_list, 3, callback=on_terminate)
+    >>>

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.out
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.out b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.out
new file mode 100644
index 0000000..cdedb21
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.out
@@ -0,0 +1,137 @@
+rm -f `find . -type f -name \*.py[co]`
+rm -f `find . -type f -name \*.so`
+rm -f `find . -type f -name .\*~`
+rm -f `find . -type f -name \*.orig`
+rm -f `find . -type f -name \*.bak`
+rm -f `find . -type f -name \*.rej`
+rm -rf `find . -type d -name __pycache__`
+rm -rf *.egg-info
+rm -rf *\estfile*
+rm -rf build
+rm -rf dist
+rm -rf docs/_build
+python setup.py build
+running build
+running build_py
+creating build
+creating build/lib.macosx-10.8-intel-2.7
+creating build/lib.macosx-10.8-intel-2.7/psutil
+copying psutil/__init__.py -> build/lib.macosx-10.8-intel-2.7/psutil
+copying psutil/_common.py -> build/lib.macosx-10.8-intel-2.7/psutil
+copying psutil/_compat.py -> build/lib.macosx-10.8-intel-2.7/psutil
+copying psutil/_psbsd.py -> build/lib.macosx-10.8-intel-2.7/psutil
+copying psutil/_pslinux.py -> build/lib.macosx-10.8-intel-2.7/psutil
+copying psutil/_psosx.py -> build/lib.macosx-10.8-intel-2.7/psutil
+copying psutil/_psposix.py -> build/lib.macosx-10.8-intel-2.7/psutil
+copying psutil/_pssunos.py -> build/lib.macosx-10.8-intel-2.7/psutil
+copying psutil/_pswindows.py -> build/lib.macosx-10.8-intel-2.7/psutil
+running build_ext
+building '_psutil_osx' extension
+creating build/temp.macosx-10.8-intel-2.7
+creating build/temp.macosx-10.8-intel-2.7/psutil
+creating build/temp.macosx-10.8-intel-2.7/psutil/arch
+creating build/temp.macosx-10.8-intel-2.7/psutil/arch/osx
+clang -fno-strict-aliasing -fno-common -dynamic -g -Os -pipe -fno-common -fno-strict-aliasing -fwrapv -mno-fused-madd -DENABLE_DTRACE -DMACOSX -DNDEBUG -Wall -Wstrict-prototypes -Wshorten-64-to-32 -DNDEBUG -g -Os -Wall -Wstrict-prototypes -DENABLE_DTRACE -arch i386 -arch x86_64 -pipe -I/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7 -c psutil/_psutil_osx.c -o build/temp.macosx-10.8-intel-2.7/psutil/_psutil_osx.o
+clang: warning: argument unused during compilation: '-mno-fused-madd'
+psutil/_psutil_osx.c:341:48: warning: format specifies type 'unsigned long' but the argument has type 'vm_address_t' (aka 'unsigned int') [-Wformat]
+            sprintf(addr_str, "%016lx-%016lx", address, address + size);
+                               ~~~~~~          ^~~~~~~
+                               %016x
+/usr/include/secure/_stdio.h:49:56: note: expanded from macro 'sprintf'
+  __builtin___sprintf_chk (str, 0, __darwin_obsz(str), __VA_ARGS__)
+                                                       ^
+psutil/_psutil_osx.c:341:57: warning: format specifies type 'unsigned long' but the argument has type 'unsigned int' [-Wformat]
+            sprintf(addr_str, "%016lx-%016lx", address, address + size);
+                                      ~~~~~~            ^~~~~~~~~~~~~~
+                                      %016x
+/usr/include/secure/_stdio.h:49:56: note: expanded from macro 'sprintf'
+  __builtin___sprintf_chk (str, 0, __darwin_obsz(str), __VA_ARGS__)
+                                                       ^
+2 warnings generated.
+psutil/_psutil_osx.c:126:31: warning: implicit conversion loses integer precision: 'long' to 'pid_t' (aka 'int') [-Wshorten-64-to-32]
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        ~~~~~~~~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:168:24: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    ret = proc_pidpath(pid, &buf, sizeof(buf));
+          ~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:211:31: warning: implicit conversion loses integer precision: 'long' to 'pid_t' (aka 'int') [-Wshorten-64-to-32]
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        ~~~~~~~~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:229:31: warning: implicit conversion loses integer precision: 'long' to 'pid_t' (aka 'int') [-Wshorten-64-to-32]
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        ~~~~~~~~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:250:31: warning: implicit conversion loses integer precision: 'long' to 'pid_t' (aka 'int') [-Wshorten-64-to-32]
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        ~~~~~~~~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:271:31: warning: implicit conversion loses integer precision: 'long' to 'pid_t' (aka 'int') [-Wshorten-64-to-32]
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        ~~~~~~~~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:306:42: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    err = task_for_pid(mach_task_self(), pid, &task);
+          ~~~~~~~~~~~~                   ^~~
+psutil/_psutil_osx.c:350:39: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+            err = proc_regionfilename(pid, address, buf, sizeof(buf));
+                  ~~~~~~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:502:31: warning: implicit conversion loses integer precision: 'long' to 'pid_t' (aka 'int') [-Wshorten-64-to-32]
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        ~~~~~~~~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:808:25: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    num = getfsstat(fs, len, MNT_NOWAIT);
+          ~~~~~~~~~     ^~~
+psutil/_psutil_osx.c:906:31: warning: implicit conversion loses integer precision: 'long' to 'pid_t' (aka 'int') [-Wshorten-64-to-32]
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        ~~~~~~~~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:942:42: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    err = task_for_pid(mach_task_self(), pid, &task);
+          ~~~~~~~~~~~~                   ^~~
+psutil/_psutil_osx.c:1055:35: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0);
+                     ~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:1068:35: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds_pointer,
+                     ~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:1085:33: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+            nb = proc_pidfdinfo(pid,
+                 ~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:1191:35: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0);
+                     ~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:1201:35: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds_pointer,
+                     ~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:1219:33: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+            nb = proc_pidfdinfo(pid, fdp_pointer->proc_fd,
+                 ~~~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:1393:35: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0);
+                     ~~~~~~~~~~~~ ^~~
+psutil/_psutil_osx.c:1402:35: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds_pointer,
+                     ~~~~~~~~~~~~ ^~~
+20 warnings generated.
+clang -fno-strict-aliasing -fno-common -dynamic -g -Os -pipe -fno-common -fno-strict-aliasing -fwrapv -mno-fused-madd -DENABLE_DTRACE -DMACOSX -DNDEBUG -Wall -Wstrict-prototypes -Wshorten-64-to-32 -DNDEBUG -g -Os -Wall -Wstrict-prototypes -DENABLE_DTRACE -arch i386 -arch x86_64 -pipe -I/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7 -c psutil/_psutil_common.c -o build/temp.macosx-10.8-intel-2.7/psutil/_psutil_common.o
+clang: warning: argument unused during compilation: '-mno-fused-madd'
+clang -fno-strict-aliasing -fno-common -dynamic -g -Os -pipe -fno-common -fno-strict-aliasing -fwrapv -mno-fused-madd -DENABLE_DTRACE -DMACOSX -DNDEBUG -Wall -Wstrict-prototypes -Wshorten-64-to-32 -DNDEBUG -g -Os -Wall -Wstrict-prototypes -DENABLE_DTRACE -arch i386 -arch x86_64 -pipe -I/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7 -c psutil/arch/osx/process_info.c -o build/temp.macosx-10.8-intel-2.7/psutil/arch/osx/process_info.o
+clang: warning: argument unused during compilation: '-mno-fused-madd'
+psutil/arch/osx/process_info.c:40:21: warning: implicit conversion loses integer precision: 'long' to 'pid_t' (aka 'int') [-Wshorten-64-to-32]
+    kill_ret = kill(pid , 0);
+               ~~~~ ^~~
+psutil/arch/osx/process_info.c:176:14: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
+    mib[2] = pid;
+           ~ ^~~
+psutil/arch/osx/process_info.c:194:11: warning: implicit conversion loses integer precision: 'size_t' (aka 'unsigned long') to 'int' [-Wshorten-64-to-32]
+    len = strlen(arg_ptr);
+        ~ ^~~~~~~~~~~~~~~
+3 warnings generated.
+clang -bundle -undefined dynamic_lookup -Wl,-F. -arch i386 -arch x86_64 build/temp.macosx-10.8-intel-2.7/psutil/_psutil_osx.o build/temp.macosx-10.8-intel-2.7/psutil/_psutil_common.o build/temp.macosx-10.8-intel-2.7/psutil/arch/osx/process_info.o -o build/lib.macosx-10.8-intel-2.7/_psutil_osx.so -framework CoreFoundation -framework IOKit
+building '_psutil_posix' extension
+clang -fno-strict-aliasing -fno-common -dynamic -g -Os -pipe -fno-common -fno-strict-aliasing -fwrapv -mno-fused-madd -DENABLE_DTRACE -DMACOSX -DNDEBUG -Wall -Wstrict-prototypes -Wshorten-64-to-32 -DNDEBUG -g -Os -Wall -Wstrict-prototypes -DENABLE_DTRACE -arch i386 -arch x86_64 -pipe -I/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7 -c psutil/_psutil_posix.c -o build/temp.macosx-10.8-intel-2.7/psutil/_psutil_posix.o
+clang: warning: argument unused during compilation: '-mno-fused-madd'
+psutil/_psutil_posix.c:29:42: warning: implicit conversion loses integer precision: 'long' to 'id_t' (aka 'unsigned int') [-Wshorten-64-to-32]
+    priority = getpriority(PRIO_PROCESS, pid);
+               ~~~~~~~~~~~               ^~~
+psutil/_psutil_posix.c:49:40: warning: implicit conversion loses integer precision: 'long' to 'id_t' (aka 'unsigned int') [-Wshorten-64-to-32]
+    retval = setpriority(PRIO_PROCESS, pid, priority);
+             ~~~~~~~~~~~               ^~~
+2 warnings generated.
+clang -bundle -undefined dynamic_lookup -Wl,-F. -arch i386 -arch x86_64 build/temp.macosx-10.8-intel-2.7/psutil/_psutil_posix.o -o build/lib.macosx-10.8-intel-2.7/_psutil_posix.so

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.py
new file mode 100644
index 0000000..09fb411
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from subprocess import call
+import sys
+import os
+import shutil
+
+def build():
+  path = os.path.dirname(os.path.abspath(__file__))
+  build_path = path + os.sep + 'build'
+  build_out_path = path + os.sep + 'build.out'
+  build_out = open(build_out_path, 'wb')
+
+  # Delete old build dir if exists
+  if (os.path.exists(build_path)):
+    shutil.rmtree(build_path)
+  pass
+
+  cwd = os.getcwd()
+  os.chdir(path)
+
+  print 'Executing make at location: %s ' % path
+
+  if sys.platform.startswith("win"):
+    # Windows
+    returncode = call(['make.bat', 'build'], stdout=build_out, stderr=build_out)
+  else:
+    # Unix based
+    returncode = call(['make', 'build'], stdout=build_out, stderr=build_out)
+  pass
+
+  os.chdir(cwd)
+
+  if returncode != 0:
+    print 'psutil build failed. Please find build output at: %s' % build_out_path
+  pass
+
+if __name__ == '__main__':
+  build()
\ No newline at end of file
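
Note on the build wrapper above: subprocess.call() is used with stdout and
stderr redirected to a log file, and the integer return code decides whether
the build succeeded. The same pattern in isolation:

    from subprocess import call

    def run_logged(cmd, log_path):
        # Redirect both streams to the log; call() blocks and returns the
        # process exit status.
        log = open(log_path, 'wb')
        try:
            return call(cmd, stdout=log, stderr=log)
        finally:
            log.close()

    # Example: rc = run_logged(['make', 'build'], 'build.out')
    # rc == 0 indicates success.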


[12/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_compat.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_compat.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_compat.py
new file mode 100644
index 0000000..b6ac933
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_compat.py
@@ -0,0 +1,433 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module which provides compatibility with older Python versions."""
+
+__all__ = ["PY3", "int", "long", "xrange", "exec_", "callable", "namedtuple",
+           "property", "wraps", "defaultdict", "update_wrapper", "lru_cache"]
+
+import sys
+try:
+    import __builtin__
+except ImportError:
+    import builtins as __builtin__  # py3
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    int = int
+    long = int
+    xrange = range
+    unicode = str
+    exec_ = getattr(__builtin__, "exec")
+    print_ = getattr(__builtin__, "print")
+
+    def u(s):
+        return s
+
+    def b(s):
+        return s.encode("latin-1")
+else:
+    int = int
+    long = long
+    xrange = xrange
+    unicode = unicode
+
+    def u(s):
+        return unicode(s, "unicode_escape")
+
+    def b(s):
+        return s
+
+    def exec_(code, globs=None, locs=None):
+        if globs is None:
+            frame = sys._getframe(1)
+            globs = frame.f_globals
+            if locs is None:
+                locs = frame.f_locals
+            del frame
+        elif locs is None:
+            locs = globs
+        exec("""exec code in globs, locs""")
+
+    def print_(s):
+        sys.stdout.write(s + '\n')
+        sys.stdout.flush()
+
+
+# removed in 3.0, reintroduced in 3.2
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+# --- stdlib additions
+
+# py 2.6 collections.namedtuple
+# Taken from: http://code.activestate.com/recipes/500261/
+# Credits: Raymond Hettinger
+try:
+    from collections import namedtuple
+except ImportError:
+    from operator import itemgetter as _itemgetter
+    from keyword import iskeyword as _iskeyword
+    import sys as _sys
+
+    def namedtuple(typename, field_names, verbose=False, rename=False):
+        """A collections.namedtuple implementation, see:
+        http://docs.python.org/library/collections.html#namedtuple
+        """
+        if isinstance(field_names, basestring):
+            field_names = field_names.replace(',', ' ').split()
+        field_names = tuple(map(str, field_names))
+        if rename:
+            names = list(field_names)
+            seen = set()
+            for i, name in enumerate(names):
+                if ((not min(c.isalnum() or c == '_' for c in name)
+                        or _iskeyword(name)
+                        or not name or name[0].isdigit()
+                        or name.startswith('_')
+                        or name in seen)):
+                    names[i] = '_%d' % i
+                seen.add(name)
+            field_names = tuple(names)
+        for name in (typename,) + field_names:
+            if not min(c.isalnum() or c == '_' for c in name):
+                raise ValueError('Type names and field names can only contain '
+                                 'alphanumeric characters and underscores: %r'
+                                 % name)
+            if _iskeyword(name):
+                raise ValueError('Type names and field names cannot be a '
+                                 'keyword: %r' % name)
+            if name[0].isdigit():
+                raise ValueError('Type names and field names cannot start '
+                                 'with a number: %r' % name)
+        seen_names = set()
+        for name in field_names:
+            if name.startswith('_') and not rename:
+                raise ValueError(
+                    'Field names cannot start with an underscore: %r' % name)
+            if name in seen_names:
+                raise ValueError('Encountered duplicate field name: %r' % name)
+            seen_names.add(name)
+
+        numfields = len(field_names)
+        argtxt = repr(field_names).replace("'", "")[1:-1]
+        reprtxt = ', '.join('%s=%%r' % name for name in field_names)
+        template = '''class %(typename)s(tuple):
+        '%(typename)s(%(argtxt)s)' \n
+        __slots__ = () \n
+        _fields = %(field_names)r \n
+        def __new__(_cls, %(argtxt)s):
+            return _tuple.__new__(_cls, (%(argtxt)s)) \n
+        @classmethod
+        def _make(cls, iterable, new=tuple.__new__, len=len):
+            'Make a new %(typename)s object from a sequence or iterable'
+            result = new(cls, iterable)
+            if len(result) != %(numfields)d:
+                raise TypeError(
+                    'Expected %(numfields)d arguments, got %%d' %% len(result))
+            return result \n
+        def __repr__(self):
+            return '%(typename)s(%(reprtxt)s)' %% self \n
+        def _asdict(self):
+            'Return a new dict which maps field names to their values'
+            return dict(zip(self._fields, self)) \n
+        def _replace(_self, **kwds):
+            result = _self._make(map(kwds.pop, %(field_names)r, _self))
+            if kwds:
+                raise ValueError(
+                    'Got unexpected field names: %%r' %% kwds.keys())
+            return result \n
+        def __getnewargs__(self):
+            return tuple(self) \n\n''' % locals()
+        for i, name in enumerate(field_names):
+            template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
+        if verbose:
+            sys.stdout.write(template + '\n')
+            sys.stdout.flush()
+
+        namespace = dict(
+            _itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
+            _property=property, _tuple=tuple)
+        try:
+            exec_(template, namespace)
+        except SyntaxError:
+            e = sys.exc_info()[1]
+            raise SyntaxError(e.message + ':\n' + template)
+        result = namespace[typename]
+        try:
+            result.__module__ = _sys._getframe(
+                1).f_globals.get('__name__', '__main__')
+        except (AttributeError, ValueError):
+            pass
+
+        return result
+
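+# A quick usage sketch of the backport above (hypothetical names, not
+# part of psutil):
+#
+#     Point = namedtuple('Point', 'x y')
+#     p = Point(1, 2)
+#     p.x                    # -> 1
+#     p._asdict()            # -> {'x': 1, 'y': 2}
+#     p._replace(x=5)        # -> Point(x=5, y=2)
+#     Point._make([3, 4])    # -> Point(x=3, y=4)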
+
+# hack to support property getter/setter/deleter on python < 2.6
+# http://docs.python.org/library/functions.html?highlight=property#property
+if hasattr(property, 'setter'):
+    property = property
+else:
+    class property(__builtin__.property):
+        __metaclass__ = type
+
+        def __init__(self, fget, *args, **kwargs):
+            super(property, self).__init__(fget, *args, **kwargs)
+            self.__doc__ = fget.__doc__
+
+        def getter(self, method):
+            return property(method, self.fset, self.fdel)
+
+        def setter(self, method):
+            return property(self.fget, method, self.fdel)
+
+        def deleter(self, method):
+            return property(self.fget, self.fset, method)
+
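+# Rough illustration of what the fallback enables on Python < 2.6
+# (hypothetical class, not from psutil):
+#
+#     class C(object):
+#         @property
+#         def x(self):
+#             return self._x
+#
+#         @x.setter            # 2.5's builtin property lacks .setter;
+#         def x(self, value):  # the subclass above provides it
+#             self._x = value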
+
+# py 2.5 collections.defaultdict
+# Taken from:
+# http://code.activestate.com/recipes/523034-emulate-collectionsdefaultdict/
+# Credits: Jason Kirtland
+try:
+    from collections import defaultdict
+except ImportError:
+    class defaultdict(dict):
+        """Dict subclass that calls a factory function to supply
+        missing values:
+        http://docs.python.org/library/collections.html#collections.defaultdict
+        """
+
+        def __init__(self, default_factory=None, *a, **kw):
+            if ((default_factory is not None and
+                    not hasattr(default_factory, '__call__'))):
+                raise TypeError('first argument must be callable')
+            dict.__init__(self, *a, **kw)
+            self.default_factory = default_factory
+
+        def __getitem__(self, key):
+            try:
+                return dict.__getitem__(self, key)
+            except KeyError:
+                return self.__missing__(key)
+
+        def __missing__(self, key):
+            if self.default_factory is None:
+                raise KeyError(key)
+            self[key] = value = self.default_factory()
+            return value
+
+        def __reduce__(self):
+            if self.default_factory is None:
+                args = tuple()
+            else:
+                args = self.default_factory,
+            return type(self), args, None, None, self.items()
+
+        def copy(self):
+            return self.__copy__()
+
+        def __copy__(self):
+            return type(self)(self.default_factory, self)
+
+        def __deepcopy__(self, memo):
+            import copy
+            return type(self)(self.default_factory,
+                              copy.deepcopy(self.items()))
+
+        def __repr__(self):
+            return 'defaultdict(%s, %s)' % (self.default_factory,
+                                            dict.__repr__(self))
+
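+# Behaviour sketch (mirrors the stdlib class it emulates):
+#
+#     d = defaultdict(list)
+#     d['k'].append(1)          # missing key -> default_factory() == []
+#     d['k']                    # -> [1]
+#     defaultdict(None)['k']    # raises KeyError (no factory)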
+
+# py 2.5 functools.wraps
+try:
+    from functools import wraps
+except ImportError:
+    def wraps(original):
+        def inner(fn):
+            for attribute in ['__module__', '__name__', '__doc__']:
+                setattr(fn, attribute, getattr(original, attribute))
+            for attribute in ['__dict__']:
+                if hasattr(fn, attribute):
+                    getattr(fn, attribute).update(getattr(original, attribute))
+                else:
+                    setattr(fn, attribute,
+                            getattr(original, attribute).copy())
+            return fn
+        return inner
+
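+# Usage sketch (hypothetical decorator, not from psutil): the fallback
+# copies enough metadata for introspection to keep working:
+#
+#     def log_calls(fn):
+#         @wraps(fn)
+#         def wrapper(*args, **kwargs):
+#             return fn(*args, **kwargs)
+#         return wrapper         # wrapper.__name__ == fn.__name__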
+
+# py 2.5 functools.update_wrapper
+try:
+    from functools import update_wrapper
+except ImportError:
+    WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
+    WRAPPER_UPDATES = ('__dict__',)
+
+    def update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS,
+                       updated=WRAPPER_UPDATES):
+        """Update a wrapper function to look like the wrapped function, see:
+        http://docs.python.org/library/functools.html#functools.update_wrapper
+        """
+        for attr in assigned:
+            setattr(wrapper, attr, getattr(wrapped, attr))
+        for attr in updated:
+            getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+        return wrapper
+
+
+# py 3.2 functools.lru_cache
+# Taken from: http://code.activestate.com/recipes/578078
+# Credit: Raymond Hettinger
+try:
+    from functools import lru_cache
+except ImportError:
+    try:
+        from threading import RLock
+    except ImportError:
+        from dummy_threading import RLock
+
+    _CacheInfo = namedtuple("CacheInfo",
+                            ["hits", "misses", "maxsize", "currsize"])
+
+    class _HashedSeq(list):
+        __slots__ = 'hashvalue'
+
+        def __init__(self, tup, hash=hash):
+            self[:] = tup
+            self.hashvalue = hash(tup)
+
+        def __hash__(self):
+            return self.hashvalue
+
+    def _make_key(args, kwds, typed,
+                  kwd_mark=(object(), ),
+                  fasttypes=set((int, str, frozenset, type(None))),
+                  sorted=sorted, tuple=tuple, type=type, len=len):
+        key = args
+        if kwds:
+            sorted_items = sorted(kwds.items())
+            key += kwd_mark
+            for item in sorted_items:
+                key += item
+        if typed:
+            key += tuple(type(v) for v in args)
+            if kwds:
+                key += tuple(type(v) for k, v in sorted_items)
+        elif len(key) == 1 and type(key[0]) in fasttypes:
+            return key[0]
+        return _HashedSeq(key)
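+
+    # Key construction, roughly:
+    #   _make_key((1, 2), {'a': 3}, False)  ->  _HashedSeq of
+    #       (1, 2, <kwd_mark>, 'a', 3)
+    #   _make_key((1,), {}, False)  ->  1 (a lone "fast type" argument
+    #       is returned as-is, skipping the _HashedSeq wrapper)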
+
+    def lru_cache(maxsize=100, typed=False):
+        """Least-recently-used cache decorator, see:
+        http://docs.python.org/3/library/functools.html#functools.lru_cache
+        """
+        def decorating_function(user_function):
+            cache = dict()
+            stats = [0, 0]
+            HITS, MISSES = 0, 1
+            make_key = _make_key
+            cache_get = cache.get
+            _len = len
+            lock = RLock()
+            root = []
+            root[:] = [root, root, None, None]
+            nonlocal_root = [root]
+            PREV, NEXT, KEY, RESULT = 0, 1, 2, 3
+            if maxsize == 0:
+                def wrapper(*args, **kwds):
+                    result = user_function(*args, **kwds)
+                    stats[MISSES] += 1
+                    return result
+            elif maxsize is None:
+                def wrapper(*args, **kwds):
+                    key = make_key(args, kwds, typed)
+                    result = cache_get(key, root)
+                    if result is not root:
+                        stats[HITS] += 1
+                        return result
+                    result = user_function(*args, **kwds)
+                    cache[key] = result
+                    stats[MISSES] += 1
+                    return result
+            else:
+                def wrapper(*args, **kwds):
+                    if kwds or typed:
+                        key = make_key(args, kwds, typed)
+                    else:
+                        key = args
+                    lock.acquire()
+                    try:
+                        link = cache_get(key)
+                        if link is not None:
+                            root, = nonlocal_root
+                            link_prev, link_next, key, result = link
+                            link_prev[NEXT] = link_next
+                            link_next[PREV] = link_prev
+                            last = root[PREV]
+                            last[NEXT] = root[PREV] = link
+                            link[PREV] = last
+                            link[NEXT] = root
+                            stats[HITS] += 1
+                            return result
+                    finally:
+                        lock.release()
+                    result = user_function(*args, **kwds)
+                    lock.acquire()
+                    try:
+                        root, = nonlocal_root
+                        if key in cache:
+                            pass
+                        elif _len(cache) >= maxsize:
+                            oldroot = root
+                            oldroot[KEY] = key
+                            oldroot[RESULT] = result
+                            root = nonlocal_root[0] = oldroot[NEXT]
+                            oldkey = root[KEY]
+                            root[KEY] = root[RESULT] = None
+                            del cache[oldkey]
+                            cache[key] = oldroot
+                        else:
+                            last = root[PREV]
+                            link = [last, root, key, result]
+                            last[NEXT] = root[PREV] = cache[key] = link
+                        stats[MISSES] += 1
+                    finally:
+                        lock.release()
+                    return result
+
+            def cache_info():
+                """Report cache statistics"""
+                lock.acquire()
+                try:
+                    return _CacheInfo(stats[HITS], stats[MISSES], maxsize,
+                                      len(cache))
+                finally:
+                    lock.release()
+
+            def cache_clear():
+                """Clear the cache and cache statistics"""
+                lock.acquire()
+                try:
+                    cache.clear()
+                    root = nonlocal_root[0]
+                    root[:] = [root, root, None, None]
+                    stats[:] = [0, 0]
+                finally:
+                    lock.release()
+
+            wrapper.__wrapped__ = user_function
+            wrapper.cache_info = cache_info
+            wrapper.cache_clear = cache_clear
+            return update_wrapper(wrapper, user_function)
+
+        return decorating_function
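+
+    # Usage sketch (hypothetical function; mirrors the stdlib API):
+    #
+    #     @lru_cache(maxsize=32)
+    #     def fib(n):
+    #         return n if n < 2 else fib(n - 1) + fib(n - 2)
+    #
+    #     fib(10)              # -> 55
+    #     fib.cache_info()     # -> CacheInfo(hits=8, misses=11,
+    #                          #              maxsize=32, currsize=11)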

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psbsd.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psbsd.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psbsd.py
new file mode 100644
index 0000000..5663736
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psbsd.py
@@ -0,0 +1,389 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""FreeBSD platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import conn_tmap, usage_percent
+from psutil._compat import namedtuple, wraps
+import _psutil_bsd as cext
+import _psutil_posix
+
+
+__extra__all__ = []
+
+# --- constants
+
+PROC_STATUSES = {
+    cext.SSTOP: _common.STATUS_STOPPED,
+    cext.SSLEEP: _common.STATUS_SLEEPING,
+    cext.SRUN: _common.STATUS_RUNNING,
+    cext.SIDL: _common.STATUS_IDLE,
+    cext.SWAIT: _common.STATUS_WAITING,
+    cext.SLOCK: _common.STATUS_LOCKED,
+    cext.SZOMB: _common.STATUS_ZOMBIE,
+}
+
+TCP_STATUSES = {
+    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.TCPS_CLOSED: _common.CONN_CLOSE,
+    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.TCPS_LISTEN: _common.CONN_LISTEN,
+    cext.TCPS_CLOSING: _common.CONN_CLOSING,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PAGESIZE = os.sysconf("SC_PAGE_SIZE")
+
+# extend base mem ntuple with BSD-specific memory metrics
+svmem = namedtuple(
+    'svmem', ['total', 'available', 'percent', 'used', 'free',
+              'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])
+scputimes = namedtuple(
+    'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])
+pextmem = namedtuple('pextmem', ['rss', 'vms', 'text', 'data', 'stack'])
+pmmap_grouped = namedtuple(
+    'pmmap_grouped', 'path rss private ref_count shadow_count')
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms path rss private ref_count shadow_count')
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+def virtual_memory():
+    """System virtual memory as a namedutple."""
+    mem = cext.virtual_mem()
+    total, free, active, inactive, wired, cached, buffers, shared = mem
+    avail = inactive + cached + free
+    used = active + wired + cached
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free,
+                 active, inactive, buffers, cached, shared, wired)
+
+
+def swap_memory():
+    """System swap memory as (total, used, free, sin, sout) namedtuple."""
+    total, used, free, sin, sout = [x * PAGESIZE for x in cext.swap_mem()]
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent, sin, sout)
+
+
+def cpu_times():
+    """Return system per-CPU times as a named tuple"""
+    user, nice, system, idle, irq = cext.cpu_times()
+    return scputimes(user, nice, system, idle, irq)
+
+
+if hasattr(cext, "per_cpu_times"):
+    def per_cpu_times():
+        """Return system CPU times as a named tuple"""
+        ret = []
+        for cpu_t in cext.per_cpu_times():
+            user, nice, system, idle, irq = cpu_t
+            item = scputimes(user, nice, system, idle, irq)
+            ret.append(item)
+        return ret
+else:
+    # XXX
+    # Ok, this is very dirty.
+    # On FreeBSD < 8 we cannot gather per-cpu information, see:
+    # http://code.google.com/p/psutil/issues/detail?id=226
+    # If num cpus > 1, on first call we return single cpu times to avoid a
+    # crash at psutil import time.
+    # Next calls will fail with NotImplementedError
+    def per_cpu_times():
+        if cpu_count_logical() == 1:
+            return [cpu_times()]
+        if per_cpu_times.__called__:
+            raise NotImplementedError("supported only starting from FreeBSD 8")
+        per_cpu_times.__called__ = True
+        return [cpu_times()]
+
+    per_cpu_times.__called__ = False
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    return cext.cpu_count_logical()
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    # From the C module we'll get an XML string similar to this:
+    # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
+    # We may get None in case "sysctl kern.sched.topology_spec"
+    # is not supported on this BSD version, in which case we'll mimic
+    # os.cpu_count() and return None.
+    s = cext.cpu_count_phys()
+    if s is not None:
+        # get rid of padding chars appended at the end of the string
+        index = s.rfind("</groups>")
+        if index != -1:
+            s = s[:index + 9]
+            if sys.version_info >= (2, 5):
+                import xml.etree.ElementTree as ET
+                root = ET.fromstring(s)
+                return len(root.findall('group/children/group/cpu')) or None
+            else:
+                s = s[s.find('<children>'):]
+                return s.count("<cpu") or None
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def disk_partitions(all=False):
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            if not os.path.isabs(device) or not os.path.exists(device):
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+def users():
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp = item
+        if tty == '~':
+            continue  # reboot or shutdown
+        nt = _common.suser(user, tty or None, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def net_connections(kind):
+    if kind not in _common.conn_tmap:
+        raise ValueError("invalid %r kind argument; choose between %s"
+                        % (kind, ', '.join([repr(x) for x in conn_tmap])))
+    families, types = conn_tmap[kind]
+    ret = []
+    rawlist = cext.net_connections()
+    for item in rawlist:
+        fd, fam, type, laddr, raddr, status, pid = item
+        # TODO: apply filter at C level
+        if fam in families and type in types:
+            status = TCP_STATUSES[status]
+            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
+            ret.append(nt)
+    return ret
+
+
+pids = cext.pids
+pid_exists = _psposix.pid_exists
+disk_usage = _psposix.disk_usage
+net_io_counters = cext.net_io_counters
+disk_io_counters = cext.disk_io_counters
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError exceptions into
+    NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except OSError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            err = sys.exc_info()[1]
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        return cext.proc_name(self.pid)
+
+    @wrap_exceptions
+    def exe(self):
+        return cext.proc_exe(self.pid)
+
+    @wrap_exceptions
+    def cmdline(self):
+        return cext.proc_cmdline(self.pid)
+
+    @wrap_exceptions
+    def terminal(self):
+        tty_nr = cext.proc_tty_nr(self.pid)
+        tmap = _psposix._get_terminal_map()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
+    @wrap_exceptions
+    def ppid(self):
+        return cext.proc_ppid(self.pid)
+
+    @wrap_exceptions
+    def uids(self):
+        real, effective, saved = cext.proc_uids(self.pid)
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        real, effective, saved = cext.proc_gids(self.pid)
+        return _common.pgids(real, effective, saved)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        user, system = cext.proc_cpu_times(self.pid)
+        return _common.pcputimes(user, system)
+
+    @wrap_exceptions
+    def memory_info(self):
+        rss, vms = cext.proc_memory_info(self.pid)[:2]
+        return _common.pmem(rss, vms)
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        return pextmem(*cext.proc_memory_info(self.pid))
+
+    @wrap_exceptions
+    def create_time(self):
+        return cext.proc_create_time(self.pid)
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_num_threads(self.pid)
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
+
+    @wrap_exceptions
+    def threads(self):
+        rawlist = cext.proc_threads(self.pid)
+        retlist = []
+        for thread_id, utime, stime in rawlist:
+            ntuple = _common.pthread(thread_id, utime, stime)
+            retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        if kind not in conn_tmap:
+            raise ValueError("invalid %r kind argument; choose between %s"
+                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
+        families, types = conn_tmap[kind]
+        rawlist = cext.proc_connections(self.pid, families, types)
+        ret = []
+        for item in rawlist:
+            fd, fam, type, laddr, raddr, status = item
+            status = TCP_STATUSES[status]
+            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
+            ret.append(nt)
+        return ret
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def nice_get(self):
+        return _psutil_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def status(self):
+        code = cext.proc_status(self.pid)
+        if code in PROC_STATUSES:
+            return PROC_STATUSES[code]
+        # XXX is this legit? will we even ever get here?
+        return "?"
+
+    @wrap_exceptions
+    def io_counters(self):
+        rc, wc, rb, wb = cext.proc_io_counters(self.pid)
+        return _common.pio(rc, wc, rb, wb)
+
+    nt_mmap_grouped = namedtuple(
+        'mmap', 'path rss private ref_count shadow_count')
+    nt_mmap_ext = namedtuple(
+        'mmap', 'addr perms path rss private ref_count shadow_count')
+
+    # FreeBSD < 8 does not support functions based on kinfo_getfile()
+    # and kinfo_getvmmap()
+    if hasattr(cext, 'proc_open_files'):
+
+        @wrap_exceptions
+        def open_files(self):
+            """Return files opened by process as a list of namedtuples."""
+            rawlist = cext.proc_open_files(self.pid)
+            return [_common.popenfile(path, fd) for path, fd in rawlist]
+
+        @wrap_exceptions
+        def cwd(self):
+            """Return process current working directory."""
+            # sometimes we get an empty string, in which case we turn
+            # it into None
+            return cext.proc_cwd(self.pid) or None
+
+        @wrap_exceptions
+        def memory_maps(self):
+            return cext.proc_memory_maps(self.pid)
+
+        @wrap_exceptions
+        def num_fds(self):
+            """Return the number of file descriptors opened by this process."""
+            return cext.proc_num_fds(self.pid)
+
+    else:
+        def _not_implemented(self):
+            raise NotImplementedError("supported only starting from FreeBSD 8")
+
+        open_files = _not_implemented
+        cwd = _not_implemented
+        memory_maps = _not_implemented
+        num_fds = _not_implemented

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pslinux.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pslinux.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pslinux.py
new file mode 100644
index 0000000..d20b267
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pslinux.py
@@ -0,0 +1,1225 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Linux platform implementation."""
+
+from __future__ import division
+
+import base64
+import errno
+import os
+import re
+import socket
+import struct
+import sys
+import warnings
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import (isfile_strict, usage_percent, deprecated)
+from psutil._compat import PY3, xrange, namedtuple, wraps, b, defaultdict
+import _psutil_linux as cext
+import _psutil_posix
+
+
+__extra__all__ = [
+    # io prio constants
+    "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
+    "IOPRIO_CLASS_IDLE",
+    # connection status constants
+    "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
+    "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
+    "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
+    # other
+    "phymem_buffers", "cached_phymem"]
+
+
+# --- constants
+
+HAS_PRLIMIT = hasattr(cext, "linux_prlimit")
+
+# RLIMIT_* constants, not guaranteed to be present on all kernels
+if HAS_PRLIMIT:
+    for name in dir(cext):
+        if name.startswith('RLIM'):
+            __extra__all__.append(name)
+
+# Number of clock ticks per second
+CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
+PAGESIZE = os.sysconf("SC_PAGE_SIZE")
+BOOT_TIME = None  # set later
+DEFAULT_ENCODING = sys.getdefaultencoding()
+
+# ioprio_* constants http://linux.die.net/man/2/ioprio_get
+IOPRIO_CLASS_NONE = 0
+IOPRIO_CLASS_RT = 1
+IOPRIO_CLASS_BE = 2
+IOPRIO_CLASS_IDLE = 3
+
+# taken from /fs/proc/array.c
+PROC_STATUSES = {
+    "R": _common.STATUS_RUNNING,
+    "S": _common.STATUS_SLEEPING,
+    "D": _common.STATUS_DISK_SLEEP,
+    "T": _common.STATUS_STOPPED,
+    "t": _common.STATUS_TRACING_STOP,
+    "Z": _common.STATUS_ZOMBIE,
+    "X": _common.STATUS_DEAD,
+    "x": _common.STATUS_DEAD,
+    "K": _common.STATUS_WAKE_KILL,
+    "W": _common.STATUS_WAKING
+}
+
+# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
+TCP_STATUSES = {
+    "01": _common.CONN_ESTABLISHED,
+    "02": _common.CONN_SYN_SENT,
+    "03": _common.CONN_SYN_RECV,
+    "04": _common.CONN_FIN_WAIT1,
+    "05": _common.CONN_FIN_WAIT2,
+    "06": _common.CONN_TIME_WAIT,
+    "07": _common.CONN_CLOSE,
+    "08": _common.CONN_CLOSE_WAIT,
+    "09": _common.CONN_LAST_ACK,
+    "0A": _common.CONN_LISTEN,
+    "0B": _common.CONN_CLOSING
+}
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+# --- named tuples
+
+def _get_cputimes_fields():
+    """Return a namedtuple of variable fields depending on the
+    CPU times available on this Linux kernel version which may be:
+    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
+     [guest_nice]]])
+    """
+    f = open('/proc/stat', 'rb')
+    try:
+        values = f.readline().split()[1:]
+    finally:
+        f.close()
+    fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
+    vlen = len(values)
+    if vlen >= 8:
+        # Linux >= 2.6.11
+        fields.append('steal')
+    if vlen >= 9:
+        # Linux >= 2.6.24
+        fields.append('guest')
+    if vlen >= 10:
+        # Linux >= 3.2.0
+        fields.append('guest_nice')
+    return fields
+
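+# The first line of /proc/stat looks roughly like:
+#   cpu  79242 0 74306 842486413 756859 6140 67701 0 0 0
+# i.e. "cpu" followed by up to ten counters, matching the field list
+# computed above.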
+
+scputimes = namedtuple('scputimes', _get_cputimes_fields())
+
+svmem = namedtuple(
+    'svmem', ['total', 'available', 'percent', 'used', 'free',
+              'active', 'inactive', 'buffers', 'cached'])
+
+pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')
+
+pmmap_grouped = namedtuple(
+    'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',
+                      'shared_dirty', 'private_clean', 'private_dirty',
+                      'referenced', 'anonymous', 'swap'])
+
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+
+# --- system memory
+
+def virtual_memory():
+    total, free, buffers, shared, _, _ = cext.linux_sysinfo()
+    cached = active = inactive = None
+    f = open('/proc/meminfo', 'rb')
+    CACHED, ACTIVE, INACTIVE = b("Cached:"), b("Active:"), b("Inactive:")
+    try:
+        for line in f:
+            if line.startswith(CACHED):
+                cached = int(line.split()[1]) * 1024
+            elif line.startswith(ACTIVE):
+                active = int(line.split()[1]) * 1024
+            elif line.startswith(INACTIVE):
+                inactive = int(line.split()[1]) * 1024
+            if (cached is not None
+                    and active is not None
+                    and inactive is not None):
+                break
+        else:
+            # we might get here when dealing with exotic Linux flavors, see:
+            # http://code.google.com/p/psutil/issues/detail?id=313
+            msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
+                  "be determined and were set to 0"
+            warnings.warn(msg, RuntimeWarning)
+            cached = active = inactive = 0
+    finally:
+        f.close()
+    avail = free + buffers + cached
+    used = total - free
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free,
+                 active, inactive, buffers, cached)
+
+
+def swap_memory():
+    _, _, _, _, total, free = cext.linux_sysinfo()
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    # get pgin/pgouts
+    f = open("/proc/vmstat", "rb")
+    SIN, SOUT = b('pswpin'), b('pswpout')
+    sin = sout = None
+    try:
+        for line in f:
+            # values are expressed in 4 kB units; we want bytes instead
+            if line.startswith(SIN):
+                sin = int(line.split(b(' '))[1]) * 4 * 1024
+            elif line.startswith(SOUT):
+                sout = int(line.split(b(' '))[1]) * 4 * 1024
+            if sin is not None and sout is not None:
+                break
+        else:
+            # we might get here when dealing with exotic Linux flavors, see:
+            # http://code.google.com/p/psutil/issues/detail?id=313
+            msg = "'sin' and 'sout' swap memory stats couldn't " \
+                  "be determined and were set to 0"
+            warnings.warn(msg, RuntimeWarning)
+            sin = sout = 0
+    finally:
+        f.close()
+    return _common.sswap(total, used, free, percent, sin, sout)
+
+
+@deprecated(replacement='psutil.virtual_memory().cached')
+def cached_phymem():
+    return virtual_memory().cached
+
+
+@deprecated(replacement='psutil.virtual_memory().buffers')
+def phymem_buffers():
+    return virtual_memory().buffers
+
+
+# --- CPUs
+
+def cpu_times():
+    """Return a named tuple representing the following system-wide
+    CPU times:
+    (user, nice, system, idle, iowait, irq, softirq [steal, [guest,
+     [guest_nice]]])
+    Last 3 fields may not be available on all Linux kernel versions.
+    """
+    f = open('/proc/stat', 'rb')
+    try:
+        values = f.readline().split()
+    finally:
+        f.close()
+    fields = values[1:len(scputimes._fields) + 1]
+    fields = [float(x) / CLOCK_TICKS for x in fields]
+    return scputimes(*fields)
+
+
+def per_cpu_times():
+    """Return a list of namedtuple representing the CPU times
+    for every CPU available on the system.
+    """
+    cpus = []
+    f = open('/proc/stat', 'rb')
+    try:
+        # get rid of the first line which refers to system wide CPU stats
+        f.readline()
+        CPU = b('cpu')
+        for line in f:
+            if line.startswith(CPU):
+                values = line.split()
+                fields = values[1:len(scputimes._fields) + 1]
+                fields = [float(x) / CLOCK_TICKS for x in fields]
+                entry = scputimes(*fields)
+                cpus.append(entry)
+        return cpus
+    finally:
+        f.close()
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    try:
+        return os.sysconf("SC_NPROCESSORS_ONLN")
+    except ValueError:
+        # as a second fallback we try to parse /proc/cpuinfo
+        num = 0
+        f = open('/proc/cpuinfo', 'rb')
+        try:
+            lines = f.readlines()
+        finally:
+            f.close()
+        PROCESSOR = b('processor')
+        for line in lines:
+            if line.lower().startswith(PROCESSOR):
+                num += 1
+
+    # unknown format (e.g. armel/sparc architectures), see:
+    # http://code.google.com/p/psutil/issues/detail?id=200
+    # try to parse /proc/stat as a last resort
+    if num == 0:
+        f = open('/proc/stat', 'rt')
+        try:
+            lines = f.readlines()
+        finally:
+            f.close()
+        search = re.compile(r'cpu\d')
+        for line in lines:
+            line = line.split(' ')[0]
+            if search.match(line):
+                num += 1
+
+    if num == 0:
+        # mimic os.cpu_count()
+        return None
+    return num
+
+
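+# Every logical CPU gets its own block in /proc/cpuinfo; blocks that
+# belong to the same package share a line such as "physical id : 0",
+# so counting distinct values approximates the physical CPU count.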
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    f = open('/proc/cpuinfo', 'rb')
+    try:
+        lines = f.readlines()
+    finally:
+        f.close()
+    found = set()
+    PHYSICAL_ID = b('physical id')
+    for line in lines:
+        if line.lower().startswith(PHYSICAL_ID):
+            found.add(line.strip())
+    if found:
+        return len(found)
+    else:
+        return None  # mimic os.cpu_count()
+
+
+# --- other system functions
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp, user_process = item
+        # note: the underlying C function includes entries about
+        # system boot, run level and others.  We might want
+        # to use them in the future.
+        if not user_process:
+            continue
+        if hostname == ':0.0':
+            hostname = 'localhost'
+        nt = _common.suser(user, tty or None, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def boot_time():
+    """Return the system boot time expressed in seconds since the epoch."""
+    global BOOT_TIME
+    f = open('/proc/stat', 'rb')
+    try:
+        BTIME = b('btime')
+        for line in f:
+            if line.startswith(BTIME):
+                ret = float(line.strip().split()[1])
+                BOOT_TIME = ret
+                return ret
+        raise RuntimeError("line 'btime' not found")
+    finally:
+        f.close()
+
+
+# --- processes
+
+def pids():
+    """Returns a list of PIDs currently running on the system."""
+    return [int(x) for x in os.listdir(b('/proc')) if x.isdigit()]
+
+
+def pid_exists(pid):
+    """Check For the existence of a unix pid."""
+    return _psposix.pid_exists(pid)
+
+
+# --- network
+
+class Connections:
+    """A wrapper on top of /proc/net/* files, retrieving per-process
+    and system-wide open connections (TCP, UDP, UNIX) similarly to
+    "netstat -an".
+
+    Note: in case of UNIX sockets we're only able to determine the
+    local endpoint/path, not the one it's connected to.
+    According to [1] it would be possible but not easily.
+
+    [1] http://serverfault.com/a/417946
+    """
+
+    def __init__(self):
+        tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
+        tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
+        udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
+        udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
+        unix = ("unix", socket.AF_UNIX, None)
+        self.tmap = {
+            "all": (tcp4, tcp6, udp4, udp6, unix),
+            "tcp": (tcp4, tcp6),
+            "tcp4": (tcp4,),
+            "tcp6": (tcp6,),
+            "udp": (udp4, udp6),
+            "udp4": (udp4,),
+            "udp6": (udp6,),
+            "unix": (unix,),
+            "inet": (tcp4, tcp6, udp4, udp6),
+            "inet4": (tcp4, udp4),
+            "inet6": (tcp6, udp6),
+        }
+
+    def get_proc_inodes(self, pid):
+        inodes = defaultdict(list)
+        for fd in os.listdir("/proc/%s/fd" % pid):
+            try:
+                inode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
+            except OSError:
+                # the fd vanished while we were iterating (it was
+                # closed or the process went away); just skip it
+                continue
+            else:
+                if inode.startswith('socket:['):
+                    # the process is using a socket
+                    inode = inode[8:][:-1]
+                    inodes[inode].append((pid, int(fd)))
+        return inodes
+
+    def get_all_inodes(self):
+        inodes = {}
+        for pid in pids():
+            try:
+                inodes.update(self.get_proc_inodes(pid))
+            except OSError:
+                # os.listdir() is gonna raise a lot of access denied
+                # exceptions in case of unprivileged user; that's fine
+                # as we'll just end up returning a connection with PID
+                # and fd set to None anyway.
+                # Both netstat -an and lsof do the same, so it's
+                # unlikely we can do any better.
+                # ENOENT just means a PID disappeared on us.
+                err = sys.exc_info()[1]
+                if err.errno not in (errno.ENOENT, errno.EPERM, errno.EACCES):
+                    raise
+        return inodes
+
+    def decode_address(self, addr, family):
+        """Accept an "ip:port" address as displayed in /proc/net/*
+        and convert it into a human readable form, like:
+
+        "0500000A:0016" -> ("10.0.0.5", 22)
+        "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
+
+        The IP address portion is a four-byte hexadecimal number in
+        host byte order; on little-endian machines the least significant
+        byte is listed first, so we need to reverse the byte order to
+        convert it to an IP address.
+        The port is represented as a two-byte hexadecimal number.
+
+        Reference:
+        http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
+        """
+        ip, port = addr.split(':')
+        port = int(port, 16)
+        if PY3:
+            ip = ip.encode('ascii')
+        # this usually refers to a local socket in listen mode with
+        # no end-points connected
+        if not port:
+            return ()
+        if family == socket.AF_INET:
+            # see: http://code.google.com/p/psutil/issues/detail?id=201
+            if sys.byteorder == 'little':
+                ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
+            else:
+                ip = socket.inet_ntop(family, base64.b16decode(ip))
+        else:  # IPv6
+            # old version - let's keep it, just in case...
+            # ip = ip.decode('hex')
+            # return socket.inet_ntop(socket.AF_INET6,
+            #          ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
+            ip = base64.b16decode(ip)
+            # see: http://code.google.com/p/psutil/issues/detail?id=201
+            if sys.byteorder == 'little':
+                ip = socket.inet_ntop(
+                    socket.AF_INET6,
+                    struct.pack('>4I', *struct.unpack('<4I', ip)))
+            else:
+                ip = socket.inet_ntop(
+                    socket.AF_INET6,
+                    struct.pack('<4I', *struct.unpack('<4I', ip)))
+        return (ip, port)
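+
+    # Worked example for the little-endian branch above: "0500000A"
+    # decodes to the bytes 05 00 00 0A; reversed they read 0A 00 00 05,
+    # which inet_ntop() renders as "10.0.0.5".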
+
+    def process_inet(self, file, family, type_, inodes, filter_pid=None):
+        """Parse /proc/net/tcp* and /proc/net/udp* files."""
+        if file.endswith('6') and not os.path.exists(file):
+            # IPv6 not supported
+            return
+        f = open(file, 'rt')
+        try:
+            f.readline()  # skip the first line
+            for line in f:
+                _, laddr, raddr, status, _, _, _, _, _, inode = \
+                    line.split()[:10]
+                if inode in inodes:
+                    # We assume inet sockets are unique, so we error
+                    # out if there are multiple references to the
+                    # same inode. We won't do this for UNIX sockets.
+                    if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
+                        raise ValueError("ambiguous inode with multiple "
+                                         "PID references")
+                    pid, fd = inodes[inode][0]
+                else:
+                    pid, fd = None, -1
+                if filter_pid is not None and filter_pid != pid:
+                    continue
+                else:
+                    if type_ == socket.SOCK_STREAM:
+                        status = TCP_STATUSES[status]
+                    else:
+                        status = _common.CONN_NONE
+                    laddr = self.decode_address(laddr, family)
+                    raddr = self.decode_address(raddr, family)
+                    yield (fd, family, type_, laddr, raddr, status, pid)
+        finally:
+            f.close()
+
+    def process_unix(self, file, family, inodes, filter_pid=None):
+        """Parse /proc/net/unix files."""
+        f = open(file, 'rt')
+        try:
+            f.readline()  # skip the first line
+            for line in f:
+                tokens = line.split()
+                _, _, _, _, type_, _, inode = tokens[0:7]
+                if inode in inodes:
+                    # With UNIX sockets we can have a single inode
+                    # referencing many file descriptors.
+                    pairs = inodes[inode]
+                else:
+                    pairs = [(None, -1)]
+                for pid, fd in pairs:
+                    if filter_pid is not None and filter_pid != pid:
+                        continue
+                    else:
+                        if len(tokens) == 8:
+                            path = tokens[-1]
+                        else:
+                            path = ""
+                        type_ = int(type_)
+                        raddr = None
+                        status = _common.CONN_NONE
+                        yield (fd, family, type_, path, raddr, status, pid)
+        finally:
+            f.close()
+
+    def retrieve(self, kind, pid=None):
+        if kind not in self.tmap:
+            raise ValueError("invalid %r kind argument; choose between %s"
+                             % (kind, ', '.join([repr(x) for x in self.tmap])))
+        if pid is not None:
+            inodes = self.get_proc_inodes(pid)
+            if not inodes:
+                # no connections for this process
+                return []
+        else:
+            inodes = self.get_all_inodes()
+        ret = []
+        for f, family, type_ in self.tmap[kind]:
+            if family in (socket.AF_INET, socket.AF_INET6):
+                ls = self.process_inet(
+                    "/proc/net/%s" % f, family, type_, inodes, filter_pid=pid)
+            else:
+                ls = self.process_unix(
+                    "/proc/net/%s" % f, family, inodes, filter_pid=pid)
+            for fd, family, type_, laddr, raddr, status, bound_pid in ls:
+                if pid:
+                    conn = _common.pconn(fd, family, type_, laddr, raddr,
+                                         status)
+                else:
+                    conn = _common.sconn(fd, family, type_, laddr, raddr,
+                                         status, bound_pid)
+                ret.append(conn)
+        return ret
+
+
+_connections = Connections()
+
+
+def net_connections(kind='inet'):
+    """Return system-wide open connections."""
+    return _connections.retrieve(kind)
+
+
+def net_io_counters():
+    """Return network I/O statistics for every network interface
+    installed on the system as a dict of raw tuples.
+    """
+    f = open("/proc/net/dev", "rt")
+    try:
+        lines = f.readlines()
+    finally:
+        f.close()
+
+    retdict = {}
+    for line in lines[2:]:
+        colon = line.rfind(':')
+        assert colon > 0, repr(line)
+        name = line[:colon].strip()
+        fields = line[colon + 1:].strip().split()
+        bytes_recv = int(fields[0])
+        packets_recv = int(fields[1])
+        errin = int(fields[2])
+        dropin = int(fields[3])
+        bytes_sent = int(fields[8])
+        packets_sent = int(fields[9])
+        errout = int(fields[10])
+        dropout = int(fields[11])
+        retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
+                         errin, errout, dropin, dropout)
+    return retdict
+
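+# A /proc/net/dev data line looks roughly like:
+#   eth0: 1500 25 0 0 0 0 0 0  2200 30 0 0 0 0 0 0
+# eight receive counters followed by eight transmit counters, which is
+# why the transmit fields are read at offsets 8-11 above.
+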
+
+# --- disks
+
+def disk_io_counters():
+    """Return disk I/O statistics for every disk installed on the
+    system as a dict of raw tuples.
+    """
+    # man iostat states that sectors are equivalent to blocks and
+    # have a size of 512 bytes since the 2.4 kernel. This value is
+    # needed to calculate the amount of disk I/O in bytes.
+    SECTOR_SIZE = 512
+
+    # determine partitions we want to look for
+    partitions = []
+    f = open("/proc/partitions", "rt")
+    try:
+        lines = f.readlines()[2:]
+    finally:
+        f.close()
+    for line in reversed(lines):
+        _, _, _, name = line.split()
+        if name[-1].isdigit():
+            # we're dealing with a partition (e.g. 'sda1'); 'sda' will
+            # also be around but we want to omit it
+            partitions.append(name)
+        else:
+            if not partitions or not partitions[-1].startswith(name):
+                # we're dealing with a disk entity for which no
+                # partitions have been defined (e.g. 'sda' but
+                # 'sda1' was not around), see:
+                # http://code.google.com/p/psutil/issues/detail?id=338
+                partitions.append(name)
+    #
+    retdict = {}
+    f = open("/proc/diskstats", "rt")
+    try:
+        lines = f.readlines()
+    finally:
+        f.close()
+    for line in lines:
+        # http://www.mjmwired.net/kernel/Documentation/iostats.txt
+        _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
+            line.split()[:11]
+        if name in partitions:
+            rbytes = int(rbytes) * SECTOR_SIZE
+            wbytes = int(wbytes) * SECTOR_SIZE
+            reads = int(reads)
+            writes = int(writes)
+            rtime = int(rtime)
+            wtime = int(wtime)
+            retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
+    return retdict
+
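+# Sample /proc/diskstats line (per iostats.txt, linked above):
+#   8  0 sda 4524 1024 125838 3564 2519 26 15090 4511 ...
+# major, minor, name, then reads, reads merged, sectors read, ms spent
+# reading, writes, writes merged, sectors written, ms spent writing;
+# these are the eleven fields unpacked in disk_io_counters() above.
+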
+
+def disk_partitions(all=False):
+    """Return mounted disk partitions as a list of nameduples"""
+    phydevs = []
+    f = open("/proc/filesystems", "r")
+    try:
+        for line in f:
+            if not line.startswith("nodev"):
+                phydevs.append(line.strip())
+    finally:
+        f.close()
+
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            if device == '' or fstype not in phydevs:
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+disk_usage = _psposix.disk_usage
+
+
+# --- decorators
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError and IOError exceptions
+    into NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except EnvironmentError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            # ENOENT (no such file or directory) gets raised on open().
+            # ESRCH (no such process) can get raised on read() if
+            # process is gone in meantime.
+            err = sys.exc_info()[1]
+            if err.errno in (errno.ENOENT, errno.ESRCH):
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Linux process implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        fname = "/proc/%s/stat" % self.pid
+        if PY3:
+            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
+        else:
+            f = open(fname, "rt")
+        try:
+            name = f.read().split(' ')[1].replace('(', '').replace(')', '')
+        finally:
+            f.close()
+        # XXX - gets changed later and probably needs refactoring
+        return name
+
+    def exe(self):
+        try:
+            exe = os.readlink("/proc/%s/exe" % self.pid)
+        except (OSError, IOError):
+            err = sys.exc_info()[1]
+            if err.errno == errno.ENOENT:
+                # no such file error; might be raised also if the
+                # path actually exists for system processes with
+                # low pids (about 0-20)
+                if os.path.lexists("/proc/%s" % self.pid):
+                    return ""
+                else:
+                    # ok, it is a process which has gone away
+                    raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+
+        # readlink() might return paths containing null bytes ('\x00').
+        # Certain names have ' (deleted)' appended. Usually this is
+        # bogus as the file actually exists. Either way that's not
+        # important as we don't want to discriminate executables which
+        # have been deleted.
+        exe = exe.split('\x00')[0]
+        if exe.endswith(' (deleted)') and not os.path.exists(exe):
+            exe = exe[:-10]
+        return exe
+
+    @wrap_exceptions
+    def cmdline(self):
+        fname = "/proc/%s/cmdline" % self.pid
+        if PY3:
+            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
+        else:
+            f = open(fname, "rt")
+        try:
+            # return the args as a list
+            return [x for x in f.read().split('\x00') if x]
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def terminal(self):
+        tmap = _psposix._get_terminal_map()
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            tty_nr = int(f.read().split(b(' '))[6])
+        finally:
+            f.close()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
+    if os.path.exists('/proc/%s/io' % os.getpid()):
+        @wrap_exceptions
+        def io_counters(self):
+            fname = "/proc/%s/io" % self.pid
+            f = open(fname, 'rb')
+            SYSCR, SYSCW = b("syscr"), b("syscw")
+            READ_BYTES, WRITE_BYTES = b("read_bytes"), b("write_bytes")
+            try:
+                rcount = wcount = rbytes = wbytes = None
+                for line in f:
+                    if rcount is None and line.startswith(SYSCR):
+                        rcount = int(line.split()[1])
+                    elif wcount is None and line.startswith(SYSCW):
+                        wcount = int(line.split()[1])
+                    elif rbytes is None and line.startswith(READ_BYTES):
+                        rbytes = int(line.split()[1])
+                    elif wbytes is None and line.startswith(WRITE_BYTES):
+                        wbytes = int(line.split()[1])
+                for x in (rcount, wcount, rbytes, wbytes):
+                    if x is None:
+                        raise NotImplementedError(
+                            "couldn't read all necessary info from %r" % fname)
+                return _common.pio(rcount, wcount, rbytes, wbytes)
+            finally:
+                f.close()
+    else:
+        def io_counters(self):
+            raise NotImplementedError("couldn't find /proc/%s/io (kernel "
+                                      "too old?)" % self.pid)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            st = f.read().strip()
+        finally:
+            f.close()
+        # ignore the first two values ("pid (exe)")
+        st = st[st.rfind(b(')')) + 2:]
+        values = st.split(b(' '))
+        utime = float(values[11]) / CLOCK_TICKS
+        stime = float(values[12]) / CLOCK_TICKS
+        return _common.pcputimes(utime, stime)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def create_time(self):
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            st = f.read().strip()
+        finally:
+            f.close()
+        # ignore the first two values ("pid (exe)")
+        st = st[st.rfind(b(')')) + 2:]
+        values = st.split(b(' '))
+        # According to proc(5), starttime is the 22nd field of
+        # /proc/[pid]/stat (values[19] here, after the first two
+        # entries are stripped) and is expressed in jiffies (clock
+        # ticks). We divide it by CLOCK_TICKS and add the boot time,
+        # yielding seconds since the epoch, in UTC.
+        # The cached BOOT_TIME value is used when available.
+        bt = BOOT_TIME or boot_time()
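+        # e.g. with CLOCK_TICKS == 100 and a starttime of 12345
+        # jiffies, the process started 123.45 seconds after boot and
+        # we return bt + 123.45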
+        return (float(values[19]) / CLOCK_TICKS) + bt
+
+    @wrap_exceptions
+    def memory_info(self):
+        f = open("/proc/%s/statm" % self.pid, 'rb')
+        try:
+            vms, rss = f.readline().split()[:2]
+            return _common.pmem(int(rss) * PAGESIZE,
+                                int(vms) * PAGESIZE)
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        #  ============================================================
+        # | FIELD  | DESCRIPTION                         | AKA  | TOP  |
+        #  ============================================================
+        # | rss    | resident set size                   |      | RES  |
+        # | vms    | total program size                  | size | VIRT |
+        # | shared | shared pages (from shared mappings) |      | SHR  |
+        # | text   | text ('code')                       | trs  | CODE |
+        # | lib    | library (unused in Linux 2.6)       | lrs  |      |
+        # | data   | data + stack                        | drs  | DATA |
+        # | dirty  | dirty pages (unused in Linux 2.6)   | dt   |      |
+        #  ============================================================
+        f = open("/proc/%s/statm" % self.pid, "rb")
+        try:
+            vms, rss, shared, text, lib, data, dirty = \
+                [int(x) * PAGESIZE for x in f.readline().split()[:7]]
+        finally:
+            f.close()
+        return pextmem(rss, vms, shared, text, lib, data, dirty)
+
+    if os.path.exists('/proc/%s/smaps' % os.getpid()):
+        def memory_maps(self):
+            """Return process's mapped memory regions as a list of nameduples.
+            Fields are explained in 'man proc'; here is an updated (Apr 2012)
+            version: http://goo.gl/fmebo
+            """
+            f = None
+            try:
+                f = open("/proc/%s/smaps" % self.pid, "rt")
+                first_line = f.readline()
+                current_block = [first_line]
+
+                def get_blocks():
+                    data = {}
+                    for line in f:
+                        fields = line.split(None, 5)
+                        if not fields[0].endswith(':'):
+                            # new block section
+                            yield (current_block.pop(), data)
+                            current_block.append(line)
+                        else:
+                            try:
+                                data[fields[0]] = int(fields[1]) * 1024
+                            except ValueError:
+                                if fields[0].startswith('VmFlags:'):
+                                    # see issue #369
+                                    continue
+                                else:
+                                    raise ValueError("don't know how to inte"
+                                                     "rpret line %r" % line)
+                    yield (current_block.pop(), data)
+
+                if first_line:  # smaps file can be empty
+                    for header, data in get_blocks():
+                        hfields = header.split(None, 5)
+                        try:
+                            addr, perms, offset, dev, inode, path = hfields
+                        except ValueError:
+                            addr, perms, offset, dev, inode, path = \
+                                hfields + ['']
+                        if not path:
+                            path = '[anon]'
+                        else:
+                            path = path.strip()
+                        yield (addr, perms, path,
+                               data['Rss:'],
+                               data.get('Size:', 0),
+                               data.get('Pss:', 0),
+                               data.get('Shared_Clean:', 0),
+                               data.get('Shared_Dirty:', 0),
+                               data.get('Private_Clean:', 0),
+                               data.get('Private_Dirty:', 0),
+                               data.get('Referenced:', 0),
+                               data.get('Anonymous:', 0),
+                               data.get('Swap:', 0))
+                f.close()
+            except EnvironmentError:
+                # XXX - Can't use wrap_exceptions decorator as we're
+                # returning a generator;  this probably needs some
+                # refactoring in order to avoid this code duplication.
+                if f is not None:
+                    f.close()
+                err = sys.exc_info()[1]
+                if err.errno in (errno.ENOENT, errno.ESRCH):
+                    raise NoSuchProcess(self.pid, self._name)
+                if err.errno in (errno.EPERM, errno.EACCES):
+                    raise AccessDenied(self.pid, self._name)
+                raise
+            except:
+                if f is not None:
+                    f.close()
+                raise
+            f.close()
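+        # A minimal usage sketch (assuming 'proc' is an instance of this
+        # class): each yielded tuple describes one mapped region, e.g.
+        #
+        #     for region in proc.memory_maps():
+        #         addr, perms, path = region[:3]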
+
+    else:
+        def memory_maps(self):
+            msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or "  \
+                  "CONFIG_MMU kernel configuration option is not enabled" \
+                  % self.pid
+            raise NotImplementedError(msg)
+
+    @wrap_exceptions
+    def cwd(self):
+        # readlink() might return paths containing null bytes causing
+        # problems when used with other fs-related functions (os.*,
+        # open(), ...)
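+        # (e.g. a hypothetical raw result of '/tmp\x00' becomes '/tmp')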
+        path = os.readlink("/proc/%s/cwd" % self.pid)
+        return path.replace('\x00', '')
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        vol = unvol = None
+        f = open("/proc/%s/status" % self.pid, "rb")
+        VOLUNTARY = b("voluntary_ctxt_switches")
+        NON_VOLUNTARY = b("nonvoluntary_ctxt_switches")
+        try:
+            for line in f:
+                if line.startswith(VOLUNTARY):
+                    vol = int(line.split()[1])
+                elif line.startswith(NON_VOLUNTARY):
+                    unvol = int(line.split()[1])
+                if vol is not None and unvol is not None:
+                    return _common.pctxsw(vol, unvol)
+            raise NotImplementedError(
+                "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches' "
+                "fields were not found in /proc/%s/status; the kernel is "
+                "probably older than 2.6.23" % self.pid)
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def num_threads(self):
+        f = open("/proc/%s/status" % self.pid, "rb")
+        try:
+            THREADS = b("Threads:")
+            for line in f:
+                if line.startswith(THREADS):
+                    return int(line.split()[1])
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def threads(self):
+        thread_ids = os.listdir("/proc/%s/task" % self.pid)
+        thread_ids.sort()
+        retlist = []
+        hit_enoent = False
+        for thread_id in thread_ids:
+            try:
+                f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id), 'rb')
+            except EnvironmentError:
+                err = sys.exc_info()[1]
+                if err.errno == errno.ENOENT:
+                    # no such file or directory; it means thread
+                    # disappeared on us
+                    hit_enoent = True
+                    continue
+                raise
+            try:
+                st = f.read().strip()
+            finally:
+                f.close()
+            # ignore the first two fields ("pid (comm)"); use rfind() since
+            # the process name itself may contain a ')' character
+            st = st[st.rfind(b(')')) + 2:]
+            values = st.split(b(' '))
+            utime = float(values[11]) / CLOCK_TICKS
+            stime = float(values[12]) / CLOCK_TICKS
+            ntuple = _common.pthread(int(thread_id), utime, stime)
+            retlist.append(ntuple)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    @wrap_exceptions
+    def nice_get(self):
+        # Alternative implementation reading /proc/<pid>/stat:
+        #
+        # f = open('/proc/%s/stat' % self.pid, 'r')
+        # try:
+        #     data = f.read()
+        #     return int(data.split()[18])
+        # finally:
+        #     f.close()
+
+        # Use the C implementation instead.
+        return _psutil_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def cpu_affinity_get(self):
+        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
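+        # e.g. a kernel bitmask of 0b1011 (== 11) decodes to CPUs [0, 1, 3]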
+        bitmask = cext.proc_cpu_affinity_get(self.pid)
+        return from_bitmask(bitmask)
+
+    @wrap_exceptions
+    def cpu_affinity_set(self, cpus):
+        try:
+            cext.proc_cpu_affinity_set(self.pid, cpus)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.EINVAL:
+                allcpus = tuple(range(len(per_cpu_times())))
+                for cpu in cpus:
+                    if cpu not in allcpus:
+                        raise ValueError("invalid CPU #%i (choose between %s)"
+                                         % (cpu, allcpus))
+            raise
+
+    # only starting from kernel 2.6.13
+    if hasattr(cext, "proc_ioprio_get"):
+
+        @wrap_exceptions
+        def ionice_get(self):
+            ioclass, value = cext.proc_ioprio_get(self.pid)
+            return _common.pionice(ioclass, value)
+
+        @wrap_exceptions
+        def ionice_set(self, ioclass, value):
+            if ioclass in (IOPRIO_CLASS_NONE, None):
+                if value:
+                    msg = "can't specify value with IOPRIO_CLASS_NONE"
+                    raise ValueError(msg)
+                ioclass = IOPRIO_CLASS_NONE
+                value = 0
+            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
+                if value is None:
+                    value = 4
+            elif ioclass == IOPRIO_CLASS_IDLE:
+                if value:
+                    msg = "can't specify value with IOPRIO_CLASS_IDLE"
+                    raise ValueError(msg)
+                value = 0
+            else:
+                value = 0
+            if not 0 <= value <= 8:
+                raise ValueError(
+                    "value argument must be between 0 and 8")
+            return cext.proc_ioprio_set(self.pid, ioclass, value)
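+        # Hedged usage sketch (assuming 'proc' is an instance of this
+        # class): move the process to the best-effort class at the lowest
+        # priority within that class:
+        #
+        #     proc.ionice_set(IOPRIO_CLASS_BE, 7)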
+
+    if HAS_PRLIMIT:
+        @wrap_exceptions
+        def rlimit(self, resource, limits=None):
+            # if pid is 0 prlimit() applies to the calling process and
+            # we don't want that
+            if self.pid == 0:
+                raise ValueError("can't use prlimit() against PID 0 process")
+            if limits is None:
+                # get
+                return cext.linux_prlimit(self.pid, resource)
+            else:
+                # set
+                if len(limits) != 2:
+                    raise ValueError(
+                        "second argument must be a (soft, hard) tuple")
+                soft, hard = limits
+                cext.linux_prlimit(self.pid, resource, soft, hard)
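+        # Hedged usage sketch (assuming 'proc' is an instance of this class
+        # and the standard 'resource' module is imported): lower the soft
+        # limit on open files while keeping the hard limit:
+        #
+        #     soft, hard = proc.rlimit(resource.RLIMIT_NOFILE)
+        #     proc.rlimit(resource.RLIMIT_NOFILE, (1024, hard))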
+
+    @wrap_exceptions
+    def status(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            STATE = b("State:")
+            for line in f:
+                if line.startswith(STATE):
+                    letter = line.split()[1]
+                    if PY3:
+                        letter = letter.decode()
+                    # XXX is '?' legit? (we're not supposed to return
+                    # it anyway)
+                    return PROC_STATUSES.get(letter, '?')
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def open_files(self):
+        retlist = []
+        files = os.listdir("/proc/%s/fd" % self.pid)
+        hit_enoent = False
+        for fd in files:
+            file = "/proc/%s/fd/%s" % (self.pid, fd)
+            if os.path.islink(file):
+                try:
+                    file = os.readlink(file)
+                except OSError:
+                    # ENOENT == file which is gone in the meantime
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+                else:
+                    # If file is not an absolute path there's no way
+                    # to tell whether it's a regular file or not,
+                    # so we skip it. A regular file is always supposed
+                    # to be absolutized though.
+                    if file.startswith('/') and isfile_strict(file):
+                        ntuple = _common.popenfile(file, int(fd))
+                        retlist.append(ntuple)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
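+    # e.g. a symlink target of 'socket:[12345]' is not an absolute path and
+    # is skipped, while a target of '/var/log/syslog' would be reported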
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        ret = _connections.retrieve(kind, self.pid)
+        # raise NSP if the process disappeared on us
+        os.stat('/proc/%s' % self.pid)
+        return ret
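+    # Hedged usage note: 'kind' defaults to 'inet'; e.g. connections('tcp')
+    # restricts the result to TCP sockets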
+
+    @wrap_exceptions
+    def num_fds(self):
+        return len(os.listdir("/proc/%s/fd" % self.pid))
+
+    @wrap_exceptions
+    def ppid(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            PPID = b("PPid:")
+            for line in f:
+                if line.startswith(PPID):
+                    # PPid: nnnn
+                    return int(line.split()[1])
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def uids(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            UID = b('Uid:')
+            for line in f:
+                if line.startswith(UID):
+                    _, real, effective, saved, fs = line.split()
+                    return _common.puids(int(real), int(effective), int(saved))
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def gids(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            GID = b('Gid:')
+            for line in f:
+                if line.startswith(GID):
+                    _, real, effective, saved, fs = line.split()
+                    return _common.pgids(int(real), int(effective), int(saved))
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psosx.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psosx.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psosx.py
new file mode 100644
index 0000000..8953867
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psosx.py
@@ -0,0 +1,341 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""OSX platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import conn_tmap, usage_percent, isfile_strict
+from psutil._compat import namedtuple, wraps
+import _psutil_osx as cext
+import _psutil_posix
+
+
+__extra__all__ = []
+
+# --- constants
+
+PAGESIZE = os.sysconf("SC_PAGE_SIZE")
+
+# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
+TCP_STATUSES = {
+    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.TCPS_CLOSED: _common.CONN_CLOSE,
+    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.TCPS_LISTEN: _common.CONN_LISTEN,
+    cext.TCPS_CLOSING: _common.CONN_CLOSING,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PROC_STATUSES = {
+    cext.SIDL: _common.STATUS_IDLE,
+    cext.SRUN: _common.STATUS_RUNNING,
+    cext.SSLEEP: _common.STATUS_SLEEPING,
+    cext.SSTOP: _common.STATUS_STOPPED,
+    cext.SZOMB: _common.STATUS_ZOMBIE,
+}
+
+scputimes = namedtuple('scputimes', ['user', 'nice', 'system', 'idle'])
+
+svmem = namedtuple(
+    'svmem', ['total', 'available', 'percent', 'used', 'free',
+              'active', 'inactive', 'wired'])
+
+pextmem = namedtuple('pextmem', ['rss', 'vms', 'pfaults', 'pageins'])
+
+pmmap_grouped = namedtuple(
+    'pmmap_grouped',
+    'path rss private swapped dirtied ref_count shadow_depth')
+
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+# --- functions
+
+def virtual_memory():
+    """System virtual memory as a namedtuple."""
+    total, active, inactive, wired, free = cext.virtual_mem()
+    avail = inactive + free
+    used = active + inactive + wired
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free,
+                 active, inactive, wired)
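+# Worked example for virtual_memory() with hypothetical numbers (in GiB):
+# total=8, free=2, inactive=1, active=4, wired=1 gives avail == 1 + 2 == 3,
+# used == 4 + 1 + 1 == 6 and percent == (8 - 3) / 8 == 62.5.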
+
+
+def swap_memory():
+    """Swap system memory as a (total, used, free, sin, sout) tuple."""
+    total, used, free, sin, sout = cext.swap_mem()
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent, sin, sout)
+
+
+def cpu_times():
+    """Return system CPU times as a namedtuple."""
+    user, nice, system, idle = cext.cpu_times()
+    return scputimes(user, nice, system, idle)
+
+
+def per_cpu_times():
+    """Return system CPU times as a named tuple"""
+    ret = []
+    for cpu_t in cext.per_cpu_times():
+        user, nice, system, idle = cpu_t
+        item = scputimes(user, nice, system, idle)
+        ret.append(item)
+    return ret
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    return cext.cpu_count_logical()
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    return cext.cpu_count_phys()
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def disk_partitions(all=False):
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            if not os.path.isabs(device) or not os.path.exists(device):
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+def users():
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp = item
+        if tty == '~':
+            continue  # reboot or shutdown
+        if not tstamp:
+            continue
+        nt = _common.suser(user, tty or None, hostname or None, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def net_connections(kind='inet'):
+    # Note: on OSX this will fail with AccessDenied unless
+    # the process is owned by root.
+    ret = []
+    for pid in pids():
+        try:
+            cons = Process(pid).connections(kind)
+        except NoSuchProcess:
+            continue
+        else:
+            if cons:
+                for c in cons:
+                    c = list(c) + [pid]
+                    ret.append(_common.sconn(*c))
+    return ret
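+# Hedged usage note: per the comment above, net_connections() is normally
+# useful on OS X only when the calling process runs as root.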
+
+
+pids = cext.pids
+pid_exists = _psposix.pid_exists
+disk_usage = _psposix.disk_usage
+net_io_counters = cext.net_io_counters
+disk_io_counters = cext.disk_io_counters
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError exceptions into
+    NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except OSError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            err = sys.exc_info()[1]
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        return cext.proc_name(self.pid)
+
+    @wrap_exceptions
+    def exe(self):
+        return cext.proc_exe(self.pid)
+
+    @wrap_exceptions
+    def cmdline(self):
+        if not pid_exists(self.pid):
+            raise NoSuchProcess(self.pid, self._name)
+        return cext.proc_cmdline(self.pid)
+
+    @wrap_exceptions
+    def ppid(self):
+        return cext.proc_ppid(self.pid)
+
+    @wrap_exceptions
+    def cwd(self):
+        return cext.proc_cwd(self.pid)
+
+    @wrap_exceptions
+    def uids(self):
+        real, effective, saved = cext.proc_uids(self.pid)
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        real, effective, saved = cext.proc_gids(self.pid)
+        return _common.pgids(real, effective, saved)
+
+    @wrap_exceptions
+    def terminal(self):
+        tty_nr = cext.proc_tty_nr(self.pid)
+        tmap = _psposix._get_terminal_map()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
+    @wrap_exceptions
+    def memory_info(self):
+        rss, vms = cext.proc_memory_info(self.pid)[:2]
+        return _common.pmem(rss, vms)
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        rss, vms, pfaults, pageins = cext.proc_memory_info(self.pid)
+        return pextmem(rss, vms, pfaults * PAGESIZE, pageins * PAGESIZE)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        user, system = cext.proc_cpu_times(self.pid)
+        return _common.pcputimes(user, system)
+
+    @wrap_exceptions
+    def create_time(self):
+        return cext.proc_create_time(self.pid)
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_num_threads(self.pid)
+
+    @wrap_exceptions
+    def open_files(self):
+        if self.pid == 0:
+            return []
+        files = []
+        rawlist = cext.proc_open_files(self.pid)
+        for path, fd in rawlist:
+            if isfile_strict(path):
+                ntuple = _common.popenfile(path, fd)
+                files.append(ntuple)
+        return files
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        if kind not in conn_tmap:
+            raise ValueError("invalid %r kind argument; choose between %s"
+                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
+        families, types = conn_tmap[kind]
+        rawlist = cext.proc_connections(self.pid, families, types)
+        ret = []
+        for item in rawlist:
+            fd, fam, type, laddr, raddr, status = item
+            status = TCP_STATUSES[status]
+            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
+            ret.append(nt)
+        return ret
+
+    @wrap_exceptions
+    def num_fds(self):
+        if self.pid == 0:
+            return 0
+        return cext.proc_num_fds(self.pid)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def nice_get(self):
+        return _psutil_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def status(self):
+        code = cext.proc_status(self.pid)
+        # XXX is '?' legit? (we're not supposed to return it anyway)
+        return PROC_STATUSES.get(code, '?')
+
+    @wrap_exceptions
+    def threads(self):
+        rawlist = cext.proc_threads(self.pid)
+        retlist = []
+        for thread_id, utime, stime in rawlist:
+            ntuple = _common.pthread(thread_id, utime, stime)
+            retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def memory_maps(self):
+        return cext.proc_memory_maps(self.pid)


http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
new file mode 100644
index 0000000..7a45405
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestMemoryApplicationHistoryStore extends
+    ApplicationHistoryStoreTestUtils {
+
+  @Before
+  public void setup() {
+    store = new MemoryApplicationHistoryStore();
+  }
+
+  @Test
+  public void testReadWriteApplicationHistory() throws Exception {
+    // Out of order
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    try {
+      writeApplicationFinishData(appId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains(
+        "is stored before the start information"));
+    }
+    // Normal
+    int numApps = 5;
+    for (int i = 1; i <= numApps; ++i) {
+      appId = ApplicationId.newInstance(0, i);
+      writeApplicationStartData(appId);
+      writeApplicationFinishData(appId);
+    }
+    Assert.assertEquals(numApps, store.getAllApplications().size());
+    for (int i = 1; i <= numApps; ++i) {
+      appId = ApplicationId.newInstance(0, i);
+      ApplicationHistoryData data = store.getApplication(appId);
+      Assert.assertNotNull(data);
+      Assert.assertEquals(appId.toString(), data.getApplicationName());
+      Assert.assertEquals(appId.toString(), data.getDiagnosticsInfo());
+    }
+    // Write again
+    appId = ApplicationId.newInstance(0, 1);
+    try {
+      writeApplicationStartData(appId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+    try {
+      writeApplicationFinishData(appId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+  }
+
+  @Test
+  public void testReadWriteApplicationAttemptHistory() throws Exception {
+    // Out of order
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    try {
+      writeApplicationAttemptFinishData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains(
+        "is stored before the start information"));
+    }
+    // Normal
+    int numAppAttempts = 5;
+    writeApplicationStartData(appId);
+    for (int i = 1; i <= numAppAttempts; ++i) {
+      appAttemptId = ApplicationAttemptId.newInstance(appId, i);
+      writeApplicationAttemptStartData(appAttemptId);
+      writeApplicationAttemptFinishData(appAttemptId);
+    }
+    Assert.assertEquals(numAppAttempts, store.getApplicationAttempts(appId)
+      .size());
+    for (int i = 1; i <= numAppAttempts; ++i) {
+      appAttemptId = ApplicationAttemptId.newInstance(appId, i);
+      ApplicationAttemptHistoryData data =
+          store.getApplicationAttempt(appAttemptId);
+      Assert.assertNotNull(data);
+      Assert.assertEquals(appAttemptId.toString(), data.getHost());
+      Assert.assertEquals(appAttemptId.toString(), data.getDiagnosticsInfo());
+    }
+    writeApplicationFinishData(appId);
+    // Write again
+    appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
+    try {
+      writeApplicationAttemptStartData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+    try {
+      writeApplicationAttemptFinishData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+  }
+
+  @Test
+  public void testReadWriteContainerHistory() throws Exception {
+    // Out of order
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    try {
+      writeContainerFinishData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains(
+        "is stored before the start information"));
+    }
+    // Normal
+    writeApplicationAttemptStartData(appAttemptId);
+    int numContainers = 5;
+    for (int i = 1; i <= numContainers; ++i) {
+      containerId = ContainerId.newInstance(appAttemptId, i);
+      writeContainerStartData(containerId);
+      writeContainerFinishData(containerId);
+    }
+    Assert
+      .assertEquals(numContainers, store.getContainers(appAttemptId).size());
+    for (int i = 1; i <= numContainers; ++i) {
+      containerId = ContainerId.newInstance(appAttemptId, i);
+      ContainerHistoryData data = store.getContainer(containerId);
+      Assert.assertNotNull(data);
+      Assert.assertEquals(Priority.newInstance(containerId.getId()),
+        data.getPriority());
+      Assert.assertEquals(containerId.toString(), data.getDiagnosticsInfo());
+    }
+    ContainerHistoryData masterContainer = store.getAMContainer(appAttemptId);
+    Assert.assertNotNull(masterContainer);
+    Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
+      masterContainer.getContainerId());
+    writeApplicationAttemptFinishData(appAttemptId);
+    // Write again
+    containerId = ContainerId.newInstance(appAttemptId, 1);
+    try {
+      writeContainerStartData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+    try {
+      writeContainerFinishData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+  }
+
+  @Test
+  public void testMassiveWriteContainerHistory() throws IOException {
+    long mb = 1024 * 1024;
+    Runtime runtime = Runtime.getRuntime();
+    long usedMemoryBefore = (runtime.totalMemory() - runtime.freeMemory()) / mb;
+    int numContainers = 100000;
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    for (int i = 1; i <= numContainers; ++i) {
+      ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
+      writeContainerStartData(containerId);
+      writeContainerFinishData(containerId);
+    }
+    long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb;
+    Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 200);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
new file mode 100644
index 0000000..72e43d1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestPhoenixTransactSQL.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Assert;
+import org.junit.Test;
+import java.util.Arrays;
+import java.util.HashMap;
+
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.AbstractTimelineAggregator.*;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
+
+public class TestPhoenixTransactSQL {
+  @Test
+  public void testConditionClause() throws Exception {
+    Condition condition = new Condition(
+      Arrays.asList("cpu_user", "mem_free"), "h1", "a1", "i1",
+        1407959718L, 1407959918L, null, false);
+
+    String preparedClause = condition.getConditionClause();
+    String expectedClause = "METRIC_NAME IN (?,?) AND HOSTNAME = ? AND APP_ID" +
+      " = ? AND INSTANCE_ID = ? AND START_TIME >= ? AND START_TIME < ?";
+
+    Assert.assertNotNull(preparedClause);
+    Assert.assertEquals(expectedClause, preparedClause);
+  }
+
+  @Test
+  public void testTimelineMetricHourlyAggregateSerialization() throws Exception {
+    final long now = System.currentTimeMillis();
+
+    MetricHostAggregate hostAggregate = new MetricHostAggregate();
+    TimelineMetric metric = new TimelineMetric();
+    metric.setMetricName("m01");
+    metric.setAppId("app01");
+    metric.setHostName("h1");
+    metric.setInstanceId("i1");
+    metric.setTimestamp(now);
+    metric.setStartTime(now);
+    metric.addMetricValues(new HashMap<Long, Double>() {{
+      put(now, 0.1);
+      put(now + 10000, 0.2);
+    }});
+
+    hostAggregate.updateMinuteAggregates(metric);
+    //String json = hostAggregate.toJSON();
+    ObjectMapper mapper = new ObjectMapper();
+    System.out.println(mapper.writeValueAsString(hostAggregate));
+    System.out.println(hostAggregate.toJSON());
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
new file mode 100644
index 0000000..c893314
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+public class TestTimelineMetricStore implements TimelineMetricStore {
+  @Override
+  public TimelineMetrics getTimelineMetrics(List<String> metricNames,
+      String hostname, String applicationId, String instanceId, Long startTime,
+      Long endTime, Integer limit, boolean groupedByHost) throws SQLException,
+    IOException {
+    TimelineMetrics timelineMetrics = new TimelineMetrics();
+    List<TimelineMetric> metricList = new ArrayList<TimelineMetric>();
+    timelineMetrics.setMetrics(metricList);
+    TimelineMetric metric1 = new TimelineMetric();
+    TimelineMetric metric2 = new TimelineMetric();
+    metricList.add(metric1);
+    metricList.add(metric2);
+    metric1.setMetricName("cpu_user");
+    metric1.setAppId("1");
+    metric1.setInstanceId(null);
+    metric1.setHostName("c6401");
+    metric1.setStartTime(1407949812L);
+    metric1.setMetricValues(new HashMap<Long, Double>() {{
+      put(1407949812L, 1.0d);
+      put(1407949912L, 1.8d);
+      put(1407950002L, 0.7d);
+    }});
+
+    metric2.setMetricName("mem_free");
+    metric2.setAppId("2");
+    metric2.setInstanceId("3");
+    metric2.setHostName("c6401");
+    metric2.setStartTime(1407949812L);
+    metric2.setMetricValues(new HashMap<Long, Double>() {{
+      put(1407949812L, 2.5d);
+      put(1407949912L, 3.0d);
+      put(1407950002L, 0.9d);
+    }});
+
+    return timelineMetrics;
+  }
+
+  @Override
+  public TimelineMetric getTimelineMetric(String metricName, String hostname,
+      String applicationId, String instanceId, Long startTime, Long endTime,
+      Integer limit) throws SQLException, IOException {
+
+    return null;
+  }
+
+  @Override
+  public TimelinePutResponse putMetrics(TimelineMetrics metrics)
+      throws SQLException, IOException {
+
+    return new TimelinePutResponse();
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestGenericObjectMapper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestGenericObjectMapper.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestGenericObjectMapper.java
new file mode 100644
index 0000000..d684a27
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestGenericObjectMapper.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.WritableComparator;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class TestGenericObjectMapper {
+
+  @Test
+  public void testEncoding() {
+    testEncoding(Long.MAX_VALUE);
+    testEncoding(Long.MIN_VALUE);
+    testEncoding(0L);
+    testEncoding(128L);
+    testEncoding(256L);
+    testEncoding(512L);
+    testEncoding(-256L);
+  }
+
+  private static void testEncoding(long l) {
+    byte[] b = GenericObjectMapper.writeReverseOrderedLong(l);
+    assertEquals("error decoding", l,
+        GenericObjectMapper.readReverseOrderedLong(b, 0));
+    byte[] buf = new byte[16];
+    System.arraycopy(b, 0, buf, 5, 8);
+    assertEquals("error decoding at offset", l,
+        GenericObjectMapper.readReverseOrderedLong(buf, 5));
+    if (l > Long.MIN_VALUE) {
+      byte[] a = GenericObjectMapper.writeReverseOrderedLong(l-1);
+      assertEquals("error preserving ordering", 1,
+          WritableComparator.compareBytes(a, 0, a.length, b, 0, b.length));
+    }
+    if (l < Long.MAX_VALUE) {
+      byte[] c = GenericObjectMapper.writeReverseOrderedLong(l+1);
+      assertEquals("error preserving ordering", 1,
+          WritableComparator.compareBytes(b, 0, b.length, c, 0, c.length));
+    }
+  }
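+  // Note (illustrative values): because the encoding is reverse-ordered,
+  // writeReverseOrderedLong(5L) compares byte-wise GREATER than
+  // writeReverseOrderedLong(6L), so larger (newer) longs sort first.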
+
+  private static void verify(Object o) throws IOException {
+    assertEquals(o, GenericObjectMapper.read(GenericObjectMapper.write(o)));
+  }
+
+  @Test
+  public void testValueTypes() throws IOException {
+    verify(Integer.MAX_VALUE);
+    verify(Integer.MIN_VALUE);
+    assertEquals(Integer.MAX_VALUE, GenericObjectMapper.read(
+        GenericObjectMapper.write((long) Integer.MAX_VALUE)));
+    assertEquals(Integer.MIN_VALUE, GenericObjectMapper.read(
+        GenericObjectMapper.write((long) Integer.MIN_VALUE)));
+    verify((long)Integer.MAX_VALUE + 1L);
+    verify((long)Integer.MIN_VALUE - 1L);
+
+    verify(Long.MAX_VALUE);
+    verify(Long.MIN_VALUE);
+
+    assertEquals(42, GenericObjectMapper.read(GenericObjectMapper.write(42L)));
+    verify(42);
+    verify(1.23);
+    verify("abc");
+    verify(true);
+    List<String> list = new ArrayList<String>();
+    list.add("123");
+    list.add("abc");
+    verify(list);
+    Map<String,String> map = new HashMap<String,String>();
+    map.put("k1","v1");
+    map.put("k2","v2");
+    verify(map);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestLeveldbTimelineStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestLeveldbTimelineStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestLeveldbTimelineStore.java
new file mode 100644
index 0000000..9b27309
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestLeveldbTimelineStore.java
@@ -0,0 +1,253 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.iq80.leveldb.DBIterator;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.GenericObjectMapper.writeReverseOrderedLong;
+import static org.junit.Assert.assertEquals;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class TestLeveldbTimelineStore extends TimelineStoreTestUtils {
+  private FileContext fsContext;
+  private File fsPath;
+
+  @Before
+  public void setup() throws Exception {
+    fsContext = FileContext.getLocalFSFileContext();
+    Configuration conf = new Configuration();
+    fsPath = new File("target", this.getClass().getSimpleName() +
+        "-tmpDir").getAbsoluteFile();
+    fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
+    conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH,
+        fsPath.getAbsolutePath());
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, false);
+    store = new LeveldbTimelineStore();
+    store.init(conf);
+    store.start();
+    loadTestData();
+    loadVerificationData();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    store.stop();
+    fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
+  }
+
+  @Test
+  public void testGetSingleEntity() throws IOException {
+    super.testGetSingleEntity();
+    ((LeveldbTimelineStore)store).clearStartTimeCache();
+    super.testGetSingleEntity();
+    loadTestData();
+  }
+
+  @Test
+  public void testGetEntities() throws IOException {
+    super.testGetEntities();
+  }
+
+  @Test
+  public void testGetEntitiesWithFromId() throws IOException {
+    super.testGetEntitiesWithFromId();
+  }
+
+  @Test
+  public void testGetEntitiesWithFromTs() throws IOException {
+    super.testGetEntitiesWithFromTs();
+  }
+
+  @Test
+  public void testGetEntitiesWithPrimaryFilters() throws IOException {
+    super.testGetEntitiesWithPrimaryFilters();
+  }
+
+  @Test
+  public void testGetEntitiesWithSecondaryFilters() throws IOException {
+    super.testGetEntitiesWithSecondaryFilters();
+  }
+
+  @Test
+  public void testGetEvents() throws IOException {
+    super.testGetEvents();
+  }
+
+  @Test
+  public void testCacheSizes() {
+    Configuration conf = new Configuration();
+    assertEquals(10000, LeveldbTimelineStore.getStartTimeReadCacheSize(conf));
+    assertEquals(10000, LeveldbTimelineStore.getStartTimeWriteCacheSize(conf));
+    conf.setInt(
+        YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
+        10001);
+    assertEquals(10001, LeveldbTimelineStore.getStartTimeReadCacheSize(conf));
+    conf = new Configuration();
+    conf.setInt(
+        YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
+        10002);
+    assertEquals(10002, LeveldbTimelineStore.getStartTimeWriteCacheSize(conf));
+  }
+
+  private boolean deleteNextEntity(String entityType, byte[] ts)
+      throws IOException, InterruptedException {
+    DBIterator iterator = null;
+    DBIterator pfIterator = null;
+    try {
+      iterator = ((LeveldbTimelineStore)store).getDbIterator(false);
+      pfIterator = ((LeveldbTimelineStore)store).getDbIterator(false);
+      return ((LeveldbTimelineStore)store).deleteNextEntity(entityType, ts,
+          iterator, pfIterator, false);
+    } finally {
+      IOUtils.cleanup(null, iterator, pfIterator);
+    }
+  }
+
+  @Test
+  public void testGetEntityTypes() throws IOException {
+    List<String> entityTypes = ((LeveldbTimelineStore)store).getEntityTypes();
+    assertEquals(4, entityTypes.size());
+    assertEquals(entityType1, entityTypes.get(0));
+    assertEquals(entityType2, entityTypes.get(1));
+    assertEquals(entityType4, entityTypes.get(2));
+    assertEquals(entityType5, entityTypes.get(3));
+  }
+
+  @Test
+  public void testDeleteEntities() throws IOException, InterruptedException {
+    assertEquals(2, getEntities("type_1").size());
+    assertEquals(1, getEntities("type_2").size());
+
+    assertEquals(false, deleteNextEntity(entityType1,
+        writeReverseOrderedLong(122L)));
+    assertEquals(2, getEntities("type_1").size());
+    assertEquals(1, getEntities("type_2").size());
+
+    assertEquals(true, deleteNextEntity(entityType1,
+        writeReverseOrderedLong(123L)));
+    List<TimelineEntity> entities = getEntities("type_2");
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId2, entityType2, events2, Collections.singletonMap(
+        entityType1, Collections.singleton(entityId1b)), EMPTY_PRIMARY_FILTERS,
+        EMPTY_MAP, entities.get(0));
+    entities = getEntitiesWithPrimaryFilter("type_1", userFilter);
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    ((LeveldbTimelineStore)store).discardOldEntities(-123L);
+    assertEquals(1, getEntities("type_1").size());
+    assertEquals(0, getEntities("type_2").size());
+    assertEquals(3, ((LeveldbTimelineStore)store).getEntityTypes().size());
+
+    ((LeveldbTimelineStore)store).discardOldEntities(123L);
+    assertEquals(0, getEntities("type_1").size());
+    assertEquals(0, getEntities("type_2").size());
+    assertEquals(0, ((LeveldbTimelineStore)store).getEntityTypes().size());
+    assertEquals(0, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
+  }
+
+  @Test
+  public void testDeleteEntitiesPrimaryFilters()
+      throws IOException, InterruptedException {
+    Map<String, Set<Object>> primaryFilter =
+        Collections.singletonMap("user", Collections.singleton(
+            (Object) "otheruser"));
+    TimelineEntities atsEntities = new TimelineEntities();
+    atsEntities.setEntities(Collections.singletonList(createEntity(entityId1b,
+        entityType1, 789L, Collections.singletonList(ev2), null, primaryFilter,
+        null)));
+    TimelinePutResponse response = store.put(atsEntities);
+    assertEquals(0, response.getErrors().size());
+
+    NameValuePair pfPair = new NameValuePair("user", "otheruser");
+    List<TimelineEntity> entities = getEntitiesWithPrimaryFilter("type_1",
+        pfPair);
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId1b, entityType1, Collections.singletonList(ev2),
+        EMPTY_REL_ENTITIES, primaryFilter, EMPTY_MAP, entities.get(0));
+
+    entities = getEntitiesWithPrimaryFilter("type_1", userFilter);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    ((LeveldbTimelineStore)store).discardOldEntities(-123L);
+    assertEquals(1, getEntitiesWithPrimaryFilter("type_1", pfPair).size());
+    assertEquals(2, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
+
+    ((LeveldbTimelineStore)store).discardOldEntities(123L);
+    assertEquals(0, getEntities("type_1").size());
+    assertEquals(0, getEntities("type_2").size());
+    assertEquals(0, ((LeveldbTimelineStore)store).getEntityTypes().size());
+
+    assertEquals(0, getEntitiesWithPrimaryFilter("type_1", pfPair).size());
+    assertEquals(0, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
+  }
+
+  @Test
+  public void testFromTsWithDeletion()
+      throws IOException, InterruptedException {
+    long l = System.currentTimeMillis();
+    assertEquals(2, getEntitiesFromTs("type_1", l).size());
+    assertEquals(1, getEntitiesFromTs("type_2", l).size());
+    assertEquals(2, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
+        l).size());
+    ((LeveldbTimelineStore)store).discardOldEntities(123L);
+    assertEquals(0, getEntitiesFromTs("type_1", l).size());
+    assertEquals(0, getEntitiesFromTs("type_2", l).size());
+    assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
+        l).size());
+    assertEquals(0, getEntities("type_1").size());
+    assertEquals(0, getEntities("type_2").size());
+    assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
+        l).size());
+    loadTestData();
+    assertEquals(0, getEntitiesFromTs("type_1", l).size());
+    assertEquals(0, getEntitiesFromTs("type_2", l).size());
+    assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
+        l).size());
+    assertEquals(2, getEntities("type_1").size());
+    assertEquals(1, getEntities("type_2").size());
+    assertEquals(2, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestMemoryTimelineStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestMemoryTimelineStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestMemoryTimelineStore.java
new file mode 100644
index 0000000..415de53
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TestMemoryTimelineStore.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class TestMemoryTimelineStore extends TimelineStoreTestUtils {
+
+  @Before
+  public void setup() throws Exception {
+    store = new MemoryTimelineStore();
+    store.init(new YarnConfiguration());
+    store.start();
+    loadTestData();
+    loadVerificationData();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    store.stop();
+  }
+
+  public TimelineStore getTimelineStore() {
+    return store;
+  }
+
+  @Test
+  public void testGetSingleEntity() throws IOException {
+    super.testGetSingleEntity();
+  }
+
+  @Test
+  public void testGetEntities() throws IOException {
+    super.testGetEntities();
+  }
+
+  @Test
+  public void testGetEntitiesWithFromId() throws IOException {
+    super.testGetEntitiesWithFromId();
+  }
+
+  @Test
+  public void testGetEntitiesWithFromTs() throws IOException {
+    super.testGetEntitiesWithFromTs();
+  }
+
+  @Test
+  public void testGetEntitiesWithPrimaryFilters() throws IOException {
+    super.testGetEntitiesWithPrimaryFilters();
+  }
+
+  @Test
+  public void testGetEntitiesWithSecondaryFilters() throws IOException {
+    super.testGetEntitiesWithSecondaryFilters();
+  }
+
+  @Test
+  public void testGetEvents() throws IOException {
+    super.testGetEvents();
+  }
+
+}
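
TestMemoryTimelineStore also illustrates the pattern this suite uses for any
store implementation: the subclass supplies the store in setup() and each
@Test simply delegates to the shared assertions in TimelineStoreTestUtils.
As a rough sketch, a test for a hypothetical additional store (the class
name MyTimelineStore is made up for illustration) would follow the same
shape:

    public class TestMyTimelineStore extends TimelineStoreTestUtils {

      @Before
      public void setup() throws Exception {
        store = new MyTimelineStore();   // hypothetical TimelineStore impl
        store.init(new YarnConfiguration());
        store.start();
        loadTestData();                  // shared fixtures from the base class
        loadVerificationData();
      }

      @After
      public void tearDown() throws Exception {
        store.stop();
      }

      @Test
      public void testGetSingleEntity() throws IOException {
        super.testGetSingleEntity();     // reuse the shared assertions
      }
    }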

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineStoreTestUtils.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineStoreTestUtils.java
new file mode 100644
index 0000000..d760536
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineStoreTestUtils.java
@@ -0,0 +1,789 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.TimelineReader.Field;
+
+public class TimelineStoreTestUtils {
+
+  protected static final List<TimelineEvent> EMPTY_EVENTS =
+      Collections.emptyList();
+  protected static final Map<String, Object> EMPTY_MAP =
+      Collections.emptyMap();
+  protected static final Map<String, Set<Object>> EMPTY_PRIMARY_FILTERS =
+      Collections.emptyMap();
+  protected static final Map<String, Set<String>> EMPTY_REL_ENTITIES =
+      Collections.emptyMap();
+
+  protected TimelineStore store;
+  protected String entityId1;
+  protected String entityType1;
+  protected String entityId1b;
+  protected String entityId2;
+  protected String entityType2;
+  protected String entityId4;
+  protected String entityType4;
+  protected String entityId5;
+  protected String entityType5;
+  protected Map<String, Set<Object>> primaryFilters;
+  protected Map<String, Object> secondaryFilters;
+  protected Map<String, Object> allFilters;
+  protected Map<String, Object> otherInfo;
+  protected Map<String, Set<String>> relEntityMap;
+  protected Map<String, Set<String>> relEntityMap2;
+  protected NameValuePair userFilter;
+  protected NameValuePair numericFilter1;
+  protected NameValuePair numericFilter2;
+  protected NameValuePair numericFilter3;
+  protected Collection<NameValuePair> goodTestingFilters;
+  protected Collection<NameValuePair> badTestingFilters;
+  protected TimelineEvent ev1;
+  protected TimelineEvent ev2;
+  protected TimelineEvent ev3;
+  protected TimelineEvent ev4;
+  protected Map<String, Object> eventInfo;
+  protected List<TimelineEvent> events1;
+  protected List<TimelineEvent> events2;
+  protected long beforeTs;
+
+  /**
+   * Load test data into the store under test
+   */
+  protected void loadTestData() throws IOException {
+    beforeTs = System.currentTimeMillis()-1;
+    TimelineEntities entities = new TimelineEntities();
+    Map<String, Set<Object>> primaryFilters =
+        new HashMap<String, Set<Object>>();
+    Set<Object> l1 = new HashSet<Object>();
+    l1.add("username");
+    Set<Object> l2 = new HashSet<Object>();
+    l2.add((long)Integer.MAX_VALUE);
+    Set<Object> l3 = new HashSet<Object>();
+    l3.add("123abc");
+    Set<Object> l4 = new HashSet<Object>();
+    l4.add((long)Integer.MAX_VALUE + 1l);
+    primaryFilters.put("user", l1);
+    primaryFilters.put("appname", l2);
+    primaryFilters.put("other", l3);
+    primaryFilters.put("long", l4);
+    Map<String, Object> secondaryFilters = new HashMap<String, Object>();
+    secondaryFilters.put("startTime", 123456l);
+    secondaryFilters.put("status", "RUNNING");
+    Map<String, Object> otherInfo1 = new HashMap<String, Object>();
+    otherInfo1.put("info1", "val1");
+    otherInfo1.putAll(secondaryFilters);
+
+    String entityId1 = "id_1";
+    String entityType1 = "type_1";
+    String entityId1b = "id_2";
+    String entityId2 = "id_2";
+    String entityType2 = "type_2";
+    String entityId4 = "id_4";
+    String entityType4 = "type_4";
+    String entityId5 = "id_5";
+    String entityType5 = "type_5";
+
+    Map<String, Set<String>> relatedEntities =
+        new HashMap<String, Set<String>>();
+    relatedEntities.put(entityType2, Collections.singleton(entityId2));
+
+    TimelineEvent ev3 = createEvent(789l, "launch_event", null);
+    TimelineEvent ev4 = createEvent(-123l, "init_event", null);
+    List<TimelineEvent> events = new ArrayList<TimelineEvent>();
+    events.add(ev3);
+    events.add(ev4);
+    entities.setEntities(Collections.singletonList(createEntity(entityId2,
+        entityType2, null, events, null, null, null)));
+    TimelinePutResponse response = store.put(entities);
+    assertEquals(0, response.getErrors().size());
+
+    TimelineEvent ev1 = createEvent(123l, "start_event", null);
+    entities.setEntities(Collections.singletonList(createEntity(entityId1,
+        entityType1, 123l, Collections.singletonList(ev1),
+        relatedEntities, primaryFilters, otherInfo1)));
+    response = store.put(entities);
+    assertEquals(0, response.getErrors().size());
+    entities.setEntities(Collections.singletonList(createEntity(entityId1b,
+        entityType1, null, Collections.singletonList(ev1), relatedEntities,
+        primaryFilters, otherInfo1)));
+    response = store.put(entities);
+    assertEquals(0, response.getErrors().size());
+
+    Map<String, Object> eventInfo = new HashMap<String, Object>();
+    eventInfo.put("event info 1", "val1");
+    TimelineEvent ev2 = createEvent(456l, "end_event", eventInfo);
+    Map<String, Object> otherInfo2 = new HashMap<String, Object>();
+    otherInfo2.put("info2", "val2");
+    entities.setEntities(Collections.singletonList(createEntity(entityId1,
+        entityType1, null, Collections.singletonList(ev2), null,
+        primaryFilters, otherInfo2)));
+    response = store.put(entities);
+    assertEquals(0, response.getErrors().size());
+    entities.setEntities(Collections.singletonList(createEntity(entityId1b,
+        entityType1, 789l, Collections.singletonList(ev2), null,
+        primaryFilters, otherInfo2)));
+    response = store.put(entities);
+    assertEquals(0, response.getErrors().size());
+
+    entities.setEntities(Collections.singletonList(createEntity(
+        "badentityid", "badentity", null, null, null, null, otherInfo1)));
+    response = store.put(entities);
+    assertEquals(1, response.getErrors().size());
+    TimelinePutError error = response.getErrors().get(0);
+    assertEquals("badentityid", error.getEntityId());
+    assertEquals("badentity", error.getEntityType());
+    assertEquals(TimelinePutError.NO_START_TIME, error.getErrorCode());
+
+    relatedEntities.clear();
+    relatedEntities.put(entityType5, Collections.singleton(entityId5));
+    entities.setEntities(Collections.singletonList(createEntity(entityId4,
+        entityType4, 42l, null, relatedEntities, null, null)));
+    response = store.put(entities);
+    assertEquals(0, response.getErrors().size());
+  }
+
+  /**
+   * Load verification data
+   */
+  protected void loadVerificationData() throws Exception {
+    userFilter = new NameValuePair("user", "username");
+    numericFilter1 = new NameValuePair("appname", Integer.MAX_VALUE);
+    numericFilter2 = new NameValuePair("long", (long)Integer.MAX_VALUE + 1l);
+    numericFilter3 = new NameValuePair("other", "123abc");
+    goodTestingFilters = new ArrayList<NameValuePair>();
+    goodTestingFilters.add(new NameValuePair("appname", Integer.MAX_VALUE));
+    goodTestingFilters.add(new NameValuePair("status", "RUNNING"));
+    badTestingFilters = new ArrayList<NameValuePair>();
+    badTestingFilters.add(new NameValuePair("appname", Integer.MAX_VALUE));
+    badTestingFilters.add(new NameValuePair("status", "FINISHED"));
+
+    primaryFilters = new HashMap<String, Set<Object>>();
+    Set<Object> l1 = new HashSet<Object>();
+    l1.add("username");
+    Set<Object> l2 = new HashSet<Object>();
+    l2.add(Integer.MAX_VALUE);
+    Set<Object> l3 = new HashSet<Object>();
+    l3.add("123abc");
+    Set<Object> l4 = new HashSet<Object>();
+    l4.add((long)Integer.MAX_VALUE + 1l);
+    primaryFilters.put("user", l1);
+    primaryFilters.put("appname", l2);
+    primaryFilters.put("other", l3);
+    primaryFilters.put("long", l4);
+    secondaryFilters = new HashMap<String, Object>();
+    secondaryFilters.put("startTime", 123456);
+    secondaryFilters.put("status", "RUNNING");
+    allFilters = new HashMap<String, Object>();
+    allFilters.putAll(secondaryFilters);
+    for (Entry<String, Set<Object>> pf : primaryFilters.entrySet()) {
+      for (Object o : pf.getValue()) {
+        allFilters.put(pf.getKey(), o);
+      }
+    }
+    otherInfo = new HashMap<String, Object>();
+    otherInfo.put("info1", "val1");
+    otherInfo.put("info2", "val2");
+    otherInfo.putAll(secondaryFilters);
+
+    entityId1 = "id_1";
+    entityType1 = "type_1";
+    entityId1b = "id_2";
+    entityId2 = "id_2";
+    entityType2 = "type_2";
+    entityId4 = "id_4";
+    entityType4 = "type_4";
+    entityId5 = "id_5";
+    entityType5 = "type_5";
+
+    ev1 = createEvent(123l, "start_event", null);
+
+    eventInfo = new HashMap<String, Object>();
+    eventInfo.put("event info 1", "val1");
+    ev2 = createEvent(456l, "end_event", eventInfo);
+    events1 = new ArrayList<TimelineEvent>();
+    events1.add(ev2);
+    events1.add(ev1);
+
+    relEntityMap =
+        new HashMap<String, Set<String>>();
+    Set<String> ids = new HashSet<String>();
+    ids.add(entityId1);
+    ids.add(entityId1b);
+    relEntityMap.put(entityType1, ids);
+
+    relEntityMap2 =
+        new HashMap<String, Set<String>>();
+    relEntityMap2.put(entityType4, Collections.singleton(entityId4));
+
+    ev3 = createEvent(789l, "launch_event", null);
+    ev4 = createEvent(-123l, "init_event", null);
+    events2 = new ArrayList<TimelineEvent>();
+    events2.add(ev3);
+    events2.add(ev4);
+  }
+
+  public void testGetSingleEntity() throws IOException {
+    // test getting entity info
+    verifyEntityInfo(null, null, null, null, null, null,
+        store.getEntity("id_1", "type_2", EnumSet.allOf(Field.class)));
+
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, 123l, store.getEntity(entityId1,
+        entityType1, EnumSet.allOf(Field.class)));
+
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, 123l, store.getEntity(entityId1b,
+        entityType1, EnumSet.allOf(Field.class)));
+
+    verifyEntityInfo(entityId2, entityType2, events2, relEntityMap,
+        EMPTY_PRIMARY_FILTERS, EMPTY_MAP, -123l, store.getEntity(entityId2,
+        entityType2, EnumSet.allOf(Field.class)));
+
+    verifyEntityInfo(entityId4, entityType4, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
+        EMPTY_PRIMARY_FILTERS, EMPTY_MAP, 42l, store.getEntity(entityId4,
+        entityType4, EnumSet.allOf(Field.class)));
+
+    verifyEntityInfo(entityId5, entityType5, EMPTY_EVENTS, relEntityMap2,
+        EMPTY_PRIMARY_FILTERS, EMPTY_MAP, 42l, store.getEntity(entityId5,
+        entityType5, EnumSet.allOf(Field.class)));
+
+    // test getting single fields
+    verifyEntityInfo(entityId1, entityType1, events1, null, null, null,
+        store.getEntity(entityId1, entityType1, EnumSet.of(Field.EVENTS)));
+
+    verifyEntityInfo(entityId1, entityType1, Collections.singletonList(ev2),
+        null, null, null, store.getEntity(entityId1, entityType1,
+        EnumSet.of(Field.LAST_EVENT_ONLY)));
+
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, store.getEntity(entityId1b, entityType1,
+        null));
+
+    verifyEntityInfo(entityId1, entityType1, null, null, primaryFilters, null,
+        store.getEntity(entityId1, entityType1,
+            EnumSet.of(Field.PRIMARY_FILTERS)));
+
+    verifyEntityInfo(entityId1, entityType1, null, null, null, otherInfo,
+        store.getEntity(entityId1, entityType1, EnumSet.of(Field.OTHER_INFO)));
+
+    verifyEntityInfo(entityId2, entityType2, null, relEntityMap, null, null,
+        store.getEntity(entityId2, entityType2,
+            EnumSet.of(Field.RELATED_ENTITIES)));
+  }
+
+  protected List<TimelineEntity> getEntities(String entityType)
+      throws IOException {
+    return store.getEntities(entityType, null, null, null, null, null,
+        null, null, null).getEntities();
+  }
+
+  protected List<TimelineEntity> getEntitiesWithPrimaryFilter(
+      String entityType, NameValuePair primaryFilter) throws IOException {
+    return store.getEntities(entityType, null, null, null, null, null,
+        primaryFilter, null, null).getEntities();
+  }
+
+  protected List<TimelineEntity> getEntitiesFromId(String entityType,
+      String fromId) throws IOException {
+    return store.getEntities(entityType, null, null, null, fromId, null,
+        null, null, null).getEntities();
+  }
+
+  protected List<TimelineEntity> getEntitiesFromTs(String entityType,
+      long fromTs) throws IOException {
+    return store.getEntities(entityType, null, null, null, null, fromTs,
+        null, null, null).getEntities();
+  }
+
+  protected List<TimelineEntity> getEntitiesFromIdWithPrimaryFilter(
+      String entityType, NameValuePair primaryFilter, String fromId)
+      throws IOException {
+    return store.getEntities(entityType, null, null, null, fromId, null,
+        primaryFilter, null, null).getEntities();
+  }
+
+  protected List<TimelineEntity> getEntitiesFromTsWithPrimaryFilter(
+      String entityType, NameValuePair primaryFilter, long fromTs)
+      throws IOException {
+    return store.getEntities(entityType, null, null, null, null, fromTs,
+        primaryFilter, null, null).getEntities();
+  }
+
+  protected List<TimelineEntity> getEntitiesFromIdWithWindow(String entityType,
+      Long windowEnd, String fromId) throws IOException {
+    return store.getEntities(entityType, null, null, windowEnd, fromId, null,
+        null, null, null).getEntities();
+  }
+
+  protected List<TimelineEntity> getEntitiesFromIdWithPrimaryFilterAndWindow(
+      String entityType, Long windowEnd, String fromId,
+      NameValuePair primaryFilter) throws IOException {
+    return store.getEntities(entityType, null, null, windowEnd, fromId, null,
+        primaryFilter, null, null).getEntities();
+  }
+
+  protected List<TimelineEntity> getEntitiesWithFilters(String entityType,
+      NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters)
+      throws IOException {
+    return store.getEntities(entityType, null, null, null, null, null,
+        primaryFilter, secondaryFilters, null).getEntities();
+  }
+
+  protected List<TimelineEntity> getEntities(String entityType, Long limit,
+      Long windowStart, Long windowEnd, NameValuePair primaryFilter,
+      EnumSet<Field> fields) throws IOException {
+    return store.getEntities(entityType, limit, windowStart, windowEnd, null,
+        null, primaryFilter, null, fields).getEntities();
+  }
+
+  public void testGetEntities() throws IOException {
+    // test getting entities
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        getEntities("type_0").size());
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        getEntities("type_3").size());
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        getEntities("type_6").size());
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        getEntitiesWithPrimaryFilter("type_0", userFilter).size());
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        getEntitiesWithPrimaryFilter("type_3", userFilter).size());
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        getEntitiesWithPrimaryFilter("type_6", userFilter).size());
+
+    List<TimelineEntity> entities = getEntities("type_1");
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntities("type_2");
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId2, entityType2, events2, relEntityMap,
+        EMPTY_PRIMARY_FILTERS, EMPTY_MAP, entities.get(0));
+
+    entities = getEntities("type_1", 1l, null, null, null,
+        EnumSet.allOf(Field.class));
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = getEntities("type_1", 1l, 0l, null, null,
+        EnumSet.allOf(Field.class));
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = getEntities("type_1", null, 234l, null, null,
+        EnumSet.allOf(Field.class));
+    assertEquals(0, entities.size());
+
+    entities = getEntities("type_1", null, 123l, null, null,
+        EnumSet.allOf(Field.class));
+    assertEquals(0, entities.size());
+
+    entities = getEntities("type_1", null, 234l, 345l, null,
+        EnumSet.allOf(Field.class));
+    assertEquals(0, entities.size());
+
+    entities = getEntities("type_1", null, null, 345l, null,
+        EnumSet.allOf(Field.class));
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntities("type_1", null, null, 123l, null,
+        EnumSet.allOf(Field.class));
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+  }
+
+  public void testGetEntitiesWithFromId() throws IOException {
+    List<TimelineEntity> entities = getEntitiesFromId("type_1", entityId1);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntitiesFromId("type_1", entityId1b);
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = getEntitiesFromIdWithWindow("type_1", 0l, entityId1);
+    assertEquals(0, entities.size());
+
+    entities = getEntitiesFromId("type_2", "a");
+    assertEquals(0, entities.size());
+
+    entities = getEntitiesFromId("type_2", entityId2);
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId2, entityType2, events2, relEntityMap,
+        EMPTY_PRIMARY_FILTERS, EMPTY_MAP, entities.get(0));
+
+    entities = getEntitiesFromIdWithWindow("type_2", -456l, null);
+    assertEquals(0, entities.size());
+
+    entities = getEntitiesFromIdWithWindow("type_2", -456l, "a");
+    assertEquals(0, entities.size());
+
+    entities = getEntitiesFromIdWithWindow("type_2", 0l, null);
+    assertEquals(1, entities.size());
+
+    entities = getEntitiesFromIdWithWindow("type_2", 0l, entityId2);
+    assertEquals(1, entities.size());
+
+    // same tests with primary filters
+    entities = getEntitiesFromIdWithPrimaryFilter("type_1", userFilter,
+        entityId1);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntitiesFromIdWithPrimaryFilter("type_1", userFilter,
+        entityId1b);
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = getEntitiesFromIdWithPrimaryFilterAndWindow("type_1", 0l,
+        entityId1, userFilter);
+    assertEquals(0, entities.size());
+
+    entities = getEntitiesFromIdWithPrimaryFilter("type_2", userFilter, "a");
+    assertEquals(0, entities.size());
+  }
+
+  public void testGetEntitiesWithFromTs() throws IOException {
+    assertEquals(0, getEntitiesFromTs("type_1", beforeTs).size());
+    assertEquals(0, getEntitiesFromTs("type_2", beforeTs).size());
+    assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
+        beforeTs).size());
+    long afterTs = System.currentTimeMillis();
+    assertEquals(2, getEntitiesFromTs("type_1", afterTs).size());
+    assertEquals(1, getEntitiesFromTs("type_2", afterTs).size());
+    assertEquals(2, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
+        afterTs).size());
+    assertEquals(2, getEntities("type_1").size());
+    assertEquals(1, getEntities("type_2").size());
+    assertEquals(2, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
+    // check insert time is not overwritten
+    long beforeTs = this.beforeTs;
+    loadTestData();
+    assertEquals(0, getEntitiesFromTs("type_1", beforeTs).size());
+    assertEquals(0, getEntitiesFromTs("type_2", beforeTs).size());
+    assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
+        beforeTs).size());
+    assertEquals(2, getEntitiesFromTs("type_1", afterTs).size());
+    assertEquals(1, getEntitiesFromTs("type_2", afterTs).size());
+    assertEquals(2, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
+        afterTs).size());
+  }
+
+  public void testGetEntitiesWithPrimaryFilters() throws IOException {
+    // test using primary filter
+    assertEquals("nonzero entities size for primary filter", 0,
+        getEntitiesWithPrimaryFilter("type_1",
+            new NameValuePair("none", "none")).size());
+    assertEquals("nonzero entities size for primary filter", 0,
+        getEntitiesWithPrimaryFilter("type_2",
+            new NameValuePair("none", "none")).size());
+    assertEquals("nonzero entities size for primary filter", 0,
+        getEntitiesWithPrimaryFilter("type_3",
+            new NameValuePair("none", "none")).size());
+
+    List<TimelineEntity> entities = getEntitiesWithPrimaryFilter("type_1",
+        userFilter);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntitiesWithPrimaryFilter("type_1", numericFilter1);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntitiesWithPrimaryFilter("type_1", numericFilter2);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntitiesWithPrimaryFilter("type_1", numericFilter3);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntitiesWithPrimaryFilter("type_2", userFilter);
+    assertEquals(0, entities.size());
+
+    entities = getEntities("type_1", 1l, null, null, userFilter, null);
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = getEntities("type_1", 1l, 0l, null, userFilter, null);
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = getEntities("type_1", null, 234l, null, userFilter, null);
+    assertEquals(0, entities.size());
+
+    entities = getEntities("type_1", null, 234l, 345l, userFilter, null);
+    assertEquals(0, entities.size());
+
+    entities = getEntities("type_1", null, null, 345l, userFilter, null);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+  }
+
+  public void testGetEntitiesWithSecondaryFilters() throws IOException {
+    // test using secondary filter
+    List<TimelineEntity> entities = getEntitiesWithFilters("type_1", null,
+        goodTestingFilters);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntitiesWithFilters("type_1", userFilter, goodTestingFilters);
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = getEntitiesWithFilters("type_1", null,
+        Collections.singleton(new NameValuePair("user", "none")));
+    assertEquals(0, entities.size());
+
+    entities = getEntitiesWithFilters("type_1", null, badTestingFilters);
+    assertEquals(0, entities.size());
+
+    entities = getEntitiesWithFilters("type_1", userFilter, badTestingFilters);
+    assertEquals(0, entities.size());
+  }
+
+  public void testGetEvents() throws IOException {
+    // test getting entity timelines
+    SortedSet<String> sortedSet = new TreeSet<String>();
+    sortedSet.add(entityId1);
+    List<EventsOfOneEntity> timelines =
+        store.getEntityTimelines(entityType1, sortedSet, null, null,
+            null, null).getAllEvents();
+    assertEquals(1, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2, ev1);
+
+    sortedSet.add(entityId1b);
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        null, null, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2, ev1);
+    verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2, ev1);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, 1l,
+        null, null, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2);
+    verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        345l, null, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2);
+    verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        123l, null, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2);
+    verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        null, 345l, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev1);
+    verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev1);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        null, 123l, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev1);
+    verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev1);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        null, null, Collections.singleton("end_event")).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2);
+    verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2);
+
+    sortedSet.add(entityId2);
+    timelines = store.getEntityTimelines(entityType2, sortedSet, null,
+        null, null, null).getAllEvents();
+    assertEquals(1, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entityId2, entityType2, ev3, ev4);
+  }
+
+  /**
+   * Verify a single entity and its start time
+   */
+  protected static void verifyEntityInfo(String entityId, String entityType,
+      List<TimelineEvent> events, Map<String, Set<String>> relatedEntities,
+      Map<String, Set<Object>> primaryFilters, Map<String, Object> otherInfo,
+      Long startTime, TimelineEntity retrievedEntityInfo) {
+
+    verifyEntityInfo(entityId, entityType, events, relatedEntities,
+        primaryFilters, otherInfo, retrievedEntityInfo);
+    assertEquals(startTime, retrievedEntityInfo.getStartTime());
+  }
+
+  /**
+   * Verify a single entity
+   */
+  protected static void verifyEntityInfo(String entityId, String entityType,
+      List<TimelineEvent> events, Map<String, Set<String>> relatedEntities,
+      Map<String, Set<Object>> primaryFilters, Map<String, Object> otherInfo,
+      TimelineEntity retrievedEntityInfo) {
+    if (entityId == null) {
+      assertNull(retrievedEntityInfo);
+      return;
+    }
+    assertEquals(entityId, retrievedEntityInfo.getEntityId());
+    assertEquals(entityType, retrievedEntityInfo.getEntityType());
+    if (events == null) {
+      assertNull(retrievedEntityInfo.getEvents());
+    } else {
+      assertEquals(events, retrievedEntityInfo.getEvents());
+    }
+    if (relatedEntities == null) {
+      assertNull(retrievedEntityInfo.getRelatedEntities());
+    } else {
+      assertEquals(relatedEntities, retrievedEntityInfo.getRelatedEntities());
+    }
+    if (primaryFilters == null) {
+      assertNull(retrievedEntityInfo.getPrimaryFilters());
+    } else {
+      assertTrue(primaryFilters.equals(
+          retrievedEntityInfo.getPrimaryFilters()));
+    }
+    if (otherInfo == null) {
+      assertNull(retrievedEntityInfo.getOtherInfo());
+    } else {
+      assertTrue(otherInfo.equals(retrievedEntityInfo.getOtherInfo()));
+    }
+  }
+
+  /**
+   * Verify timeline events
+   */
+  private static void verifyEntityTimeline(
+      EventsOfOneEntity retrievedEvents, String entityId, String entityType,
+      TimelineEvent... actualEvents) {
+    assertEquals(entityId, retrievedEvents.getEntityId());
+    assertEquals(entityType, retrievedEvents.getEntityType());
+    assertEquals(actualEvents.length, retrievedEvents.getEvents().size());
+    for (int i = 0; i < actualEvents.length; i++) {
+      assertEquals(actualEvents[i], retrievedEvents.getEvents().get(i));
+    }
+  }
+
+  /**
+   * Create a test entity
+   */
+  protected static TimelineEntity createEntity(String entityId, String entityType,
+      Long startTime, List<TimelineEvent> events,
+      Map<String, Set<String>> relatedEntities,
+      Map<String, Set<Object>> primaryFilters,
+      Map<String, Object> otherInfo) {
+    TimelineEntity entity = new TimelineEntity();
+    entity.setEntityId(entityId);
+    entity.setEntityType(entityType);
+    entity.setStartTime(startTime);
+    entity.setEvents(events);
+    if (relatedEntities != null) {
+      for (Entry<String, Set<String>> e : relatedEntities.entrySet()) {
+        for (String v : e.getValue()) {
+          entity.addRelatedEntity(e.getKey(), v);
+        }
+      }
+    } else {
+      entity.setRelatedEntities(null);
+    }
+    entity.setPrimaryFilters(primaryFilters);
+    entity.setOtherInfo(otherInfo);
+    return entity;
+  }
+
+  /**
+   * Create a test event
+   */
+  private static TimelineEvent createEvent(long timestamp, String type, Map<String,
+      Object> info) {
+    TimelineEvent event = new TimelineEvent();
+    event.setTimestamp(timestamp);
+    event.setEventType(type);
+    event.setEventInfo(info);
+    return event;
+  }
+
+}
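
For reference, the write path that loadTestData() and the createEntity()/
createEvent() helpers above exercise reduces to the following sketch against
a started TimelineStore (identifiers here are illustrative):

    TimelineEvent event = new TimelineEvent();
    event.setTimestamp(System.currentTimeMillis());
    event.setEventType("start_event");

    TimelineEntity entity = new TimelineEntity();
    entity.setEntityId("id_example");
    entity.setEntityType("type_example");
    // An explicit start time; without one a store derives it from events,
    // or rejects the entity with TimelinePutError.NO_START_TIME.
    entity.setStartTime(event.getTimestamp());
    entity.addEvent(event);

    TimelineEntities batch = new TimelineEntities();
    batch.addEntity(entity);
    TimelinePutResponse response = store.put(batch);
    // response.getErrors() carries per-entity failures, as loadTestData() asserts.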

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
new file mode 100644
index 0000000..0e65a50
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.webapp.Params.TITLE;
+import static org.mockito.Mockito.mock;
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerImpl;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStoreTestUtils;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;
+import org.apache.hadoop.yarn.util.StringHelper;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.inject.Injector;
+
+public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
+
+  public void setApplicationHistoryStore(ApplicationHistoryStore store) {
+    this.store = store;
+  }
+
+  @Before
+  public void setup() {
+    store = new MemoryApplicationHistoryStore();
+  }
+
+  @Test
+  public void testAppControllerIndex() throws Exception {
+    ApplicationHistoryManager ahManager = mock(ApplicationHistoryManager.class);
+    Injector injector =
+        WebAppTests.createMockInjector(ApplicationHistoryManager.class,
+          ahManager);
+    AHSController controller = injector.getInstance(AHSController.class);
+    controller.index();
+    Assert
+      .assertEquals("Application History", controller.get(TITLE, "unknown"));
+  }
+
+  @Test
+  public void testView() throws Exception {
+    Injector injector =
+        WebAppTests.createMockInjector(ApplicationContext.class,
+          mockApplicationHistoryManager(5, 1, 1));
+    AHSView ahsViewInstance = injector.getInstance(AHSView.class);
+
+    ahsViewInstance.render();
+    WebAppTests.flushOutput(injector);
+
+    ahsViewInstance.set(YarnWebParams.APP_STATE,
+      YarnApplicationState.FAILED.toString());
+    ahsViewInstance.render();
+    WebAppTests.flushOutput(injector);
+
+    ahsViewInstance.set(YarnWebParams.APP_STATE, StringHelper.cjoin(
+      YarnApplicationState.FAILED.toString(), YarnApplicationState.KILLED));
+    ahsViewInstance.render();
+    WebAppTests.flushOutput(injector);
+  }
+
+  @Test
+  public void testAppPage() throws Exception {
+    Injector injector =
+        WebAppTests.createMockInjector(ApplicationContext.class,
+          mockApplicationHistoryManager(1, 5, 1));
+    AppPage appPageInstance = injector.getInstance(AppPage.class);
+
+    appPageInstance.render();
+    WebAppTests.flushOutput(injector);
+
+    appPageInstance.set(YarnWebParams.APPLICATION_ID, ApplicationId
+      .newInstance(0, 1).toString());
+    appPageInstance.render();
+    WebAppTests.flushOutput(injector);
+  }
+
+  @Test
+  public void testAppAttemptPage() throws Exception {
+    Injector injector =
+        WebAppTests.createMockInjector(ApplicationContext.class,
+          mockApplicationHistoryManager(1, 1, 5));
+    AppAttemptPage appAttemptPageInstance =
+        injector.getInstance(AppAttemptPage.class);
+
+    appAttemptPageInstance.render();
+    WebAppTests.flushOutput(injector);
+
+    appAttemptPageInstance.set(YarnWebParams.APPLICATION_ATTEMPT_ID,
+      ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1)
+        .toString());
+    appAttemptPageInstance.render();
+    WebAppTests.flushOutput(injector);
+  }
+
+  @Test
+  public void testContainerPage() throws Exception {
+    Injector injector =
+        WebAppTests.createMockInjector(ApplicationContext.class,
+          mockApplicationHistoryManager(1, 1, 1));
+    ContainerPage containerPageInstance =
+        injector.getInstance(ContainerPage.class);
+
+    containerPageInstance.render();
+    WebAppTests.flushOutput(injector);
+
+    containerPageInstance.set(
+      YarnWebParams.CONTAINER_ID,
+      ContainerId
+        .newInstance(
+          ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1),
+          1).toString());
+    containerPageInstance.render();
+    WebAppTests.flushOutput(injector);
+  }
+
+  ApplicationHistoryManager mockApplicationHistoryManager(int numApps,
+      int numAppAttempts, int numContainers) throws Exception {
+    ApplicationHistoryManager ahManager =
+        new MockApplicationHistoryManagerImpl(store);
+    for (int i = 1; i <= numApps; ++i) {
+      ApplicationId appId = ApplicationId.newInstance(0, i);
+      writeApplicationStartData(appId);
+      for (int j = 1; j <= numAppAttempts; ++j) {
+        ApplicationAttemptId appAttemptId =
+            ApplicationAttemptId.newInstance(appId, j);
+        writeApplicationAttemptStartData(appAttemptId);
+        for (int k = 1; k <= numContainers; ++k) {
+          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+          writeContainerStartData(containerId);
+          writeContainerFinishData(containerId);
+        }
+        writeApplicationAttemptFinishData(appAttemptId);
+      }
+      writeApplicationFinishData(appId);
+    }
+    return ahManager;
+  }
+
+  class MockApplicationHistoryManagerImpl extends ApplicationHistoryManagerImpl {
+
+    public MockApplicationHistoryManagerImpl(ApplicationHistoryStore store) {
+      super();
+      init(new YarnConfiguration());
+      start();
+    }
+
+    @Override
+    protected ApplicationHistoryStore createApplicationHistoryStore(
+        Configuration conf) {
+      return store;
+    }
+  };
+
+}
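
Each render test above follows the same shape: build a mock injector around
generated history data, obtain a page instance, optionally set a web
parameter, render, and flush the output. Condensed (with an arbitrary
application id):

    Injector injector = WebAppTests.createMockInjector(
        ApplicationContext.class, mockApplicationHistoryManager(1, 1, 1));
    AppPage page = injector.getInstance(AppPage.class);
    page.set(YarnWebParams.APPLICATION_ID,
        ApplicationId.newInstance(0, 1).toString());
    page.render();
    WebAppTests.flushOutput(injector);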


[05/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.c
new file mode 100644
index 0000000..0c83345
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.c
@@ -0,0 +1,1881 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * OS X platform-specific module methods for _psutil_osx
+ */
+
+#include <Python.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <utmpx.h>
+#include <sys/sysctl.h>
+#include <sys/vmmeter.h>
+#include <libproc.h>
+#include <sys/proc_info.h>
+#include <netinet/tcp_fsm.h>
+#include <arpa/inet.h>
+#include <net/if_dl.h>
+#include <pwd.h>
+
+#include <mach/mach.h>
+#include <mach/task.h>
+#include <mach/mach_init.h>
+#include <mach/host_info.h>
+#include <mach/mach_host.h>
+#include <mach/mach_traps.h>
+#include <mach/mach_vm.h>
+#include <mach/shared_region.h>
+
+#include <mach-o/loader.h>
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <IOKit/IOKitLib.h>
+#include <IOKit/storage/IOBlockStorageDriver.h>
+#include <IOKit/storage/IOMedia.h>
+#include <IOKit/IOBSD.h>
+
+#include "_psutil_osx.h"
+#include "_psutil_common.h"
+#include "arch/osx/process_info.h"
+
+
+/*
+ * A wrapper around host_statistics() invoked with HOST_VM_INFO.
+ */
+int
+psutil_sys_vminfo(vm_statistics_data_t *vmstat)
+{
+    kern_return_t ret;
+    mach_msg_type_number_t count = sizeof(*vmstat) / sizeof(integer_t);
+    mach_port_t mport = mach_host_self();
+
+    ret = host_statistics(mport, HOST_VM_INFO, (host_info_t)vmstat, &count);
+    if (ret != KERN_SUCCESS) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "host_statistics() failed: %s", mach_error_string(ret));
+        return 0;
+    }
+    mach_port_deallocate(mach_task_self(), mport);
+    return 1;
+}
+
+
+/*
+ * Return a Python list of all the PIDs running on the system.
+ */
+static PyObject *
+psutil_pids(PyObject *self, PyObject *args)
+{
+    kinfo_proc *proclist = NULL;
+    kinfo_proc *orig_address = NULL;
+    size_t num_processes;
+    size_t idx;
+    PyObject *pid = NULL;
+    PyObject *retlist = PyList_New(0);
+
+    if (retlist == NULL)
+        return NULL;
+
+    if (psutil_get_proc_list(&proclist, &num_processes) != 0) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "failed to retrieve process list.");
+        goto error;
+    }
+
+    if (num_processes > 0) {
+        // save the address of proclist so we can free it later
+        orig_address = proclist;
+        for (idx = 0; idx < num_processes; idx++) {
+            pid = Py_BuildValue("i", proclist->kp_proc.p_pid);
+            if (!pid)
+                goto error;
+            if (PyList_Append(retlist, pid))
+                goto error;
+            Py_DECREF(pid);
+            proclist++;
+        }
+        free(orig_address);
+    }
+    return retlist;
+
+error:
+    Py_XDECREF(pid);
+    Py_DECREF(retlist);
+    if (orig_address != NULL)
+        free(orig_address);
+    return NULL;
+}
+
+
+/*
+ * Return process name from kinfo_proc as a Python string.
+ */
+static PyObject *
+psutil_proc_name(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("s", kp.kp_proc.p_comm);
+}
+
+
+/*
+ * Return process current working directory.
+ */
+static PyObject *
+psutil_proc_cwd(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_vnodepathinfo pathinfo;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    if (! psutil_proc_pidinfo(pid, PROC_PIDVNODEPATHINFO, &pathinfo,
+                              sizeof(pathinfo)))
+    {
+        return NULL;
+    }
+    return Py_BuildValue("s", pathinfo.pvi_cdir.vip_path);
+}
+
+
+/*
+ * Return path of the process executable.
+ */
+static PyObject *
+psutil_proc_exe(PyObject *self, PyObject *args)
+{
+    long pid;
+    char buf[PATH_MAX];
+    int ret;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    ret = proc_pidpath(pid, &buf, sizeof(buf));
+    if (ret == 0) {
+        if (! psutil_pid_exists(pid)) {
+            return NoSuchProcess();
+        }
+        else {
+            return AccessDenied();
+        }
+    }
+    return Py_BuildValue("s", buf);
+}
+
+
+/*
+ * Return the process command line as a Python list of arguments.
+ */
+static PyObject *
+psutil_proc_cmdline(PyObject *self, PyObject *args)
+{
+    long pid;
+    PyObject *arglist = NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    // get the commandline, defined in arch/osx/process_info.c
+    arglist = psutil_get_arg_list(pid);
+    return arglist;
+}
+
+
+/*
+ * Return process parent pid from kinfo_proc as a Python integer.
+ */
+static PyObject *
+psutil_proc_ppid(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("l", (long)kp.kp_eproc.e_ppid);
+}
+
+
+/*
+ * Return process real, effective and saved user ids from kinfo_proc
+ * as a Python tuple.
+ */
+static PyObject *
+psutil_proc_uids(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("lll",
+                         (long)kp.kp_eproc.e_pcred.p_ruid,
+                         (long)kp.kp_eproc.e_ucred.cr_uid,
+                         (long)kp.kp_eproc.e_pcred.p_svuid);
+}
+
+
+/*
+ * Return process real, effective and saved group ids from kinfo_proc
+ * as a Python tuple.
+ */
+static PyObject *
+psutil_proc_gids(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("lll",
+                         (long)kp.kp_eproc.e_pcred.p_rgid,
+                         (long)kp.kp_eproc.e_ucred.cr_groups[0],
+                         (long)kp.kp_eproc.e_pcred.p_svgid);
+}
+
+
+/*
+ * Return process controlling terminal number as an integer.
+ */
+static PyObject *
+psutil_proc_tty_nr(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("i", kp.kp_eproc.e_tdev);
+}
+
+
+/*
+ * Return a list of tuples describing the process's memory maps.
+ * The 'procstat' command-line utility was used as a reference.
+ */
+static PyObject *
+psutil_proc_memory_maps(PyObject *self, PyObject *args)
+{
+    char buf[PATH_MAX];
+    char addr_str[34];
+    char perms[8];
+    int pagesize = getpagesize();
+    long pid;
+    kern_return_t err = KERN_SUCCESS;
+    mach_port_t task = MACH_PORT_NULL;
+    uint32_t depth = 1;
+    vm_address_t address = 0;
+    vm_size_t size = 0;
+
+    PyObject *py_tuple = NULL;
+    PyObject *py_list = PyList_New(0);
+
+    if (py_list == NULL)
+        return NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+
+    err = task_for_pid(mach_task_self(), pid, &task);
+
+    if (err != KERN_SUCCESS) {
+        if (! psutil_pid_exists(pid)) {
+            NoSuchProcess();
+        }
+        else {
+            // pid exists, so return AccessDenied error since task_for_pid()
+            // failed
+            AccessDenied();
+        }
+        goto error;
+    }
+
+    while (1) {
+        py_tuple = NULL;
+        struct vm_region_submap_info_64 info;
+        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+        err = vm_region_recurse_64(task, &address, &size, &depth,
+                                   (vm_region_info_64_t)&info, &count);
+
+        if (err == KERN_INVALID_ADDRESS) {
+            break;
+        }
+
+        if (info.is_submap) {
+            depth++;
+        }
+        else {
+            // Zero the char buffers to avoid stale path data
+            memset(buf, 0, sizeof(buf));
+            memset(addr_str, 0, sizeof(addr_str));
+            memset(perms, 0, sizeof(perms));
+
+            sprintf(addr_str, "%016lx-%016lx", address, address + size);
+            sprintf(perms, "%c%c%c/%c%c%c",
+                    (info.protection & VM_PROT_READ) ? 'r' : '-',
+                    (info.protection & VM_PROT_WRITE) ? 'w' : '-',
+                    (info.protection & VM_PROT_EXECUTE) ? 'x' : '-',
+                    (info.max_protection & VM_PROT_READ) ? 'r' : '-',
+                    (info.max_protection & VM_PROT_WRITE) ? 'w' : '-',
+                    (info.max_protection & VM_PROT_EXECUTE) ? 'x' : '-');
+
+            err = proc_regionfilename(pid, address, buf, sizeof(buf));
+
+            if (info.share_mode == SM_COW && info.ref_count == 1) {
+                // Treat single reference SM_COW as SM_PRIVATE
+                info.share_mode = SM_PRIVATE;
+            }
+
+            if (strlen(buf) == 0) {
+                switch (info.share_mode) {
+                /*
+                case SM_LARGE_PAGE:
+                    // Treat SM_LARGE_PAGE the same as SM_PRIVATE
+                    // since they are not shareable and are wired.
+                */
+                case SM_COW:
+                    strcpy(buf, "[cow]");
+                    break;
+                case SM_PRIVATE:
+                    strcpy(buf, "[prv]");
+                    break;
+                case SM_EMPTY:
+                    strcpy(buf, "[nul]");
+                    break;
+                case SM_SHARED:
+                case SM_TRUESHARED:
+                    strcpy(buf, "[shm]");
+                    break;
+                case SM_PRIVATE_ALIASED:
+                    strcpy(buf, "[ali]");
+                    break;
+                case SM_SHARED_ALIASED:
+                    strcpy(buf, "[s/a]");
+                    break;
+                default:
+                    strcpy(buf, "[???]");
+                }
+            }
+
+            py_tuple = Py_BuildValue(
+                "sssIIIIIH",
+                addr_str,                                 // "start-end" address
+                perms,                                    // "rwx" permissions
+                buf,                                      // path
+                info.pages_resident * pagesize,           // rss
+                info.pages_shared_now_private * pagesize, // private
+                info.pages_swapped_out * pagesize,        // swapped
+                info.pages_dirtied * pagesize,            // dirtied
+                info.ref_count,                           // ref count
+                info.shadow_depth                         // shadow depth
+            );
+            if (!py_tuple)
+                goto error;
+            if (PyList_Append(py_list, py_tuple))
+                goto error;
+            Py_DECREF(py_tuple);
+        }
+
+        // increment address for the next map/file
+        address += size;
+    }
+
+    if (task != MACH_PORT_NULL)
+        mach_port_deallocate(mach_task_self(), task);
+
+    return py_list;
+
+error:
+    if (task != MACH_PORT_NULL)
+        mach_port_deallocate(mach_task_self(), task);
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_list);
+    return NULL;
+}
+
+
+/*
+ * Return the number of logical CPUs in the system.
+ * XXX this could be shared with BSD.
+ */
+static PyObject *
+psutil_cpu_count_logical(PyObject *self, PyObject *args)
+{
+    int mib[2];
+    int ncpu;
+    size_t len;
+
+    mib[0] = CTL_HW;
+    mib[1] = HW_NCPU;
+    len = sizeof(ncpu);
+
+    if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1) {
+        // mimic os.cpu_count()
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+    else {
+        return Py_BuildValue("i", ncpu);
+    }
+}
+
+
+/*
+ * Return the number of physical CPUs in the system.
+ */
+static PyObject *
+psutil_cpu_count_phys(PyObject *self, PyObject *args)
+{
+    int num;
+    size_t size = sizeof(int);
+    if (sysctlbyname("hw.physicalcpu", &num, &size, NULL, 0)) {
+        // mimic os.cpu_count()
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+    return Py_BuildValue("i", num);
+}
+
+
+#define TV2DOUBLE(t)    ((t).tv_sec + (t).tv_usec / 1000000.0)
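+// e.g. TV2DOUBLE applied to a struct timeval of {tv_sec = 3,
+// tv_usec = 250000} yields 3.25 seconds.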
+
+/*
+ * Return a Python tuple (user_time, kernel_time)
+ */
+static PyObject *
+psutil_proc_cpu_times(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_taskinfo pti;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_proc_pidinfo(pid, PROC_PIDTASKINFO, &pti, sizeof(pti))) {
+        return NULL;
+    }
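+    // pti_total_user / pti_total_system are expressed in nanoseconds
+    // (strictly, Mach absolute time units, which map 1:1 to nanoseconds
+    // on Intel Macs), hence the division by 1e9 below.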
+    return Py_BuildValue("(dd)",
+                         (float)pti.pti_total_user / 1000000000.0,
+                         (float)pti.pti_total_system / 1000000000.0);
+}
+
+
+/*
+ * Return a Python float indicating the process create time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_proc_create_time(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("d", TV2DOUBLE(kp.kp_proc.p_starttime));
+}
+
+
+/*
+ * Return extended memory info about a process.
+ */
+static PyObject *
+psutil_proc_memory_info(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_taskinfo pti;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_proc_pidinfo(pid, PROC_PIDTASKINFO, &pti, sizeof(pti))) {
+        return NULL;
+    }
+
+    // Note: determining other memory stats on OSX is a mess:
+    // http://www.opensource.apple.com/source/top/top-67/libtop.c?txt
+    // I just give up...
+    // struct proc_regioninfo pri;
+    // psutil_proc_pidinfo(pid, PROC_PIDREGIONINFO, &pri, sizeof(pri))
+    return Py_BuildValue(
+        "(KKkk)",
+        pti.pti_resident_size,  // resident memory size (rss)
+        pti.pti_virtual_size,   // virtual memory size (vms)
+        pti.pti_faults,         // number of page faults (pages)
+        pti.pti_pageins         // number of actual pageins (pages)
+    );
+}
+
+
+/*
+ * Return number of threads used by process as a Python integer.
+ */
+static PyObject *
+psutil_proc_num_threads(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_taskinfo pti;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_proc_pidinfo(pid, PROC_PIDTASKINFO, &pti, sizeof(pti))) {
+        return NULL;
+    }
+    return Py_BuildValue("k", pti.pti_threadnum);
+}
+
+
+/*
+ * Return the number of context switches performed by process.
+ */
+static PyObject *
+psutil_proc_num_ctx_switches(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct proc_taskinfo pti;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_proc_pidinfo(pid, PROC_PIDTASKINFO, &pti, sizeof(pti))) {
+        return NULL;
+    }
+    // the involuntary value does not seem to be available;
+    // pti.pti_csw probably refers to the sum of the two (getrusage()
+    // numbers seem to confirm this theory).
+    return Py_BuildValue("ki", pti.pti_csw, 0);
+}
+
+
+/*
+ * Return system virtual memory stats
+ */
+static PyObject *
+psutil_virtual_mem(PyObject *self, PyObject *args)
+{
+
+    int      mib[2];
+    uint64_t total;
+    size_t   len = sizeof(total);
+    vm_statistics_data_t vm;
+    int pagesize = getpagesize();
+
+    // physical mem
+    mib[0] = CTL_HW;
+    mib[1] = HW_MEMSIZE;
+    if (sysctl(mib, 2, &total, &len, NULL, 0)) {
+        if (errno != 0)
+            PyErr_SetFromErrno(PyExc_OSError);
+        else
+            PyErr_Format(PyExc_RuntimeError, "sysctl(HW_MEMSIZE) failed");
+        return NULL;
+    }
+
+    // vm
+    if (!psutil_sys_vminfo(&vm)) {
+        return NULL;
+    }
+
+    return Py_BuildValue(
+        "KKKKK",
+        total,
+        (unsigned long long) vm.active_count * pagesize,
+        (unsigned long long) vm.inactive_count * pagesize,
+        (unsigned long long) vm.wire_count * pagesize,
+        (unsigned long long) vm.free_count * pagesize
+    );
+}
+
+
+/*
+ * Return stats about swap memory.
+ */
+static PyObject *
+psutil_swap_mem(PyObject *self, PyObject *args)
+{
+    int mib[2];
+    size_t size;
+    struct xsw_usage totals;
+    vm_statistics_data_t vmstat;
+    int pagesize = getpagesize();
+
+    mib[0] = CTL_VM;
+    mib[1] = VM_SWAPUSAGE;
+    size = sizeof(totals);
+    if (sysctl(mib, 2, &totals, &size, NULL, 0) == -1) {
+        if (errno != 0)
+            PyErr_SetFromErrno(PyExc_OSError);
+        else
+            PyErr_Format(PyExc_RuntimeError, "sysctl(VM_SWAPUSAGE) failed");
+        return NULL;
+    }
+    if (!psutil_sys_vminfo(&vmstat)) {
+        return NULL;
+    }
+
+    return Py_BuildValue(
+        "LLLKK",
+        totals.xsu_total,
+        totals.xsu_used,
+        totals.xsu_avail,
+        (unsigned long long)vmstat.pageins * pagesize,
+        (unsigned long long)vmstat.pageouts * pagesize);
+}
+
+
+/*
+ * Return a Python tuple representing user, nice, system and idle CPU times
+ */
+static PyObject *
+psutil_cpu_times(PyObject *self, PyObject *args)
+{
+    mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
+    kern_return_t error;
+    host_cpu_load_info_data_t r_load;
+
+    mach_port_t host_port = mach_host_self();
+    error = host_statistics(host_port, HOST_CPU_LOAD_INFO,
+                            (host_info_t)&r_load, &count);
+    if (error != KERN_SUCCESS) {
+        return PyErr_Format(PyExc_RuntimeError,
+                            "Error in host_statistics(): %s",
+                            mach_error_string(error));
+    }
+    mach_port_deallocate(mach_task_self(), host_port);
+
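+    // cpu_ticks[] counters are cumulative since boot; dividing by
+    // CLK_TCK (clock ticks per second, typically 100) converts them to
+    // seconds.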
+    return Py_BuildValue(
+        "(dddd)",
+        (double)r_load.cpu_ticks[CPU_STATE_USER] / CLK_TCK,
+        (double)r_load.cpu_ticks[CPU_STATE_NICE] / CLK_TCK,
+        (double)r_load.cpu_ticks[CPU_STATE_SYSTEM] / CLK_TCK,
+        (double)r_load.cpu_ticks[CPU_STATE_IDLE] / CLK_TCK
+    );
+}
+
+
+/*
+ * Return a Python list of tuples representing per-cpu times
+ */
+static PyObject *
+psutil_per_cpu_times(PyObject *self, PyObject *args)
+{
+    natural_t cpu_count;
+    processor_info_array_t info_array;
+    mach_msg_type_number_t info_count;
+    kern_return_t error;
+    processor_cpu_load_info_data_t *cpu_load_info = NULL;
+    int i, ret;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_cputime = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    mach_port_t host_port = mach_host_self();
+    error = host_processor_info(host_port, PROCESSOR_CPU_LOAD_INFO,
+                                &cpu_count, &info_array, &info_count);
+    if (error != KERN_SUCCESS) {
+        PyErr_Format(PyExc_RuntimeError, "Error in host_processor_info(): %s",
+                     mach_error_string(error));
+        goto error;
+    }
+    mach_port_deallocate(mach_task_self(), host_port);
+
+    cpu_load_info = (processor_cpu_load_info_data_t *) info_array;
+
+    for (i = 0; i < cpu_count; i++) {
+        py_cputime = Py_BuildValue(
+            "(dddd)",
+            (double)cpu_load_info[i].cpu_ticks[CPU_STATE_USER] / CLK_TCK,
+            (double)cpu_load_info[i].cpu_ticks[CPU_STATE_NICE] / CLK_TCK,
+            (double)cpu_load_info[i].cpu_ticks[CPU_STATE_SYSTEM] / CLK_TCK,
+            (double)cpu_load_info[i].cpu_ticks[CPU_STATE_IDLE] / CLK_TCK
+        );
+        if (!py_cputime)
+            goto error;
+        if (PyList_Append(py_retlist, py_cputime))
+            goto error;
+        Py_DECREF(py_cputime);
+    }
+
+    ret = vm_deallocate(mach_task_self(), (vm_address_t)info_array,
+                        info_count * sizeof(int));
+    if (ret != KERN_SUCCESS) {
+        PyErr_WarnEx(PyExc_RuntimeWarning, "vm_deallocate() failed", 2);
+    }
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_cputime);
+    Py_DECREF(py_retlist);
+    if (cpu_load_info != NULL) {
+        ret = vm_deallocate(mach_task_self(), (vm_address_t)info_array,
+                            info_count * sizeof(int));
+        if (ret != KERN_SUCCESS) {
+            PyErr_WarnEx(PyExc_RuntimeWarning, "vm_deallocate() failed", 2);
+        }
+    }
+    return NULL;
+}
+
+
+/*
+ * Return a Python float indicating the system boot time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_boot_time(PyObject *self, PyObject *args)
+{
+    // fetch sysctl "kern.boottime"
+    static int request[2] = { CTL_KERN, KERN_BOOTTIME };
+    struct timeval result;
+    size_t result_len = sizeof result;
+    time_t boot_time = 0;
+
+    if (sysctl(request, 2, &result, &result_len, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    boot_time = result.tv_sec;
+    return Py_BuildValue("f", (float)boot_time);
+}
+
+
+/*
+ * Return a list of tuples including device, mount point and fs type
+ * for all partitions mounted on the system.
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    int num;
+    int i;
+    long len;
+    uint64_t flags;
+    char opts[400];
+    struct statfs *fs = NULL;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    // get the number of mount points
+    Py_BEGIN_ALLOW_THREADS
+    num = getfsstat(NULL, 0, MNT_NOWAIT);
+    Py_END_ALLOW_THREADS
+    if (num == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    len = sizeof(*fs) * num;
+    fs = malloc(len);
+    if (fs == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    Py_BEGIN_ALLOW_THREADS
+    num = getfsstat(fs, len, MNT_NOWAIT);
+    Py_END_ALLOW_THREADS
+    if (num == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    for (i = 0; i < num; i++) {
+        opts[0] = 0;
+        flags = fs[i].f_flags;
+
+        // see sys/mount.h
+        if (flags & MNT_RDONLY)
+            strlcat(opts, "ro", sizeof(opts));
+        else
+            strlcat(opts, "rw", sizeof(opts));
+        if (flags & MNT_SYNCHRONOUS)
+            strlcat(opts, ",sync", sizeof(opts));
+        if (flags & MNT_NOEXEC)
+            strlcat(opts, ",noexec", sizeof(opts));
+        if (flags & MNT_NOSUID)
+            strlcat(opts, ",nosuid", sizeof(opts));
+        if (flags & MNT_UNION)
+            strlcat(opts, ",union", sizeof(opts));
+        if (flags & MNT_ASYNC)
+            strlcat(opts, ",async", sizeof(opts));
+        if (flags & MNT_EXPORTED)
+            strlcat(opts, ",exported", sizeof(opts));
+        if (flags & MNT_QUARANTINE)
+            strlcat(opts, ",quarantine", sizeof(opts));
+        if (flags & MNT_LOCAL)
+            strlcat(opts, ",local", sizeof(opts));
+        if (flags & MNT_QUOTA)
+            strlcat(opts, ",quota", sizeof(opts));
+        if (flags & MNT_ROOTFS)
+            strlcat(opts, ",rootfs", sizeof(opts));
+        if (flags & MNT_DOVOLFS)
+            strlcat(opts, ",dovolfs", sizeof(opts));
+        if (flags & MNT_DONTBROWSE)
+            strlcat(opts, ",dontbrowse", sizeof(opts));
+        if (flags & MNT_IGNORE_OWNERSHIP)
+            strlcat(opts, ",ignore-ownership", sizeof(opts));
+        if (flags & MNT_AUTOMOUNTED)
+            strlcat(opts, ",automounted", sizeof(opts));
+        if (flags & MNT_JOURNALED)
+            strlcat(opts, ",journaled", sizeof(opts));
+        if (flags & MNT_NOUSERXATTR)
+            strlcat(opts, ",nouserxattr", sizeof(opts));
+        if (flags & MNT_DEFWRITE)
+            strlcat(opts, ",defwrite", sizeof(opts));
+        if (flags & MNT_MULTILABEL)
+            strlcat(opts, ",multilabel", sizeof(opts));
+        if (flags & MNT_NOATIME)
+            strlcat(opts, ",noatime", sizeof(opts));
+        if (flags & MNT_UPDATE)
+            strlcat(opts, ",update", sizeof(opts));
+        if (flags & MNT_RELOAD)
+            strlcat(opts, ",reload", sizeof(opts));
+        if (flags & MNT_FORCE)
+            strlcat(opts, ",force", sizeof(opts));
+        if (flags & MNT_CMDFLAGS)
+            strlcat(opts, ",cmdflags", sizeof(opts));
+
+        py_tuple = Py_BuildValue(
+            "(ssss)", fs[i].f_mntfromname,  // device
+            fs[i].f_mntonname,    // mount point
+            fs[i].f_fstypename,   // fs type
+            opts);                // options
+        if (!py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+    }
+
+    free(fs);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    if (fs != NULL)
+        free(fs);
+    return NULL;
+}
+
+
+/*
+ * Return process status as a Python integer.
+ */
+static PyObject *
+psutil_proc_status(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_get_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("i", (int)kp.kp_proc.p_stat);
+}
+
+
+/*
+ * Return process threads
+ */
+static PyObject *
+psutil_proc_threads(PyObject *self, PyObject *args)
+{
+    long pid;
+    int err, j, ret;
+    kern_return_t kr;
+    unsigned int info_count = TASK_BASIC_INFO_COUNT;
+    mach_port_t task = MACH_PORT_NULL;
+    struct task_basic_info tasks_info;
+    thread_act_port_array_t thread_list = NULL;
+    thread_info_data_t thinfo_basic;
+    thread_basic_info_t basic_info_th;
+    mach_msg_type_number_t thread_count, thread_info_count;
+
+    PyObject *retList = PyList_New(0);
+    PyObject *pyTuple = NULL;
+
+    if (retList == NULL)
+        return NULL;
+
+    // the argument passed should be a process id
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+
+    // task_for_pid() requires special privileges
+    err = task_for_pid(mach_task_self(), pid, &task);
+    if (err != KERN_SUCCESS) {
+        if (! psutil_pid_exists(pid)) {
+            NoSuchProcess();
+        }
+        else {
+            AccessDenied();
+        }
+        goto error;
+    }
+
+    info_count = TASK_BASIC_INFO_COUNT;
+    err = task_info(task, TASK_BASIC_INFO, (task_info_t)&tasks_info,
+                    &info_count);
+    if (err != KERN_SUCCESS) {
+        // err == 4 is KERN_INVALID_ARGUMENT, which here means access denied
+        if (err == 4) {
+            AccessDenied();
+        }
+        else {
+            // otherwise throw a runtime error with appropriate error code
+            PyErr_Format(PyExc_RuntimeError,
+                         "task_info(TASK_BASIC_INFO) failed");
+        }
+        goto error;
+    }
+
+    err = task_threads(task, &thread_list, &thread_count);
+    if (err != KERN_SUCCESS) {
+        PyErr_Format(PyExc_RuntimeError, "task_threads() failed");
+        goto error;
+    }
+
+    for (j = 0; j < thread_count; j++) {
+        pyTuple = NULL;
+        thread_info_count = THREAD_INFO_MAX;
+        kr = thread_info(thread_list[j], THREAD_BASIC_INFO,
+                         (thread_info_t)thinfo_basic, &thread_info_count);
+        if (kr != KERN_SUCCESS) {
+            PyErr_Format(PyExc_RuntimeError,
+                         "thread_info() with flag THREAD_BASIC_INFO failed");
+            goto error;
+        }
+
+        basic_info_th = (thread_basic_info_t)thinfo_basic;
+        pyTuple = Py_BuildValue(
+            "Iff",
+            j + 1,
+            (float)basic_info_th->user_time.microseconds / 1000000.0,
+            (float)basic_info_th->system_time.microseconds / 1000000.0
+        );
+        if (!pyTuple)
+            goto error;
+        if (PyList_Append(retList, pyTuple))
+            goto error;
+        Py_DECREF(pyTuple);
+    }
+
+    ret = vm_deallocate(task, (vm_address_t)thread_list,
+                        thread_count * sizeof(int));
+    if (ret != KERN_SUCCESS) {
+        PyErr_WarnEx(PyExc_RuntimeWarning, "vm_deallocate() failed", 2);
+    }
+
+    mach_port_deallocate(mach_task_self(), task);
+
+    return retList;
+
+error:
+    if (task != MACH_PORT_NULL)
+        mach_port_deallocate(mach_task_self(), task);
+    Py_XDECREF(pyTuple);
+    Py_DECREF(retList);
+    if (thread_list != NULL) {
+        ret = vm_deallocate(task, (vm_address_t)thread_list,
+                            thread_count * sizeof(int));
+        if (ret != KERN_SUCCESS) {
+            PyErr_WarnEx(PyExc_RuntimeWarning, "vm_deallocate() failed", 2);
+        }
+    }
+    return NULL;
+}
+
+
+/*
+ * Return process open files as a Python tuple.
+ * References:
+ * - lsof source code: http://goo.gl/SYW79 and http://goo.gl/m78fd
+ * - /usr/include/sys/proc_info.h
+ */
+static PyObject *
+psutil_proc_open_files(PyObject *self, PyObject *args)
+{
+    long pid;
+    int pidinfo_result;
+    int iterations;
+    int i;
+    int nb;
+
+    struct proc_fdinfo *fds_pointer = NULL;
+    struct proc_fdinfo *fdp_pointer;
+    struct vnode_fdinfowithpath vi;
+
+    PyObject *retList = PyList_New(0);
+    PyObject *tuple = NULL;
+
+    if (retList == NULL)
+        return NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+
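+    // proc_pidinfo() is called twice: first with a NULL buffer to learn
+    // how many bytes are needed, then with a malloc'ed buffer of that
+    // size to fetch the actual fd list.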
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0);
+    if (pidinfo_result <= 0) {
+        // may be ignored later if errno != 0
+        PyErr_Format(PyExc_RuntimeError,
+                     "proc_pidinfo(PROC_PIDLISTFDS) failed");
+        goto error;
+    }
+
+    fds_pointer = malloc(pidinfo_result);
+    if (fds_pointer == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds_pointer,
+                                  pidinfo_result);
+    if (pidinfo_result <= 0) {
+        // may be ignored later if errno != 0
+        PyErr_Format(PyExc_RuntimeError,
+                     "proc_pidinfo(PROC_PIDLISTFDS) failed");
+        goto error;
+    }
+
+    iterations = (pidinfo_result / PROC_PIDLISTFD_SIZE);
+
+    for (i = 0; i < iterations; i++) {
+        tuple = NULL;
+        fdp_pointer = &fds_pointer[i];
+
+        if (fdp_pointer->proc_fdtype == PROX_FDTYPE_VNODE)
+        {
+            nb = proc_pidfdinfo(pid,
+                                fdp_pointer->proc_fd,
+                                PROC_PIDFDVNODEPATHINFO,
+                                &vi,
+                                sizeof(vi));
+
+            // --- errors checking
+            if (nb <= 0) {
+                if ((errno == ENOENT) || (errno == EBADF)) {
+                    // no such file or directory or bad file descriptor;
+                    // let's assume the file has been closed or removed
+                    continue;
+                }
+                // may be ignored later if errno != 0
+                PyErr_Format(PyExc_RuntimeError,
+                             "proc_pidinfo(PROC_PIDFDVNODEPATHINFO) failed");
+                goto error;
+            }
+            if (nb < sizeof(vi)) {
+                PyErr_Format(PyExc_RuntimeError,
+                             "proc_pidinfo(PROC_PIDFDVNODEPATHINFO) failed "
+                             "(buffer mismatch)");
+                goto error;
+            }
+            // --- /errors checking
+
+            // --- construct python list
+            tuple = Py_BuildValue("(si)",
+                                  vi.pvip.vip_path,
+                                  (int)fdp_pointer->proc_fd);
+            if (!tuple)
+                goto error;
+            if (PyList_Append(retList, tuple))
+                goto error;
+            Py_DECREF(tuple);
+            // --- /construct python list
+        }
+    }
+
+    free(fds_pointer);
+    return retList;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(retList);
+    if (fds_pointer != NULL) {
+        free(fds_pointer);
+    }
+    if (errno != 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    else if (! psutil_pid_exists(pid)) {
+        return NoSuchProcess();
+    }
+    else {
+        // exception has already been set earlier
+        return NULL;
+    }
+}
+
+
+// a sentinel for connections without an actual status
+static int PSUTIL_CONN_NONE = 128;
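+// (128 sits above the TCPS_* TCP state values from netinet/tcp_fsm.h,
+// which range from 0 to 10, so it cannot collide with a real state.)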
+
+/*
+ * Return process TCP and UDP connections as a list of tuples.
+ * References:
+ * - lsof source code: http://goo.gl/SYW79 and http://goo.gl/wNrC0
+ * - /usr/include/sys/proc_info.h
+ */
+static PyObject *
+psutil_proc_connections(PyObject *self, PyObject *args)
+{
+    long pid;
+    int pidinfo_result;
+    int iterations;
+    int i;
+    int nb;
+
+    struct proc_fdinfo *fds_pointer = NULL;
+    struct proc_fdinfo *fdp_pointer;
+    struct socket_fdinfo si;
+
+    PyObject *retList = PyList_New(0);
+    PyObject *tuple = NULL;
+    PyObject *laddr = NULL;
+    PyObject *raddr = NULL;
+    PyObject *af_filter = NULL;
+    PyObject *type_filter = NULL;
+
+    if (retList == NULL)
+        return NULL;
+
+    if (! PyArg_ParseTuple(args, "lOO", &pid, &af_filter, &type_filter)) {
+        goto error;
+    }
+
+    if (!PySequence_Check(af_filter) || !PySequence_Check(type_filter)) {
+        PyErr_SetString(PyExc_TypeError, "arg 2 or 3 is not a sequence");
+        goto error;
+    }
+
+    if (pid == 0) {
+        return retList;
+    }
+
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0);
+    if (pidinfo_result <= 0) {
+        goto error;
+    }
+
+    fds_pointer = malloc(pidinfo_result);
+    if (fds_pointer == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds_pointer,
+                                  pidinfo_result);
+
+    if (pidinfo_result <= 0) {
+        goto error;
+    }
+
+    iterations = (pidinfo_result / PROC_PIDLISTFD_SIZE);
+
+    for (i = 0; i < iterations; i++) {
+        tuple = NULL;
+        laddr = NULL;
+        raddr = NULL;
+        errno = 0;
+        fdp_pointer = &fds_pointer[i];
+
+        if (fdp_pointer->proc_fdtype == PROX_FDTYPE_SOCKET)
+        {
+            nb = proc_pidfdinfo(pid, fdp_pointer->proc_fd,
+                                PROC_PIDFDSOCKETINFO, &si, sizeof(si));
+
+            // --- errors checking
+            if (nb <= 0) {
+                if (errno == EBADF) {
+                    // let's assume socket has been closed
+                    continue;
+                }
+                if (errno != 0) {
+                    PyErr_SetFromErrno(PyExc_OSError);
+                }
+                else {
+                    PyErr_Format(
+                        PyExc_RuntimeError,
+                        "proc_pidinfo(PROC_PIDFDSOCKETINFO) failed");
+                }
+                goto error;
+            }
+            if (nb < sizeof(si)) {
+                PyErr_Format(PyExc_RuntimeError,
+                             "proc_pidinfo(PROC_PIDFDSOCKETINFO) failed "
+                             "(buffer mismatch)");
+                goto error;
+            }
+            // --- /errors checking
+
+
+            int fd, family, type, lport, rport, state;
+            char lip[200], rip[200];
+            int inseq;
+            PyObject *_family;
+            PyObject *_type;
+
+            fd = (int)fdp_pointer->proc_fd;
+            family = si.psi.soi_family;
+            type = si.psi.soi_type;
+
+            // apply filters
+            _family = PyLong_FromLong((long)family);
+            inseq = PySequence_Contains(af_filter, _family);
+            Py_DECREF(_family);
+            if (inseq == 0) {
+                continue;
+            }
+            _type = PyLong_FromLong((long)type);
+            inseq = PySequence_Contains(type_filter, _type);
+            Py_DECREF(_type);
+            if (inseq == 0) {
+                continue;
+            }
+
+            if (errno != 0) {
+                PyErr_SetFromErrno(PyExc_OSError);
+                goto error;
+            }
+
+            if ((family == AF_INET) || (family == AF_INET6)) {
+                if (family == AF_INET) {
+                    inet_ntop(AF_INET,
+                              &si.psi.soi_proto.pri_tcp.tcpsi_ini. \
+                                  insi_laddr.ina_46.i46a_addr4,
+                              lip,
+                              sizeof(lip));
+                    inet_ntop(AF_INET,
+                              &si.psi.soi_proto.pri_tcp.tcpsi_ini.insi_faddr. \
+                                  ina_46.i46a_addr4,
+                              rip,
+                              sizeof(rip));
+                }
+                else {
+                    inet_ntop(AF_INET6,
+                              &si.psi.soi_proto.pri_tcp.tcpsi_ini. \
+                                  insi_laddr.ina_6,
+                              lip, sizeof(lip));
+                    inet_ntop(AF_INET6,
+                              &si.psi.soi_proto.pri_tcp.tcpsi_ini. \
+                                  insi_faddr.ina_6,
+                              rip, sizeof(rip));
+                }
+
+                // check for inet_ntop failures
+                if (errno != 0) {
+                    PyErr_SetFromErrno(PyExc_OSError);
+                    goto error;
+                }
+
+                lport = ntohs(si.psi.soi_proto.pri_tcp.tcpsi_ini.insi_lport);
+                rport = ntohs(si.psi.soi_proto.pri_tcp.tcpsi_ini.insi_fport);
+                if (type == SOCK_STREAM) {
+                    state = (int)si.psi.soi_proto.pri_tcp.tcpsi_state;
+                }
+                else {
+                    state = PSUTIL_CONN_NONE;
+                }
+
+                laddr = Py_BuildValue("(si)", lip, lport);
+                if (!laddr)
+                    goto error;
+                if (rport != 0) {
+                    raddr = Py_BuildValue("(si)", rip, rport);
+                }
+                else {
+                    raddr = Py_BuildValue("()");
+                }
+                if (!raddr)
+                    goto error;
+
+                // construct the python list
+                tuple = Py_BuildValue("(iiiNNi)", fd, family, type, laddr,
+                                      raddr, state);
+                if (!tuple)
+                    goto error;
+                if (PyList_Append(retList, tuple))
+                    goto error;
+                Py_DECREF(tuple);
+            }
+            else if (family == AF_UNIX) {
+                // construct the python list
+                tuple = Py_BuildValue(
+                    "(iiissi)",
+                    fd, family, type,
+                    si.psi.soi_proto.pri_un.unsi_addr.ua_sun.sun_path,
+                    si.psi.soi_proto.pri_un.unsi_caddr.ua_sun.sun_path,
+                    PSUTIL_CONN_NONE);
+                if (!tuple)
+                    goto error;
+                if (PyList_Append(retList, tuple))
+                    goto error;
+                Py_DECREF(tuple);
+            }
+        }
+    }
+
+    free(fds_pointer);
+    return retList;
+
+error:
+    Py_XDECREF(tuple);
+    Py_XDECREF(laddr);
+    Py_XDECREF(raddr);
+    Py_DECREF(retList);
+
+    if (fds_pointer != NULL) {
+        free(fds_pointer);
+    }
+    if (errno != 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    else if (! psutil_pid_exists(pid)) {
+        return NoSuchProcess();
+    }
+    else {
+        return PyErr_Format(PyExc_RuntimeError,
+                            "proc_pidinfo(PROC_PIDLISTFDS) failed");
+    }
+}
+
+
+/*
+ * Return number of file descriptors opened by process.
+ */
+static PyObject *
+psutil_proc_num_fds(PyObject *self, PyObject *args)
+{
+    long pid;
+    int pidinfo_result;
+    int num;
+    struct proc_fdinfo *fds_pointer;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0);
+    if (pidinfo_result <= 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+
+    fds_pointer = malloc(pidinfo_result);
+    if (fds_pointer == NULL) {
+        return PyErr_NoMemory();
+    }
+    pidinfo_result = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, fds_pointer,
+                                  pidinfo_result);
+    if (pidinfo_result <= 0) {
+        free(fds_pointer);
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+
+    num = (pidinfo_result / PROC_PIDLISTFD_SIZE);
+    free(fds_pointer);
+    return Py_BuildValue("i", num);
+}
+
+
+/*
+ * Return a Python dict of tuples with overall network I/O information
+ */
+static PyObject *
+psutil_net_io_counters(PyObject *self, PyObject *args)
+{
+    char *buf = NULL, *lim, *next;
+    struct if_msghdr *ifm;
+    int mib[6];
+    size_t len;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_ifc_info = NULL;
+
+    if (py_retdict == NULL)
+        return NULL;
+
+    mib[0] = CTL_NET;          // networking subsystem
+    mib[1] = PF_ROUTE;         // type of information
+    mib[2] = 0;                // protocol (IPPROTO_xxx)
+    mib[3] = 0;                // address family
+    mib[4] = NET_RT_IFLIST2;   // operation
+    mib[5] = 0;
+
+    if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    buf = malloc(len);
+    if (buf == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    lim = buf + len;
+
+    for (next = buf; next < lim; ) {
+        ifm = (struct if_msghdr *)next;
+        next += ifm->ifm_msglen;
+
+        if (ifm->ifm_type == RTM_IFINFO2) {
+            py_ifc_info = NULL;
+            struct if_msghdr2 *if2m = (struct if_msghdr2 *)ifm;
+            struct sockaddr_dl *sdl = (struct sockaddr_dl *)(if2m + 1);
+            char ifc_name[32];
+
+            strncpy(ifc_name, sdl->sdl_data, sdl->sdl_nlen);
+            ifc_name[sdl->sdl_nlen] = 0;
+
+            py_ifc_info = Py_BuildValue(
+                "(KKKKKKKi)",
+                if2m->ifm_data.ifi_obytes,
+                if2m->ifm_data.ifi_ibytes,
+                if2m->ifm_data.ifi_opackets,
+                if2m->ifm_data.ifi_ipackets,
+                if2m->ifm_data.ifi_ierrors,
+                if2m->ifm_data.ifi_oerrors,
+                if2m->ifm_data.ifi_iqdrops,
+                0);  // dropout not supported
+
+            if (!py_ifc_info)
+                goto error;
+            if (PyDict_SetItemString(py_retdict, ifc_name, py_ifc_info))
+                goto error;
+            Py_DECREF(py_ifc_info);
+        }
+        else {
+            continue;
+        }
+    }
+
+    free(buf);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_ifc_info);
+    Py_DECREF(py_retdict);
+    if (buf != NULL)
+        free(buf);
+    return NULL;
+}
+
+
+/*
+ * Return a Python dict of tuples for disk I/O information
+ */
+static PyObject *
+psutil_disk_io_counters(PyObject *self, PyObject *args)
+{
+    CFDictionaryRef parent_dict;
+    CFDictionaryRef props_dict;
+    CFDictionaryRef stats_dict;
+    io_registry_entry_t parent;
+    io_registry_entry_t disk;
+    io_iterator_t disk_list;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_disk_info = NULL;
+
+    if (py_retdict == NULL)
+        return NULL;
+
+    // Get list of disks
+    if (IOServiceGetMatchingServices(kIOMasterPortDefault,
+                                     IOServiceMatching(kIOMediaClass),
+                                     &disk_list) != kIOReturnSuccess) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "unable to get the list of disks.");
+        goto error;
+    }
+
+    // Iterate over disks
+    while ((disk = IOIteratorNext(disk_list)) != 0) {
+        py_disk_info = NULL;
+        parent_dict = NULL;
+        props_dict = NULL;
+        stats_dict = NULL;
+
+        if (IORegistryEntryGetParentEntry(disk, kIOServicePlane, &parent)
+                != kIOReturnSuccess) {
+            PyErr_SetString(PyExc_RuntimeError,
+                            "unable to get the disk's parent.");
+            IOObjectRelease(disk);
+            goto error;
+        }
+
+        if (IOObjectConformsTo(parent, "IOBlockStorageDriver")) {
+            if (IORegistryEntryCreateCFProperties(
+                    disk,
+                    (CFMutableDictionaryRef *) &parent_dict,
+                    kCFAllocatorDefault,
+                    kNilOptions
+                ) != kIOReturnSuccess)
+            {
+                PyErr_SetString(PyExc_RuntimeError,
+                                "unable to get the parent's properties.");
+                IOObjectRelease(disk);
+                IOObjectRelease(parent);
+                goto error;
+            }
+
+            if (IORegistryEntryCreateCFProperties(
+                    parent,
+                    (CFMutableDictionaryRef *) &props_dict,
+                    kCFAllocatorDefault,
+                    kNilOptions
+                ) != kIOReturnSuccess)
+            {
+                PyErr_SetString(PyExc_RuntimeError,
+                                "unable to get the disk properties.");
+                CFRelease(props_dict);
+                IOObjectRelease(disk);
+                IOObjectRelease(parent);
+                goto error;
+            }
+
+            const int kMaxDiskNameSize = 64;
+            CFStringRef disk_name_ref = (CFStringRef)CFDictionaryGetValue(
+                parent_dict, CFSTR(kIOBSDNameKey));
+            char disk_name[kMaxDiskNameSize];
+
+            CFStringGetCString(disk_name_ref,
+                               disk_name,
+                               kMaxDiskNameSize,
+                               CFStringGetSystemEncoding());
+
+            stats_dict = (CFDictionaryRef)CFDictionaryGetValue(
+                props_dict, CFSTR(kIOBlockStorageDriverStatisticsKey));
+
+            if (stats_dict == NULL) {
+                PyErr_SetString(PyExc_RuntimeError,
+                                "Unable to get disk stats.");
+                goto error;
+            }
+
+            CFNumberRef number;
+            int64_t reads = 0;
+            int64_t writes = 0;
+            int64_t read_bytes = 0;
+            int64_t write_bytes = 0;
+            int64_t read_time = 0;
+            int64_t write_time = 0;
+
+            // Get disk reads/writes
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsReadsKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &reads);
+            }
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsWritesKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &writes);
+            }
+
+            // Get disk bytes read/written
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsBytesReadKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &read_bytes);
+            }
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsBytesWrittenKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &write_bytes);
+            }
+
+            // Get disk time spent reading/writing (nanoseconds)
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsTotalReadTimeKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &read_time);
+            }
+            if ((number = (CFNumberRef)CFDictionaryGetValue(
+                    stats_dict,
+                    CFSTR(kIOBlockStorageDriverStatisticsTotalWriteTimeKey))))
+            {
+                CFNumberGetValue(number, kCFNumberSInt64Type, &write_time);
+            }
+
+            // Read/Write time on OS X comes back in nanoseconds and in psutil
+            // we've standardized on milliseconds so do the conversion.
+            py_disk_info = Py_BuildValue(
+                "(KKKKKK)",
+                reads,
+                writes,
+                read_bytes,
+                write_bytes,
+                read_time / 1000 / 1000,
+                write_time / 1000 / 1000);
+            if (!py_disk_info)
+                goto error;
+            if (PyDict_SetItemString(py_retdict, disk_name, py_disk_info))
+                goto error;
+            Py_DECREF(py_disk_info);
+
+            CFRelease(parent_dict);
+            IOObjectRelease(parent);
+            CFRelease(props_dict);
+            IOObjectRelease(disk);
+        }
+    }
+
+    IOObjectRelease(disk_list);
+
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_disk_info);
+    Py_DECREF(py_retdict);
+    return NULL;
+}
+
+
+/*
+ * Return currently connected users as a list of tuples.
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    struct utmpx *utx;
+    PyObject *ret_list = PyList_New(0);
+    PyObject *tuple = NULL;
+
+    if (ret_list == NULL)
+        return NULL;
+    while ((utx = getutxent()) != NULL) {
+        if (utx->ut_type != USER_PROCESS)
+            continue;
+        tuple = Py_BuildValue(
+            "(sssf)",
+            utx->ut_user,             // username
+            utx->ut_line,             // tty
+            utx->ut_host,             // hostname
+            (float)utx->ut_tv.tv_sec  // start time
+        );
+        if (!tuple) {
+            endutxent();
+            goto error;
+        }
+        if (PyList_Append(ret_list, tuple)) {
+            endutxent();
+            goto error;
+        }
+        Py_DECREF(tuple);
+    }
+
+    endutxent();
+    return ret_list;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(ret_list);
+    return NULL;
+}
+
+
+/*
+ * define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- per-process functions
+
+    {"proc_name", psutil_proc_name, METH_VARARGS,
+     "Return process name"},
+    {"proc_cmdline", psutil_proc_cmdline, METH_VARARGS,
+     "Return process cmdline as a list of cmdline arguments"},
+    {"proc_exe", psutil_proc_exe, METH_VARARGS,
+     "Return path of the process executable"},
+    {"proc_cwd", psutil_proc_cwd, METH_VARARGS,
+     "Return process current working directory."},
+    {"proc_ppid", psutil_proc_ppid, METH_VARARGS,
+     "Return process ppid as an integer"},
+    {"proc_uids", psutil_proc_uids, METH_VARARGS,
+     "Return process real user id as an integer"},
+    {"proc_gids", psutil_proc_gids, METH_VARARGS,
+     "Return process real group id as an integer"},
+    {"proc_cpu_times", psutil_proc_cpu_times, METH_VARARGS,
+     "Return tuple of user/kern time for the given PID"},
+    {"proc_create_time", psutil_proc_create_time, METH_VARARGS,
+     "Return a float indicating the process create time expressed in "
+     "seconds since the epoch"},
+    {"proc_memory_info", psutil_proc_memory_info, METH_VARARGS,
+     "Return memory information about a process"},
+    {"proc_num_threads", psutil_proc_num_threads, METH_VARARGS,
+     "Return number of threads used by process"},
+    {"proc_status", psutil_proc_status, METH_VARARGS,
+     "Return process status as an integer"},
+    {"proc_threads", psutil_proc_threads, METH_VARARGS,
+     "Return process threads as a list of tuples"},
+    {"proc_open_files", psutil_proc_open_files, METH_VARARGS,
+     "Return files opened by process as a list of tuples"},
+    {"proc_num_fds", psutil_proc_num_fds, METH_VARARGS,
+     "Return the number of fds opened by process."},
+    {"proc_num_ctx_switches", psutil_proc_num_ctx_switches, METH_VARARGS,
+     "Return the number of context switches performed by process"},
+    {"proc_connections", psutil_proc_connections, METH_VARARGS,
+     "Get process TCP and UDP connections as a list of tuples"},
+    {"proc_tty_nr", psutil_proc_tty_nr, METH_VARARGS,
+     "Return process tty number as an integer"},
+    {"proc_memory_maps", psutil_proc_memory_maps, METH_VARARGS,
+     "Return a list of tuples for every process's memory map"},
+
+    // --- system-related functions
+
+    {"pids", psutil_pids, METH_VARARGS,
+     "Returns a list of PIDs currently running on the system"},
+    {"cpu_count_logical", psutil_cpu_count_logical, METH_VARARGS,
+     "Return number of logical CPUs on the system"},
+    {"cpu_count_phys", psutil_cpu_count_phys, METH_VARARGS,
+     "Return number of physical CPUs on the system"},
+    {"virtual_mem", psutil_virtual_mem, METH_VARARGS,
+     "Return system virtual memory stats"},
+    {"swap_mem", psutil_swap_mem, METH_VARARGS,
+     "Return stats about swap memory, in bytes"},
+    {"cpu_times", psutil_cpu_times, METH_VARARGS,
+     "Return system cpu times as a tuple (user, system, nice, idle, irc)"},
+    {"per_cpu_times", psutil_per_cpu_times, METH_VARARGS,
+     "Return system per-cpu times as a list of tuples"},
+    {"boot_time", psutil_boot_time, METH_VARARGS,
+     "Return the system boot time expressed in seconds since the epoch."},
+    {"disk_partitions", psutil_disk_partitions, METH_VARARGS,
+     "Return a list of tuples including device, mount point and "
+     "fs type for all partitions mounted on the system."},
+    {"net_io_counters", psutil_net_io_counters, METH_VARARGS,
+     "Return dict of tuples of networks I/O information."},
+    {"disk_io_counters", psutil_disk_io_counters, METH_VARARGS,
+     "Return dict of tuples of disks I/O information."},
+    {"users", psutil_users, METH_VARARGS,
+     "Return currently connected users as a list of tuples"},
+
+    {NULL, NULL, 0, NULL}
+};
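+
+// Rough usage sketch of the table above, assuming the built extension is
+// importable as _psutil_osx (psutil's Python layer normally wraps these
+// calls rather than exposing them directly):
+//
+//     >>> import _psutil_osx
+//     >>> _psutil_osx.cpu_count_logical()   # e.g. 4; machine-dependent
+//     >>> _psutil_osx.proc_name(1)          # e.g. 'launchd' on OS X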
+
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+static struct module_state _state;
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_osx_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_osx_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_osx",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_osx_traverse,
+    psutil_osx_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_osx(void)
+
+#else
+#define INITERROR return
+
+void
+init_psutil_osx(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_osx", PsutilMethods);
+#endif
+    // bail out early instead of adding constants to a NULL module
+    if (module == NULL) {
+        INITERROR;
+    }
+    // process status constants, defined in:
+    // http://fxr.watson.org/fxr/source/bsd/sys/proc.h?v=xnu-792.6.70#L149
+    PyModule_AddIntConstant(module, "SIDL", SIDL);
+    PyModule_AddIntConstant(module, "SRUN", SRUN);
+    PyModule_AddIntConstant(module, "SSLEEP", SSLEEP);
+    PyModule_AddIntConstant(module, "SSTOP", SSTOP);
+    PyModule_AddIntConstant(module, "SZOMB", SZOMB);
+    // connection status constants
+    PyModule_AddIntConstant(module, "TCPS_CLOSED", TCPS_CLOSED);
+    PyModule_AddIntConstant(module, "TCPS_CLOSING", TCPS_CLOSING);
+    PyModule_AddIntConstant(module, "TCPS_CLOSE_WAIT", TCPS_CLOSE_WAIT);
+    PyModule_AddIntConstant(module, "TCPS_LISTEN", TCPS_LISTEN);
+    PyModule_AddIntConstant(module, "TCPS_ESTABLISHED", TCPS_ESTABLISHED);
+    PyModule_AddIntConstant(module, "TCPS_SYN_SENT", TCPS_SYN_SENT);
+    PyModule_AddIntConstant(module, "TCPS_SYN_RECEIVED", TCPS_SYN_RECEIVED);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_1", TCPS_FIN_WAIT_1);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_2", TCPS_FIN_WAIT_2);
+    PyModule_AddIntConstant(module, "TCPS_LAST_ACK", TCPS_LAST_ACK);
+    PyModule_AddIntConstant(module, "TCPS_TIME_WAIT", TCPS_TIME_WAIT);
+    PyModule_AddIntConstant(module, "PSUTIL_CONN_NONE", PSUTIL_CONN_NONE);
+
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.h
new file mode 100644
index 0000000..907a8e5
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_osx.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+// --- per-process functions
+static PyObject* psutil_proc_cmdline(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_connections(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_create_time(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cwd(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_exe(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_gids(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_info(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_maps(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_name(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_fds(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_open_files(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_ppid(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_status(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_tty_nr(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_uids(PyObject* self, PyObject* args);
+
+// --- system-related functions
+static PyObject* psutil_boot_time(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_logical(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_phys(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_net_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_per_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_pids(PyObject* self, PyObject* args);
+static PyObject* psutil_swap_mem(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);
+static PyObject* psutil_virtual_mem(PyObject* self, PyObject* args);

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.c
new file mode 100644
index 0000000..ad2e59d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Functions specific to all POSIX compliant platforms.
+ */
+
+#include <Python.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+
+#include "_psutil_posix.h"
+
+
+/*
+ * Given a PID return process priority as a Python integer.
+ */
+static PyObject *
+psutil_posix_getpriority(PyObject *self, PyObject *args)
+{
+    long pid;
+    int priority;
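+    // getpriority() may legitimately return -1, so the only reliable
+    // error check is clearing errno beforehand and testing it afterwards.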
+    errno = 0;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    priority = getpriority(PRIO_PROCESS, pid);
+    if (errno != 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    return Py_BuildValue("i", priority);
+}
+
+
+/*
+ * Given a PID and a value change process priority.
+ */
+static PyObject *
+psutil_posix_setpriority(PyObject *self, PyObject *args)
+{
+    long pid;
+    int priority;
+    int retval;
+    if (! PyArg_ParseTuple(args, "li", &pid, &priority)) {
+        return NULL;
+    }
+    retval = setpriority(PRIO_PROCESS, pid, priority);
+    if (retval == -1) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    {"getpriority", psutil_posix_getpriority, METH_VARARGS,
+     "Return process priority"},
+    {"setpriority", psutil_posix_setpriority, METH_VARARGS,
+     "Set process priority"},
+    {NULL, NULL, 0, NULL}
+};
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+static struct module_state _state;
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_posix_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_posix_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_posix",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_posix_traverse,
+    psutil_posix_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_posix(void)
+
+#else
+#define INITERROR return
+
+void init_psutil_posix(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_posix", PsutilMethods);
+#endif
+    if (module == NULL) {
+        INITERROR;
+    }
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.h
new file mode 100644
index 0000000..5a4681d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_posix.h
@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+static PyObject* psutil_posix_getpriority(PyObject* self, PyObject* args);
+static PyObject* psutil_posix_setpriority(PyObject* self, PyObject* args);


[03/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.c
new file mode 100644
index 0000000..6694389
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_windows.c
@@ -0,0 +1,3241 @@
+/*
+ * Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Windows platform-specific module methods for _psutil_windows
+ */
+
+// Fixes clash between winsock2.h and windows.h
+#define WIN32_LEAN_AND_MEAN
+
+#include <Python.h>
+#include <windows.h>
+#include <Psapi.h>
+#include <time.h>
+#include <lm.h>
+#include <WinIoCtl.h>
+#include <tchar.h>
+#include <tlhelp32.h>
+#include <winsock2.h>
+#include <iphlpapi.h>
+#include <wtsapi32.h>
+
+// Link with Iphlpapi.lib
+#pragma comment(lib, "IPHLPAPI.lib")
+
+#include "_psutil_windows.h"
+#include "_psutil_common.h"
+#include "arch/windows/security.h"
+#include "arch/windows/process_info.h"
+#include "arch/windows/process_handles.h"
+#include "arch/windows/ntextapi.h"
+
+#ifdef __MINGW32__
+#include "arch/windows/glpi.h"
+#endif
+
+/*
+ * Return a Python float representing the system boot time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_boot_time(PyObject *self, PyObject *args)
+{
+    double  uptime;
+    time_t pt;
+    FILETIME fileTime;
+    long long ll;
+
+    GetSystemTimeAsFileTime(&fileTime);
+
+    /*
+    HUGE thanks to:
+    http://johnstewien.spaces.live.com/blog/cns!E6885DB5CEBABBC8!831.entry
+
+    This function converts the FILETIME structure to the 32-bit
+    Unix time structure.
+    A time_t is a 32-bit value holding the number of seconds since
+    January 1, 1970. A FILETIME is a 64-bit value holding the number
+    of 100-nanosecond intervals since January 1, 1601. Convert by
+    subtracting the number of 100-nanosecond intervals between
+    01-01-1601 and 01-01-1970, then divide by 1e7 to get to the same
+    base granularity.
+    */
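+    // The offset 116444736000000000 is the gap between the two epochs
+    // (134774 days == 11644473600 seconds) expressed in 100-ns units:
+    // 11644473600 * 1e7 == 116444736000000000.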
+    ll = (((LONGLONG)(fileTime.dwHighDateTime)) << 32) \
+        + fileTime.dwLowDateTime;
+    pt = (time_t)((ll - 116444736000000000ull) / 10000000ull);
+
+    // XXX - By using GetTickCount() time will wrap around to zero if the
+    // system is run continuously for 49.7 days.
+    uptime = GetTickCount() / 1000.00f;
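+    // (GetTickCount64(), available since Windows Vista, would avoid the
+    // wrap-around at the cost of dropping older Windows versions.)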
+    return Py_BuildValue("d", (double)pt - uptime);
+}
+
+
+/*
+ * Return 1 if PID exists in the current process list, else 0.
+ */
+static PyObject *
+psutil_pid_exists(PyObject *self, PyObject *args)
+{
+    long pid;
+    int status;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    status = psutil_pid_is_running(pid);
+    if (-1 == status) {
+        return NULL; // exception raised in psutil_pid_is_running()
+    }
+    return PyBool_FromLong(status);
+}
+
+
+/*
+ * Return a Python list of all the PIDs running on the system.
+ */
+static PyObject *
+psutil_pids(PyObject *self, PyObject *args)
+{
+    DWORD *proclist = NULL;
+    DWORD numberOfReturnedPIDs;
+    DWORD i;
+    PyObject *pid = NULL;
+    PyObject *retlist = PyList_New(0);
+
+    if (retlist == NULL) {
+        return NULL;
+    }
+    proclist = psutil_get_pids(&numberOfReturnedPIDs);
+    if (NULL == proclist) {
+        goto error;
+    }
+
+    for (i = 0; i < numberOfReturnedPIDs; i++) {
+        pid = Py_BuildValue("I", proclist[i]);
+        if (!pid)
+            goto error;
+        if (PyList_Append(retlist, pid))
+            goto error;
+        Py_DECREF(pid);
+    }
+
+    // free C array allocated for PIDs
+    free(proclist);
+    return retlist;
+
+error:
+    Py_XDECREF(pid);
+    Py_DECREF(retlist);
+    if (proclist != NULL)
+        free(proclist);
+    return NULL;
+}
+
+
+/*
+ * Kill a process given its PID.
+ */
+static PyObject *
+psutil_proc_kill(PyObject *self, PyObject *args)
+{
+    HANDLE hProcess;
+    long pid;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (pid == 0) {
+        return AccessDenied();
+    }
+
+    hProcess = OpenProcess(PROCESS_TERMINATE, FALSE, pid);
+    if (hProcess == NULL) {
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            // see http://code.google.com/p/psutil/issues/detail?id=24
+            NoSuchProcess();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+        }
+        return NULL;
+    }
+
+    // kill the process
+    if (! TerminateProcess(hProcess, 0)) {
+        PyErr_SetFromWindowsErr(0);
+        CloseHandle(hProcess);
+        return NULL;
+    }
+
+    CloseHandle(hProcess);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * Wait for process to terminate and return its exit code.
+ */
+static PyObject *
+psutil_proc_wait(PyObject *self, PyObject *args)
+{
+    HANDLE hProcess;
+    DWORD ExitCode;
+    DWORD retVal;
+    long pid;
+    long timeout;
+
+    if (! PyArg_ParseTuple(args, "ll", &pid, &timeout)) {
+        return NULL;
+    }
+    if (pid == 0) {
+        return AccessDenied();
+    }
+
+    hProcess = OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION,
+                           FALSE, pid);
+    if (hProcess == NULL) {
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            // no such process; we do not want to raise NSP but
+            // return None instead.
+            Py_INCREF(Py_None);
+            return Py_None;
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    }
+
+    // wait until the process has terminated
+    Py_BEGIN_ALLOW_THREADS
+    retVal = WaitForSingleObject(hProcess, timeout);
+    Py_END_ALLOW_THREADS
+
+    if (retVal == WAIT_FAILED) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(GetLastError());
+    }
+    if (retVal == WAIT_TIMEOUT) {
+        CloseHandle(hProcess);
+        return Py_BuildValue("l", WAIT_TIMEOUT);
+    }
+
+    // get the exit code; note: the subprocess module (erroneously?) uses
+    // the value returned by WaitForSingleObject instead
+    if (GetExitCodeProcess(hProcess, &ExitCode) == 0) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(GetLastError());
+    }
+    CloseHandle(hProcess);
+#if PY_MAJOR_VERSION >= 3
+    return PyLong_FromLong((long) ExitCode);
+#else
+    return PyInt_FromLong((long) ExitCode);
+#endif
+}
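+
+/*
+ * Note the mixed return values above: on timeout the caller receives
+ * the raw integer WAIT_TIMEOUT (258) rather than an exit status, so the
+ * Python-side wrapper presumably has to distinguish the two cases
+ * itself.
+ */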
+
+
+/*
+ * Return a Python tuple (user_time, kernel_time)
+ */
+static PyObject *
+psutil_proc_cpu_times(PyObject *self, PyObject *args)
+{
+    long        pid;
+    HANDLE      hProcess;
+    FILETIME    ftCreate, ftExit, ftKernel, ftUser;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    if (! GetProcessTimes(hProcess, &ftCreate, &ftExit, &ftKernel, &ftUser)) {
+        CloseHandle(hProcess);
+        if (GetLastError() == ERROR_ACCESS_DENIED) {
+            // usually means the process has died so we throw a NoSuchProcess
+            // here
+            return NoSuchProcess();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    }
+
+    CloseHandle(hProcess);
+
+    /*
+     * User and kernel times are represented as a FILETIME structure
+     * which contains a 64-bit value representing the number of
+     * 100-nanosecond intervals since January 1, 1601 (UTC):
+     * http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx
+     * To convert it into a float representing the seconds that the
+     * process has executed in user/kernel mode I borrowed the code
+     * below from Python's Modules/posixmodule.c
+     */
+    return Py_BuildValue(
+       "(dd)",
+       (double)(ftUser.dwHighDateTime * 429.4967296 + \
+                ftUser.dwLowDateTime * 1e-7),
+       (double)(ftKernel.dwHighDateTime * 429.4967296 + \
+                ftKernel.dwLowDateTime * 1e-7)
+   );
+}
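+
+/*
+ * The magic constant 429.4967296 above is 2^32 * 1e-7: one unit of
+ * dwHighDateTime is worth 2^32 ticks of 100 ns, i.e. 4294967296 * 1e-7
+ * = 429.4967296 seconds. A minimal sketch, assuming a FILETIME holding
+ * exactly one second of CPU time:
+ *
+ *     FILETIME ft = {10000000, 0};  // {dwLowDateTime, dwHighDateTime}
+ *     double secs = ft.dwHighDateTime * 429.4967296 +
+ *                   ft.dwLowDateTime * 1e-7;  // secs == 1.0
+ */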
+
+
+/*
+ * Alternative implementation of the function above; it bypasses ACCESS DENIED.
+ */
+static PyObject *
+psutil_proc_cpu_times_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    double user, kernel;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    user = (double)process->UserTime.HighPart * 429.4967296 + \
+           (double)process->UserTime.LowPart * 1e-7;
+    kernel = (double)process->KernelTime.HighPart * 429.4967296 + \
+             (double)process->KernelTime.LowPart * 1e-7;
+    free(buffer);
+    return Py_BuildValue("(dd)", user, kernel);
+}
+
+
+/*
+ * Return a Python float indicating the process create time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_proc_create_time(PyObject *self, PyObject *args)
+{
+    long        pid;
+    long long   unix_time;
+    DWORD       exitCode;
+    HANDLE      hProcess;
+    BOOL        ret;
+    FILETIME    ftCreate, ftExit, ftKernel, ftUser;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    // special case for PIDs 0 and 4, return system boot time
+    if (0 == pid || 4 == pid) {
+        return psutil_boot_time(NULL, NULL);
+    }
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    if (! GetProcessTimes(hProcess, &ftCreate, &ftExit, &ftKernel, &ftUser)) {
+        CloseHandle(hProcess);
+        if (GetLastError() == ERROR_ACCESS_DENIED) {
+            // usually means the process has died so we throw a
+            // NoSuchProcess here
+            return NoSuchProcess();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    }
+
+    // Make sure the process is not gone as OpenProcess alone seems to be
+    // unreliable in doing so (it seems a previous call to p.wait() makes
+    // it unreliable).
+    // This check is important as creation time is used to make sure the
+    // process is still running.
+    ret = GetExitCodeProcess(hProcess, &exitCode);
+    CloseHandle(hProcess);
+    if (ret != 0) {
+        if (exitCode != STILL_ACTIVE) {
+            return NoSuchProcess();
+        }
+    }
+    else {
+        // Ignore access denied as it means the process is still alive.
+        // For all other errors, we want an exception.
+        if (GetLastError() != ERROR_ACCESS_DENIED) {
+            PyErr_SetFromWindowsErr(0);
+            return NULL;
+        }
+    }
+
+    /*
+    Convert the FILETIME structure to a Unix time.
+    It's the best I could find by googling and borrowing code here and there.
+    The time returned has a precision of 1 second.
+    */
+    unix_time = ((LONGLONG)ftCreate.dwHighDateTime) << 32;
+    unix_time += ftCreate.dwLowDateTime - 116444736000000000LL;
+    unix_time /= 10000000;
+    return Py_BuildValue("d", (double)unix_time);
+}
+
+
+/*
+ * Alternative implementation of the function above; it bypasses ACCESS DENIED.
+ */
+static PyObject *
+psutil_proc_create_time_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    long long   unix_time;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    // special case for PIDs 0 and 4: return the system boot time before
+    // psutil_get_proc_info() allocates a buffer that would otherwise leak
+    if (0 == pid || 4 == pid) {
+        return psutil_boot_time(NULL, NULL);
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    /*
+    Convert the LARGE_INTEGER union to a Unix time.
+    It's the best I could find by googling and borrowing code here and there.
+    The time returned has a precision of 1 second.
+    */
+    unix_time = ((LONGLONG)process->CreateTime.HighPart) << 32;
+    unix_time += process->CreateTime.LowPart - 116444736000000000LL;
+    unix_time /= 10000000;
+    free(buffer);
+    return Py_BuildValue("d", (double)unix_time);
+}
+
+
+/*
+ * Return the number of logical CPUs.
+ */
+static PyObject *
+psutil_cpu_count_logical(PyObject *self, PyObject *args)
+{
+    SYSTEM_INFO system_info;
+    system_info.dwNumberOfProcessors = 0;
+
+    GetSystemInfo(&system_info);
+    if (system_info.dwNumberOfProcessors == 0) {
+        // mimic os.cpu_count()
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+    else {
+        return Py_BuildValue("I", system_info.dwNumberOfProcessors);
+    }
+}
+
+
+typedef BOOL (WINAPI *LPFN_GLPI) (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION,
+                                  PDWORD);
+
+/*
+ * Return the number of physical CPU cores.
+ */
+static PyObject *
+psutil_cpu_count_phys(PyObject *self, PyObject *args)
+{
+    LPFN_GLPI glpi;
+    DWORD rc;
+    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = NULL;
+    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION ptr = NULL;
+    DWORD length = 0;
+    DWORD offset = 0;
+    int ncpus = 0;
+
+    glpi = (LPFN_GLPI)GetProcAddress(GetModuleHandle(TEXT("kernel32")),
+                                     "GetLogicalProcessorInformation");
+    if (glpi == NULL)
+        goto return_none;
+
+    while (1) {
+        rc = glpi(buffer, &length);
+        if (rc == FALSE) {
+            if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
+                if (buffer)
+                    free(buffer);
+                buffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)malloc(
+                    length);
+                if (NULL == buffer) {
+                    PyErr_NoMemory();
+                    return NULL;
+                }
+            }
+            else {
+                goto return_none;
+            }
+        }
+        else {
+            break;
+        }
+    }
+
+    ptr = buffer;
+    while (offset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) <= length) {
+        if (ptr->Relationship == RelationProcessorCore)
+            ncpus += 1;
+        offset += sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+        ptr++;
+    }
+
+    free(buffer);
+    if (ncpus == 0)
+        goto return_none;
+    else
+        return Py_BuildValue("i", ncpus);
+
+return_none:
+    // mimic os.cpu_count()
+    if (buffer != NULL)
+        free(buffer);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
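+
+/*
+ * The loop above follows the usual two-step Windows buffer negotiation:
+ * call GetLogicalProcessorInformation with no buffer, let it fail with
+ * ERROR_INSUFFICIENT_BUFFER while it fills in the required length, then
+ * allocate and call again. A minimal sketch of the same pattern:
+ *
+ *     DWORD len = 0;
+ *     glpi(NULL, &len);              // fails and sets len
+ *     buffer = malloc(len);
+ *     glpi(buffer, &len);            // succeeds
+ */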
+
+
+/*
+ * Return process cmdline as a Python list of cmdline arguments.
+ */
+static PyObject *
+psutil_proc_cmdline(PyObject *self, PyObject *args) {
+    long pid;
+    int pid_return;
+    PyObject *arglist;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if ((pid == 0) || (pid == 4)) {
+        return Py_BuildValue("[]");
+    }
+
+    pid_return = psutil_pid_is_running(pid);
+    if (pid_return == 0) {
+        return NoSuchProcess();
+    }
+    if (pid_return == -1) {
+        return NULL;
+    }
+
+    // XXX the assumption below probably needs to go away
+
+    // May fail any of several ReadProcessMemory calls etc. and
+    // not indicate a real problem so we ignore any errors and
+    // just live without commandline.
+    arglist = psutil_get_arg_list(pid);
+    if (NULL == arglist) {
+        // carry on anyway, clear any exceptions too
+        PyErr_Clear();
+        return Py_BuildValue("[]");
+    }
+
+    return arglist;
+}
+
+
+/*
+ * Return process executable path.
+ */
+static PyObject *
+psutil_proc_exe(PyObject *self, PyObject *args) {
+    long pid;
+    HANDLE hProcess;
+    wchar_t exe[MAX_PATH];
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid_waccess(pid, PROCESS_QUERY_INFORMATION);
+    if (NULL == hProcess) {
+        return NULL;
+    }
+    if (GetProcessImageFileNameW(hProcess, exe, MAX_PATH) == 0) {
+        CloseHandle(hProcess);
+        if (GetLastError() == ERROR_INVALID_PARAMETER) {
+            // see https://code.google.com/p/psutil/issues/detail?id=414
+            AccessDenied();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+        }
+        return NULL;
+    }
+    CloseHandle(hProcess);
+    return Py_BuildValue("u", exe);
+}
+
+
+/*
+ * Return process memory information as a Python tuple.
+ */
+static PyObject *
+psutil_proc_memory_info(PyObject *self, PyObject *args)
+{
+    HANDLE hProcess;
+    DWORD pid;
+#if (_WIN32_WINNT >= 0x0501)  // Windows XP with SP2
+    PROCESS_MEMORY_COUNTERS_EX cnt;
+#else
+    PROCESS_MEMORY_COUNTERS cnt;
+#endif
+    SIZE_T private = 0;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (NULL == hProcess) {
+        return NULL;
+    }
+
+    if (! GetProcessMemoryInfo(hProcess, &cnt, sizeof(cnt)) ) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+#if (_WIN32_WINNT >= 0x0501)  // Windows XP with SP2
+    private = cnt.PrivateUsage;
+#endif
+
+    CloseHandle(hProcess);
+
+    // PROCESS_MEMORY_COUNTERS values are defined as SIZE_T, which on 64-bit
+    // builds is an (unsigned long long) and on 32-bit builds an
+    // (unsigned int). "_WIN64" is defined when we're running a 64-bit
+    // Python interpreter, not merely when the *system* is 64-bit.
+#if defined(_WIN64)
+    return Py_BuildValue(
+        "(kKKKKKKKKK)",
+        cnt.PageFaultCount,  // unsigned long
+        (unsigned long long)cnt.PeakWorkingSetSize,
+        (unsigned long long)cnt.WorkingSetSize,
+        (unsigned long long)cnt.QuotaPeakPagedPoolUsage,
+        (unsigned long long)cnt.QuotaPagedPoolUsage,
+        (unsigned long long)cnt.QuotaPeakNonPagedPoolUsage,
+        (unsigned long long)cnt.QuotaNonPagedPoolUsage,
+        (unsigned long long)cnt.PagefileUsage,
+        (unsigned long long)cnt.PeakPagefileUsage,
+        (unsigned long long)private);
+#else
+    return Py_BuildValue(
+        "(kIIIIIIIII)",
+        cnt.PageFaultCount,    // unsigned long
+        (unsigned int)cnt.PeakWorkingSetSize,
+        (unsigned int)cnt.WorkingSetSize,
+        (unsigned int)cnt.QuotaPeakPagedPoolUsage,
+        (unsigned int)cnt.QuotaPagedPoolUsage,
+        (unsigned int)cnt.QuotaPeakNonPagedPoolUsage,
+        (unsigned int)cnt.QuotaNonPagedPoolUsage,
+        (unsigned int)cnt.PagefileUsage,
+        (unsigned int)cnt.PeakPagefileUsage,
+        (unsigned int)private);
+#endif
+}
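+
+/*
+ * Py_BuildValue has no unsigned, SIZE_T-sized format character, hence
+ * the compile-time split above: "K" (unsigned long long) on a 64-bit
+ * interpreter, "I" (unsigned int) otherwise, with an explicit cast on
+ * every SIZE_T field, e.g.:
+ *
+ *     Py_BuildValue("K", (unsigned long long)cnt.WorkingSetSize);
+ */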
+
+
+/*
+ * Alternative implementation of the function above; it bypasses ACCESS DENIED.
+ */
+static PyObject *
+psutil_proc_memory_info_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    SIZE_T private;
+    unsigned long pfault_count;
+
+#if defined(_WIN64)
+    unsigned long long m1, m2, m3, m4, m5, m6, m7, m8;
+#else
+    unsigned int m1, m2, m3, m4, m5, m6, m7, m8;
+#endif
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+
+#if (_WIN32_WINNT >= 0x0501)  // Windows XP with SP2
+    private = process->PrivatePageCount;
+#else
+    private = 0;
+#endif
+    pfault_count = process->PageFaultCount;
+
+    m1 = process->PeakWorkingSetSize;
+    m2 = process->WorkingSetSize;
+    m3 = process->QuotaPeakPagedPoolUsage;
+    m4 = process->QuotaPagedPoolUsage;
+    m5 = process->QuotaPeakNonPagedPoolUsage;
+    m6 = process->QuotaNonPagedPoolUsage;
+    m7 = process->PagefileUsage;
+    m8 = process->PeakPagefileUsage;
+
+    free(buffer);
+
+    // SYSTEM_PROCESS_INFORMATION values are defined as SIZE_T, which on
+    // 64-bit builds is an (unsigned long long) and on 32-bit builds an
+    // (unsigned int). "_WIN64" is defined when we're running a 64-bit
+    // Python interpreter, not merely when the *system* is 64-bit.
+#if defined(_WIN64)
+    return Py_BuildValue("(kKKKKKKKKK)",
+#else
+    return Py_BuildValue("(kIIIIIIIII)",
+#endif
+        pfault_count, m1, m2, m3, m4, m5, m6, m7, m8, private);
+}
+
+
+/*
+ * Return system memory usage as a Python tuple of six values, all in
+ * bytes: (total, avail, total pagefile, avail pagefile, total virtual,
+ * avail virtual).
+ */
+static PyObject *
+psutil_virtual_mem(PyObject *self, PyObject *args)
+{
+    MEMORYSTATUSEX memInfo;
+    memInfo.dwLength = sizeof(MEMORYSTATUSEX);
+
+    if (! GlobalMemoryStatusEx(&memInfo) ) {
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    return Py_BuildValue("(LLLLLL)",
+                         memInfo.ullTotalPhys,      // total
+                         memInfo.ullAvailPhys,      // avail
+                         memInfo.ullTotalPageFile,  // total page file
+                         memInfo.ullAvailPageFile,  // avail page file
+                         memInfo.ullTotalVirtual,   // total virtual
+                         memInfo.ullAvailVirtual);  // avail virtual
+}
+
+
+#define LO_T ((float)1e-7)
+#define HI_T (LO_T*4294967296.0)
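+
+/*
+ * LO_T is the length in seconds of one FILETIME tick (100 ns); HI_T is
+ * the weight of one unit of the high-order DWORD, i.e. 2^32 ticks:
+ * 4294967296 * 1e-7 = 429.4967296 seconds.
+ */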
+
+
+/*
+ * Retrieves system CPU timing information as a (user, system, idle)
+ * tuple. On a multiprocessor system, the values returned are the
+ * sum of the designated times across all processors.
+ */
+static PyObject *
+psutil_cpu_times(PyObject *self, PyObject *args)
+{
+    float idle, kernel, user, system;
+    FILETIME idle_time, kernel_time, user_time;
+
+    if (!GetSystemTimes(&idle_time, &kernel_time, &user_time)) {
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    idle = (float)((HI_T * idle_time.dwHighDateTime) + \
+                   (LO_T * idle_time.dwLowDateTime));
+    user = (float)((HI_T * user_time.dwHighDateTime) + \
+                   (LO_T * user_time.dwLowDateTime));
+    kernel = (float)((HI_T * kernel_time.dwHighDateTime) + \
+                     (LO_T * kernel_time.dwLowDateTime));
+
+    // Kernel time includes idle time.
+    // We return only busy kernel time subtracting idle time from
+    // kernel time.
+    system = (kernel - idle);
+    return Py_BuildValue("(fff)", user, system, idle);
+}
+
+
+/*
+ * Same as above but for all system CPUs.
+ */
+static PyObject *
+psutil_per_cpu_times(PyObject *self, PyObject *args)
+{
+    float idle, kernel, user;
+    typedef DWORD (_stdcall * NTQSI_PROC) (int, PVOID, ULONG, PULONG);
+    NTQSI_PROC NtQuerySystemInformation;
+    HINSTANCE hNtDll;
+    SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION *sppi = NULL;
+    SYSTEM_INFO si;
+    UINT i;
+    PyObject *arg = NULL;
+    PyObject *retlist = PyList_New(0);
+
+    if (retlist == NULL)
+        return NULL;
+
+    // dynamic linking is mandatory to use NtQuerySystemInformation
+    hNtDll = LoadLibrary(TEXT("ntdll.dll"));
+    if (hNtDll != NULL) {
+        // gets NtQuerySystemInformation address
+        NtQuerySystemInformation = (NTQSI_PROC)GetProcAddress(
+                                       hNtDll, "NtQuerySystemInformation");
+
+        if (NtQuerySystemInformation != NULL)
+        {
+            // retrieves the number of processors
+            GetSystemInfo(&si);
+
+            // allocates an array of SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION
+            // structures, one per processor
+            sppi = (SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION *) \
+                   malloc(si.dwNumberOfProcessors * \
+                          sizeof(SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION));
+            if (sppi != NULL)
+            {
+                // gets CPU time information
+                if (0 == NtQuerySystemInformation(
+                            SystemProcessorPerformanceInformation,
+                            sppi,
+                            si.dwNumberOfProcessors * sizeof
+                            (SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION),
+                            NULL)
+                   )
+                {
+                    // computes system global times summing each
+                    // processor value
+                    idle = user = kernel = 0;
+                    for (i = 0; i < si.dwNumberOfProcessors; i++) {
+                        arg = NULL;
+                        user = (float)((HI_T * sppi[i].UserTime.HighPart) +
+                                       (LO_T * sppi[i].UserTime.LowPart));
+                        idle = (float)((HI_T * sppi[i].IdleTime.HighPart) +
+                                       (LO_T * sppi[i].IdleTime.LowPart));
+                        kernel = (float)((HI_T * sppi[i].KernelTime.HighPart) +
+                                         (LO_T * sppi[i].KernelTime.LowPart));
+                        // kernel time includes idle time on windows
+                        // we return only busy kernel time subtracting
+                        // idle time from kernel time
+                        arg = Py_BuildValue("(ddd)",
+                                            user,
+                                            kernel - idle,
+                                            idle);
+                        if (!arg)
+                            goto error;
+                        if (PyList_Append(retlist, arg))
+                            goto error;
+                        Py_DECREF(arg);
+                    }
+                    free(sppi);
+                    FreeLibrary(hNtDll);
+                    return retlist;
+
+                }  // END NtQuerySystemInformation
+            }  // END malloc SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION
+        }  // END GetProcAddress
+    }  // END LoadLibrary
+    goto error;
+
+error:
+    Py_XDECREF(arg);
+    Py_DECREF(retlist);
+    if (sppi) {
+        free(sppi);
+    }
+    if (hNtDll) {
+        FreeLibrary(hNtDll);
+    }
+    PyErr_SetFromWindowsErr(0);
+    return NULL;
+}
+
+
+/*
+ * Return process current working directory as a Python string.
+ */
+
+static PyObject *
+psutil_proc_cwd(PyObject *self, PyObject *args)
+{
+    long pid;
+    HANDLE processHandle = NULL;
+    PVOID pebAddress;
+    PVOID rtlUserProcParamsAddress;
+    UNICODE_STRING currentDirectory;
+    WCHAR *currentDirectoryContent = NULL;
+    PyObject *returnPyObj = NULL;
+    PyObject *cwd_from_wchar = NULL;
+    PyObject *cwd = NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    processHandle = psutil_handle_from_pid(pid);
+    if (processHandle == NULL) {
+        return NULL;
+    }
+
+    pebAddress = psutil_get_peb_address(processHandle);
+
+    // get the address of ProcessParameters
+#ifdef _WIN64
+    if (!ReadProcessMemory(processHandle, (PCHAR)pebAddress + 32,
+                           &rtlUserProcParamsAddress, sizeof(PVOID), NULL))
+#else
+    if (!ReadProcessMemory(processHandle, (PCHAR)pebAddress + 0x10,
+                           &rtlUserProcParamsAddress, sizeof(PVOID), NULL))
+#endif
+    {
+        CloseHandle(processHandle);
+        if (GetLastError() == ERROR_PARTIAL_COPY) {
+            // this occurs quite often with system processes
+            return AccessDenied();
+        }
+        else {
+            return PyErr_SetFromWindowsErr(0);
+        }
+    }
+
+    // Read the currentDirectory UNICODE_STRING structure.
+    // 0x24 refers to "CurrentDirectoryPath" of RTL_USER_PROCESS_PARAMETERS
+    // structure, see:
+    // http://wj32.wordpress.com/2009/01/24/
+    //     howto-get-the-command-line-of-processes/
+#ifdef _WIN64
+    if (!ReadProcessMemory(processHandle, (PCHAR)rtlUserProcParamsAddress + 56,
+                           &currentDirectory, sizeof(currentDirectory), NULL))
+#else
+    if (!ReadProcessMemory(processHandle,
+                           (PCHAR)rtlUserProcParamsAddress + 0x24,
+                           &currentDirectory, sizeof(currentDirectory), NULL))
+#endif
+    {
+        CloseHandle(processHandle);
+        if (GetLastError() == ERROR_PARTIAL_COPY) {
+            // this occurs quite often with system processes
+            return AccessDenied();
+        }
+        else {
+            return PyErr_SetFromWindowsErr(0);
+        }
+    }
+
+    // allocate memory to hold cwd
+    currentDirectoryContent = (WCHAR *)malloc(currentDirectory.Length + 1);
+    if (currentDirectoryContent == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    // read cwd
+    if (!ReadProcessMemory(processHandle, currentDirectory.Buffer,
+                           currentDirectoryContent, currentDirectory.Length,
+                           NULL))
+    {
+        if (GetLastError() == ERROR_PARTIAL_COPY) {
+            // this occurs quite often with system processes
+            AccessDenied();
+        }
+        else {
+            PyErr_SetFromWindowsErr(0);
+        }
+        goto error;
+    }
+
+    // Null-terminate the string to prevent wcslen from returning an
+    // incorrect length: the index used below is in characters, while
+    // currentDirectory.Length is in bytes.
+    currentDirectoryContent[(currentDirectory.Length / sizeof(WCHAR))] = '\0';
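+    // For example, a 10-character cwd such as L"C:\\Windows" arrives
+    // with currentDirectory.Length == 20 (bytes), so the terminator
+    // lands at index 20 / sizeof(WCHAR) == 10.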
+
+    // convert wchar array to a Python unicode string, and then to UTF8
+    cwd_from_wchar = PyUnicode_FromWideChar(currentDirectoryContent,
+                                            wcslen(currentDirectoryContent));
+    if (cwd_from_wchar == NULL)
+        goto error;
+
+#if PY_MAJOR_VERSION >= 3
+    cwd = PyUnicode_FromObject(cwd_from_wchar);
+#else
+    cwd = PyUnicode_AsUTF8String(cwd_from_wchar);
+#endif
+    if (cwd == NULL)
+        goto error;
+
+    // decrement the reference count on our temp unicode str to avoid
+    // mem leak
+    returnPyObj = Py_BuildValue("N", cwd);
+    if (!returnPyObj)
+        goto error;
+
+    Py_DECREF(cwd_from_wchar);
+
+    CloseHandle(processHandle);
+    free(currentDirectoryContent);
+    return returnPyObj;
+
+error:
+    Py_XDECREF(cwd_from_wchar);
+    Py_XDECREF(cwd);
+    Py_XDECREF(returnPyObj);
+    if (currentDirectoryContent != NULL)
+        free(currentDirectoryContent);
+    if (processHandle != NULL)
+        CloseHandle(processHandle);
+    return NULL;
+}
+
+
+/*
+ * Suspend or resume a process.
+ */
+int
+psutil_proc_suspend_or_resume(DWORD pid, int suspend)
+{
+    // a huge thanks to http://www.codeproject.com/KB/threads/pausep.aspx
+    HANDLE hThreadSnap = NULL;
+    THREADENTRY32  te32 = {0};
+
+    if (pid == 0) {
+        AccessDenied();
+        return FALSE;
+    }
+
+    hThreadSnap = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
+    if (hThreadSnap == INVALID_HANDLE_VALUE) {
+        PyErr_SetFromWindowsErr(0);
+        return FALSE;
+    }
+
+    // Fill in the size of the structure before using it
+    te32.dwSize = sizeof(THREADENTRY32);
+
+    if (! Thread32First(hThreadSnap, &te32)) {
+        PyErr_SetFromWindowsErr(0);
+        CloseHandle(hThreadSnap);
+        return FALSE;
+    }
+
+    // Walk the thread snapshot to find all threads of the process.
+    // If a thread belongs to the process, suspend or resume it.
+    do
+    {
+        if (te32.th32OwnerProcessID == pid)
+        {
+            HANDLE hThread = OpenThread(THREAD_SUSPEND_RESUME, FALSE,
+                                        te32.th32ThreadID);
+            if (hThread == NULL) {
+                PyErr_SetFromWindowsErr(0);
+                CloseHandle(hThreadSnap);
+                return FALSE;
+            }
+            if (suspend == 1)
+            {
+                if (SuspendThread(hThread) == (DWORD)-1) {
+                    PyErr_SetFromWindowsErr(0);
+                    CloseHandle(hThread);
+                    CloseHandle(hThreadSnap);
+                    return FALSE;
+                }
+            }
+            else
+            {
+                if (ResumeThread(hThread) == (DWORD)-1) {
+                    PyErr_SetFromWindowsErr(0);
+                    CloseHandle(hThread);
+                    CloseHandle(hThreadSnap);
+                    return FALSE;
+                }
+            }
+            CloseHandle(hThread);
+        }
+    } while (Thread32Next(hThreadSnap, &te32));
+
+    CloseHandle(hThreadSnap);
+    return TRUE;
+}
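+
+/*
+ * Windows exposes no documented per-process suspend call, so the
+ * function above snapshots every thread on the system and suspends or
+ * resumes the ones owned by the target PID one at a time. The two
+ * wrappers below simply fix the second argument:
+ *
+ *     psutil_proc_suspend_or_resume(pid, 1);   // suspend
+ *     psutil_proc_suspend_or_resume(pid, 0);   // resume
+ */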
+
+
+static PyObject *
+psutil_proc_suspend(PyObject *self, PyObject *args)
+{
+    long pid;
+    int suspend = 1;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    if (! psutil_proc_suspend_or_resume(pid, suspend)) {
+        return NULL;
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+static PyObject *
+psutil_proc_resume(PyObject *self, PyObject *args)
+{
+    long pid;
+    int suspend = 0;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    if (! psutil_proc_suspend_or_resume(pid, suspend)) {
+        return NULL;
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+static PyObject *
+psutil_proc_num_threads(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    int num;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    num = (int)process->NumberOfThreads;
+    free(buffer);
+    return Py_BuildValue("i", num);
+}
+
+
+static PyObject *
+psutil_proc_threads(PyObject *self, PyObject *args)
+{
+    HANDLE hThread = NULL;
+    THREADENTRY32 te32 = {0};
+    long pid;
+    int pid_return;
+    int rc;
+    FILETIME ftDummy, ftKernel, ftUser;
+    PyObject *retList = PyList_New(0);
+    PyObject *pyTuple = NULL;
+    HANDLE hThreadSnap = NULL;
+
+    if (retList == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+    if (pid == 0) {
+        // raise AD instead of returning 0 as procexp is able to
+        // retrieve useful information somehow
+        AccessDenied();
+        goto error;
+    }
+
+    pid_return = psutil_pid_is_running(pid);
+    if (pid_return == 0) {
+        NoSuchProcess();
+        goto error;
+    }
+    if (pid_return == -1) {
+        goto error;
+    }
+
+    hThreadSnap = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
+    if (hThreadSnap == INVALID_HANDLE_VALUE) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    // Fill in the size of the structure before using it
+    te32.dwSize = sizeof(THREADENTRY32);
+
+    if (! Thread32First(hThreadSnap, &te32)) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    // Walk the thread snapshot to find all threads of the process.
+    // If the thread belongs to the process, increase the counter.
+    do
+    {
+        if (te32.th32OwnerProcessID == pid)
+        {
+            pyTuple = NULL;
+            hThread = NULL;
+            hThread = OpenThread(THREAD_QUERY_INFORMATION,
+                                 FALSE, te32.th32ThreadID);
+            if (hThread == NULL) {
+                // thread has disappeared on us
+                continue;
+            }
+
+            rc = GetThreadTimes(hThread, &ftDummy, &ftDummy, &ftKernel,
+                                &ftUser);
+            if (rc == 0) {
+                PyErr_SetFromWindowsErr(0);
+                goto error;
+            }
+
+            /*
+             * User and kernel times are represented as a FILETIME structure
+             * which contains a 64-bit value representing the number of
+             * 100-nanosecond intervals since January 1, 1601 (UTC):
+             * http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx
+             * To convert it into a float representing the seconds that the
+             * process has executed in user/kernel mode I borrowed the code
+             * below from Python's Modules/posixmodule.c
+             */
+            pyTuple = Py_BuildValue(
+                "kdd",
+                te32.th32ThreadID,
+                (double)(ftUser.dwHighDateTime * 429.4967296 + \
+                         ftUser.dwLowDateTime * 1e-7),
+                (double)(ftKernel.dwHighDateTime * 429.4967296 + \
+                         ftKernel.dwLowDateTime * 1e-7));
+            if (!pyTuple)
+                goto error;
+            if (PyList_Append(retList, pyTuple))
+                goto error;
+            Py_DECREF(pyTuple);
+
+            CloseHandle(hThread);
+        }
+    } while (Thread32Next(hThreadSnap, &te32));
+
+    CloseHandle(hThreadSnap);
+    return retList;
+
+error:
+    Py_XDECREF(pyTuple);
+    Py_DECREF(retList);
+    if (hThread != NULL)
+        CloseHandle(hThread);
+    if (hThreadSnap != NULL) {
+        CloseHandle(hThreadSnap);
+    }
+    return NULL;
+}
+
+
+static PyObject *
+psutil_proc_open_files(PyObject *self, PyObject *args)
+{
+    long       pid;
+    HANDLE     processHandle;
+    DWORD      access = PROCESS_DUP_HANDLE | PROCESS_QUERY_INFORMATION;
+    PyObject  *filesList;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    processHandle = psutil_handle_from_pid_waccess(pid, access);
+    if (processHandle == NULL) {
+        return NULL;
+    }
+
+    filesList = psutil_get_open_files(pid, processHandle);
+    CloseHandle(processHandle);
+    if (filesList == NULL) {
+        return PyErr_SetFromWindowsErr(0);
+    }
+    return filesList;
+}
+
+
+/*
+ Accept a filename's drive in native format like "\Device\HarddiskVolume1\"
+ and return the corresponding drive letter (e.g. "C:").
+ If no match is found return an empty string.
+*/
+static PyObject *
+psutil_win32_QueryDosDevice(PyObject *self, PyObject *args)
+{
+    LPCTSTR   lpDevicePath;
+    TCHAR d = TEXT('A');
+    TCHAR     szBuff[5];
+
+    if (!PyArg_ParseTuple(args, "s", &lpDevicePath)) {
+        return NULL;
+    }
+
+    while (d <= TEXT('Z'))
+    {
+        TCHAR szDeviceName[3] = {d, TEXT(':'), TEXT('\0')};
+        TCHAR szTarget[512] = {0};
+        if (QueryDosDevice(szDeviceName, szTarget, 511) != 0) {
+            if (_tcscmp(lpDevicePath, szTarget) == 0) {
+                _stprintf(szBuff, TEXT("%c:"), d);
+                return Py_BuildValue("s", szBuff);
+            }
+        }
+        d++;
+    }
+    return Py_BuildValue("s", "");
+}
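+
+/*
+ * For illustration: given an lpDevicePath of "\Device\HarddiskVolume1",
+ * the loop above compares it against QueryDosDevice("A:"),
+ * QueryDosDevice("B:"), and so on; assuming that volume is mounted as
+ * drive C, the function returns the Python string "C:", while an
+ * unmatched device path falls through to "".
+ */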
+
+
+/*
+ * Return process username as a "DOMAIN\\USERNAME" string.
+ */
+static PyObject *
+psutil_proc_username(PyObject *self, PyObject *args)
+{
+    long pid;
+    HANDLE processHandle;
+    HANDLE tokenHandle;
+    PTOKEN_USER user;
+    ULONG bufferSize;
+    PTSTR name;
+    ULONG nameSize;
+    PTSTR domainName;
+    ULONG domainNameSize;
+    SID_NAME_USE nameUse;
+    PTSTR fullName;
+    PyObject *returnObject;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    processHandle = psutil_handle_from_pid_waccess(
+        pid, PROCESS_QUERY_INFORMATION);
+    if (processHandle == NULL) {
+        return NULL;
+    }
+
+    if (!OpenProcessToken(processHandle, TOKEN_QUERY, &tokenHandle)) {
+        CloseHandle(processHandle);
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    CloseHandle(processHandle);
+
+    // Get the user SID.
+
+    bufferSize = 0x100;
+    user = malloc(bufferSize);
+    if (user == NULL) {
+        return PyErr_NoMemory();
+    }
+
+    if (!GetTokenInformation(tokenHandle, TokenUser, user, bufferSize,
+                             &bufferSize))
+    {
+        free(user);
+        user = malloc(bufferSize);
+        if (user == NULL) {
+            CloseHandle(tokenHandle);
+            return PyErr_NoMemory();
+        }
+        if (!GetTokenInformation(tokenHandle, TokenUser, user, bufferSize,
+                                 &bufferSize))
+        {
+            free(user);
+            CloseHandle(tokenHandle);
+            return PyErr_SetFromWindowsErr(0);
+        }
+    }
+
+    CloseHandle(tokenHandle);
+
+    // resolve the SID to a name
+    nameSize = 0x100;
+    domainNameSize = 0x100;
+
+    name = malloc(nameSize * sizeof(TCHAR));
+    if (name == NULL) {
+        free(user);
+        return PyErr_NoMemory();
+    }
+    domainName = malloc(domainNameSize * sizeof(TCHAR));
+    if (domainName == NULL) {
+        free(name);
+        free(user);
+        return PyErr_NoMemory();
+    }
+
+    if (!LookupAccountSid(NULL, user->User.Sid, name, &nameSize, domainName,
+                          &domainNameSize, &nameUse))
+    {
+        free(name);
+        free(domainName);
+        name = malloc(nameSize * sizeof(TCHAR));
+        if (name == NULL) {
+            free(user);
+            return PyErr_NoMemory();
+        }
+        domainName = malloc(domainNameSize * sizeof(TCHAR));
+        if (domainName == NULL) {
+            free(name);
+            free(user);
+            return PyErr_NoMemory();
+        }
+        if (!LookupAccountSid(NULL, user->User.Sid, name, &nameSize,
+                              domainName, &domainNameSize, &nameUse))
+        {
+            free(name);
+            free(domainName);
+            free(user);
+
+            return PyErr_SetFromWindowsErr(0);
+        }
+    }
+
+    nameSize = _tcslen(name);
+    domainNameSize = _tcslen(domainName);
+
+    // build the full username string
+    fullName = malloc((domainNameSize + 1 + nameSize + 1) * sizeof(TCHAR));
+    if (fullName == NULL) {
+        free(name);
+        free(domainName);
+        free(user);
+        return PyErr_NoMemory();
+    }
+    memcpy(fullName, domainName, domainNameSize);
+    fullName[domainNameSize] = '\\';
+    memcpy(&fullName[domainNameSize + 1], name, nameSize);
+    fullName[domainNameSize + 1 + nameSize] = '\0';
+
+    returnObject = Py_BuildValue("s", fullName);
+
+    free(fullName);
+    free(name);
+    free(domainName);
+    free(user);
+
+    return returnObject;
+}
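+
+/*
+ * GetTokenInformation and LookupAccountSid above both follow the
+ * call-twice idiom: the first call fails with a too-small buffer but
+ * reports the required size, and the second call with a right-sized
+ * buffer succeeds. A minimal sketch of the pattern:
+ *
+ *     len = 0x100;
+ *     buf = malloc(len);
+ *     if (!GetTokenInformation(tok, TokenUser, buf, len, &len)) {
+ *         free(buf);
+ *         buf = malloc(len);       // len now holds the needed size
+ *         GetTokenInformation(tok, TokenUser, buf, len, &len);
+ *     }
+ */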
+
+
+// --- network connections mingw32 support
+
+#ifndef _IPRTRMIB_H
+typedef struct _MIB_TCP6ROW_OWNER_PID {
+    UCHAR ucLocalAddr[16];
+    DWORD dwLocalScopeId;
+    DWORD dwLocalPort;
+    UCHAR ucRemoteAddr[16];
+    DWORD dwRemoteScopeId;
+    DWORD dwRemotePort;
+    DWORD dwState;
+    DWORD dwOwningPid;
+} MIB_TCP6ROW_OWNER_PID, *PMIB_TCP6ROW_OWNER_PID;
+
+typedef struct _MIB_TCP6TABLE_OWNER_PID {
+    DWORD dwNumEntries;
+    MIB_TCP6ROW_OWNER_PID table[ANY_SIZE];
+} MIB_TCP6TABLE_OWNER_PID, *PMIB_TCP6TABLE_OWNER_PID;
+#endif
+
+#ifndef __IPHLPAPI_H__
+typedef struct in6_addr {
+    union {
+        UCHAR Byte[16];
+        USHORT Word[8];
+    } u;
+} IN6_ADDR, *PIN6_ADDR, FAR *LPIN6_ADDR;
+
+typedef enum _UDP_TABLE_CLASS {
+    UDP_TABLE_BASIC,
+    UDP_TABLE_OWNER_PID,
+    UDP_TABLE_OWNER_MODULE
+} UDP_TABLE_CLASS, *PUDP_TABLE_CLASS;
+
+typedef struct _MIB_UDPROW_OWNER_PID {
+    DWORD dwLocalAddr;
+    DWORD dwLocalPort;
+    DWORD dwOwningPid;
+} MIB_UDPROW_OWNER_PID, *PMIB_UDPROW_OWNER_PID;
+
+typedef struct _MIB_UDPTABLE_OWNER_PID {
+    DWORD dwNumEntries;
+    MIB_UDPROW_OWNER_PID table[ANY_SIZE];
+} MIB_UDPTABLE_OWNER_PID, *PMIB_UDPTABLE_OWNER_PID;
+#endif
+
+typedef struct _MIB_UDP6ROW_OWNER_PID {
+    UCHAR ucLocalAddr[16];
+    DWORD dwLocalScopeId;
+    DWORD dwLocalPort;
+    DWORD dwOwningPid;
+} MIB_UDP6ROW_OWNER_PID, *PMIB_UDP6ROW_OWNER_PID;
+
+typedef struct _MIB_UDP6TABLE_OWNER_PID {
+    DWORD dwNumEntries;
+    MIB_UDP6ROW_OWNER_PID table[ANY_SIZE];
+} MIB_UDP6TABLE_OWNER_PID, *PMIB_UDP6TABLE_OWNER_PID;
+
+
+#define BYTESWAP_USHORT(x) ((((USHORT)(x) << 8) | ((USHORT)(x) >> 8)) & 0xffff)
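+
+/*
+ * Ports in the MIB tables arrive in network byte order in the low word
+ * of a DWORD, so they are byte-swapped before being handed to Python;
+ * e.g. TCP port 80 (0x0050) is stored as 0x5000, and
+ * BYTESWAP_USHORT(0x5000) == 0x0050.
+ */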
+
+#ifndef AF_INET6
+#define AF_INET6 23
+#endif
+
+#define _psutil_conn_decref_objs() \
+    Py_DECREF(_AF_INET); \
+    Py_DECREF(_AF_INET6);\
+    Py_DECREF(_SOCK_STREAM);\
+    Py_DECREF(_SOCK_DGRAM);
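+
+/*
+ * The four PyLong objects created at the top of psutil_net_connections()
+ * below each own a reference; this macro drops all of them on every
+ * exit path, successful or not.
+ */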
+
+// a signaler for connections without an actual status
+static int PSUTIL_CONN_NONE = 128;
+
+
+/*
+ * Return a list of network connections opened by a process
+ */
+static PyObject *
+psutil_net_connections(PyObject *self, PyObject *args)
+{
+    static long null_address[4] = { 0, 0, 0, 0 };
+
+    unsigned long pid;
+    PyObject *connectionsList;
+    PyObject *connectionTuple = NULL;
+    PyObject *af_filter = NULL;
+    PyObject *type_filter = NULL;
+
+    PyObject *_AF_INET = PyLong_FromLong((long)AF_INET);
+    PyObject *_AF_INET6 = PyLong_FromLong((long)AF_INET6);
+    PyObject *_SOCK_STREAM = PyLong_FromLong((long)SOCK_STREAM);
+    PyObject *_SOCK_DGRAM = PyLong_FromLong((long)SOCK_DGRAM);
+
+    typedef PSTR (NTAPI * _RtlIpv4AddressToStringA)(struct in_addr *, PSTR);
+    _RtlIpv4AddressToStringA rtlIpv4AddressToStringA;
+    typedef PSTR (NTAPI * _RtlIpv6AddressToStringA)(struct in6_addr *, PSTR);
+    _RtlIpv6AddressToStringA rtlIpv6AddressToStringA;
+    typedef DWORD (WINAPI * _GetExtendedTcpTable)(PVOID, PDWORD, BOOL, ULONG,
+                                                  TCP_TABLE_CLASS, ULONG);
+    _GetExtendedTcpTable getExtendedTcpTable;
+    typedef DWORD (WINAPI * _GetExtendedUdpTable)(PVOID, PDWORD, BOOL, ULONG,
+                                                  UDP_TABLE_CLASS, ULONG);
+    _GetExtendedUdpTable getExtendedUdpTable;
+    PVOID table = NULL;
+    DWORD tableSize;
+    PMIB_TCPTABLE_OWNER_PID tcp4Table;
+    PMIB_UDPTABLE_OWNER_PID udp4Table;
+    PMIB_TCP6TABLE_OWNER_PID tcp6Table;
+    PMIB_UDP6TABLE_OWNER_PID udp6Table;
+    ULONG i;
+    CHAR addressBufferLocal[65];
+    PyObject *addressTupleLocal = NULL;
+    CHAR addressBufferRemote[65];
+    PyObject *addressTupleRemote = NULL;
+
+    if (! PyArg_ParseTuple(args, "lOO", &pid, &af_filter, &type_filter)) {
+        _psutil_conn_decref_objs();
+        return NULL;
+    }
+
+    if (!PySequence_Check(af_filter) || !PySequence_Check(type_filter)) {
+        _psutil_conn_decref_objs();
+        PyErr_SetString(PyExc_TypeError, "arg 2 or 3 is not a sequence");
+        return NULL;
+    }
+
+    if (pid != -1) {
+        if (psutil_pid_is_running(pid) == 0) {
+            _psutil_conn_decref_objs();
+            return NoSuchProcess();
+        }
+    }
+
+    // Import some functions.
+    {
+        HMODULE ntdll;
+        HMODULE iphlpapi;
+
+        ntdll = LoadLibrary(TEXT("ntdll.dll"));
+        rtlIpv4AddressToStringA = (_RtlIpv4AddressToStringA)GetProcAddress(
+                                   ntdll, "RtlIpv4AddressToStringA");
+        rtlIpv6AddressToStringA = (_RtlIpv6AddressToStringA)GetProcAddress(
+                                   ntdll, "RtlIpv6AddressToStringA");
+        /* TODO: Check these two function pointers */
+
+        iphlpapi = LoadLibrary(TEXT("iphlpapi.dll"));
+        getExtendedTcpTable = (_GetExtendedTcpTable)GetProcAddress(iphlpapi,
+                              "GetExtendedTcpTable");
+        getExtendedUdpTable = (_GetExtendedUdpTable)GetProcAddress(iphlpapi,
+                              "GetExtendedUdpTable");
+        FreeLibrary(ntdll);
+        FreeLibrary(iphlpapi);
+    }
+
+    if ((getExtendedTcpTable == NULL) || (getExtendedUdpTable == NULL)) {
+        PyErr_SetString(PyExc_NotImplementedError,
+                        "feature not supported on this Windows version");
+        _psutil_conn_decref_objs();
+        return NULL;
+    }
+
+    connectionsList = PyList_New(0);
+    if (connectionsList == NULL) {
+        _psutil_conn_decref_objs();
+        return NULL;
+    }
+
+    // TCP IPv4
+
+    if ((PySequence_Contains(af_filter, _AF_INET) == 1) &&
+            (PySequence_Contains(type_filter, _SOCK_STREAM) == 1))
+    {
+        table = NULL;
+        connectionTuple = NULL;
+        addressTupleLocal = NULL;
+        addressTupleRemote = NULL;
+        tableSize = 0;
+        getExtendedTcpTable(NULL, &tableSize, FALSE, AF_INET,
+                            TCP_TABLE_OWNER_PID_ALL, 0);
+
+        table = malloc(tableSize);
+        if (table == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        if (getExtendedTcpTable(table, &tableSize, FALSE, AF_INET,
+                                TCP_TABLE_OWNER_PID_ALL, 0) == 0)
+        {
+            tcp4Table = table;
+
+            for (i = 0; i < tcp4Table->dwNumEntries; i++)
+            {
+                if (pid != -1) {
+                    if (tcp4Table->table[i].dwOwningPid != pid) {
+                        continue;
+                    }
+                }
+
+                if (tcp4Table->table[i].dwLocalAddr != 0 ||
+                        tcp4Table->table[i].dwLocalPort != 0)
+                {
+                    struct in_addr addr;
+
+                    addr.S_un.S_addr = tcp4Table->table[i].dwLocalAddr;
+                    rtlIpv4AddressToStringA(&addr, addressBufferLocal);
+                    addressTupleLocal = Py_BuildValue(
+                        "(si)",
+                        addressBufferLocal,
+                        BYTESWAP_USHORT(tcp4Table->table[i].dwLocalPort));
+                }
+                else {
+                    addressTupleLocal = PyTuple_New(0);
+                }
+
+                if (addressTupleLocal == NULL)
+                    goto error;
+
+                // On Windows <= XP, remote addr is filled even if socket
+                // is in LISTEN mode in which case we just ignore it.
+                if ((tcp4Table->table[i].dwRemoteAddr != 0 ||
+                        tcp4Table->table[i].dwRemotePort != 0) &&
+                        (tcp4Table->table[i].dwState != MIB_TCP_STATE_LISTEN))
+                {
+                    struct in_addr addr;
+
+                    addr.S_un.S_addr = tcp4Table->table[i].dwRemoteAddr;
+                    rtlIpv4AddressToStringA(&addr, addressBufferRemote);
+                    addressTupleRemote = Py_BuildValue(
+                        "(si)",
+                        addressBufferRemote,
+                        BYTESWAP_USHORT(tcp4Table->table[i].dwRemotePort));
+                }
+                else
+                {
+                    addressTupleRemote = PyTuple_New(0);
+                }
+
+                if (addressTupleRemote == NULL)
+                    goto error;
+
+                connectionTuple = Py_BuildValue(
+                    "(iiiNNiI)",
+                    -1,
+                    AF_INET,
+                    SOCK_STREAM,
+                    addressTupleLocal,
+                    addressTupleRemote,
+                    tcp4Table->table[i].dwState,
+                    tcp4Table->table[i].dwOwningPid);
+                if (!connectionTuple)
+                    goto error;
+                if (PyList_Append(connectionsList, connectionTuple))
+                    goto error;
+                Py_DECREF(connectionTuple);
+            }
+        }
+
+        free(table);
+    }
+
+    // TCP IPv6
+
+    if ((PySequence_Contains(af_filter, _AF_INET6) == 1) &&
+            (PySequence_Contains(type_filter, _SOCK_STREAM) == 1))
+    {
+        table = NULL;
+        connectionTuple = NULL;
+        addressTupleLocal = NULL;
+        addressTupleRemote = NULL;
+        tableSize = 0;
+        getExtendedTcpTable(NULL, &tableSize, FALSE, AF_INET6,
+                            TCP_TABLE_OWNER_PID_ALL, 0);
+
+        table = malloc(tableSize);
+        if (table == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        if (getExtendedTcpTable(table, &tableSize, FALSE, AF_INET6,
+                                TCP_TABLE_OWNER_PID_ALL, 0) == 0)
+        {
+            tcp6Table = table;
+
+            for (i = 0; i < tcp6Table->dwNumEntries; i++)
+            {
+                if (pid != -1) {
+                    if (tcp6Table->table[i].dwOwningPid != pid) {
+                        continue;
+                    }
+                }
+
+                if (memcmp(tcp6Table->table[i].ucLocalAddr, null_address, 16)
+                        != 0 || tcp6Table->table[i].dwLocalPort != 0)
+                {
+                    struct in6_addr addr;
+
+                    memcpy(&addr, tcp6Table->table[i].ucLocalAddr, 16);
+                    rtlIpv6AddressToStringA(&addr, addressBufferLocal);
+                    addressTupleLocal = Py_BuildValue(
+                        "(si)",
+                        addressBufferLocal,
+                        BYTESWAP_USHORT(tcp6Table->table[i].dwLocalPort));
+                }
+                else
+                {
+                    addressTupleLocal = PyTuple_New(0);
+                }
+
+                if (addressTupleLocal == NULL)
+                    goto error;
+
+                // On Windows <= XP, remote addr is filled even if socket
+                // is in LISTEN mode in which case we just ignore it.
+                if ((memcmp(tcp6Table->table[i].ucRemoteAddr, null_address, 16)
+                        != 0 ||
+                        tcp6Table->table[i].dwRemotePort != 0) &&
+                        (tcp6Table->table[i].dwState != MIB_TCP_STATE_LISTEN))
+                {
+                    struct in6_addr addr;
+
+                    memcpy(&addr, tcp6Table->table[i].ucRemoteAddr, 16);
+                    rtlIpv6AddressToStringA(&addr, addressBufferRemote);
+                    addressTupleRemote = Py_BuildValue(
+                        "(si)",
+                        addressBufferRemote,
+                        BYTESWAP_USHORT(tcp6Table->table[i].dwRemotePort));
+                }
+                else
+                {
+                    addressTupleRemote = PyTuple_New(0);
+                }
+
+                if (addressTupleRemote == NULL)
+                    goto error;
+
+                connectionTuple = Py_BuildValue(
+                    "(iiiNNiI)",
+                    -1,
+                    AF_INET6,
+                    SOCK_STREAM,
+                    addressTupleLocal,
+                    addressTupleRemote,
+                    tcp6Table->table[i].dwState,
+                    tcp6Table->table[i].dwOwningPid);
+                if (!connectionTuple)
+                    goto error;
+                if (PyList_Append(connectionsList, connectionTuple))
+                    goto error;
+                Py_DECREF(connectionTuple);
+            }
+        }
+
+        free(table);
+    }
+
+    // UDP IPv4
+
+    if ((PySequence_Contains(af_filter, _AF_INET) == 1) &&
+            (PySequence_Contains(type_filter, _SOCK_DGRAM) == 1))
+    {
+        table = NULL;
+        connectionTuple = NULL;
+        addressTupleLocal = NULL;
+        addressTupleRemote = NULL;
+        tableSize = 0;
+        getExtendedUdpTable(NULL, &tableSize, FALSE, AF_INET,
+                            UDP_TABLE_OWNER_PID, 0);
+
+        table = malloc(tableSize);
+        if (table == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        if (getExtendedUdpTable(table, &tableSize, FALSE, AF_INET,
+                                UDP_TABLE_OWNER_PID, 0) == 0)
+        {
+            udp4Table = table;
+
+            for (i = 0; i < udp4Table->dwNumEntries; i++)
+            {
+                if (pid != -1) {
+                    if (udp4Table->table[i].dwOwningPid != pid) {
+                        continue;
+                    }
+                }
+
+                if (udp4Table->table[i].dwLocalAddr != 0 ||
+                    udp4Table->table[i].dwLocalPort != 0)
+                {
+                    struct in_addr addr;
+
+                    addr.S_un.S_addr = udp4Table->table[i].dwLocalAddr;
+                    rtlIpv4AddressToStringA(&addr, addressBufferLocal);
+                    addressTupleLocal = Py_BuildValue(
+                        "(si)",
+                        addressBufferLocal,
+                        BYTESWAP_USHORT(udp4Table->table[i].dwLocalPort));
+                }
+                else {
+                    addressTupleLocal = PyTuple_New(0);
+                }
+
+                if (addressTupleLocal == NULL)
+                    goto error;
+
+                connectionTuple = Py_BuildValue(
+                    "(iiiNNiI)",
+                    -1,
+                    AF_INET,
+                    SOCK_DGRAM,
+                    addressTupleLocal,
+                    PyTuple_New(0),
+                    PSUTIL_CONN_NONE,
+                    udp4Table->table[i].dwOwningPid);
+                if (!connectionTuple)
+                    goto error;
+                if (PyList_Append(connectionsList, connectionTuple))
+                    goto error;
+                Py_DECREF(connectionTuple);
+            }
+        }
+
+        free(table);
+    }
+
+    // UDP IPv6
+
+    if ((PySequence_Contains(af_filter, _AF_INET6) == 1) &&
+            (PySequence_Contains(type_filter, _SOCK_DGRAM) == 1))
+    {
+        table = NULL;
+        connectionTuple = NULL;
+        addressTupleLocal = NULL;
+        addressTupleRemote = NULL;
+        tableSize = 0;
+        getExtendedUdpTable(NULL, &tableSize, FALSE,
+                            AF_INET6, UDP_TABLE_OWNER_PID, 0);
+
+        table = malloc(tableSize);
+        if (table == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        if (getExtendedUdpTable(table, &tableSize, FALSE, AF_INET6,
+                                UDP_TABLE_OWNER_PID, 0) == 0)
+        {
+            udp6Table = table;
+
+            for (i = 0; i < udp6Table->dwNumEntries; i++)
+            {
+                if (pid != -1) {
+                    if (udp6Table->table[i].dwOwningPid != pid) {
+                        continue;
+                    }
+                }
+
+                if (memcmp(udp6Table->table[i].ucLocalAddr, null_address, 16)
+                        != 0 || udp6Table->table[i].dwLocalPort != 0)
+                {
+                    struct in6_addr addr;
+
+                    memcpy(&addr, udp6Table->table[i].ucLocalAddr, 16);
+                    rtlIpv6AddressToStringA(&addr, addressBufferLocal);
+                    addressTupleLocal = Py_BuildValue(
+                        "(si)",
+                        addressBufferLocal,
+                        BYTESWAP_USHORT(udp6Table->table[i].dwLocalPort));
+                }
+                else {
+                    addressTupleLocal = PyTuple_New(0);
+                }
+
+                if (addressTupleLocal == NULL)
+                    goto error;
+
+                connectionTuple = Py_BuildValue(
+                    "(iiiNNiI)",
+                    -1,
+                    AF_INET6,
+                    SOCK_DGRAM,
+                    addressTupleLocal,
+                    PyTuple_New(0),
+                    PSUTIL_CONN_NONE,
+                    udp6Table->table[i].dwOwningPid);
+                if (!connectionTuple)
+                    goto error;
+                if (PyList_Append(connectionsList, connectionTuple))
+                    goto error;
+                Py_DECREF(connectionTuple);
+            }
+        }
+
+        free(table);
+    }
+
+    _psutil_conn_decref_objs();
+    return connectionsList;
+
+error:
+    _psutil_conn_decref_objs();
+    Py_XDECREF(connectionTuple);
+    Py_XDECREF(addressTupleLocal);
+    Py_XDECREF(addressTupleRemote);
+    Py_DECREF(connectionsList);
+    if (table != NULL)
+        free(table);
+    return NULL;
+}
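+
+/*
+ * Judging from the "(iiiNNiI)" format used above, each connection
+ * reaches Python as a 7-tuple: (fd, family, type, laddr, raddr, status,
+ * pid), with fd always -1 since Windows does not expose socket file
+ * descriptors. ("fd" and the other field names here are descriptive
+ * labels, not identifiers taken from this file.)
+ */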
+
+
+/*
+ * Get process priority as a Python integer.
+ */
+static PyObject *
+psutil_proc_priority_get(PyObject *self, PyObject *args)
+{
+    long pid;
+    DWORD priority;
+    HANDLE hProcess;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    priority = GetPriorityClass(hProcess);
+    CloseHandle(hProcess);
+    if (priority == 0) {
+        PyErr_SetFromWindowsErr(0);
+        return NULL;
+    }
+    return Py_BuildValue("i", priority);
+}
+
+
+/*
+ * Set process priority.
+ */
+static PyObject *
+psutil_proc_priority_set(PyObject *self, PyObject *args)
+{
+    long pid;
+    int priority;
+    int retval;
+    HANDLE hProcess;
+    DWORD dwDesiredAccess = \
+        PROCESS_QUERY_INFORMATION | PROCESS_SET_INFORMATION;
+    if (! PyArg_ParseTuple(args, "li", &pid, &priority)) {
+        return NULL;
+    }
+
+    hProcess = psutil_handle_from_pid_waccess(pid, dwDesiredAccess);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    retval = SetPriorityClass(hProcess, priority);
+    CloseHandle(hProcess);
+    if (retval == 0) {
+        PyErr_SetFromWindowsErr(0);
+        return NULL;
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+#if (_WIN32_WINNT >= 0x0600)  // Windows Vista
+/*
+ * Get process IO priority as a Python integer.
+ */
+static PyObject *
+psutil_proc_io_priority_get(PyObject *self, PyObject *args)
+{
+    long pid;
+    HANDLE hProcess;
+    ULONG IoPriority;
+
+    _NtQueryInformationProcess NtQueryInformationProcess =
+        (_NtQueryInformationProcess)GetProcAddress(
+            GetModuleHandleA("ntdll.dll"), "NtQueryInformationProcess");
+
+    if (NtQueryInformationProcess == NULL) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "couldn't get NtQueryInformationProcess");
+        return NULL;
+    }
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    NtQueryInformationProcess(
+        hProcess,
+        ProcessIoPriority,
+        &IoPriority,
+        sizeof(ULONG),
+        NULL
+    );
+    CloseHandle(hProcess);
+    return Py_BuildValue("i", IoPriority);
+}
+
+
+/*
+ * Set process IO priority.
+ */
+static PyObject *
+psutil_proc_io_priority_set(PyObject *self, PyObject *args)
+{
+    long pid;
+    int prio;
+    HANDLE hProcess;
+
+    _NtSetInformationProcess NtSetInformationProcess =
+        (_NtSetInformationProcess)GetProcAddress(
+            GetModuleHandleA("ntdll.dll"), "NtSetInformationProcess");
+
+    if (NtSetInformationProcess == NULL) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "couldn't get NtSetInformationProcess");
+        return NULL;
+    }
+
+    if (! PyArg_ParseTuple(args, "li", &pid, &prio)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid_waccess(pid, PROCESS_ALL_ACCESS);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    NtSetInformationProcess(
+        hProcess,
+        ProcessIoPriority,
+        (PVOID)&prio,
+        sizeof(ULONG)   // the information length is a ULONG, not a pointer
+    );
+
+    CloseHandle(hProcess);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+#endif
+
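
The IO priority round-trips the same way; a sketch under the same module-name
assumption. Windows documents the hint values 0 (very low) through 3 (high),
with 2 as the default:

    import _psutil_windows as m  # assumed module name

    IOPRIO_VERY_LOW, IOPRIO_LOW, IOPRIO_NORMAL, IOPRIO_HIGH = 0, 1, 2, 3

    pid = 4321                              # hypothetical PID
    m.proc_io_priority_set(pid, IOPRIO_LOW)
    assert m.proc_io_priority_get(pid) == IOPRIO_LOW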
+
+/*
+ * Return a Python tuple referencing process I/O counters.
+ */
+static PyObject *
+psutil_proc_io_counters(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess;
+    IO_COUNTERS IoCounters;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (NULL == hProcess) {
+        return NULL;
+    }
+    if (! GetProcessIoCounters(hProcess, &IoCounters)) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+    CloseHandle(hProcess);
+    return Py_BuildValue("(KKKK)",
+                         IoCounters.ReadOperationCount,
+                         IoCounters.WriteOperationCount,
+                         IoCounters.ReadTransferCount,
+                         IoCounters.WriteTransferCount);
+}
+
+
+/*
+ * Alternative implementation of the function above; this one bypasses
+ * ACCESS DENIED errors.
+ */
+static PyObject *
+psutil_proc_io_counters_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    LONGLONG rcount, wcount, rbytes, wbytes;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    rcount = process->ReadOperationCount.QuadPart;
+    wcount = process->WriteOperationCount.QuadPart;
+    rbytes = process->ReadTransferCount.QuadPart;
+    wbytes = process->WriteTransferCount.QuadPart;
+    free(buffer);
+    return Py_BuildValue("KKKK", rcount, wcount, rbytes, wbytes);
+}
+
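
The *_2 variant exists so callers can retry when opening a process handle is
denied. A sketch of that fallback pattern (module name and exception type are
assumptions; errors surface via PyErr_SetFromWindowsErr, i.e. as OSError
subclasses):

    import _psutil_windows as m  # assumed module name

    def io_counters(pid):
        try:
            # primary path: needs an open process handle
            return m.proc_io_counters(pid)
        except OSError:
            # e.g. ACCESS DENIED: fall back to the system snapshot path
            return m.proc_io_counters_2(pid)

    rcount, wcount, rbytes, wbytes = io_counters(4321)  # hypothetical PID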
+
+/*
+ * Return process CPU affinity as a bitmask
+ */
+static PyObject *
+psutil_proc_cpu_affinity_get(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess;
+    PDWORD_PTR proc_mask;
+    PDWORD_PTR system_mask;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+    if (GetProcessAffinityMask(hProcess, &proc_mask, &system_mask) == 0) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    CloseHandle(hProcess);
+#ifdef _WIN64
+    return Py_BuildValue("K", (unsigned long long)proc_mask);
+#else
+    return Py_BuildValue("k", (unsigned long)proc_mask);
+#endif
+}
+
+
+/*
+ * Set process CPU affinity
+ */
+static PyObject *
+psutil_proc_cpu_affinity_set(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess;
+    DWORD dwDesiredAccess = \
+        PROCESS_QUERY_INFORMATION | PROCESS_SET_INFORMATION;
+    DWORD_PTR mask;
+
+#ifdef _WIN64
+    if (! PyArg_ParseTuple(args, "lK", &pid, &mask))
+#else
+    if (! PyArg_ParseTuple(args, "lk", &pid, &mask))
+#endif
+    {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid_waccess(pid, dwDesiredAccess);
+    if (hProcess == NULL) {
+        return NULL;
+    }
+
+    if (SetProcessAffinityMask(hProcess, mask) == 0) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+
+    CloseHandle(hProcess);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
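
Since the affinity is exchanged as a plain bitmask (bit N set means CPU N is
allowed), converting between masks and CPU lists is simple bit twiddling:

    def mask_to_cpus(mask):
        # collect the indexes of the set bits
        return [i for i in range(64) if mask & (1 << i)]

    def cpus_to_mask(cpus):
        mask = 0
        for i in cpus:
            mask |= 1 << i
        return mask

    assert mask_to_cpus(0b0101) == [0, 2]
    assert cpus_to_mask([0, 2]) == 0b0101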
+
+/*
+ * Return True if all of the process threads are in a waiting state
+ * with wait reason Suspended, i.e. the process itself is suspended.
+ */
+static PyObject *
+psutil_proc_is_suspended(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    ULONG i;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    for (i = 0; i < process->NumberOfThreads; i++) {
+        if (process->Threads[i].ThreadState != Waiting ||
+                process->Threads[i].WaitReason != Suspended)
+        {
+            free(buffer);
+            Py_RETURN_FALSE;
+        }
+    }
+    free(buffer);
+    Py_RETURN_TRUE;
+}
+
+
+/*
+ * Return path's disk total and free as a Python tuple.
+ */
+static PyObject *
+psutil_disk_usage(PyObject *self, PyObject *args)
+{
+    BOOL retval;
+    ULARGE_INTEGER _, total, free;
+    char *path;
+
+    // note: "u" stores a wide (Py_UNICODE *) pointer into path; it is
+    // cast back to LPCWSTR for the wide-char API call below
+    if (PyArg_ParseTuple(args, "u", &path)) {
+        Py_BEGIN_ALLOW_THREADS
+        retval = GetDiskFreeSpaceExW((LPCWSTR)path, &_, &total, &free);
+        Py_END_ALLOW_THREADS
+        goto return_;
+    }
+
+    // on Python 2 we also want to accept plain (non-Unicode)
+    // strings in addition to Unicode ones
+#if PY_MAJOR_VERSION <= 2
+    PyErr_Clear();  // drop the argument parsing error
+    if (PyArg_ParseTuple(args, "s", &path)) {
+        Py_BEGIN_ALLOW_THREADS
+        retval = GetDiskFreeSpaceEx(path, &_, &total, &free);
+        Py_END_ALLOW_THREADS
+        goto return_;
+    }
+#endif
+
+    return NULL;
+
+return_:
+    if (retval == 0)
+        return PyErr_SetFromWindowsErr(0);
+    else
+        return Py_BuildValue("(LL)", total.QuadPart, free.QuadPart);
+}
+
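
Only (total, free) cross the C boundary; derived figures are left to the
caller, e.g.:

    # illustrative byte counts for a 500 GB volume
    total, free = 500107862016, 120034123776
    used = total - free
    percent = round(used * 100.0 / total, 1)  # 76.0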
+
+/*
+ * Return a Python dict, keyed by NIC name, of tuples with per-interface
+ * network I/O information
+ */
+static PyObject *
+psutil_net_io_counters(PyObject *self, PyObject *args)
+{
+    int attempts = 0;
+    int i;
+    ULONG outBufLen = 15000;   // GetAdaptersAddresses() wants a PULONG
+    char ifname[MAX_PATH];
+    DWORD dwRetVal = 0;
+    MIB_IFROW *pIfRow = NULL;
+    ULONG flags = 0;
+    ULONG family = AF_UNSPEC;
+    PIP_ADAPTER_ADDRESSES pAddresses = NULL;
+    PIP_ADAPTER_ADDRESSES pCurrAddresses = NULL;
+
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_nic_info = NULL;
+    PyObject *py_nic_name = NULL;
+
+    if (py_retdict == NULL) {
+        return NULL;
+    }
+    do {
+        pAddresses = (IP_ADAPTER_ADDRESSES *) malloc(outBufLen);
+        if (pAddresses == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        dwRetVal = GetAdaptersAddresses(family, flags, NULL, pAddresses,
+                                        &outBufLen);
+        if (dwRetVal == ERROR_BUFFER_OVERFLOW) {
+            free(pAddresses);
+            pAddresses = NULL;
+        }
+        else {
+            break;
+        }
+
+        attempts++;
+    } while ((dwRetVal == ERROR_BUFFER_OVERFLOW) && (attempts < 3));
+
+    if (dwRetVal != NO_ERROR) {
+        PyErr_SetString(PyExc_RuntimeError, "GetAdaptersAddresses() failed.");
+        goto error;
+    }
+
+    pCurrAddresses = pAddresses;
+    while (pCurrAddresses) {
+        py_nic_name = NULL;
+        py_nic_info = NULL;
+        pIfRow = (MIB_IFROW *) malloc(sizeof(MIB_IFROW));
+
+        if (pIfRow == NULL) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        pIfRow->dwIndex = pCurrAddresses->IfIndex;
+        dwRetVal = GetIfEntry(pIfRow);
+        if (dwRetVal != NO_ERROR) {
+            PyErr_SetString(PyExc_RuntimeError, "GetIfEntry() failed.");
+            goto error;
+        }
+
+        py_nic_info = Py_BuildValue("(kkkkkkkk)",
+                                    pIfRow->dwOutOctets,
+                                    pIfRow->dwInOctets,
+                                    pIfRow->dwOutUcastPkts,
+                                    pIfRow->dwInUcastPkts,
+                                    pIfRow->dwInErrors,
+                                    pIfRow->dwOutErrors,
+                                    pIfRow->dwInDiscards,
+                                    pIfRow->dwOutDiscards);
+        if (!py_nic_info)
+            goto error;
+
+        sprintf(ifname, "%wS", pCurrAddresses->FriendlyName);
+
+#if PY_MAJOR_VERSION >= 3
+        // XXX - Dirty hack to avoid encoding errors on Python 3, see:
+        // https://code.google.com/p/psutil/issues/detail?id=446#c9
+        for (i = 0; i < MAX_PATH; i++) {
+            if (*(ifname+i) < 0 || *(ifname+i) > 256) {
+                // replace the non unicode character
+                *(ifname+i) = '?';
+            }
+            else if (*(ifname+i) == '\0') {
+                break;
+            }
+        }
+#endif
+        py_nic_name = Py_BuildValue("s", ifname);
+        if (py_nic_name == NULL)
+            goto error;
+        if (PyDict_SetItem(py_retdict, py_nic_name, py_nic_info))
+            goto error;
+        Py_XDECREF(py_nic_name);
+        Py_XDECREF(py_nic_info);
+
+        free(pIfRow);
+        pCurrAddresses = pCurrAddresses->Next;
+    }
+
+    free(pAddresses);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_nic_name);
+    Py_XDECREF(py_nic_info);
+    Py_DECREF(py_retdict);
+    if (pAddresses != NULL)
+        free(pAddresses);
+    if (pIfRow != NULL)
+        free(pIfRow);
+    return NULL;
+}
+
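
Each value in the returned dict is an 8-tuple in the order packed above
(out/in octets, out/in unicast packets, in/out errors, in/out discards). A
sketch of consuming it, with illustrative data:

    counters = {"Ethernet": (9000, 12000, 80, 95, 0, 0, 1, 0)}
    for nic, (bytes_sent, bytes_recv, pkts_sent, pkts_recv,
              errin, errout, dropin, dropout) in counters.items():
        print(nic, bytes_sent, bytes_recv)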
+// fix for mingw32, see
+// https://code.google.com/p/psutil/issues/detail?id=351#c2
+typedef struct _DISK_PERFORMANCE_WIN_2008 {
+    LARGE_INTEGER BytesRead;
+    LARGE_INTEGER BytesWritten;
+    LARGE_INTEGER ReadTime;
+    LARGE_INTEGER WriteTime;
+    LARGE_INTEGER IdleTime;
+    DWORD         ReadCount;
+    DWORD         WriteCount;
+    DWORD         QueueDepth;
+    DWORD         SplitCount;
+    LARGE_INTEGER QueryTime;
+    DWORD         StorageDeviceNumber;
+    WCHAR         StorageManagerName[8];
+} DISK_PERFORMANCE_WIN_2008;
+
+/*
+ * Return a Python dict of tuples for disk I/O information
+ */
+static PyObject *
+psutil_disk_io_counters(PyObject *self, PyObject *args)
+{
+    DISK_PERFORMANCE_WIN_2008 diskPerformance;
+    DWORD dwSize;
+    HANDLE hDevice = NULL;
+    char szDevice[MAX_PATH];
+    char szDeviceDisplay[MAX_PATH];
+    int devNum;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_disk_info = NULL;
+    if (py_retdict == NULL) {
+        return NULL;
+    }
+
+    // Apparently there's no way to figure out how many times we have
+    // to iterate in order to find valid drives.
+    // Let's probe drive numbers 0 through 32, comfortably above 26,
+    // the number of drive letters (from A:\ to Z:\).
+    for (devNum = 0; devNum <= 32; ++devNum) {
+        py_disk_info = NULL;
+        sprintf(szDevice, "\\\\.\\PhysicalDrive%d", devNum);
+        hDevice = CreateFile(szDevice, 0, FILE_SHARE_READ | FILE_SHARE_WRITE,
+                             NULL, OPEN_EXISTING, 0, NULL);
+
+        if (hDevice == INVALID_HANDLE_VALUE) {
+            continue;
+        }
+        if (DeviceIoControl(hDevice, IOCTL_DISK_PERFORMANCE, NULL, 0,
+                            &diskPerformance, sizeof(diskPerformance),
+                            &dwSize, NULL))
+        {
+            sprintf(szDeviceDisplay, "PhysicalDrive%d", devNum);
+            py_disk_info = Py_BuildValue(
+                "(IILLLL)",
+                diskPerformance.ReadCount,
+                diskPerformance.WriteCount,
+                diskPerformance.BytesRead,
+                diskPerformance.BytesWritten,
+                // ReadTime/WriteTime are in 100-ns units; convert to ms
+                diskPerformance.ReadTime.QuadPart / 10000,
+                diskPerformance.WriteTime.QuadPart / 10000);
+            if (!py_disk_info)
+                goto error;
+            if (PyDict_SetItemString(py_retdict, szDeviceDisplay,
+                                     py_disk_info))
+            {
+                goto error;
+            }
+            Py_XDECREF(py_disk_info);
+        }
+        else {
+            // XXX we might get here with ERROR_INSUFFICIENT_BUFFER when
+            // compiling with mingw32; not sure what to do.
+            // return PyErr_SetFromWindowsErr(0);
+            ;;
+        }
+
+        CloseHandle(hDevice);
+    }
+
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_disk_info);
+    Py_DECREF(py_retdict);
+    if (hDevice != NULL)
+        CloseHandle(hDevice);
+    return NULL;
+}
+
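
A sketch of reading the per-drive dict, with the tuple order used by the
Py_BuildValue call above and times in milliseconds:

    # illustrative: (reads, writes, read_bytes, write_bytes,
    #                read_time_ms, write_time_ms)
    disks = {"PhysicalDrive0": (1200, 300, 52428800, 10485760, 900, 250)}
    for name, (r, w, rb, wb, rt_ms, wt_ms) in disks.items():
        avg_read_ms = rt_ms / float(r)  # mean latency per read op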
+
+static char *psutil_get_drive_type(int type)
+{
+    switch (type) {
+    case DRIVE_FIXED:
+        return "fixed";
+    case DRIVE_CDROM:
+        return "cdrom";
+    case DRIVE_REMOVABLE:
+        return "removable";
+    case DRIVE_UNKNOWN:
+        return "unknown";
+    case DRIVE_NO_ROOT_DIR:
+        return "unmounted";
+    case DRIVE_REMOTE:
+        return "remote";
+    case DRIVE_RAMDISK:
+        return "ramdisk";
+    default:
+        return "?";
+    }
+}
+
+
+#ifndef _ARRAYSIZE
+#define _ARRAYSIZE(a) (sizeof(a)/sizeof(a[0]))
+#endif
+
+/*
+ * Return disk partitions as a list of (mountpoint, device, fstype, opts)
+ * tuples such as ("C:\\", "C:\\", "NTFS", "rw,fixed")
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    DWORD num_bytes;
+    char drive_strings[255];
+    char *drive_letter = drive_strings;
+    int all;
+    int type;
+    int ret;
+    char opts[20];
+    TCHAR fs_type[MAX_PATH + 1] = { 0 };   // a char buffer, not pointers
+    DWORD pflags = 0;
+    PyObject *py_all;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL) {
+        return NULL;
+    }
+
+    // avoid displaying a message box in case something goes wrong
+    // see http://code.google.com/p/psutil/issues/detail?id=264
+    SetErrorMode(SEM_FAILCRITICALERRORS);
+
+    if (! PyArg_ParseTuple(args, "O", &py_all)) {
+        goto error;
+    }
+    all = PyObject_IsTrue(py_all);
+
+    Py_BEGIN_ALLOW_THREADS
+    num_bytes = GetLogicalDriveStrings(254, drive_letter);
+    Py_END_ALLOW_THREADS
+
+    if (num_bytes == 0) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    while (*drive_letter != 0) {
+        py_tuple = NULL;
+        opts[0] = 0;
+        fs_type[0] = 0;
+
+        Py_BEGIN_ALLOW_THREADS
+        type = GetDriveType(drive_letter);
+        Py_END_ALLOW_THREADS
+
+        // by default we only show hard drives and cd-roms
+        if (all == 0) {
+            if ((type == DRIVE_UNKNOWN) ||
+                    (type == DRIVE_NO_ROOT_DIR) ||
+                    (type == DRIVE_REMOTE) ||
+                    (type == DRIVE_RAMDISK)) {
+                goto next;
+            }
+            // floppy disk: skip it by default as it introduces a
+            // considerable slowdown.
+            if ((type == DRIVE_REMOVABLE) &&
+                    (strcmp(drive_letter, "A:\\")  == 0)) {
+                goto next;
+            }
+        }
+
+        ret = GetVolumeInformation(
+            (LPCTSTR)drive_letter, NULL, _ARRAYSIZE(drive_letter),
+            NULL, NULL, &pflags, (LPTSTR)fs_type, _ARRAYSIZE(fs_type));
+        if (ret == 0) {
+            // We might get here in case of a floppy drive, in
+            // which case the error is (21, "device not ready").
+            // Let's pretend it didn't happen as we already have
+            // the drive name and type ('removable').
+            strcat(opts, "");
+            SetLastError(0);
+        }
+        else {
+            if (pflags & FILE_READ_ONLY_VOLUME) {
+                strcat(opts, "ro");
+            }
+            else {
+                strcat(opts, "rw");
+            }
+            if (pflags & FILE_VOLUME_IS_COMPRESSED) {
+                strcat(opts, ",compressed");
+            }
+        }
+
+        if (strlen(opts) > 0) {
+            strcat(opts, ",");
+        }
+        strcat(opts, psutil_get_drive_type(type));
+
+        py_tuple = Py_BuildValue(
+            "(ssss)",
+            drive_letter,
+            drive_letter,
+            fs_type,  // either FAT, FAT32, NTFS, HPFS, CDFS, UDF or NWFS
+            opts);
+        if (!py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+
+next:
+        drive_letter = strchr(drive_letter, 0) + 1;
+    }
+
+    SetErrorMode(0);
+    return py_retlist;
+
+error:
+    SetErrorMode(0);
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    return NULL;
+}
+
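
A sketch of filtering the resulting (mountpoint, device, fstype, opts)
entries, with illustrative values:

    parts = [("C:\\", "C:\\", "NTFS", "rw,fixed"),
             ("D:\\", "D:\\", "CDFS", "ro,cdrom")]
    writable = [p for p in parts if p[3].startswith("rw")]
    assert writable == [("C:\\", "C:\\", "NTFS", "rw,fixed")]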
+
+#ifdef UNICODE
+#define WTSOpenServer WTSOpenServerW
+#else
+#define WTSOpenServer WTSOpenServerA
+#endif
+
+
+/*
+ * Return a list of currently connected users as (username, address,
+ * login_time) tuples
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    HANDLE hServer = NULL;
+    LPTSTR buffer_user = NULL;
+    LPTSTR buffer_addr = NULL;
+    PWTS_SESSION_INFO sessions = NULL;
+    DWORD count;
+    DWORD i;
+    DWORD sessionId;
+    DWORD bytes;
+    PWTS_CLIENT_ADDRESS address;
+    char address_str[50];
+    long long unix_time;
+
+    PWINSTATIONQUERYINFORMATIONW WinStationQueryInformationW;
+    WINSTATION_INFO station_info;
+    HINSTANCE hInstWinSta = NULL;
+    ULONG returnLen;
+
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+    PyObject *py_address = NULL;
+    if (py_retlist == NULL) {
+        return NULL;
+    }
+
+    hInstWinSta = LoadLibraryA("winsta.dll");
+    WinStationQueryInformationW = (PWINSTATIONQUERYINFORMATIONW) \
+        GetProcAddress(hInstWinSta, "WinStationQueryInformationW");
+
+    hServer = WTSOpenServer(NULL);   // NULL opens the local server
+    if (hServer == NULL) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    if (WTSEnumerateSessions(hServer, 0, 1, &sessions, &count) == 0) {
+        PyErr_SetFromWindowsErr(0);
+        goto error;
+    }
+
+    for (i = 0; i < count; i++) {
+        py_address = NULL;
+        py_tuple = NULL;
+        sessionId = sessions[i].SessionId;
+        if (buffer_user != NULL) {
+            WTSFreeMemory(buffer_user);
+        }
+        if (buffer_addr != NULL) {
+            WTSFreeMemory(buffer_addr);
+        }
+
+        buffer_user = NULL;
+        buffer_addr = NULL;
+
+        // username
+        bytes = 0;
+        if (WTSQuerySessionInformation(hServer, sessionId, WTSUserName,
+                                       &buffer_user, &bytes) == 0) {
+            PyErr_SetFromWindowsErr(0);
+            goto error;
+        }
+        if (bytes == 1) {
+            // just the terminating NUL: no user logged in for this session
+            continue;
+        }
+
+        // address
+        bytes = 0;
+        if (WTSQuerySessionInformation(hServer, sessionId, WTSClientAddress,
+                                       &buffer_addr, &bytes) == 0) {
+            PyErr_SetFromWindowsErr(0);
+            goto error;
+        }
+
+        address = (PWTS_CLIENT_ADDRESS)buffer_addr;
+        if (address->AddressFamily == 0) {  // AF_INET
+            sprintf(address_str,
+                    "%u.%u.%u.%u",
+                    address->Address[0],
+                    address->Address[1],
+                    address->Address[2],
+                    address->Address[3]);
+            py_address = Py_BuildValue("s", address_str);
+            if (!py_address)
+                goto error;
+        }
+        else {
+            Py_INCREF(Py_None);   // it gets DECREFed further below
+            py_address = Py_None;
+        }
+
+        // login time
+        if (!WinStationQueryInformationW(hServer,
+                                         sessionId,
+                                         WinStationInformation,
+                                         &station_info,
+                                         sizeof(station_info),
+                                         &returnLen))
+        {
+            goto error;
+        }
+
+        unix_time = ((LONGLONG)station_info.ConnectTime.dwHighDateTime) << 32;
+        unix_time += \
+            station_info.ConnectTime.dwLowDateTime - 116444736000000000LL;
+        unix_time /= 10000000;
+
+        py_tuple = Py_BuildValue("sOd", buffer_user, py_address,
+                                 (double)unix_time);
+        if (!py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_XDECREF(py_address);
+        Py_XDECREF(py_tuple);
+    }
+
+    WTSCloseServer(hServer);
+    WTSFreeMemory(sessions);
+    WTSFreeMemory(buffer_user);
+    WTSFreeMemory(buffer_addr);
+    FreeLibrary(hInstWinSta);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_XDECREF(py_address);
+    Py_DECREF(py_retlist);
+
+    if (hInstWinSta != NULL) {
+        FreeLibrary(hInstWinSta);
+    }
+    if (hServer != NULL) {
+        WTSCloseServer(hServer);
+    }
+    if (sessions != NULL) {
+        WTSFreeMemory(sessions);
+    }
+    if (buffer_user != NULL) {
+        WTSFreeMemory(buffer_user);
+    }
+    if (buffer_addr != NULL) {
+        WTSFreeMemory(buffer_addr);
+    }
+    return NULL;
+}
+
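
The login-time arithmetic above converts a Windows FILETIME (100-nanosecond
ticks since 1601-01-01) to a Unix timestamp by subtracting the documented
116444736000000000 offset and dividing by 10^7. The same conversion in
Python:

    def filetime_to_unix(high, low):
        ft = (high << 32) + low  # 100-ns ticks since 1601
        return (ft - 116444736000000000) / 10000000.0  # seconds since 1970

    # the 1601->1970 offset itself maps exactly to epoch 0
    offset = 116444736000000000
    assert filetime_to_unix(offset >> 32, offset & 0xFFFFFFFF) == 0.0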
+
+/*
+ * Return the number of handles opened by the process.
+ */
+static PyObject *
+psutil_proc_num_handles(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess;
+    DWORD handleCount;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (NULL == hProcess) {
+        return NULL;
+    }
+    if (! GetProcessHandleCount(hProcess, &handleCount)) {
+        CloseHandle(hProcess);
+        return PyErr_SetFromWindowsErr(0);
+    }
+    CloseHandle(hProcess);
+    return Py_BuildValue("k", handleCount);
+}
+
+
+/*
+ * Alternative implementation of the function above; this one bypasses
+ * ACCESS DENIED errors.
+ */
+static PyObject *
+psutil_proc_num_handles_2(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    ULONG count;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    count = process->HandleCount;
+    free(buffer);
+    return Py_BuildValue("k", count);
+}
+
+
+/*
+ * Return the number of context switches executed by the process.
+ */
+static PyObject *
+psutil_proc_num_ctx_switches(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    PSYSTEM_PROCESS_INFORMATION process;
+    PVOID buffer;
+    ULONG i;
+    ULONG total = 0;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (! psutil_get_proc_info(pid, &process, &buffer)) {
+        return NULL;
+    }
+    for (i = 0; i < process->NumberOfThreads; i++) {
+        total += process->Threads[i].ContextSwitches;
+    }
+    free(buffer);
+    return Py_BuildValue("ki", total, 0);
+}
+
+
+static char *get_region_protection_string(ULONG protection)
+{
+    switch (protection & 0xff) {
+    case PAGE_NOACCESS:
+        return "";
+    case PAGE_READONLY:
+        return "r";
+    case PAGE_READWRITE:
+        return "rw";
+    case PAGE_WRITECOPY:
+        return "wc";
+    case PAGE_EXECUTE:
+        return "x";
+    case PAGE_EXECUTE_READ:
+        return "xr";
+    case PAGE_EXECUTE_READWRITE:
+        return "xrw";
+    case PAGE_EXECUTE_WRITECOPY:
+        return "xwc";
+    default:
+        return "?";
+    }
+}
+
+
+/*
+ * Return a list of the process's memory mappings.
+ */
+static PyObject *
+psutil_proc_memory_maps(PyObject *self, PyObject *args)
+{
+    DWORD pid;
+    HANDLE hProcess = NULL;
+    MEMORY_BASIC_INFORMATION basicInfo;
+    PVOID baseAddress;
+    PVOID previousAllocationBase;
+    CHAR mappedFileName[MAX_PATH];
+    SYSTEM_INFO system_info;
+    LPVOID maxAddr;
+    PyObject *py_list = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_list == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+    hProcess = psutil_handle_from_pid(pid);
+    if (NULL == hProcess) {
+        goto error;
+    }
+
+    GetSystemInfo(&system_info);
+    maxAddr = system_info.lpMaximumApplicationAddress;
+    baseAddress = NULL;
+    previousAllocationBase = NULL;
+
+    while (VirtualQueryEx(hProcess, baseAddress, &basicInfo,
+                          sizeof(MEMORY_BASIC_INFORMATION)))
+    {
+        py_tuple = NULL;
+        if (baseAddress > maxAddr) {
+            break;
+        }
+        if (GetMappedFileNameA(hProcess, baseAddress, mappedFileName,
+                               sizeof(mappedFileName)))
+        {
+            py_tuple = Py_BuildValue(
+                "(KssK)",
+                // unsigned long long so that 64-bit addresses and
+                // region sizes are not truncated on Win64
+                (unsigned long long)baseAddress,
+                get_region_protection_string(basicInfo.Protect),
+                mappedFileName,
+                (unsigned long long)basicInfo.RegionSize);
+            if (!py_tuple)
+                goto error;
+            if (PyList_Append(py_list, py_tuple))
+                goto error;
+            Py_DECREF(py_tuple);
+        }
+        previousAllocationBase = basicInfo.AllocationBase;
+        baseAddress = (PCHAR)baseAddress + basicInfo.RegionSize;
+    }
+
+    CloseHandle(hProcess);
+    return py_list;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_list);
+    if (hProcess != NULL)
+        CloseHandle(hProcess);
+    return NULL;
+}
+
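
A sketch aggregating the per-region tuples in the (addr, perms, path, size)
order built above, e.g. to get total mapped bytes per file:

    from collections import defaultdict

    # illustrative regions
    regions = [(0x77000000, "r", "C:\\Windows\\System32\\ntdll.dll", 65536),
               (0x77010000, "rw", "C:\\Windows\\System32\\ntdll.dll", 4096)]
    per_file = defaultdict(int)
    for addr, perms, path, size in regions:
        per_file[path] += size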
+
+/*
+ * Return a {pid:ppid, ...} dict for all running processes.
+ */
+static PyObject *
+psutil_ppid_map(PyObject *self, PyObject *args)
+{
+    PyObject *pid = NULL;
+    PyObject *ppid = NULL;
+    PyObject *py_retdict = PyDict_New();
+    HANDLE handle = NULL;
+    PROCESSENTRY32 pe = {0};
+    pe.dwSize = sizeof(PROCESSENTRY32);
+
+    if (py_retdict == NULL)
+        return NULL;
+    handle = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+    if (handle == INVALID_HANDLE_VALUE) {
+        PyErr_SetFromWindowsErr(0);
+        Py_DECREF(py_retdict);
+        return NULL;
+    }
+
+    if (Process32First(handle, &pe)) {
+        do {
+            pid = Py_BuildValue("I", pe.th32ProcessID);
+            if (pid == NULL)
+                goto error;
+            ppid = Py_BuildValue("I", pe.th32ParentProcessID);
+            if (ppid == NULL)
+                goto error;
+            if (PyDict_SetItem(py_retdict, pid, ppid))
+                goto error;
+            Py_DECREF(pid);
+            Py_DECREF(ppid);
+        } while (Process32Next(handle, &pe));
+    }
+
+    CloseHandle(handle);
+    return py_retdict;
+
+error:
+    Py_XDECREF(pid);
+    Py_XDECREF(ppid);
+    Py_DECREF(py_retdict);
+    CloseHandle(handle);
+    return NULL;
+}
+
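
Inverting the returned {pid: ppid} dict yields a children map, which is the
usual way a process tree is derived from it:

    from collections import defaultdict

    ppid_map = {0: 0, 4: 0, 356: 4, 2200: 356}  # illustrative
    children = defaultdict(list)
    for pid, ppid in ppid_map.items():
        if pid != ppid:  # the System Idle Process is its own parent
            children[ppid].append(pid)
    assert children[4] == [356]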
+
+// ------------------------ Python init ---------------------------
+
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- per-process functions
+
+    {"proc_cmdline", psutil_proc_cmdline, METH_VARARGS,
+     "Return process cmdline as a list of cmdline arguments"},
+    {"proc_exe", psutil_proc_exe, METH_VARARGS,
+     "Return path of the process executable"},
+    {"proc_kill", psutil_proc_kill, METH_VARARGS,
+     "Kill the process identified by the given PID"},
+    {"proc_cpu_times", psutil_proc_cpu_times, METH_VARARGS,
+     "Return tuple of user/kern time for the given PID"},
+    {"proc_create_time", psutil_proc_create_time, METH_VARARGS,
+     "Return a float indicating the process create time expressed in "
+     "seconds since the epoch"},
+    {"proc_memory_info", psutil_proc_memory_info, METH_VARARGS,
+     "Return a tuple of process memory information"},
+    {"proc_cwd", psutil_proc_cwd, METH_VARARGS,
+     "Return process current working directory"},
+    {"proc_suspend", psutil_proc_suspend, METH_VARARGS,
+     "Suspend a process"},
+    {"proc_resume", psutil_proc_resume, METH_VARARGS,
+     "Resume a process"},
+    {"proc_open_files", psutil_proc_open_files, METH_VARARGS,
+     "Return files opened by process"},
+    {"proc_username", psutil_proc_username, METH_VARARGS,
+     "Return the username of a process"},
+    {"proc_num_threads", psutil_proc_num_threads, METH_VARARGS,
+     "Return the number of threads used by the process"},
+    {"proc_threads", psutil_proc_threads, METH_VARARGS,
+     "Return process threads information as a list of tuple"},
+    {"proc_wait", psutil_proc_wait, METH_VARARGS,
+     "Wait for process to terminate and return its exit code."},
+   

<TRUNCATED>

[17/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
new file mode 100644
index 0000000..4e00bc8
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import org.apache.hadoop.yarn.webapp.Controller;
+
+import com.google.inject.Inject;
+
+public class AHSController extends Controller {
+
+  @Inject
+  AHSController(RequestContext ctx) {
+    super(ctx);
+  }
+
+  @Override
+  public void index() {
+    setTitle("Application History");
+  }
+
+  public void app() {
+    render(AppPage.class);
+  }
+
+  public void appattempt() {
+    render(AppAttemptPage.class);
+  }
+
+  public void container() {
+    render(ContainerPage.class);
+  }
+
+  /**
+   * Render the logs page.
+   */
+  public void logs() {
+    render(AHSLogsPage.class);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java
new file mode 100644
index 0000000..8821bc0
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_ID;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.ENTITY_STRING;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.log.AggregatedLogsBlock;
+
+public class AHSLogsPage extends AHSView {
+  /*
+   * (non-Javadoc)
+   * 
+   * @see
+   * org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSView#
+   * preHead(org.apache.hadoop .yarn.webapp.hamlet.Hamlet.HTML)
+   */
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    String logEntity = $(ENTITY_STRING);
+    if (logEntity == null || logEntity.isEmpty()) {
+      logEntity = $(CONTAINER_ID);
+    }
+    if (logEntity == null || logEntity.isEmpty()) {
+      logEntity = "UNKNOWN";
+    }
+    commonPreHead(html);
+  }
+
+  /**
+   * The content of this page is the AggregatedLogsBlock
+   * 
+   * @return AggregatedLogsBlock.class
+   */
+  @Override
+  protected Class<? extends SubView> content() {
+    return AggregatedLogsBlock.class;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
new file mode 100644
index 0000000..4baa75d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import org.apache.hadoop.yarn.server.webapp.AppsBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
+
+// Do NOT rename/refactor this to AHSView as it will wreak havoc
+// on Mac OS HFS
+public class AHSView extends TwoColumnLayout {
+  static final int MAX_DISPLAY_ROWS = 100; // direct table rendering
+  static final int MAX_FAST_ROWS = 1000; // inline js array
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+    set(DATATABLES_ID, "apps");
+    set(initID(DATATABLES, "apps"), appsTableInit());
+    setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
+
+    // Set the correct title.
+    String reqState = $(APP_STATE);
+    reqState = (reqState == null || reqState.isEmpty() ? "All" : reqState);
+    setTitle(sjoin(reqState, "Applications"));
+  }
+
+  protected void commonPreHead(Page.HTML<_> html) {
+    set(ACCORDION_ID, "nav");
+    set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
+  }
+
+  @Override
+  protected Class<? extends SubView> nav() {
+    return NavBlock.class;
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return AppsBlock.class;
+  }
+
+  private String appsTableInit() {
+    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
+    return tableInit().append(", 'aaData': appsTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+
+      .append("\n, aoColumnDefs: ").append(getAppsTableColumnDefs())
+
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  protected String getAppsTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }")
+
+      .append("\n, {'sType':'numeric', 'aTargets': [5, 6]")
+      .append(", 'mRender': renderHadoopDate }")
+
+      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
+      .append(", 'mRender': parseHadoopProgress }]").toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
new file mode 100644
index 0000000..8cff741
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
+
+import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.TimelineStore;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+public class AHSWebApp extends WebApp implements YarnWebParams {
+
+  private final ApplicationHistoryManager applicationHistoryManager;
+  private final TimelineStore timelineStore;
+  private final TimelineMetricStore timelineMetricStore;
+
+  public AHSWebApp(ApplicationHistoryManager applicationHistoryManager,
+      TimelineStore timelineStore, TimelineMetricStore timelineMetricStore) {
+    this.applicationHistoryManager = applicationHistoryManager;
+    this.timelineStore = timelineStore;
+    this.timelineMetricStore = timelineMetricStore;
+  }
+
+  @Override
+  public void setup() {
+    bind(YarnJacksonJaxbJsonProvider.class);
+    bind(AHSWebServices.class);
+    bind(TimelineWebServices.class);
+    bind(GenericExceptionHandler.class);
+    bind(ApplicationContext.class).toInstance(applicationHistoryManager);
+    bind(TimelineStore.class).toInstance(timelineStore);
+    bind(TimelineMetricStore.class).toInstance(timelineMetricStore);
+    route("/", AHSController.class);
+    route(pajoin("/apps", APP_STATE), AHSController.class);
+    route(pajoin("/app", APPLICATION_ID), AHSController.class, "app");
+    route(pajoin("/appattempt", APPLICATION_ATTEMPT_ID), AHSController.class,
+      "appattempt");
+    route(pajoin("/container", CONTAINER_ID), AHSController.class, "container");
+    route(
+      pajoin("/logs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING, APP_OWNER,
+        CONTAINER_LOG_TYPE), AHSController.class, "logs");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
new file mode 100644
index 0000000..2040f57
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import java.util.Collections;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.webapp.WebServices;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+@Singleton
+@Path("/ws/v1/applicationhistory")
+public class AHSWebServices extends WebServices {
+
+  @Inject
+  public AHSWebServices(ApplicationContext appContext) {
+    super(appContext);
+  }
+
+  @GET
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public AppsInfo get(@Context HttpServletRequest req,
+      @Context HttpServletResponse res) {
+    return getApps(req, res, null, Collections.<String> emptySet(), null, null,
+      null, null, null, null, null, null, Collections.<String> emptySet());
+  }
+
+  @GET
+  @Path("/apps")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public AppsInfo getApps(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @QueryParam("state") String stateQuery,
+      @QueryParam("states") Set<String> statesQuery,
+      @QueryParam("finalStatus") String finalStatusQuery,
+      @QueryParam("user") String userQuery,
+      @QueryParam("queue") String queueQuery,
+      @QueryParam("limit") String count,
+      @QueryParam("startedTimeBegin") String startedBegin,
+      @QueryParam("startedTimeEnd") String startedEnd,
+      @QueryParam("finishedTimeBegin") String finishBegin,
+      @QueryParam("finishedTimeEnd") String finishEnd,
+      @QueryParam("applicationTypes") Set<String> applicationTypes) {
+    init(res);
+    validateStates(stateQuery, statesQuery);
+    return super.getApps(req, res, stateQuery, statesQuery, finalStatusQuery,
+      userQuery, queueQuery, count, startedBegin, startedEnd, finishBegin,
+      finishEnd, applicationTypes);
+  }
+
+  @GET
+  @Path("/apps/{appid}")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public AppInfo getApp(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId) {
+    init(res);
+    return super.getApp(req, res, appId);
+  }
+
+  @GET
+  @Path("/apps/{appid}/appattempts")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId) {
+    init(res);
+    return super.getAppAttempts(req, res, appId);
+  }
+
+  @GET
+  @Path("/apps/{appid}/appattempts/{appattemptid}")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public AppAttemptInfo getAppAttempt(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId,
+      @PathParam("appattemptid") String appAttemptId) {
+    init(res);
+    return super.getAppAttempt(req, res, appId, appAttemptId);
+  }
+
+  @GET
+  @Path("/apps/{appid}/appattempts/{appattemptid}/containers")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public ContainersInfo getContainers(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId,
+      @PathParam("appattemptid") String appAttemptId) {
+    init(res);
+    return super.getContainers(req, res, appId, appAttemptId);
+  }
+
+  @GET
+  @Path("/apps/{appid}/appattempts/{appattemptid}/containers/{containerid}")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
+  public ContainerInfo getContainer(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("appid") String appId,
+      @PathParam("appattemptid") String appAttemptId,
+      @PathParam("containerid") String containerId) {
+    init(res);
+    return super.getContainer(req, res, appId, appAttemptId, containerId);
+  }
+
+  private static void
+      validateStates(String stateQuery, Set<String> statesQuery) {
+    // stateQuery is deprecated.
+    if (stateQuery != null && !stateQuery.isEmpty()) {
+      statesQuery.add(stateQuery);
+    }
+    Set<String> appStates = parseQueries(statesQuery, true);
+    for (String appState : appStates) {
+      switch (YarnApplicationState.valueOf(appState.toUpperCase())) {
+        case FINISHED:
+        case FAILED:
+        case KILLED:
+          continue;
+        default:
+          throw new BadRequestException("Invalid application-state " + appState
+              + " specified. It should be a final state");
+      }
+    }
+  }
+
+}
\ No newline at end of file
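
These JAX-RS annotations publish the history data under
/ws/v1/applicationhistory. A hedged sketch of querying it from Python (host
and port are assumptions; the path and the states filter come from the
annotations and validateStates() above, which rejects non-final states with
a 400):

    import json
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen         # Python 2

    # hypothetical timeline server host:port
    url = ("http://timelineserver.example.com:8188"
           "/ws/v1/applicationhistory/apps?states=FINISHED")
    apps = json.loads(urlopen(url).read().decode("utf-8"))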

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
new file mode 100644
index 0000000..63b44bd
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import org.apache.hadoop.yarn.server.webapp.AppAttemptBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+public class AppAttemptPage extends AHSView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String appAttemptId = $(YarnWebParams.APPLICATION_ATTEMPT_ID);
+    set(
+      TITLE,
+      appAttemptId.isEmpty() ? "Bad request: missing application attempt ID"
+          : join("Application Attempt ",
+            $(YarnWebParams.APPLICATION_ATTEMPT_ID)));
+
+    set(DATATABLES_ID, "containers");
+    set(initID(DATATABLES, "containers"), containersTableInit());
+    setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return AppAttemptBlock.class;
+  }
+
+  private String containersTableInit() {
+    return tableInit().append(", 'aaData': containersTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+
+      .append("\n, aoColumnDefs: ").append(getContainersTableColumnDefs())
+
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  protected String getContainersTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }]").toString();
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
new file mode 100644
index 0000000..96ca659
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+public class AppPage extends AHSView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String appId = $(YarnWebParams.APPLICATION_ID);
+    set(
+      TITLE,
+      appId.isEmpty() ? "Bad request: missing application ID" : join(
+        "Application ", $(YarnWebParams.APPLICATION_ID)));
+
+    set(DATATABLES_ID, "attempts");
+    set(initID(DATATABLES, "attempts"), attemptsTableInit());
+    setTableStyles(html, "attempts", ".queue {width:6em}", ".ui {width:8em}");
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return AppBlock.class;
+  }
+
+  private String attemptsTableInit() {
+    return tableInit().append(", 'aaData': attemptsTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+
+      .append("\n, aoColumnDefs: ").append(getAttemptsTableColumnDefs())
+
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  protected String getAttemptsTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }")
+
+      .append("\n, {'sType':'numeric', 'aTargets': [1]")
+      .append(", 'mRender': renderHadoopDate }]").toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
new file mode 100644
index 0000000..1be8a26
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+import org.apache.hadoop.yarn.server.webapp.ContainerBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+public class ContainerPage extends AHSView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String containerId = $(YarnWebParams.CONTAINER_ID);
+    set(TITLE, containerId.isEmpty() ? "Bad request: missing container ID"
+        : join("Container ", $(YarnWebParams.CONTAINER_ID)));
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return ContainerBlock.class;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/JAXBContextResolver.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/JAXBContextResolver.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/JAXBContextResolver.java
new file mode 100644
index 0000000..5fd0124
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/JAXBContextResolver.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.ws.rs.ext.ContextResolver;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
+
+import com.google.inject.Singleton;
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+
+@Singleton
+@Provider
+@SuppressWarnings("rawtypes")
+public class JAXBContextResolver implements ContextResolver<JAXBContext> {
+
+  private JAXBContext context;
+  private final Set<Class> types;
+
+  // All DAO classes must be registered here for JSON marshalling.
+  private final Class[] cTypes = { AppInfo.class, AppsInfo.class,
+      AppAttemptInfo.class, AppAttemptsInfo.class, ContainerInfo.class,
+      ContainersInfo.class };
+
+  public JAXBContextResolver() throws Exception {
+    this.types = new HashSet<Class>(Arrays.asList(cTypes));
+    this.context =
+        new JSONJAXBContext(JSONConfiguration.natural().rootUnwrapping(false)
+          .build(), cTypes);
+  }
+
+  @Override
+  public JAXBContext getContext(Class<?> objectType) {
+    return (types.contains(objectType)) ? context : null;
+  }
+}
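
A note on the resolver above: Jersey consults each registered
ContextResolver<JAXBContext> when marshalling a response entity, so
returning null for types outside cTypes lets Jersey fall back to its
default context. A minimal sketch of that contract (illustrative only):

    JAXBContextResolver resolver = new JAXBContextResolver();
    // Registered DAO types share the natural-JSON context...
    assert resolver.getContext(AppInfo.class) != null;
    // ...anything else falls through to Jersey's default handling.
    assert resolver.getContext(String.class) == null;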

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
new file mode 100644
index 0000000..e84ddec
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -0,0 +1,51 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+public class NavBlock extends HtmlBlock {
+
+  @Override
+  public void render(Block html) {
+    html.
+        div("#nav").
+            h3("Application History").
+                ul().
+                    li().a(url("apps"), "Applications").
+                        ul().
+                            li().a(url("apps",
+                                YarnApplicationState.FINISHED.toString()),
+                                YarnApplicationState.FINISHED.toString()).
+                            _().
+                            li().a(url("apps",
+                                YarnApplicationState.FAILED.toString()),
+                                YarnApplicationState.FAILED.toString()).
+                            _().
+                            li().a(url("apps",
+                                YarnApplicationState.KILLED.toString()),
+                                YarnApplicationState.KILLED.toString()).
+                            _().
+                        _().
+                    _().
+                _().
+            _();
+  }
+}
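
The Hamlet chain above nests one list item per terminal YarnApplicationState
under an "Applications" link; the rendered markup is roughly (hrefs
abbreviated):

    <div id="nav">
      <h3>Application History</h3>
      <ul>
        <li><a href="apps">Applications</a>
          <ul>
            <li><a href="apps/FINISHED">FINISHED</a></li>
            <li><a href="apps/FAILED">FAILED</a></li>
            <li><a href="apps/KILLED">KILLED</a></li>
          </ul>
        </li>
      </ul>
    </div>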

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
new file mode 100644
index 0000000..c9d56fc
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
@@ -0,0 +1,504 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.EntityIdentifier;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.GenericObjectMapper;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.NameValuePair;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.TimelineStore;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import static org.apache.hadoop.yarn.util.StringHelper.CSV_JOINER;
+
+@Singleton
+@Path("/ws/v1/timeline")
+//TODO: support XML serialization/deserialization
+public class TimelineWebServices {
+
+  private static final Log LOG = LogFactory.getLog(TimelineWebServices.class);
+
+  private TimelineStore store;
+  private TimelineMetricStore timelineMetricStore;
+
+  @Inject
+  public TimelineWebServices(TimelineStore store,
+                             TimelineMetricStore timelineMetricStore) {
+    this.store = store;
+    this.timelineMetricStore = timelineMetricStore;
+  }
+
+  @XmlRootElement(name = "about")
+  @XmlAccessorType(XmlAccessType.NONE)
+  @Public
+  @Unstable
+  public static class AboutInfo {
+
+    private String about;
+
+    public AboutInfo() {
+    }
+
+    public AboutInfo(String about) {
+      this.about = about;
+    }
+
+    @XmlElement(name = "About")
+    public String getAbout() {
+      return about;
+    }
+
+    public void setAbout(String about) {
+      this.about = about;
+    }
+
+  }
+
+  /**
+   * Return the description of the timeline web services.
+   */
+  @GET
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public AboutInfo about(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res) {
+    init(res);
+    return new AboutInfo("Timeline API");
+  }
+
+  /**
+   * Return a list of entities that match the given parameters.
+   */
+  @GET
+  @Path("/{entityType}")
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelineEntities getEntities(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("entityType") String entityType,
+      @QueryParam("primaryFilter") String primaryFilter,
+      @QueryParam("secondaryFilter") String secondaryFilter,
+      @QueryParam("windowStart") String windowStart,
+      @QueryParam("windowEnd") String windowEnd,
+      @QueryParam("fromId") String fromId,
+      @QueryParam("fromTs") String fromTs,
+      @QueryParam("limit") String limit,
+      @QueryParam("fields") String fields) {
+    init(res);
+    TimelineEntities entities = null;
+    try {
+      entities = store.getEntities(
+          parseStr(entityType),
+          parseLongStr(limit),
+          parseLongStr(windowStart),
+          parseLongStr(windowEnd),
+          parseStr(fromId),
+          parseLongStr(fromTs),
+          parsePairStr(primaryFilter, ":"),
+          parsePairsStr(secondaryFilter, ",", ":"),
+          parseFieldsStr(fields, ","));
+    } catch (NumberFormatException e) {
+      throw new BadRequestException(
+          "windowStart, windowEnd or limit is not a numeric value.");
+    } catch (IllegalArgumentException e) {
+      throw new BadRequestException("requested invalid field.");
+    } catch (IOException e) {
+      LOG.error("Error getting entities", e);
+      throw new WebApplicationException(e,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    }
+    if (entities == null) {
+      return new TimelineEntities();
+    }
+    return entities;
+  }
+
+  /**
+   * Return a single entity of the given entity type and Id.
+   */
+  @GET
+  @Path("/{entityType}/{entityId}")
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelineEntity getEntity(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("entityType") String entityType,
+      @PathParam("entityId") String entityId,
+      @QueryParam("fields") String fields) {
+    init(res);
+    TimelineEntity entity = null;
+    try {
+      entity =
+          store.getEntity(parseStr(entityId), parseStr(entityType),
+            parseFieldsStr(fields, ","));
+    } catch (IllegalArgumentException e) {
+      throw new BadRequestException(
+          "requested invalid field.");
+    } catch (IOException e) {
+      LOG.error("Error getting entity", e);
+      throw new WebApplicationException(e,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    }
+    if (entity == null) {
+      throw new WebApplicationException(Response.Status.NOT_FOUND);
+    }
+    return entity;
+  }
+
+  /**
+   * Return the events that match the given parameters.
+   */
+  @GET
+  @Path("/{entityType}/events")
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelineEvents getEvents(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("entityType") String entityType,
+      @QueryParam("entityId") String entityId,
+      @QueryParam("eventType") String eventType,
+      @QueryParam("windowStart") String windowStart,
+      @QueryParam("windowEnd") String windowEnd,
+      @QueryParam("limit") String limit) {
+    init(res);
+    TimelineEvents events = null;
+    try {
+      events = store.getEntityTimelines(
+        parseStr(entityType),
+        parseArrayStr(entityId, ","),
+        parseLongStr(limit),
+        parseLongStr(windowStart),
+        parseLongStr(windowEnd),
+        parseArrayStr(eventType, ","));
+    } catch (NumberFormatException e) {
+      throw new BadRequestException(
+          "windowStart, windowEnd or limit is not a numeric value.");
+    } catch (IOException e) {
+      LOG.error("Error getting entity timelines", e);
+      throw new WebApplicationException(e,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    }
+    if (events == null) {
+      return new TimelineEvents();
+    }
+    return events;
+  }
+
+  /**
+   * Store the given metrics in the timeline store, and return any errors
+   * that occurred while storing them.
+   */
+  @Path("/metrics")
+  @POST
+  @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelinePutResponse postMetrics(
+    @Context HttpServletRequest req,
+    @Context HttpServletResponse res,
+    TimelineMetrics metrics) {
+
+    init(res);
+    if (metrics == null) {
+      return new TimelinePutResponse();
+    }
+
+    try {
+
+      // TODO: Check ACLs for MetricEntity using the TimelineACLManager.
+      // TODO: Save owner of the MetricEntity.
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing metrics: " +
+          TimelineUtils.dumpTimelineRecordtoJSON(metrics, true));
+      }
+
+      return timelineMetricStore.putMetrics(metrics);
+
+    } catch (Exception e) {
+      LOG.error("Error saving metrics.", e);
+      throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
+  /**
+   * Query for a particular metric satisfying the filter criteria.
+   * @return {@link TimelineMetric}
+   */
+  @GET
+  @Path("/metrics/{metricName}")
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelineMetric getTimelineMetric(
+    @Context HttpServletRequest req,
+    @Context HttpServletResponse res,
+    @PathParam("metricName") String metricName,
+    @QueryParam("appId") String appId,
+    @QueryParam("instanceId") String instanceId,
+    @QueryParam("hostname") String hostname,
+    @QueryParam("startTime") String startTime,
+    @QueryParam("endTime") String endTime,
+    @QueryParam("limit") String limit
+  ) {
+    init(res);
+    try {
+      return timelineMetricStore.getTimelineMetric(metricName, hostname,
+        appId, instanceId, parseLongStr(startTime), parseLongStr(endTime),
+        parseIntStr(limit));
+
+    } catch (NumberFormatException ne) {
+      throw new BadRequestException("startTime and limit should be numeric " +
+        "values");
+    } catch (SQLException sql) {
+      throw new WebApplicationException(sql,
+        Response.Status.INTERNAL_SERVER_ERROR);
+    } catch (IOException io) {
+      throw new WebApplicationException(io,
+        Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
+  /**
+   * Query for a set of different metrics satisfying the filter criteria.
+   * All query params are optional. A default limit applies if none is
+   * specified.
+   *
+   * @param metricNames Comma-separated list of metrics to retrieve.
+   * @param appId Application Id for the requested metrics.
+   * @param instanceId Application instance id.
+   * @param hostname Hostname where the metrics originated.
+   * @param startTime Start time for the metric records retrieved.
+   * @param endTime End time for the metric records retrieved.
+   * @param limit Limit on the total number of {@link TimelineMetric} records
+   *              retrieved.
+   * @param grouped Whether to return grouped metric values; treated as true
+   *                when the parameter is absent.
+   * @return {@link TimelineMetrics}
+   */
+  @GET
+  @Path("/metrics")
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelineMetrics getTimelineMetrics(
+    @Context HttpServletRequest req,
+    @Context HttpServletResponse res,
+    @QueryParam("metricNames") String metricNames,
+    @QueryParam("appId") String appId,
+    @QueryParam("instanceId") String instanceId,
+    @QueryParam("hostname") String hostname,
+    @QueryParam("startTime") String startTime,
+    @QueryParam("endTime") String endTime,
+    @QueryParam("limit") String limit,
+    @QueryParam("grouped") String grouped
+  ) {
+    init(res);
+    try {
+      LOG.debug("Request for metrics => metricNames: " + metricNames + ", " +
+        "appId: " + appId + ", instanceId: " + instanceId + ", " +
+        "hostname: " + hostname + ", startTime: " + startTime + ", " +
+        "endTime: " + endTime);
+
+      return timelineMetricStore.getTimelineMetrics(
+        parseListStr(metricNames, ","), hostname, appId, instanceId,
+        parseLongStr(startTime), parseLongStr(endTime), parseIntStr(limit),
+        parseBoolean(grouped));
+
+    } catch (NumberFormatException ne) {
+      throw new BadRequestException("startTime and limit should be numeric " +
+        "values");
+    } catch (SQLException sql) {
+      throw new WebApplicationException(sql,
+        Response.Status.INTERNAL_SERVER_ERROR);
+    } catch (IOException io) {
+      throw new WebApplicationException(io,
+        Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
+
+  /**
+   * Store the given entities in the timeline store, and return any errors
+   * that occurred while storing them.
+   */
+  @POST
+  @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelinePutResponse postEntities(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      TimelineEntities entities) {
+    init(res);
+    if (entities == null) {
+      return new TimelinePutResponse();
+    }
+    try {
+      List<EntityIdentifier> entityIDs = new ArrayList<EntityIdentifier>();
+      for (TimelineEntity entity : entities.getEntities()) {
+        EntityIdentifier entityID =
+            new EntityIdentifier(entity.getEntityId(), entity.getEntityType());
+        entityIDs.add(entityID);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Storing the entity " + entityID + ", JSON-style content: "
+              + TimelineUtils.dumpTimelineRecordtoJSON(entity));
+        }
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing entities: " + CSV_JOINER.join(entityIDs));
+      }
+      return store.put(entities);
+    } catch (IOException e) {
+      LOG.error("Error putting entities", e);
+      throw new WebApplicationException(e,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
+  private void init(HttpServletResponse response) {
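+    // Clear any previously set content type so it can be negotiated
+    // per response.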
+    response.setContentType(null);
+  }
+
+  private static SortedSet<String> parseArrayStr(String str, String delimiter) {
+    if (str == null) {
+      return null;
+    }
+    SortedSet<String> strSet = new TreeSet<String>();
+    String[] strs = str.split(delimiter);
+    for (String aStr : strs) {
+      strSet.add(aStr.trim());
+    }
+    return strSet;
+  }
+
+  private static NameValuePair parsePairStr(String str, String delimiter) {
+    if (str == null) {
+      return null;
+    }
+    String[] strs = str.split(delimiter, 2);
+    try {
+      return new NameValuePair(strs[0].trim(),
+          GenericObjectMapper.OBJECT_READER.readValue(strs[1].trim()));
+    } catch (Exception e) {
+      // didn't work as an Object, keep it as a String
+      return new NameValuePair(strs[0].trim(), strs[1].trim());
+    }
+  }
+
+  private static Collection<NameValuePair> parsePairsStr(
+      String str, String aDelimiter, String pDelimiter) {
+    if (str == null) {
+      return null;
+    }
+    String[] strs = str.split(aDelimiter);
+    Set<NameValuePair> pairs = new HashSet<NameValuePair>();
+    for (String aStr : strs) {
+      pairs.add(parsePairStr(aStr, pDelimiter));
+    }
+    return pairs;
+  }
+
+  private static EnumSet<Field> parseFieldsStr(String str, String delimiter) {
+    if (str == null) {
+      return null;
+    }
+    String[] strs = str.split(delimiter);
+    List<Field> fieldList = new ArrayList<Field>();
+    for (String s : strs) {
+      s = s.trim().toUpperCase();
+      if (s.equals("EVENTS")) {
+        fieldList.add(Field.EVENTS);
+      } else if (s.equals("LASTEVENTONLY")) {
+        fieldList.add(Field.LAST_EVENT_ONLY);
+      } else if (s.equals("RELATEDENTITIES")) {
+        fieldList.add(Field.RELATED_ENTITIES);
+      } else if (s.equals("PRIMARYFILTERS")) {
+        fieldList.add(Field.PRIMARY_FILTERS);
+      } else if (s.equals("OTHERINFO")) {
+        fieldList.add(Field.OTHER_INFO);
+      } else {
+        throw new IllegalArgumentException("Requested nonexistent field " + s);
+      }
+    }
+    if (fieldList.size() == 0) {
+      return null;
+    }
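+    // EnumSet.of needs at least one element, so peel off the last field.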
+    Field f1 = fieldList.remove(fieldList.size() - 1);
+    if (fieldList.size() == 0) {
+      return EnumSet.of(f1);
+    } else {
+      return EnumSet.of(f1, fieldList.toArray(new Field[fieldList.size()]));
+    }
+  }
+
+  private static Long parseLongStr(String str) {
+    return str == null ? null : Long.parseLong(str.trim());
+  }
+
+  private static Integer parseIntStr(String str) {
+    return str == null ? null : Integer.parseInt(str.trim());
+  }
+
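+  // Returns true when the argument is null; used for the "grouped" flag.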
+  private static boolean parseBoolean(String booleanStr) {
+    return booleanStr == null || Boolean.parseBoolean(booleanStr);
+  }
+
+  private static List<String> parseListStr(String str, String delimiter) {
+    return str == null ? null : Arrays.asList(str.trim().split(delimiter));
+  }
+
+  private static String parseStr(String str) {
+    return str == null ? null : str.trim();
+  }
+
+}
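
For a sense of the wire format, the sketch below posts one metric to the
/ws/v1/timeline/metrics endpoint with plain HttpURLConnection. The port and
the JSON field names are illustrative assumptions, not taken from this
patch; the authoritative payload shape is whatever TimelineMetrics
serializes to:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class PostMetricSketch {
      public static void main(String[] args) throws Exception {
        // Assumed host/port; point this at the running timeline service.
        URL url = new URL("http://localhost:8188/ws/v1/timeline/metrics");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        // Illustrative payload: one metric with a single timestamped value.
        String json = "{\"metrics\":[{\"metricname\":\"cpu_user\","
            + "\"appid\":\"HOST\",\"hostname\":\"host1.example.com\","
            + "\"starttime\":1408560655000,"
            + "\"metrics\":{\"1408560655000\":0.5}}]}";
        OutputStream out = conn.getOutputStream();
        out.write(json.getBytes("UTF-8"));
        out.close();
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }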

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtilsExt.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtilsExt.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtilsExt.java
new file mode 100644
index 0000000..31e8017
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtilsExt.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.util.timeline;
+
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class TimelineUtilsExt extends TimelineUtils {
+  private static final ObjectMapper mapper = new ObjectMapper();
+
+  private static final TypeReference<Map<Long, Double>> metricValuesTypeRef =
+    new TypeReference<Map<Long, Double>>() {};
+
+  public static Map<Long, Double> readMetricFromJSON(String json)
+      throws IOException {
+    return mapper.readValue(json, metricValuesTypeRef);
+  }
+}
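
Usage sketch for the helper above (the timestamp key and value are made-up
sample data):

    Map<Long, Double> values =
        TimelineUtilsExt.readMetricFromJSON("{\"1408560655000\":0.5}");
    // values.get(1408560655000L) returns 0.5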

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
new file mode 100644
index 0000000..c41b8a7
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+
+public class ApplicationHistoryStoreTestUtils {
+
+  protected ApplicationHistoryStore store;
+
+  protected void writeApplicationStartData(ApplicationId appId)
+      throws IOException {
+    store.applicationStarted(ApplicationStartData.newInstance(appId,
+      appId.toString(), "test type", "test queue", "test user", 0, 0));
+  }
+
+  protected void writeApplicationFinishData(ApplicationId appId)
+      throws IOException {
+    store.applicationFinished(ApplicationFinishData.newInstance(appId, 0,
+      appId.toString(), FinalApplicationStatus.UNDEFINED,
+      YarnApplicationState.FINISHED));
+  }
+
+  protected void writeApplicationAttemptStartData(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    store.applicationAttemptStarted(ApplicationAttemptStartData.newInstance(
+      appAttemptId, appAttemptId.toString(), 0,
+      ContainerId.newInstance(appAttemptId, 1)));
+  }
+
+  protected void writeApplicationAttemptFinishData(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    store.applicationAttemptFinished(ApplicationAttemptFinishData.newInstance(
+      appAttemptId, appAttemptId.toString(), "test tracking url",
+      FinalApplicationStatus.UNDEFINED, YarnApplicationAttemptState.FINISHED));
+  }
+
+  protected void writeContainerStartData(ContainerId containerId)
+      throws IOException {
+    store.containerStarted(ContainerStartData.newInstance(containerId,
+      Resource.newInstance(0, 0), NodeId.newInstance("localhost", 0),
+      Priority.newInstance(containerId.getId()), 0));
+  }
+
+  protected void writeContainerFinishData(ContainerId containerId)
+      throws IOException {
+    store.containerFinished(ContainerFinishData.newInstance(containerId, 0,
+      containerId.toString(), 0, ContainerState.COMPLETE));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
new file mode 100644
index 0000000..f54637f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.junit.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestApplicationHistoryClientService extends
+    ApplicationHistoryStoreTestUtils {
+
+  ApplicationHistoryServer historyServer = null;
+  String expectedLogUrl = null;
+
+  @Before
+  public void setup() {
+    historyServer = new ApplicationHistoryServer();
+    Configuration config = new YarnConfiguration();
+    expectedLogUrl = WebAppUtils.getHttpSchemePrefix(config) +
+        WebAppUtils.getAHSWebAppURLWithoutScheme(config) +
+        "/applicationhistory/logs/localhost:0/container_0_0001_01_000001/" +
+        "container_0_0001_01_000001/test user";
+    config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
+      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
+    historyServer.init(config);
+    historyServer.start();
+    store =
+        ((ApplicationHistoryManagerImpl) historyServer.getApplicationHistory())
+          .getHistoryStore();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    historyServer.stop();
+  }
+
+  @Test
+  public void testApplicationReport() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    GetApplicationReportRequest request =
+        GetApplicationReportRequest.newInstance(appId);
+    GetApplicationReportResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplicationReport(request);
+    ApplicationReport appReport = response.getApplicationReport();
+    Assert.assertNotNull(appReport);
+    Assert.assertEquals("application_0_0001", appReport.getApplicationId()
+      .toString());
+    Assert.assertEquals("test type", appReport.getApplicationType().toString());
+    Assert.assertEquals("test queue", appReport.getQueue().toString());
+  }
+
+  @Test
+  public void testApplications() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    ApplicationId appId1 = ApplicationId.newInstance(0, 2);
+    writeApplicationStartData(appId1);
+    writeApplicationFinishData(appId1);
+    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
+    GetApplicationsResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplications(request);
+    List<ApplicationReport> appReport = response.getApplicationList();
+    Assert.assertNotNull(appReport);
+    Assert.assertEquals(appId, appReport.get(0).getApplicationId());
+    Assert.assertEquals(appId1, appReport.get(1).getApplicationId());
+  }
+
+  @Test
+  public void testApplicationAttemptReport() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    writeApplicationAttemptStartData(appAttemptId);
+    writeApplicationAttemptFinishData(appAttemptId);
+    GetApplicationAttemptReportRequest request =
+        GetApplicationAttemptReportRequest.newInstance(appAttemptId);
+    GetApplicationAttemptReportResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplicationAttemptReport(request);
+    ApplicationAttemptReport attemptReport =
+        response.getApplicationAttemptReport();
+    Assert.assertNotNull(attemptReport);
+    Assert.assertEquals("appattempt_0_0001_000001", attemptReport
+      .getApplicationAttemptId().toString());
+  }
+
+  @Test
+  public void testApplicationAttempts() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ApplicationAttemptId appAttemptId1 =
+        ApplicationAttemptId.newInstance(appId, 2);
+    writeApplicationAttemptStartData(appAttemptId);
+    writeApplicationAttemptFinishData(appAttemptId);
+    writeApplicationAttemptStartData(appAttemptId1);
+    writeApplicationAttemptFinishData(appAttemptId1);
+    GetApplicationAttemptsRequest request =
+        GetApplicationAttemptsRequest.newInstance(appId);
+    GetApplicationAttemptsResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplicationAttempts(request);
+    List<ApplicationAttemptReport> attemptReports =
+        response.getApplicationAttemptList();
+    Assert.assertNotNull(attemptReports);
+    Assert.assertEquals(appAttemptId, attemptReports.get(0)
+      .getApplicationAttemptId());
+    Assert.assertEquals(appAttemptId1, attemptReports.get(1)
+      .getApplicationAttemptId());
+  }
+
+  @Test
+  public void testContainerReport() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    writeContainerStartData(containerId);
+    writeContainerFinishData(containerId);
+    writeApplicationFinishData(appId);
+    GetContainerReportRequest request =
+        GetContainerReportRequest.newInstance(containerId);
+    GetContainerReportResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getContainerReport(request);
+    ContainerReport container = response.getContainerReport();
+    Assert.assertNotNull(container);
+    Assert.assertEquals(containerId, container.getContainerId());
+    Assert.assertEquals(expectedLogUrl, container.getLogUrl());
+  }
+
+  @Test
+  public void testContainers() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 2);
+    writeContainerStartData(containerId);
+    writeContainerFinishData(containerId);
+    writeContainerStartData(containerId1);
+    writeContainerFinishData(containerId1);
+    writeApplicationFinishData(appId);
+    GetContainersRequest request =
+        GetContainersRequest.newInstance(appAttemptId);
+    GetContainersResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getContainers(request);
+    List<ContainerReport> containers = response.getContainerList();
+    Assert.assertNotNull(containers);
+    Assert.assertEquals(containerId, containers.get(1).getContainerId());
+    Assert.assertEquals(containerId1, containers.get(0).getContainerId());
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
new file mode 100644
index 0000000..fec2bf3
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestApplicationHistoryManagerImpl extends
+    ApplicationHistoryStoreTestUtils {
+  ApplicationHistoryManagerImpl applicationHistoryManagerImpl = null;
+
+  @Before
+  public void setup() throws Exception {
+    Configuration config = new Configuration();
+    config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
+      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
+    applicationHistoryManagerImpl = new ApplicationHistoryManagerImpl();
+    applicationHistoryManagerImpl.init(config);
+    applicationHistoryManagerImpl.start();
+    store = applicationHistoryManagerImpl.getHistoryStore();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    applicationHistoryManagerImpl.stop();
+  }
+
+  @Test
+  public void testApplicationReport() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    writeApplicationAttemptStartData(appAttemptId);
+    writeApplicationAttemptFinishData(appAttemptId);
+    ApplicationReport appReport =
+        applicationHistoryManagerImpl.getApplication(appId);
+    Assert.assertNotNull(appReport);
+    Assert.assertEquals(appId, appReport.getApplicationId());
+    Assert.assertEquals(appAttemptId,
+      appReport.getCurrentApplicationAttemptId());
+    Assert.assertEquals(appAttemptId.toString(), appReport.getHost());
+    Assert.assertEquals("test type", appReport.getApplicationType().toString());
+    Assert.assertEquals("test queue", appReport.getQueue().toString());
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
new file mode 100644
index 0000000..d6d20af
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.After;
+import org.junit.Test;
+
+public class TestApplicationHistoryServer {
+
+  ApplicationHistoryServer historyServer = null;
+
+  // Exercise init/start/stop of ApplicationHistoryServer; the service state
+  // should transition at each step.
+  @Test(timeout = 50000)
+  public void testStartStopServer() throws Exception {
+    historyServer = new ApplicationHistoryServer();
+    Configuration config = new YarnConfiguration();
+    historyServer.init(config);
+    assertEquals(STATE.INITED, historyServer.getServiceState());
+    assertEquals(3, historyServer.getServices().size());
+    ApplicationHistoryClientService historyService =
+        historyServer.getClientService();
+    assertNotNull(historyServer.getClientService());
+    assertEquals(STATE.INITED, historyService.getServiceState());
+
+    historyServer.start();
+    assertEquals(STATE.STARTED, historyServer.getServiceState());
+    assertEquals(STATE.STARTED, historyService.getServiceState());
+    historyServer.stop();
+    assertEquals(STATE.STOPPED, historyServer.getServiceState());
+  }
+
+  // launchAppHistoryServer should bring the daemon up without calling
+  // System.exit(); any exit, even with status 0, fails the test.
+  @Test(timeout = 60000)
+  public void testLaunch() throws Exception {
+
+    ExitUtil.disableSystemExit();
+    try {
+      historyServer =
+          ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
+    } catch (ExitUtil.ExitException e) {
+      assertEquals(0, e.status);
+      ExitUtil.resetFirstExitException();
+      fail();
+    }
+  }
+
+  @After
+  public void stop() {
+    if (historyServer != null) {
+      historyServer.stop();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
new file mode 100644
index 0000000..bc16d36
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.junit.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestFileSystemApplicationHistoryStore extends
+    ApplicationHistoryStoreTestUtils {
+
+  private FileSystem fs;
+  private Path fsWorkingPath;
+
+  @Before
+  public void setup() throws Exception {
+    fs = new RawLocalFileSystem();
+    Configuration conf = new Configuration();
+    fs.initialize(new URI("/"), conf);
+    fsWorkingPath = new Path("Test");
+    fs.delete(fsWorkingPath, true);
+    conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI, fsWorkingPath.toString());
+    store = new FileSystemApplicationHistoryStore();
+    store.init(conf);
+    store.start();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    store.stop();
+    fs.delete(fsWorkingPath, true);
+    fs.close();
+  }
+
+  @Test
+  public void testReadWriteHistoryData() throws IOException {
+    testWriteHistoryData(5);
+    testReadHistoryData(5);
+  }
+
+  private void testWriteHistoryData(int num) throws IOException {
+    testWriteHistoryData(num, false, false);
+  }
+  
+  private void testWriteHistoryData(
+      int num, boolean missingContainer, boolean missingApplicationAttempt)
+          throws IOException {
+    // write application history data
+    for (int i = 1; i <= num; ++i) {
+      ApplicationId appId = ApplicationId.newInstance(0, i);
+      writeApplicationStartData(appId);
+
+      // write application attempt history data
+      for (int j = 1; j <= num; ++j) {
+        ApplicationAttemptId appAttemptId =
+            ApplicationAttemptId.newInstance(appId, j);
+        writeApplicationAttemptStartData(appAttemptId);
+
+        if (missingApplicationAttempt && j == num) {
+          continue;
+        }
+        // write container history data
+        for (int k = 1; k <= num; ++k) {
+          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+          writeContainerStartData(containerId);
+          if (missingContainer && k == num) {
+            continue;
+          }
+          writeContainerFinishData(containerId);
+        }
+        writeApplicationAttemptFinishData(appAttemptId);
+      }
+      writeApplicationFinishData(appId);
+    }
+  }
+
+  private void testReadHistoryData(int num) throws IOException {
+    testReadHistoryData(num, false, false);
+  }
+  
+  private void testReadHistoryData(
+      int num, boolean missingContainer, boolean missingApplicationAttempt)
+          throws IOException {
+    // read application history data
+    Assert.assertEquals(num, store.getAllApplications().size());
+    for (int i = 1; i <= num; ++i) {
+      ApplicationId appId = ApplicationId.newInstance(0, i);
+      ApplicationHistoryData appData = store.getApplication(appId);
+      Assert.assertNotNull(appData);
+      Assert.assertEquals(appId.toString(), appData.getApplicationName());
+      Assert.assertEquals(appId.toString(), appData.getDiagnosticsInfo());
+
+      // read application attempt history data
+      Assert.assertEquals(num, store.getApplicationAttempts(appId).size());
+      for (int j = 1; j <= num; ++j) {
+        ApplicationAttemptId appAttemptId =
+            ApplicationAttemptId.newInstance(appId, j);
+        ApplicationAttemptHistoryData attemptData =
+            store.getApplicationAttempt(appAttemptId);
+        Assert.assertNotNull(attemptData);
+        Assert.assertEquals(appAttemptId.toString(), attemptData.getHost());
+        
+        if (missingApplicationAttempt && j == num) {
+          Assert.assertNull(attemptData.getDiagnosticsInfo());
+          continue;
+        }
+        Assert.assertEquals(appAttemptId.toString(),
+            attemptData.getDiagnosticsInfo());
+
+        // read container history data
+        Assert.assertEquals(num, store.getContainers(appAttemptId).size());
+        for (int k = 1; k <= num; ++k) {
+          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+          ContainerHistoryData containerData = store.getContainer(containerId);
+          Assert.assertNotNull(containerData);
+          Assert.assertEquals(Priority.newInstance(containerId.getId()),
+            containerData.getPriority());
+          if (missingContainer && k == num) {
+            Assert.assertNull(containerData.getDiagnosticsInfo());
+          } else {
+            Assert.assertEquals(containerId.toString(),
+                containerData.getDiagnosticsInfo());
+          }
+        }
+        ContainerHistoryData masterContainer =
+            store.getAMContainer(appAttemptId);
+        Assert.assertNotNull(masterContainer);
+        Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
+          masterContainer.getContainerId());
+      }
+    }
+  }
+
+  @Test
+  public void testWriteAfterApplicationFinish() throws IOException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    // write application attempt history data
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    try {
+      writeApplicationAttemptStartData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is not opened"));
+    }
+    try {
+      writeApplicationAttemptFinishData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is not opened"));
+    }
+    // write container history data
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    try {
+      writeContainerStartData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is not opened"));
+    }
+    try {
+      writeContainerFinishData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is not opened"));
+    }
+  }
+
+  @Test
+  public void testMassiveWriteContainerHistoryData() throws IOException {
+    long mb = 1024 * 1024;
+    long usedDiskBefore = fs.getContentSummary(fsWorkingPath).getLength() / mb;
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    for (int i = 1; i <= 100000; ++i) {
+      ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
+      writeContainerStartData(containerId);
+      writeContainerFinishData(containerId);
+    }
+    writeApplicationFinishData(appId);
+    long usedDiskAfter = fs.getContentSummary(fsWorkingPath).getLength() / mb;
+    Assert.assertTrue((usedDiskAfter - usedDiskBefore) < 20);
+  }
+
+  @Test
+  public void testMissingContainerHistoryData() throws IOException {
+    testWriteHistoryData(3, true, false);
+    testReadHistoryData(3, true, false);
+  }
+  
+  @Test
+  public void testMissingApplicationAttemptHistoryData() throws IOException {
+    testWriteHistoryData(3, false, true);
+    testReadHistoryData(3, false, true);
+  }
+}


[10/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/index.rst
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/index.rst b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/index.rst
new file mode 100644
index 0000000..12327a9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/index.rst
@@ -0,0 +1,1247 @@
+.. module:: psutil
+   :synopsis: psutil module
+.. moduleauthor:: Giampaolo Rodola' <gr...@gmail.com>
+
+.. warning::
+
+   This documentation refers to the new 2.X version of psutil.
+   Instructions on how to port existing 1.2.1 code are available
+   `here <http://grodola.blogspot.com/2014/01/psutil-20-porting.html>`__.
+   Old 1.2.1 documentation is still available
+   `here <https://code.google.com/p/psutil/wiki/Documentation>`__.
+
+psutil documentation
+====================
+
+Quick links
+-----------
+
+* `Home page <http://code.google.com/p/psutil>`__
+* `Blog <http://grodola.blogspot.com/search/label/psutil>`__
+* `Download <https://pypi.python.org/pypi?:action=display&name=psutil#downloads>`__
+* `Forum <http://groups.google.com/group/psutil/topics>`__
+* `What's new <https://psutil.googlecode.com/hg/HISTORY>`__
+
+About
+-----
+
+From the project's home page:
+
+  psutil (python system and process utilities) is a cross-platform library for
+  retrieving information on running
+  **processes** and **system utilization** (CPU, memory, disks, network) in
+  **Python**.
+  It is useful mainly for **system monitoring**, **profiling** and **limiting
+  process resources** and **management of running processes**.
+  It implements many functionalities offered by command line tools
+  such as: *ps, top, lsof, netstat, ifconfig, who, df, kill, free, nice,
+  ionice, iostat, iotop, uptime, pidof, tty, taskset, pmap*.
+  It currently supports **Linux, Windows, OSX, FreeBSD** and **Sun Solaris**,
+  both **32-bit** and **64-bit** architectures, with Python versions from
+  **2.4** to **3.4**.
+  `Pypy <http://pypy.org/>`__ is also known to work.
+
+The psutil documentation you're reading is distributed as a single HTML page.
+
+System related functions
+========================
+
+CPU
+---
+
+.. function:: cpu_times(percpu=False)
+
+  Return system CPU times as a namedtuple.
+  Every attribute represents the seconds the CPU has spent in the given mode.
+  Attribute availability varies depending on the platform:
+
+  - **user**
+  - **system**
+  - **idle**
+  - **nice** *(UNIX)*
+  - **iowait** *(Linux)*
+  - **irq** *(Linux, FreeBSD)*
+  - **softirq** *(Linux)*
+  - **steal** *(Linux 2.6.11+)*
+  - **guest** *(Linux 2.6.24+)*
+  - **guest_nice** *(Linux 3.2.0+)*
+
+  When *percpu* is ``True`` return a list of namedtuples, one for each logical
+  CPU on the system.
+  The first element of the list refers to the first CPU, the second to the
+  second CPU, and so on.
+  The order of the list is consistent across calls.
+  Example output on Linux:
+
+    >>> import psutil
+    >>> psutil.cpu_times()
+    scputimes(user=17411.7, nice=77.99, system=3797.02, idle=51266.57, iowait=732.58, irq=0.01, softirq=142.43, steal=0.0, guest=0.0, guest_nice=0.0)
+
+.. function:: cpu_percent(interval=None, percpu=False)
+
+  Return a float representing the current system-wide CPU utilization as a
+  percentage. When *interval* is > ``0.0`` compares system CPU times elapsed
+  before and after the interval (blocking).
+  When *interval* is ``0.0`` or ``None`` compares system CPU times elapsed
+  since last call or module import, returning immediately.
+  That means the first time this is called it will return a meaningless ``0.0``
+  value which you are supposed to ignore.
+  In this case it is recommended, for accuracy, to call this function with at
+  least ``0.1`` seconds between calls.
+  When *percpu* is ``True`` return a list of floats representing the
+  utilization as a percentage for each CPU.
+  The first element of the list refers to the first CPU, the second to the
+  second CPU, and so on. The order of the list is consistent across calls.
+
+    >>> import psutil
+    >>> # blocking
+    >>> psutil.cpu_percent(interval=1)
+    2.0
+    >>> # non-blocking (percentage since last call)
+    >>> psutil.cpu_percent(interval=None)
+    2.9
+    >>> # blocking, per-cpu
+    >>> psutil.cpu_percent(interval=1, percpu=True)
+    [2.0, 1.0]
+    >>>
+
+  .. warning::
+
+    the first time this function is called with *interval* = ``0.0`` or ``None``
+    it will return a meaningless ``0.0`` value which you are supposed to
+    ignore.
+
+.. function:: cpu_times_percent(interval=None, percpu=False)
+
+  Same as :func:`cpu_percent()` but provides utilization percentages for each
+  specific CPU time as is returned by
+  :func:`psutil.cpu_times(percpu=True)<cpu_times()>`.
+  *interval* and
+  *percpu* arguments have the same meaning as in :func:`cpu_percent()`.
+
+  .. warning::
+
+    the first time this function is called with *interval* = ``0.0`` or
+    ``None`` it will return a meaningless ``0.0`` value which you are supposed
+    to ignore.
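+
+  Example (Linux; output values are illustrative):
+
+    >>> import psutil
+    >>> psutil.cpu_times_percent(interval=1)
+    scputimes(user=1.5, nice=0.0, system=0.5, idle=98.0, iowait=0.0, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)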
+
+.. function:: cpu_count(logical=True)
+
+    Return the number of logical CPUs in the system (same as
+    `os.cpu_count() <http://docs.python.org/3/library/os.html#os.cpu_count>`__
+    in Python 3.4).
+    If *logical* is ``False`` return the number of physical cores only (hyper
+    thread CPUs are excluded). Return ``None`` if undetermined.
+
+      >>> import psutil
+      >>> psutil.cpu_count()
+      4
+      >>> psutil.cpu_count(logical=False)
+      2
+      >>>
+
+Memory
+------
+
+.. function:: virtual_memory()
+
+  Return statistics about system memory usage as a namedtuple including the
+  following fields, expressed in bytes:
+
+  - **total**: total physical memory available.
+  - **available**: the actual amount of available memory that can be given
+    instantly to processes that request more memory in bytes; this is
+    calculated by summing different memory values depending on the platform
+    (e.g. free + buffers + cached on Linux) and it is supposed to be used to
+    monitor actual memory usage in a cross platform fashion.
+  - **percent**: the percentage usage calculated as
+    ``(total - available) / total * 100``.
+  - **used**: memory used, calculated differently depending on the platform and
+    designed for informational purposes only.
+  - **free**: memory not being used at all (zeroed) that is readily available;
+    note that this doesn't reflect the actual memory available (use 'available'
+    instead).
+
+  Platform-specific fields:
+
+  - **active**: (UNIX): memory currently in use or very recently used, and so
+    it is in RAM.
+  - **inactive**: (UNIX): memory that is marked as not used.
+  - **buffers**: (Linux, BSD): cache for things like file system metadata.
+  - **cached**: (Linux, BSD): cache for various things.
+  - **wired**: (BSD, OSX): memory that is marked to always stay in RAM. It is
+    never moved to disk.
+  - **shared**: (BSD): memory that may be simultaneously accessed by multiple
+    processes.
+
+  The sum of **used** and **available** does not necessarily equal **total**.
+  On Windows **available** and **free** are the same.
+  See the `examples/meminfo.py <http://code.google.com/p/psutil/source/browse/examples/meminfo.py>`__
+  script for an example of how to convert bytes into a human-readable form.
+
+    >>> import psutil
+    >>> mem = psutil.virtual_memory()
+    >>> mem
+    svmem(total=8374149120L, available=1247768576L, percent=85.1, used=8246628352L, free=127520768L, active=3208777728, inactive=1133408256, buffers=342413312L, cached=777834496)
+    >>>
+    >>> THRESHOLD = 100 * 1024 * 1024  # 100MB
+    >>> if mem.available <= THRESHOLD:
+    ...     print("warning")
+    ...
+    >>>
+
+
+.. function:: swap_memory()
+
+  Return system swap memory statistics as a namedtuple including the following
+  fields:
+
+  * **total**: total swap memory in bytes
+  * **used**: used swap memory in bytes
+  * **free**: free swap memory in bytes
+  * **percent**: the percentage usage
+  * **sin**: the number of bytes the system has swapped in from disk
+    (cumulative)
+  * **sout**: the number of bytes the system has swapped out from disk
+    (cumulative)
+
+  **sin** and **sout** on Windows are meaningless and are always set to ``0``.
+  See the `examples/meminfo.py <http://code.google.com/p/psutil/source/browse/examples/meminfo.py>`__
+  script for an example of how to convert bytes into a human-readable form.
+
+    >>> import psutil
+    >>> psutil.swap_memory()
+    sswap(total=2097147904L, used=886620160L, free=1210527744L, percent=42.3, sin=1050411008, sout=1906720768)
+
+Disks
+-----
+
+.. function:: disk_partitions(all=False)
+
+  Return all mounted disk partitions as a list of namedtuples including device,
+  mount point and filesystem type, similar to the "df" command on UNIX. If the
+  *all* parameter is ``False`` return physical devices only (e.g. hard disks,
+  cd-rom drives, USB keys) and ignore all others (e.g. memory partitions such as
+  `/dev/shm <http://www.cyberciti.biz/tips/what-is-devshm-and-its-practical-usage.html>`__).
+  The namedtuple's **fstype** field is a string which varies depending on the
+  platform.
+  On Linux it can be one of the values found in /proc/filesystems (e.g.
+  ``'ext3'`` for an ext3 hard drive or ``'iso9660'`` for the CD-ROM drive).
+  On Windows it is determined via
+  `GetDriveType <http://msdn.microsoft.com/en-us/library/aa364939(v=vs.85).aspx>`__
+  and can be either ``"removable"``, ``"fixed"``, ``"remote"``, ``"cdrom"``,
+  ``"unmounted"`` or ``"ramdisk"``. On OSX and FreeBSD it is retrieved via
+  `getfsstat(2) <http://www.manpagez.com/man/2/getfsstat/>`__. See the
+  `disk_usage.py <http://code.google.com/p/psutil/source/browse/examples/disk_usage.py>`__
+  script for an example usage.
+
+    >>> import psutil
+    >>> psutil.disk_partitions()
+    [sdiskpart(device='/dev/sda3', mountpoint='/', fstype='ext4', opts='rw,errors=remount-ro'),
+     sdiskpart(device='/dev/sda7', mountpoint='/home', fstype='ext4', opts='rw')]
+
+.. function:: disk_usage(path)
+
+  Return disk usage statistics about the given *path* as a namedtuple including
+  **total**, **used** and **free** space expressed in bytes, plus the
+  **percentage** usage.
+  `OSError <http://docs.python.org/3/library/exceptions.html#OSError>`__ is
+  raised if *path* does not exist. See the
+  `examples/disk_usage.py <http://code.google.com/p/psutil/source/browse/examples/disk_usage.py>`__
+  script for an example usage. Starting from
+  `Python 3.3 <http://bugs.python.org/issue12442>`__ this is also
+  available as
+  `shutil.disk_usage() <http://docs.python.org/3/library/shutil.html#shutil.disk_usage>`__.
+
+    >>> import psutil
+    >>> psutil.disk_usage('/')
+    sdiskusage(total=21378641920, used=4809781248, free=15482871808, percent=22.5)
+
+.. function:: disk_io_counters(perdisk=False)
+
+  Return system-wide disk I/O statistics as a namedtuple including the
+  following fields:
+
+  - **read_count**: number of reads
+  - **write_count**: number of writes
+  - **read_bytes**: number of bytes read
+  - **write_bytes**: number of bytes written
+  - **read_time**: time spent reading from disk (in milliseconds)
+  - **write_time**: time spent writing to disk (in milliseconds)
+
+  If *perdisk* is ``True`` return the same information for every physical disk
+  installed on the system as a dictionary with partition names as the keys and
+  the namedtuple described above as the values.
+  See `examples/iotop.py <http://code.google.com/p/psutil/source/browse/examples/iotop.py>`__
+  for an example application.
+
+    >>> import psutil
+    >>> psutil.disk_io_counters()
+    sdiskio(read_count=8141, write_count=2431, read_bytes=290203, write_bytes=537676, read_time=5868, write_time=94922)
+    >>>
+    >>> psutil.disk_io_counters(perdisk=True)
+    {'sda1': sdiskio(read_count=920, write_count=1, read_bytes=2933248, write_bytes=512, read_time=6016, write_time=4),
+     'sda2': sdiskio(read_count=18707, write_count=8830, read_bytes=6060, write_bytes=3443, read_time=24585, write_time=1572),
+     'sdb1': sdiskio(read_count=161, write_count=0, read_bytes=786432, write_bytes=0, read_time=44, write_time=0)}
+
+Network
+-------
+
+.. function:: net_io_counters(pernic=False)
+
+  Return system-wide network I/O statistics as a namedtuple including the
+  following attributes:
+
+  - **bytes_sent**: number of bytes sent
+  - **bytes_recv**: number of bytes received
+  - **packets_sent**: number of packets sent
+  - **packets_recv**: number of packets received
+  - **errin**: total number of errors while receiving
+  - **errout**: total number of errors while sending
+  - **dropin**: total number of incoming packets which were dropped
+  - **dropout**: total number of outgoing packets which were dropped (always 0
+    on OSX and BSD)
+
+  If *pernic* is ``True`` return the same information for every network
+  interface installed on the system as a dictionary with network interface
+  names as the keys and the namedtuple described above as the values.
+  See `examples/nettop.py <http://code.google.com/p/psutil/source/browse/examples/nettop.py>`__
+  for an example application.
+
+    >>> import psutil
+    >>> psutil.net_io_counters()
+    snetio(bytes_sent=14508483, bytes_recv=62749361, packets_sent=84311, packets_recv=94888, errin=0, errout=0, dropin=0, dropout=0)
+    >>>
+    >>> psutil.net_io_counters(pernic=True)
+    {'lo': snetio(bytes_sent=547971, bytes_recv=547971, packets_sent=5075, packets_recv=5075, errin=0, errout=0, dropin=0, dropout=0),
+    'wlan0': snetio(bytes_sent=13921765, bytes_recv=62162574, packets_sent=79097, packets_recv=89648, errin=0, errout=0, dropin=0, dropout=0)}
+
+.. function:: net_connections(kind='inet')
+
+  Return system-wide socket connections as a list of namedtuples.
+  Every namedtuple provides 7 attributes:
+
+  - **fd**: the socket file descriptor, if retrievable, else ``-1``.
+    If the connection refers to the current process this may be passed to
+    `socket.fromfd() <http://docs.python.org/library/socket.html#socket.fromfd>`__
+    to obtain a usable socket object.
+  - **family**: the address family, either `AF_INET
+    <http://docs.python.org//library/socket.html#socket.AF_INET>`__,
+    `AF_INET6 <http://docs.python.org//library/socket.html#socket.AF_INET6>`__
+    or `AF_UNIX <http://docs.python.org//library/socket.html#socket.AF_UNIX>`__.
+  - **type**: the address type, either `SOCK_STREAM
+    <http://docs.python.org//library/socket.html#socket.SOCK_STREAM>`__ or
+    `SOCK_DGRAM
+    <http://docs.python.org//library/socket.html#socket.SOCK_DGRAM>`__.
+  - **laddr**: the local address as a ``(ip, port)`` tuple or a ``path``
+    in case of AF_UNIX sockets.
+  - **raddr**: the remote address as a ``(ip, port)`` tuple or an absolute
+    ``path`` in case of UNIX sockets.
+    When the remote endpoint is not connected you'll get an empty tuple
+    (AF_INET*) or ``None`` (AF_UNIX).
+    On Linux AF_UNIX sockets will always have this set to ``None``.
+  - **status**: represents the status of a TCP connection. The return value
+    is one of the :data:`psutil.CONN_* <psutil.CONN_ESTABLISHED>` constants
+    (a string).
+    For UDP and UNIX sockets this is always going to be
+    :const:`psutil.CONN_NONE`.
+  - **pid**: the PID of the process which opened the socket, if retrievable,
+    else ``None``. On some platforms (e.g. Linux) the availability of this
+    field changes depending on process privileges (root is needed).
+
+  The *kind* parameter is a string which filters for connections that fit the
+  following criteria:
+
+  .. table::
+
+   +----------------+-----------------------------------------------------+
+   | **Kind value** | **Connections using**                               |
+   +================+=====================================================+
+   | "inet"         | IPv4 and IPv6                                       |
+   +----------------+-----------------------------------------------------+
+   | "inet4"        | IPv4                                                |
+   +----------------+-----------------------------------------------------+
+   | "inet6"        | IPv6                                                |
+   +----------------+-----------------------------------------------------+
+   | "tcp"          | TCP                                                 |
+   +----------------+-----------------------------------------------------+
+   | "tcp4"         | TCP over IPv4                                       |
+   +----------------+-----------------------------------------------------+
+   | "tcp6"         | TCP over IPv6                                       |
+   +----------------+-----------------------------------------------------+
+   | "udp"          | UDP                                                 |
+   +----------------+-----------------------------------------------------+
+   | "udp4"         | UDP over IPv4                                       |
+   +----------------+-----------------------------------------------------+
+   | "udp6"         | UDP over IPv6                                       |
+   +----------------+-----------------------------------------------------+
+   | "unix"         | UNIX socket (both UDP and TCP protocols)            |
+   +----------------+-----------------------------------------------------+
+   | "all"          | the sum of all the possible families and protocols  |
+   +----------------+-----------------------------------------------------+
+
+  To get per-process connections use :meth:`Process.connections`.
+  Also, see
+  `netstat.py sample script <https://code.google.com/p/psutil/source/browse/examples/netstat.py>`__.
+  Example:
+
+    >>> import psutil
+    >>> psutil.net_connections()
+    [pconn(fd=115, family=2, type=1, laddr=('10.0.0.1', 48776), raddr=('93.186.135.91', 80), status='ESTABLISHED', pid=1254),
+     pconn(fd=117, family=2, type=1, laddr=('10.0.0.1', 43761), raddr=('72.14.234.100', 80), status='CLOSING', pid=2987),
+     pconn(fd=-1, family=2, type=1, laddr=('10.0.0.1', 60759), raddr=('72.14.234.104', 80), status='ESTABLISHED', pid=None),
+     pconn(fd=-1, family=2, type=1, laddr=('10.0.0.1', 51314), raddr=('72.14.234.83', 443), status='SYN_SENT', pid=None)
+     ...]
+
+  .. note:: (OSX) :class:`psutil.AccessDenied` is always raised unless running
+     as root (lsof does the same).
+  .. note:: (Solaris) UNIX sockets are not supported.
+
+  *New in 2.1.0*
+
+
+Other system info
+-----------------
+
+.. function:: users()
+
+  Return users currently connected on the system as a list of namedtuples
+  including the following fields:
+
+  - **user**: the name of the user.
+  - **terminal**: the tty or pseudo-tty associated with the user, if any,
+    else ``None``.
+  - **host**: the host name associated with the entry, if any.
+  - **started**: the creation time as a floating point number expressed in
+    seconds since the epoch.
+
+  Example::
+
+    >>> import psutil
+    >>> psutil.users()
+    [suser(name='giampaolo', terminal='pts/2', host='localhost', started=1340737536.0),
+     suser(name='giampaolo', terminal='pts/3', host='localhost', started=1340737792.0)]
+
+.. function:: boot_time()
+
+  Return the system boot time expressed in seconds since the epoch.
+  Example:
+
+  .. code-block:: python
+
+     >>> import psutil, datetime
+     >>> psutil.boot_time()
+     1389563460.0
+     >>> datetime.datetime.fromtimestamp(psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S")
+     '2014-01-12 22:51:00'
+
+Processes
+=========
+
+Functions
+---------
+
+.. function:: pids()
+
+  Return a list of currently running PIDs. To iterate over all processes
+  :func:`process_iter()` should be preferred.
+
+.. function:: pid_exists(pid)
+
+  Check whether the given PID exists in the current process list. This is
+  faster than doing ``"pid in psutil.pids()"`` and should be preferred.
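+
+  Example (the PID is illustrative):
+
+    >>> import psutil
+    >>> psutil.pid_exists(2353)
+    True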
+
+.. function:: process_iter()
+
+  Return an iterator yielding a :class:`Process` class instance for all running
+  processes on the local machine.
+  Every instance is only created once and then cached into an internal table
+  which is updated every time an element is yielded.
+  Cached :class:`Process` instances are checked for identity so that you're
+  safe in case a PID has been reused by another process, in which case the
+  cached instance is updated.
+  This should be preferred over :func:`psutil.pids()` for iterating over
+  processes.
+  Processes are returned sorted by PID. Example usage::
+
+    import psutil
+
+    for proc in psutil.process_iter():
+        try:
+            pinfo = proc.as_dict(attrs=['pid', 'name'])
+        except psutil.NoSuchProcess:
+            pass
+        else:
+            print(pinfo)
+
+.. function:: wait_procs(procs, timeout=None, callback=None)
+
+  Convenience function which waits for a list of :class:`Process` instances to
+  terminate. Return a ``(gone, alive)`` tuple indicating which processes are
+  gone and which ones are still alive. The *gone* ones will have a new
+  *returncode* attribute indicating process exit status (it may be ``None``).
+  *callback* is a function which gets called every time a process terminates
+  (a :class:`Process` instance is passed as the callback argument). The function
+  returns as soon as all processes terminate or the timeout expires. A typical
+  use case is:
+
+  - send SIGTERM to a list of processes
+  - give them some time to terminate
+  - send SIGKILL to those ones which are still alive
+
+  Example::
+
+    import psutil
+
+    def on_terminate(proc):
+        print("process {} terminated".format(proc))
+
+    procs = [...]  # a list of Process instances
+    for p in procs:
+        p.terminate()
+    gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
+    for p in alive:
+        p.kill()
+
+Exceptions
+----------
+
+.. class:: Error()
+
+  Base exception class. All other exceptions inherit from this one.
+
+.. class:: NoSuchProcess(pid, name=None, msg=None)
+
+   Raised by :class:`Process` class methods when no process with the given
+   *pid* is found in the current process list or when a process no longer
+   exists. "name" is the name the process had before disappearing
+   and gets set only if :meth:`Process.name()` was previously called.
+
+.. class:: AccessDenied(pid=None, name=None, msg=None)
+
+    Raised by :class:`Process` class methods when permission to perform an
+    action is denied. "name" is the name of the process (may be ``None``).
+
+.. class:: TimeoutExpired(seconds, pid=None, name=None, msg=None)
+
+    Raised by :meth:`Process.wait` if timeout expires and process is still
+    alive.
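+
+A minimal usage sketch (the PID is illustrative); any :class:`Process`
+method may raise these exceptions and callers should be prepared to
+handle them:
+
+  >>> import psutil
+  >>> try:
+  ...     p = psutil.Process(12345)
+  ...     name = p.name()
+  ... except psutil.NoSuchProcess:
+  ...     print("no such process")
+  ...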
+
+Process class
+-------------
+
+.. class:: Process(pid=None)
+
+  Represents an OS process with the given *pid*. If *pid* is omitted the
+  current process PID (`os.getpid() <http://docs.python.org/library/os.html#os.getpid>`__)
+  is used.
+  Raise :class:`NoSuchProcess` if *pid* does not exist.
+  When accessing methods of this class always be prepared to catch
+  :class:`NoSuchProcess` and :class:`AccessDenied` exceptions.
+  `hash() <http://docs.python.org/2/library/functions.html#hash>`__ builtin can
+  be used against instances of this class in order to identify a process
+  univocally over time (the hash is determined by mixing process PID
+  and creation time). As such it can also be used with
+  `set()s <http://docs.python.org/2/library/stdtypes.html#types-set>`__.
+
+  .. warning::
+
+    the way this class is bound to a process is uniquely via its **PID**.
+    That means that if the :class:`Process` instance is old enough and
+    the PID has been reused by another process in the meantime you might end up
+    interacting with another process.
+    The only exceptions for which process identity is pre-emptively checked
+    (via PID + creation time) and guaranteed are for
+    :meth:`nice` (set),
+    :meth:`ionice`  (set),
+    :meth:`cpu_affinity` (set),
+    :meth:`rlimit` (set),
+    :meth:`children`,
+    :meth:`parent`,
+    :meth:`suspend`,
+    :meth:`resume`,
+    :meth:`send_signal`,
+    :meth:`terminate`, and
+    :meth:`kill`
+    methods.
+    To prevent this problem for all other methods you can use
+    :meth:`is_running()` before querying the process or use
+    :func:`process_iter()` in case you're iterating over all processes.
+
+  .. attribute:: pid
+
+     The process PID.
+
+  .. method:: ppid()
+
+     The process parent PID. On Windows the return value is cached after first
+     call.
+
+  .. method:: name()
+
+     The process name. The return value is cached after first call.
+
+  .. method:: exe()
+
+     The process executable as an absolute path.
+     On some systems this may also be an empty string.
+     The return value is cached after first call.
+
+  .. method:: cmdline()
+
+     The command line this process has been called with.
+
+  .. method:: create_time()
+
+     The process creation time as a floating point number expressed in seconds
+     since the epoch, in
+     `UTC <http://en.wikipedia.org/wiki/Coordinated_universal_time>`__.
+     The return value is cached after first call.
+
+        >>> import psutil, datetime
+        >>> p = psutil.Process()
+        >>> p.create_time()
+        1307289803.47
+        >>> datetime.datetime.fromtimestamp(p.create_time()).strftime("%Y-%m-%d %H:%M:%S")
+        '2011-03-05 18:03:52'
+
+  .. method:: as_dict(attrs=[], ad_value=None)
+
+     Utility method returning process information as a dictionary.
+     If *attrs* is specified it must be a list of strings reflecting available
+     :class:`Process` attribute names (e.g. ``['cpu_times', 'name']``),
+     else all public (read only) attributes are assumed. *ad_value* is the
+     value which gets assigned to a dict key in case :class:`AccessDenied`
+     exception is raised when retrieving that particular process information.
+
+        >>> import psutil
+        >>> p = psutil.Process()
+        >>> p.as_dict(attrs=['pid', 'name', 'username'])
+        {'username': 'giampaolo', 'pid': 12366, 'name': 'python'}
+
+  .. method:: parent()
+
+     Utility method which returns the parent process as a :class:`Process`
+     object pre-emptively checking whether PID has been reused. If no parent
+     PID is known return ``None``.
+
+  .. method:: status()
+
+     The current process status as a string. The returned string is one of the
+     :data:`psutil.STATUS_*<psutil.STATUS_RUNNING>` constants.
+
+  .. method:: cwd()
+
+     The process current working directory as an absolute path.
+
+  .. method:: username()
+
+     The name of the user that owns the process. On UNIX this is calculated by
+     using the real process uid.
+
+  .. method:: uids()
+
+     The **real**, **effective** and **saved** user ids of this process as a
+     namedtuple. This is the same as
+     `os.getresuid() <http://docs.python.org//library/os.html#os.getresuid>`__
+     but can be used for every process PID.
+
+     Availability: UNIX
+
+  .. method:: gids()
+
+     The **real**, **effective** and **saved** group ids of this process as a
+     namedtuple. This is the same as
+     `os.getresgid() <http://docs.python.org//library/os.html#os.getresgid>`__
+     but can be used for every process PID.
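+
+     Example for :meth:`uids` / :meth:`gids` (values are illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.uids()
+      puids(real=1000, effective=1000, saved=1000)
+      >>> p.gids()
+      pgids(real=1000, effective=1000, saved=1000)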
+
+     Availability: UNIX
+
+  .. method:: terminal()
+
+     The terminal associated with this process, if any, else ``None``. This is
+     similar to "tty" command but can be used for every process PID.
+
+     Availability: UNIX
+
+  .. method:: nice(value=None)
+
+     Get or set process
+     `niceness <blogs.techrepublic.com.com/opensource/?p=140>`__ (priority).
+     On UNIX this is a number which usually goes from ``-20`` to ``20``.
+     The higher the nice value, the lower the priority of the process.
+
+        >>> import psutil
+        >>> p = psutil.Process()
+        >>> p.nice(10)  # set
+        >>> p.nice()  # get
+        10
+        >>>
+
+     On Windows this is available as well by using
+     `GetPriorityClass <http://msdn.microsoft.com/en-us/library/ms683211(v=vs.85).aspx>`__
+     and `SetPriorityClass <http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx>`__
+     and *value* is one of the
+     :data:`psutil.*_PRIORITY_CLASS <psutil.ABOVE_NORMAL_PRIORITY_CLASS>`
+     constants.
+     Example which increases process priority on Windows:
+
+        >>> p.nice(psutil.HIGH_PRIORITY_CLASS)
+
+     Starting from `Python 3.3 <http://bugs.python.org/issue10784>`__ this
+     same functionality is available as
+     `os.getpriority() <http://docs.python.org/3/library/os.html#os.getpriority>`__
+     and
+     `os.setpriority() <http://docs.python.org/3/library/os.html#os.setpriority>`__.
+
+  .. method:: ionice(ioclass=None, value=None)
+
+     Get or set
+     `process I/O niceness <http://friedcpu.wordpress.com/2007/07/17/why-arent-you-using-ionice-yet/>`__ (priority).
+     On Linux *ioclass* is one of the
+     :data:`psutil.IOPRIO_CLASS_*<psutil.IOPRIO_CLASS_NONE>` constants.
+     *value* is a number which goes from ``0`` to ``7``. The higher the value,
+     the lower the I/O priority of the process. On Windows only *ioclass* is
+     used and it can be set to ``2`` (normal), ``1`` (low) or ``0`` (very low).
+     The example below sets IDLE priority class for the current process,
+     meaning it will only get I/O time when no other process needs the disk:
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # set
+      >>> p.ionice()  # get
+      pionice(ioclass=3, value=0)
+      >>>
+
+     Availability: Linux and Windows > Vista
+
+  .. method:: rlimit(resource, limits=None)
+
+     Get or set process resource limits (see
+     `man prlimit <http://linux.die.net/man/2/prlimit>`__). *resource* is one of
+     the :data:`psutil.RLIMIT_* <psutil.RLIMIT_INFINITY>` constants.
+     *limits* is a ``(soft, hard)`` tuple.
+     This is the same as `resource.getrlimit() <http://docs.python.org/library/resource.html#resource.getrlimit>`__
+     and `resource.setrlimit() <http://docs.python.org/library/resource.html#resource.setrlimit>`__
+     but can be used for every process PID and only on Linux.
+     Example:
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> # process may open no more than 128 file descriptors
+      >>> p.rlimit(psutil.RLIMIT_NOFILE, (128, 128))
+      >>> # process may create files no bigger than 1024 bytes
+      >>> p.rlimit(psutil.RLIMIT_FSIZE, (1024, 1024))
+      >>> # get
+      >>> p.rlimit(psutil.RLIMIT_FSIZE)
+      (1024, 1024)
+      >>>
+
+     Availability: Linux
+
+  .. method:: io_counters()
+
+     Return process I/O statistics as a namedtuple including the number of read
+     and write operations performed by the process and the amount of bytes read
+     and written. For Linux refer to
+     `/proc filesystem documentation <https://www.kernel.org/doc/Documentation/filesystems/proc.txt>`__.
+     On BSD there's apparently no way to retrieve bytes counters, hence ``-1``
+     is returned for **read_bytes** and **write_bytes** fields. OSX is not
+     supported.
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.io_counters()
+      pio(read_count=454556, write_count=3456, read_bytes=110592, write_bytes=0)
+
+     Availability: all platforms except OSX
+
+  .. method:: num_ctx_switches()
+
+     The number of voluntary and involuntary context switches performed by
+     this process.
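+
+     Example (values are illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.num_ctx_switches()
+      pctxsw(voluntary=78, involuntary=19)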
+
+  .. method:: num_fds()
+
+     The number of file descriptors used by this process.
+
+     Availability: UNIX
+
+  .. method:: num_handles()
+
+     The number of handles used by this process.
+
+     Availability: Windows
+
+  .. method:: num_threads()
+
+     The number of threads currently used by this process.
+
+  .. method:: threads()
+
+     Return threads opened by the process as a list of namedtuples including thread
+     id and thread CPU times (user/system).
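+
+     Example (values are illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.threads()
+      [pthread(id=5234, user_time=22.46, system_time=9.28)]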
+
+  .. method:: cpu_times()
+
+     Return a tuple whose values are the process CPU **user** and **system**
+     times, i.e. the amount of time, expressed in seconds, that the process
+     has spent in
+     `user / system mode <http://stackoverflow.com/questions/556405/what-do-real-user-and-sys-mean-in-the-output-of-time1>`__.
+     This is similar to
+     `os.times() <http://docs.python.org//library/os.html#os.times>`__
+     but can be used for every process PID.
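+
+     Example (values are illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.cpu_times()
+      pcputimes(user=1.02, system=0.31)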
+
+  .. method:: cpu_percent(interval=None)
+
+     Return a float representing the process CPU utilization as a percentage.
+     When *interval* is > ``0.0`` compares process times to system CPU times
+     elapsed before and after the interval (blocking). When interval is ``0.0``
+     or ``None`` compares process times to system CPU times elapsed since last
+     call, returning immediately. That means the first time this is called it
+     will return a meaningless ``0.0`` value which you are supposed to ignore.
+     In this case it is recommended, for accuracy, to call this function a
+     second time with at least ``0.1`` seconds between calls. Example:
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>>
+      >>> # blocking
+      >>> p.cpu_percent(interval=1)
+      2.0
+      >>> # non-blocking (percentage since last call)
+      >>> p.cpu_percent(interval=None)
+      2.9
+      >>>
+
+     .. note::
+        a percentage > 100 is legitimate as it can result from a process with
+        multiple threads running on different CPU cores.
+
+     .. warning::
+        the first time this method is called with interval = ``0.0`` or
+        ``None`` it will return a meaningless ``0.0`` value which you are
+        supposed to ignore.
+
+  .. method:: cpu_affinity(cpus=None)
+
+     Get or set process current
+     `CPU affinity <http://www.linuxjournal.com/article/6799?page=0,0>`__.
+     CPU affinity consists of telling the OS to run a certain process on a
+     limited set of CPUs only. The number of eligible CPUs can be obtained with
+     ``list(range(psutil.cpu_count()))``.
+
+      >>> import psutil
+      >>> psutil.cpu_count()
+      4
+      >>> p = psutil.Process()
+      >>> p.cpu_affinity()  # get
+      [0, 1, 2, 3]
+      >>> p.cpu_affinity([0])  # set; from now on, process will run on CPU #0 only
+      >>>
+
+     Availability: Linux, Windows
+
+  .. method:: memory_info()
+
+     Return a tuple representing RSS (Resident Set Size) and VMS (Virtual
+     Memory Size) in bytes. On UNIX *rss* and *vms* are the same values shown
+     by ps. On Windows *rss* and *vms* refer to "Mem Usage" and "VM Size"
+     columns of taskmgr.exe. For more detailed memory stats use
+     :meth:`memory_info_ex`.
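+
+     Example (values are illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.memory_info()
+      pmem(rss=15491072, vms=84025344)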
+
+  .. method:: memory_info_ex()
+
+     Return a namedtuple with variable fields depending on the platform
+     representing extended memory information about the process.
+     All numbers are expressed in bytes.
+
+     +--------+---------+-------+-------+--------------------+
+     | Linux  | OSX     | BSD   | SunOS | Windows            |
+     +========+=========+=======+=======+====================+
+     | rss    | rss     | rss   | rss   | num_page_faults    |
+     +--------+---------+-------+-------+--------------------+
+     | vms    | vms     | vms   | vms   | peak_wset          |
+     +--------+---------+-------+-------+--------------------+
+     | shared | pfaults | text  |       | wset               |
+     +--------+---------+-------+-------+--------------------+
+     | text   | pageins | data  |       | peak_paged_pool    |
+     +--------+---------+-------+-------+--------------------+
+     | lib    |         | stack |       | paged_pool         |
+     +--------+---------+-------+-------+--------------------+
+     | data   |         |       |       | peak_nonpaged_pool |
+     +--------+---------+-------+-------+--------------------+
+     | dirty  |         |       |       | nonpaged_pool      |
+     +--------+---------+-------+-------+--------------------+
+     |        |         |       |       | pagefile           |
+     +--------+---------+-------+-------+--------------------+
+     |        |         |       |       | peak_pagefile      |
+     +--------+---------+-------+-------+--------------------+
+     |        |         |       |       | private            |
+     +--------+---------+-------+-------+--------------------+
+
+     Windows metrics are extracted from
+     `PROCESS_MEMORY_COUNTERS_EX <http://msdn.microsoft.com/en-us/library/windows/desktop/ms684874(v=vs.85).aspx>`__ structure.
+     Example on Linux:
+
+     >>> import psutil
+     >>> p = psutil.Process()
+     >>> p.memory_info_ex()
+     pextmem(rss=15491072, vms=84025344, shared=5206016, text=2555904, lib=0, data=9891840, dirty=0)
+
+  .. method:: memory_percent()
+
+     Compare physical system memory to process resident memory (RSS) and
+     calculate process memory utilization as a percentage.
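+
+     Example (the value is illustrative):
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.memory_percent()
+      0.7823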
+
+  .. method:: memory_maps(grouped=True)
+
+     Return the process's mapped memory regions as a list of namedtuples whose
+     fields are variable depending on the platform. As such, portable
+     applications should rely on namedtuple's `path` and `rss` fields only.
+     This method is useful to obtain a detailed representation of process
+     memory usage as explained
+     `here <http://bmaurer.blogspot.it/2006/03/memory-usage-with-smaps.html>`__.
+     If *grouped* is ``True`` the mapped regions with the same *path* are
+     grouped together and the different memory fields are summed.  If *grouped*
+     is ``False`` every mapped region is shown as a single entity and the
+     namedtuple will also include the mapped region's address space (*addr*)
+     and permission set (*perms*).
+     See `examples/pmap.py <http://code.google.com/p/psutil/source/browse/examples/pmap.py>`__
+     for an example application.
+
+      >>> import psutil
+      >>> p = psutil.Process()
+      >>> p.memory_maps()
+      [pmmap_grouped(path='/lib/x8664-linux-gnu/libutil-2.15.so', rss=16384, anonymous=8192, swap=0),
+       pmmap_grouped(path='/lib/x8664-linux-gnu/libc-2.15.so', rss=6384, anonymous=15, swap=0),
+       pmmap_grouped(path='/lib/x8664-linux-gnu/libcrypto.so.0.1', rss=34124, anonymous=1245, swap=0),
+       pmmap_grouped(path='[heap]', rss=54653, anonymous=8192, swap=0),
+       pmmap_grouped(path='[stack]', rss=1542, anonymous=166, swap=0),
+       ...]
+      >>>
+
+  .. method:: children(recursive=False)
+
+     Return the children of this process as a list of :class:`Process` objects,
+     pre-emptively checking whether PID has been reused. If *recursive* is
+     ``True`` return all the descendants (grandchildren included).
+     Example assuming *A == this process*:
+     ::
+
+          A ─┐
+             │
+             ├─ B (child) ─┐
+             │             └─ X (grandchild) ─┐
+             │                                └─ Y (great grandchild)
+             ├─ C (child)
+             └─ D (child)
+
+          >>> p.children()
+          B, C, D
+          >>> p.children(recursive=True)
+          B, X, Y, C, D
+
+     Note that in the example above if process X disappears process Y won't be
+     returned either as the reference to process A is lost.
+
+  .. method:: open_files()
+
+     Return regular files opened by the process as a list of namedtuples including
+     the absolute file name and the file descriptor number (on Windows this is
+     always ``-1``). Example:
+
+      >>> import psutil
+      >>> f = open('file.ext', 'w')
+      >>> p = psutil.Process()
+      >>> p.open_files()
+      [popenfile(path='/home/giampaolo/svn/psutil/file.ext', fd=3)]
+
+  .. method:: connections(kind="inet")
+
+    Return socket connections opened by the process as a list of namedtuples.
+    To get system-wide connections use :func:`psutil.net_connections()`.
+    Every namedtuple provides 6 attributes:
+
+    - **fd**: the socket file descriptor. This can be passed to
+      `socket.fromfd() <http://docs.python.org/library/socket.html#socket.fromfd>`__
+      to obtain a usable socket object.
+      This is only available on UNIX; on Windows ``-1`` is always returned.
+    - **family**: the address family, either `AF_INET
+      <http://docs.python.org//library/socket.html#socket.AF_INET>`__,
+      `AF_INET6 <http://docs.python.org//library/socket.html#socket.AF_INET6>`__
+      or `AF_UNIX <http://docs.python.org//library/socket.html#socket.AF_UNIX>`__.
+    - **type**: the address type, either `SOCK_STREAM
+      <http://docs.python.org//library/socket.html#socket.SOCK_STREAM>`__ or
+      `SOCK_DGRAM
+      <http://docs.python.org//library/socket.html#socket.SOCK_DGRAM>`__.
+    - **laddr**: the local address as a ``(ip, port)`` tuple or a ``path``
+      in case of AF_UNIX sockets.
+    - **raddr**: the remote address as a ``(ip, port)`` tuple or an absolute
+      ``path`` in case of UNIX sockets.
+      When the remote endpoint is not connected you'll get an empty tuple
+      (AF_INET) or ``None`` (AF_UNIX).
+      On Linux AF_UNIX sockets will always have this set to ``None``.
+    - **status**: represents the status of a TCP connection. The return value
+      is one of the :data:`psutil.CONN_* <psutil.CONN_ESTABLISHED>` constants.
+      For UDP and UNIX sockets this is always going to be
+      :const:`psutil.CONN_NONE`.
+
+    The *kind* parameter is a string which filters for connections that fit the
+    following criteria:
+
+    .. table::
+
+     +----------------+-----------------------------------------------------+
+     | **Kind value** | **Connections using**                               |
+     +================+=====================================================+
+     | "inet"         | IPv4 and IPv6                                       |
+     +----------------+-----------------------------------------------------+
+     | "inet4"        | IPv4                                                |
+     +----------------+-----------------------------------------------------+
+     | "inet6"        | IPv6                                                |
+     +----------------+-----------------------------------------------------+
+     | "tcp"          | TCP                                                 |
+     +----------------+-----------------------------------------------------+
+     | "tcp4"         | TCP over IPv4                                       |
+     +----------------+-----------------------------------------------------+
+     | "tcp6"         | TCP over IPv6                                       |
+     +----------------+-----------------------------------------------------+
+     | "udp"          | UDP                                                 |
+     +----------------+-----------------------------------------------------+
+     | "udp4"         | UDP over IPv4                                       |
+     +----------------+-----------------------------------------------------+
+     | "udp6"         | UDP over IPv6                                       |
+     +----------------+-----------------------------------------------------+
+     | "unix"         | UNIX socket (both UDP and TCP protocols)            |
+     +----------------+-----------------------------------------------------+
+     | "all"          | the sum of all the possible families and protocols  |
+     +----------------+-----------------------------------------------------+
+
+    Example:
+
+      >>> import psutil
+      >>> p = psutil.Process(1694)
+      >>> p.name()
+      'firefox'
+      >>> p.connections()
+      [pconn(fd=115, family=2, type=1, laddr=('10.0.0.1', 48776), raddr=('93.186.135.91', 80), status='ESTABLISHED'),
+       pconn(fd=117, family=2, type=1, laddr=('10.0.0.1', 43761), raddr=('72.14.234.100', 80), status='CLOSING'),
+       pconn(fd=119, family=2, type=1, laddr=('10.0.0.1', 60759), raddr=('72.14.234.104', 80), status='ESTABLISHED'),
+       pconn(fd=123, family=2, type=1, laddr=('10.0.0.1', 51314), raddr=('72.14.234.83', 443), status='SYN_SENT')]
+
+  .. method:: is_running()
+
+     Return whether this process is running in the current process list.
+     This is reliable even if the process is gone and its PID has been reused
+     by another process; therefore it should be preferred over doing
+     ``psutil.pid_exists(p.pid)``.
+
+     .. note::
+      this will return ``True`` also if the process is a zombie
+      (``p.status() == psutil.STATUS_ZOMBIE``).
+
+  .. method:: send_signal(signal)
+
+     Send a signal to process (see
+     `signal module <http://docs.python.org//library/signal.html>`__
+     constants) pre-emptively checking whether PID has been reused.
+     This is the same as ``os.kill(pid, sig)``.
+     On Windows only **SIGTERM** is valid and is treated as an alias for
+     :meth:`kill()`.
+
+  .. method:: suspend()
+
+     Suspend process execution with **SIGSTOP** signal pre-emptively checking
+     whether PID has been reused.
+     On UNIX this is the same as ``os.kill(pid, signal.SIGSTOP)``.
+     On Windows this is done by suspending all process threads execution.
+
+  .. method:: resume()
+
+     Resume process execution with **SIGCONT** signal pre-emptively checking
+     whether PID has been reused.
+     On UNIX this is the same as ``os.kill(pid, signal.SIGCONT)``.
+     On Windows this is done by resuming all process threads execution.
+
+  .. method:: terminate()
+
+     Terminate the process with **SIGTERM** signal pre-emptively checking
+     whether PID has been reused.
+     On UNIX this is the same as ``os.kill(pid, signal.SIGTERM)``.
+     On Windows this is an alias for :meth:`kill`.
+
+  .. method:: kill()
+
+     Kill the current process by using **SIGKILL** signal pre-emptively
+     checking whether PID has been reused.
+     On UNIX this is the same as ``os.kill(pid, signal.SIGKILL)``.
+     On Windows this is done by using
+     `TerminateProcess <http://msdn.microsoft.com/en-us/library/windows/desktop/ms686714(v=vs.85).aspx>`__.
+
+  .. method:: wait(timeout=None)
+
+     Wait for process termination and, if the process is a child of the
+     current one, also return the exit code, else ``None``. On Windows there's
+     no such limitation (exit code is always returned). If the process is
+     already terminated immediately return ``None`` instead of raising
+     :class:`NoSuchProcess`. If *timeout* is specified and process is still
+     alive raise :class:`TimeoutExpired` exception. It can also be used in a
+     non-blocking fashion by specifying ``timeout=0`` in which case it will
+     either return immediately or raise :class:`TimeoutExpired`.
+     To wait for multiple processes use :func:`psutil.wait_procs()`.
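+
+     A minimal sketch (POSIX; the exit status shown is illustrative):
+
+      >>> import psutil, subprocess
+      >>> sub = subprocess.Popen(["sleep", "1"])
+      >>> p = psutil.Process(sub.pid)
+      >>> p.wait(timeout=3)
+      0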
+
+
+Popen class
+-----------
+
+.. class:: Popen(*args, **kwargs)
+
+  A more convenient interface to stdlib
+  `subprocess.Popen <http://docs.python.org/library/subprocess.html#subprocess.Popen>`__.
+  It starts a subprocess and deals with it exactly as when using
+  `subprocess.Popen <http://docs.python.org/library/subprocess.html#subprocess.Popen>`__
+  but in addition it also provides all the methods of
+  the :class:`psutil.Process` class in a single interface.
+  For method names common to both classes such as
+  :meth:`send_signal() <psutil.Process.send_signal()>`,
+  :meth:`terminate() <psutil.Process.terminate()>` and
+  :meth:`kill() <psutil.Process.kill()>`
+  :class:`psutil.Process` implementation takes precedence.
+  For a complete documentation refer to
+  `subprocess module documentation <http://docs.python.org/library/subprocess.html>`__.
+
+  .. note::
+
+     Unlike `subprocess.Popen <http://docs.python.org/library/subprocess.html#subprocess.Popen>`__
+     this class pre-emptively checks whether PID has been reused on
+     :meth:`send_signal() <psutil.Process.send_signal()>`,
+     :meth:`terminate() <psutil.Process.terminate()>` and
+     :meth:`kill() <psutil.Process.kill()>`
+     so that you don't accidentally terminate another process, fixing
+     http://bugs.python.org/issue6973.
+
+  >>> import psutil
+  >>> from subprocess import PIPE
+  >>>
+  >>> p = psutil.Popen(["/usr/bin/python", "-c", "print('hello')"], stdout=PIPE)
+  >>> p.name()
+  'python'
+  >>> p.username()
+  'giampaolo'
+  >>> p.communicate()
+  ('hello\n', None)
+  >>> p.wait(timeout=2)
+  0
+  >>>
+
+Constants
+=========
+
+.. _const-pstatus:
+.. data:: STATUS_RUNNING
+          STATUS_SLEEPING
+          STATUS_DISK_SLEEP
+          STATUS_STOPPED
+          STATUS_TRACING_STOP
+          STATUS_ZOMBIE
+          STATUS_DEAD
+          STATUS_WAKE_KILL
+          STATUS_WAKING
+          STATUS_IDLE
+          STATUS_LOCKED
+          STATUS_WAITING
+
+  A set of strings representing the status of a process.
+  Returned by :meth:`psutil.Process.status()`.
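+
+  Example (a sketch; the actual status depends on the process):
+
+    >>> import psutil
+    >>> p = psutil.Process()
+    >>> p.status() == psutil.STATUS_RUNNING
+    True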
+
+.. _const-conn:
+.. data:: CONN_ESTABLISHED
+          CONN_SYN_SENT
+          CONN_SYN_RECV
+          CONN_FIN_WAIT1
+          CONN_FIN_WAIT2
+          CONN_TIME_WAIT
+          CONN_CLOSE
+          CONN_CLOSE_WAIT
+          CONN_LAST_ACK
+          CONN_LISTEN
+          CONN_CLOSING
+          CONN_NONE
+          CONN_DELETE_TCB (Windows)
+          CONN_IDLE (Solaris)
+          CONN_BOUND (Solaris)
+
+  A set of strings representing the status of a TCP connection.
+  Returned by :meth:`psutil.Process.connections()` (`status` field).
+
+.. _const-prio:
+.. data:: ABOVE_NORMAL_PRIORITY_CLASS
+          BELOW_NORMAL_PRIORITY_CLASS
+          HIGH_PRIORITY_CLASS
+          IDLE_PRIORITY_CLASS
+          NORMAL_PRIORITY_CLASS
+          REALTIME_PRIORITY_CLASS
+
+  A set of integers representing the priority of a process on Windows (see
+  `MSDN documentation <http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx>`__).
+  They can be used in conjunction with
+  :meth:`psutil.Process.nice()` to get or set process priority.
+
+  Availability: Windows
+
+.. _const-ioprio:
+.. data:: IOPRIO_CLASS_NONE
+          IOPRIO_CLASS_RT
+          IOPRIO_CLASS_BE
+          IOPRIO_CLASS_IDLE
+
+  A set of integers representing the I/O priority of a process on Linux. They
+  can be used in conjunction with :meth:`psutil.Process.ionice()` to get or set
+  process I/O priority.
+  *IOPRIO_CLASS_NONE* and *IOPRIO_CLASS_BE* (best effort) are the default for
+  any process that hasn't set a specific I/O priority.
+  *IOPRIO_CLASS_RT* (real time) means the process is given first access to the
+  disk, regardless of what else is going on in the system.
+  *IOPRIO_CLASS_IDLE* means the process will get I/O time when no-one else
+  needs the disk.
+  For further information refer to manuals of
+  `ionice <http://linux.die.net/man/1/ionice>`__
+  command line utility or
+  `ioprio_get <http://linux.die.net/man/2/ioprio_get>`__
+  system call.
+
+  Availability: Linux
+
+.. _const-rlimit:
+.. data:: RLIMIT_INFINITY
+          RLIMIT_AS
+          RLIMIT_CORE
+          RLIMIT_CPU
+          RLIMIT_DATA
+          RLIMIT_FSIZE
+          RLIMIT_LOCKS
+          RLIMIT_MEMLOCK
+          RLIMIT_MSGQUEUE
+          RLIMIT_NICE
+          RLIMIT_NOFILE
+          RLIMIT_NPROC
+          RLIMIT_RSS
+          RLIMIT_RTPRIO
+          RLIMIT_RTTIME
+          RLIMIT_SIGPENDING
+          RLIMIT_STACK
+
+  Constants used for getting and setting process resource limits to be used in
+  conjunction with :meth:`psutil.Process.rlimit()`. See
+  `man prlimit <http://linux.die.net/man/2/prlimit>`__ for further information.
+
+  Availability: Linux

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/make.bat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/make.bat b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/make.bat
new file mode 100644
index 0000000..9bc6751
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/docs/make.bat
@@ -0,0 +1,242 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  texinfo    to make Texinfo files
+	echo.  gettext    to make PO message catalogs
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  xml        to make Docutils-native XML files
+	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\psutil.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\psutil.qhc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdf" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdfja" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf-ja
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "text" (
+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The text files are in %BUILDDIR%/text.
+	goto end
+)
+
+if "%1" == "man" (
+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The manual pages are in %BUILDDIR%/man.
+	goto end
+)
+
+if "%1" == "texinfo" (
+	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+	goto end
+)
+
+if "%1" == "gettext" (
+	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+if "%1" == "xml" (
+	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The XML files are in %BUILDDIR%/xml.
+	goto end
+)
+
+if "%1" == "pseudoxml" (
+	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+	goto end
+)
+
+:end

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/disk_usage.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/disk_usage.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/disk_usage.py
new file mode 100644
index 0000000..787cb0f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/disk_usage.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+List all mounted disk partitions a-la "df -h" command.
+
+$ python examples/disk_usage.py
+Device               Total     Used     Free  Use %      Type  Mount
+/dev/sdb3            18.9G    14.7G     3.3G    77%      ext4  /
+/dev/sda6           345.9G    83.8G   244.5G    24%      ext4  /home
+/dev/sda1           296.0M    43.1M   252.9M    14%      vfat  /boot/efi
+/dev/sda2           600.0M   312.4M   287.6M    52%   fuseblk  /media/Recovery
+"""
+
+import sys
+import os
+import psutil
+from psutil._compat import print_
+
+
+def bytes2human(n):
+    # http://code.activestate.com/recipes/578019
+    # >>> bytes2human(10000)
+    # '9.8K'
+    # >>> bytes2human(100001221)
+    # '95.4M'
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.1f%s' % (value, s)
+    return "%sB" % n
+
+
+def main():
+    templ = "%-17s %8s %8s %8s %5s%% %9s  %s"
+    print_(templ % ("Device", "Total", "Used", "Free", "Use ", "Type",
+                    "Mount"))
+    for part in psutil.disk_partitions(all=False):
+        if os.name == 'nt':
+            if 'cdrom' in part.opts or part.fstype == '':
+                # skip cd-rom drives with no disk in it; they may raise
+                # ENOENT, pop-up a Windows GUI error for a non-ready
+                # partition or just hang.
+                continue
+        usage = psutil.disk_usage(part.mountpoint)
+        print_(templ % (
+            part.device,
+            bytes2human(usage.total),
+            bytes2human(usage.used),
+            bytes2human(usage.free),
+            int(usage.percent),
+            part.fstype,
+            part.mountpoint))
+
+if __name__ == '__main__':
+    sys.exit(main())

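For a quick one-partition check, psutil.disk_usage() alone is enough: it
returns a namedtuple with byte counts plus a precomputed percent field. A
minimal sketch, assuming a POSIX root path:

    import psutil

    usage = psutil.disk_usage('/')
    print('%.1f%% used (%d of %d bytes)'
          % (usage.percent, usage.used, usage.total))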
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/free.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/free.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/free.py
new file mode 100644
index 0000000..95e11fb
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/free.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of 'free' cmdline utility.
+
+$ python examples/free.py
+             total       used       free     shared    buffers      cache
+Mem:      10125520    8625996    1499524          0     349500    3307836
+Swap:            0          0          0
+"""
+
+import psutil
+from psutil._compat import print_
+
+
+def main():
+    virt = psutil.virtual_memory()
+    swap = psutil.swap_memory()
+    templ = "%-7s %10s %10s %10s %10s %10s %10s"
+    print_(templ % ('', 'total', 'used', 'free', 'shared', 'buffers', 'cache'))
+    print_(templ % (
+        'Mem:',
+        int(virt.total / 1024),
+        int(virt.used / 1024),
+        int(virt.free / 1024),
+        int(getattr(virt, 'shared', 0) / 1024),
+        int(getattr(virt, 'buffers', 0) / 1024),
+        int(getattr(virt, 'cached', 0) / 1024)))
+    print_(templ % (
+        'Swap:', int(swap.total / 1024),
+        int(swap.used / 1024),
+        int(swap.free / 1024),
+        '',
+        '',
+        ''))
+
+if __name__ == '__main__':
+    main()

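The same information is available without any formatting: virtual_memory()
and swap_memory() return namedtuples whose fields are byte counts, with
`percent` already computed. A minimal sketch:

    import psutil

    virt = psutil.virtual_memory()
    swap = psutil.swap_memory()
    print('RAM:  %s%% of %s bytes used' % (virt.percent, virt.total))
    print('swap: %s%% of %s bytes used' % (swap.percent, swap.total))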
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/iotop.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/iotop.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/iotop.py
new file mode 100644
index 0000000..47f1ca5
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/iotop.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of iotop (http://guichaz.free.fr/iotop/) showing real time
+disk I/O statistics.
+
+It works on Linux only (FreeBSD and OSX are missing support for IO
+counters).
+It doesn't work on Windows as the curses module is required.
+
+Example output:
+
+$ python examples/iotop.py
+Total DISK READ: 0.00 B/s | Total DISK WRITE: 472.00 K/s
+PID   USER      DISK READ  DISK WRITE  COMMAND
+13155 giampao    0.00 B/s  428.00 K/s  /usr/bin/google-chrome-beta
+3260  giampao    0.00 B/s    0.00 B/s  bash
+3779  giampao    0.00 B/s    0.00 B/s  gnome-session --session=ubuntu
+3830  giampao    0.00 B/s    0.00 B/s  /usr/bin/dbus-launch
+3831  giampao    0.00 B/s    0.00 B/s  //bin/dbus-daemon --fork --print-pid 5
+3841  giampao    0.00 B/s    0.00 B/s  /usr/lib/at-spi-bus-launcher
+3845  giampao    0.00 B/s    0.00 B/s  /bin/dbus-daemon
+3848  giampao    0.00 B/s    0.00 B/s  /usr/lib/at-spi2-core/at-spi2-registryd
+3862  giampao    0.00 B/s    0.00 B/s  /usr/lib/gnome-settings-daemon
+
+Author: Giampaolo Rodola' <g....@gmail.com>
+"""
+
+import os
+import sys
+import psutil
+if not hasattr(psutil.Process, 'io_counters') or os.name != 'posix':
+    sys.exit('platform not supported')
+import time
+import curses
+import atexit
+
+
+# --- curses stuff
+def tear_down():
+    win.keypad(0)
+    curses.nocbreak()
+    curses.echo()
+    curses.endwin()
+
+win = curses.initscr()
+atexit.register(tear_down)
+curses.endwin()
+lineno = 0
+
+
+def print_line(line, highlight=False):
+    """A thin wrapper around curses's addstr()."""
+    global lineno
+    try:
+        if highlight:
+            line += " " * (win.getmaxyx()[1] - len(line))
+            win.addstr(lineno, 0, line, curses.A_REVERSE)
+        else:
+            win.addstr(lineno, 0, line, 0)
+    except curses.error:
+        lineno = 0
+        win.refresh()
+        raise
+    else:
+        lineno += 1
+# --- /curses stuff
+
+
+def bytes2human(n):
+    """
+    >>> bytes2human(10000)
+    '9.8 K/s'
+    >>> bytes2human(100001221)
+    '95.4 M/s'
+    """
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.2f %s/s' % (value, s)
+    return '%.2f B/s' % (n)
+
+
+def poll(interval):
+    """Calculate IO usage by comparing IO statics before and
+    after the interval.
+    Return a tuple including all currently running processes
+    sorted by IO activity and total disks I/O activity.
+    """
+    # first get a list of all processes and disk io counters
+    procs = [p for p in psutil.process_iter()]
+    for p in procs[:]:
+        try:
+            p._before = p.io_counters()
+        except psutil.Error:
+            procs.remove(p)
+            continue
+    disks_before = psutil.disk_io_counters()
+
+    # sleep some time
+    time.sleep(interval)
+
+    # then retrieve the same info again
+    for p in procs[:]:
+        try:
+            p._after = p.io_counters()
+            p._cmdline = ' '.join(p.cmdline())
+            if not p._cmdline:
+                p._cmdline = p.name()
+            p._username = p.username()
+        except psutil.NoSuchProcess:
+            procs.remove(p)
+    disks_after = psutil.disk_io_counters()
+
+    # finally calculate results by comparing data before and
+    # after the interval
+    for p in procs:
+        p._read_per_sec = p._after.read_bytes - p._before.read_bytes
+        p._write_per_sec = p._after.write_bytes - p._before.write_bytes
+        p._total = p._read_per_sec + p._write_per_sec
+
+    disks_read_per_sec = disks_after.read_bytes - disks_before.read_bytes
+    disks_write_per_sec = disks_after.write_bytes - disks_before.write_bytes
+
+    # sort processes by total disk IO so that the more intensive
+    # ones get listed first
+    processes = sorted(procs, key=lambda p: p._total, reverse=True)
+
+    return (processes, disks_read_per_sec, disks_write_per_sec)
+
+
+def refresh_window(procs, disks_read, disks_write):
+    """Print results on screen by using curses."""
+    curses.endwin()
+    templ = "%-5s %-7s %11s %11s  %s"
+    win.erase()
+
+    disks_tot = "Total DISK READ: %s | Total DISK WRITE: %s" \
+                % (bytes2human(disks_read), bytes2human(disks_write))
+    print_line(disks_tot)
+
+    header = templ % ("PID", "USER", "DISK READ", "DISK WRITE", "COMMAND")
+    print_line(header, highlight=True)
+
+    for p in procs:
+        line = templ % (
+            p.pid,
+            p._username[:7],
+            bytes2human(p._read_per_sec),
+            bytes2human(p._write_per_sec),
+            p._cmdline)
+        try:
+            print_line(line)
+        except curses.error:
+            break
+    win.refresh()
+
+
+def main():
+    try:
+        interval = 0
+        while 1:
+            args = poll(interval)
+            refresh_window(*args)
+            interval = 1
+    except (KeyboardInterrupt, SystemExit):
+        pass
+
+if __name__ == '__main__':
+    main()

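The core of the script above is the sample-delta pattern: read io_counters()
twice, an interval apart, and report the difference as a per-second rate.
Isolated for a single process it looks like this (a sketch; io_counters() is
not available on OSX):

    import time
    import psutil

    p = psutil.Process()
    before = p.io_counters()
    time.sleep(1)  # 1s interval, so the delta is already a per-second rate
    after = p.io_counters()
    print('read:  %s B/s' % (after.read_bytes - before.read_bytes))
    print('write: %s B/s' % (after.write_bytes - before.write_bytes))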
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/killall.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/killall.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/killall.py
new file mode 100644
index 0000000..b548e7b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/killall.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Kill a process by name.
+"""
+
+import os
+import sys
+import psutil
+
+
+def main():
+    if len(sys.argv) != 2:
+        sys.exit('usage: %s name' % __file__)
+    else:
+        NAME = sys.argv[1]
+
+    killed = []
+    for proc in psutil.process_iter():
+        if proc.name() == NAME and proc.pid != os.getpid():
+            proc.kill()
+            killed.append(proc.pid)
+    if not killed:
+        sys.exit('%s: no process found' % NAME)
+    else:
+        sys.exit(0)
+
+if __name__ == '__main__':
+    sys.exit(main())

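kill() sends SIGKILL outright; a gentler variant (a sketch using the same
psutil API) terminates first and only kills processes still alive after a
grace period, via psutil.wait_procs():

    import psutil

    def terminate_by_name(name, timeout=3):
        procs = []
        for p in psutil.process_iter():
            try:
                if p.name() == name:
                    procs.append(p)
            except psutil.NoSuchProcess:
                pass  # process ended while we were iterating
        for p in procs:
            p.terminate()  # SIGTERM first
        gone, alive = psutil.wait_procs(procs, timeout=timeout)
        for p in alive:
            p.kill()  # SIGKILL the stragglers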
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/meminfo.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/meminfo.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/meminfo.py
new file mode 100644
index 0000000..671f907
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/meminfo.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Print system memory information.
+
+$ python examples/meminfo.py
+MEMORY
+------
+Total      :    9.7G
+Available  :    4.9G
+Percent    :    49.0
+Used       :    8.2G
+Free       :    1.4G
+Active     :    5.6G
+Inactive   :    2.1G
+Buffers    :  341.2M
+Cached     :    3.2G
+
+SWAP
+----
+Total      :      0B
+Used       :      0B
+Free       :      0B
+Percent    :     0.0
+Sin        :      0B
+Sout       :      0B
+"""
+
+import psutil
+from psutil._compat import print_
+
+
+def bytes2human(n):
+    # http://code.activestate.com/recipes/578019
+    # >>> bytes2human(10000)
+    # '9.8K'
+    # >>> bytes2human(100001221)
+    # '95.4M'
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.1f%s' % (value, s)
+    return "%sB" % n
+
+
+def pprint_ntuple(nt):
+    for name in nt._fields:
+        value = getattr(nt, name)
+        if name != 'percent':
+            value = bytes2human(value)
+        print_('%-10s : %7s' % (name.capitalize(), value))
+
+
+def main():
+    print_('MEMORY\n------')
+    pprint_ntuple(psutil.virtual_memory())
+    print_('\nSWAP\n----')
+    pprint_ntuple(psutil.swap_memory())
+
+if __name__ == '__main__':
+    main()

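pprint_ntuple() relies on the fact that psutil returns plain namedtuples, so
the standard _fields/_asdict() helpers always work. The same loop can be
written without iterating over field names by hand:

    import psutil

    for name, value in psutil.virtual_memory()._asdict().items():
        print('%-10s %s' % (name, value))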
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/netstat.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/netstat.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/netstat.py
new file mode 100644
index 0000000..70bc231
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/netstat.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of 'netstat -antp' on Linux.
+
+$ python examples/netstat.py
+Proto Local address      Remote address   Status        PID    Program name
+tcp   127.0.0.1:48256    127.0.0.1:45884  ESTABLISHED   13646  chrome
+tcp   127.0.0.1:47073    127.0.0.1:45884  ESTABLISHED   13646  chrome
+tcp   127.0.0.1:47072    127.0.0.1:45884  ESTABLISHED   13646  chrome
+tcp   127.0.0.1:45884    -                LISTEN        13651  GoogleTalkPlugi
+tcp   127.0.0.1:60948    -                LISTEN        13651  GoogleTalkPlugi
+tcp   172.17.42.1:49102  127.0.0.1:19305  CLOSE_WAIT    13651  GoogleTalkPlugi
+tcp   172.17.42.1:55797  127.0.0.1:443    CLOSE_WAIT    13651  GoogleTalkPlugi
+...
+"""
+
+import socket
+from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
+
+import psutil
+from psutil._compat import print_
+
+
+AD = "-"
+AF_INET6 = getattr(socket, 'AF_INET6', object())
+proto_map = {
+    (AF_INET, SOCK_STREAM): 'tcp',
+    (AF_INET6, SOCK_STREAM): 'tcp6',
+    (AF_INET, SOCK_DGRAM): 'udp',
+    (AF_INET6, SOCK_DGRAM): 'udp6',
+}
+
+
+def main():
+    templ = "%-5s %-30s %-30s %-13s %-6s %s"
+    print_(templ % (
+        "Proto", "Local address", "Remote address", "Status", "PID",
+        "Program name"))
+    proc_names = {}
+    for p in psutil.process_iter():
+        try:
+            proc_names[p.pid] = p.name()
+        except psutil.Error:
+            pass
+    for c in psutil.net_connections(kind='inet'):
+        laddr = "%s:%s" % (c.laddr)
+        raddr = ""
+        if c.raddr:
+            raddr = "%s:%s" % (c.raddr)
+        print_(templ % (
+            proto_map[(c.family, c.type)],
+            laddr,
+            raddr or AD,
+            c.status,
+            c.pid or AD,
+            proc_names.get(c.pid, '?')[:15],
+        ))
+
+if __name__ == '__main__':
+    main()

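net_connections() takes the same `kind` argument as Process.connections(),
and its `status` field can be compared against the CONN_* constants. A
minimal "what is listening" sketch (may require root privileges on some
platforms):

    import psutil

    for c in psutil.net_connections(kind='inet'):
        if c.status == psutil.CONN_LISTEN:
            print('%s:%s (pid=%s)' % (c.laddr[0], c.laddr[1], c.pid))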
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/nettop.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/nettop.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/nettop.py
new file mode 100644
index 0000000..857285c
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/nettop.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+#
+# $Id: nettop.py 1160 2011-10-14 18:50:36Z g.rodola@gmail.com $
+#
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Shows real-time network statistics.
+
+Author: Giampaolo Rodola' <g....@gmail.com>
+
+$ python examples/nettop.py
+-----------------------------------------------------------
+total bytes:           sent: 1.49 G       received: 4.82 G
+total packets:         sent: 7338724      received: 8082712
+
+wlan0                     TOTAL         PER-SEC
+-----------------------------------------------------------
+bytes-sent               1.29 G        0.00 B/s
+bytes-recv               3.48 G        0.00 B/s
+pkts-sent               7221782               0
+pkts-recv               6753724               0
+
+eth1                      TOTAL         PER-SEC
+-----------------------------------------------------------
+bytes-sent             131.77 M        0.00 B/s
+bytes-recv               1.28 G        0.00 B/s
+pkts-sent                     0               0
+pkts-recv               1214470               0
+"""
+
+import sys
+import os
+if os.name != 'posix':
+    sys.exit('platform not supported')
+import atexit
+import curses
+import time
+
+import psutil
+
+
+# --- curses stuff
+def tear_down():
+    win.keypad(0)
+    curses.nocbreak()
+    curses.echo()
+    curses.endwin()
+
+win = curses.initscr()
+atexit.register(tear_down)
+curses.endwin()
+lineno = 0
+
+
+def print_line(line, highlight=False):
+    """A thin wrapper around curses's addstr()."""
+    global lineno
+    try:
+        if highlight:
+            line += " " * (win.getmaxyx()[1] - len(line))
+            win.addstr(lineno, 0, line, curses.A_REVERSE)
+        else:
+            win.addstr(lineno, 0, line, 0)
+    except curses.error:
+        lineno = 0
+        win.refresh()
+        raise
+    else:
+        lineno += 1
+# --- /curses stuff
+
+
+def bytes2human(n):
+    """
+    >>> bytes2human(10000)
+    '9.8 K'
+    >>> bytes2human(100001221)
+    '95.4 M'
+    """
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.2f %s' % (value, s)
+    return '%.2f B' % (n)
+
+
+def poll(interval):
+    """Retrieve raw stats within an interval window."""
+    tot_before = psutil.net_io_counters()
+    pnic_before = psutil.net_io_counters(pernic=True)
+    # sleep some time
+    time.sleep(interval)
+    tot_after = psutil.net_io_counters()
+    pnic_after = psutil.net_io_counters(pernic=True)
+    return (tot_before, tot_after, pnic_before, pnic_after)
+
+
+def refresh_window(tot_before, tot_after, pnic_before, pnic_after):
+    """Print stats on screen."""
+    global lineno
+
+    # totals
+    print_line("total bytes:           sent: %-10s   received: %s" % (
+        bytes2human(tot_after.bytes_sent),
+        bytes2human(tot_after.bytes_recv))
+    )
+    print_line("total packets:         sent: %-10s   received: %s" % (
+        tot_after.packets_sent, tot_after.packets_recv))
+
+    # per-network interface details: let's sort network interfaces so
+    # that the ones which generated more traffic are shown first
+    print_line("")
+    nic_names = list(pnic_after.keys())
+    nic_names.sort(key=lambda x: sum(pnic_after[x]), reverse=True)
+    for name in nic_names:
+        stats_before = pnic_before[name]
+        stats_after = pnic_after[name]
+        templ = "%-15s %15s %15s"
+        print_line(templ % (name, "TOTAL", "PER-SEC"), highlight=True)
+        print_line(templ % (
+            "bytes-sent",
+            bytes2human(stats_after.bytes_sent),
+            bytes2human(
+                stats_after.bytes_sent - stats_before.bytes_sent) + '/s',
+        ))
+        print_line(templ % (
+            "bytes-recv",
+            bytes2human(stats_after.bytes_recv),
+            bytes2human(
+                stats_after.bytes_recv - stats_before.bytes_recv) + '/s',
+        ))
+        print_line(templ % (
+            "pkts-sent",
+            stats_after.packets_sent,
+            stats_after.packets_sent - stats_before.packets_sent,
+        ))
+        print_line(templ % (
+            "pkts-recv",
+            stats_after.packets_recv,
+            stats_after.packets_recv - stats_before.packets_recv,
+        ))
+        print_line("")
+    win.refresh()
+    lineno = 0
+
+
+def main():
+    try:
+        interval = 0
+        while True:
+            args = poll(interval)
+            refresh_window(*args)
+            interval = 1
+    except (KeyboardInterrupt, SystemExit):
+        pass
+
+if __name__ == '__main__':
+    main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/pmap.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/pmap.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/pmap.py
new file mode 100644
index 0000000..1936c0b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/pmap.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of 'pmap' utility on Linux, 'vmmap' on OSX and 'procstat -v' on BSD.
+Report memory map of a process.
+
+$ python examples/pmap.py 32402
+pid=32402, name=hg
+Address                 RSS  Mode    Mapping
+0000000000400000      1200K  r-xp    /usr/bin/python2.7
+0000000000838000         4K  r--p    /usr/bin/python2.7
+0000000000839000       304K  rw-p    /usr/bin/python2.7
+00000000008ae000        68K  rw-p    [anon]
+000000000275e000      5396K  rw-p    [heap]
+00002b29bb1e0000       124K  r-xp    /lib/x86_64-linux-gnu/ld-2.17.so
+00002b29bb203000         8K  rw-p    [anon]
+00002b29bb220000       528K  rw-p    [anon]
+00002b29bb2d8000       768K  rw-p    [anon]
+00002b29bb402000         4K  r--p    /lib/x86_64-linux-gnu/ld-2.17.so
+00002b29bb403000         8K  rw-p    /lib/x86_64-linux-gnu/ld-2.17.so
+00002b29bb405000        60K  r-xp    /lib/x86_64-linux-gnu/libpthread-2.17.so
+00002b29bb41d000         0K  ---p    /lib/x86_64-linux-gnu/libpthread-2.17.so
+00007fff94be6000        48K  rw-p    [stack]
+00007fff94dd1000         4K  r-xp    [vdso]
+ffffffffff600000         0K  r-xp    [vsyscall]
+...
+"""
+
+import sys
+
+import psutil
+from psutil._compat import print_
+
+
+def main():
+    if len(sys.argv) != 2:
+        sys.exit('usage: pmap <pid>')
+    p = psutil.Process(int(sys.argv[1]))
+    print_("pid=%s, name=%s" % (p.pid, p.name()))
+    templ = "%-16s %10s  %-7s %s"
+    print_(templ % ("Address", "RSS", "Mode", "Mapping"))
+    total_rss = 0
+    for m in p.memory_maps(grouped=False):
+        total_rss += m.rss
+        print_(templ % (
+            m.addr.split('-')[0].zfill(16),
+            str(m.rss / 1024) + 'K',
+            m.perms,
+            m.path))
+    print_("-" * 33)
+    print_(templ % ("Total", str(total_rss / 1024) + 'K', '', ''))
+
+if __name__ == '__main__':
+    main()

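memory_maps() defaults to grouped=True, which aggregates the mappings of each
path; that is handy for a per-library RSS summary rather than the raw
address-by-address listing above. A minimal sketch:

    import psutil

    p = psutil.Process()
    for m in p.memory_maps():  # grouped=True is the default
        print('%8dK  %s' % (m.rss / 1024, m.path))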
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/process_detail.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/process_detail.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/process_detail.py
new file mode 100644
index 0000000..371142c
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/process_detail.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Print detailed information about a process.
+Author: Giampaolo Rodola' <g....@gmail.com>
+
+$ python examples/process_detail.py
+pid               820
+name              python
+exe               /usr/bin/python2.7
+parent            29613 (bash)
+cmdline           python examples/process_detail.py
+started           2014-04-27 03:41
+user              giampaolo
+uids              real=1000, effective=1000, saved=1000
+gids              real=1000, effective=1000, saved=1000
+terminal          /dev/pts/17
+cwd               /ssd/svn/psutil
+memory            0.1% (resident=10.6M, virtual=58.5M)
+cpu               0.0% (user=0.09, system=0.0)
+status            running
+niceness          0
+num threads       1
+I/O               bytes-read=0B, bytes-written=0B
+open files
+running threads   id=820, user-time=0.09, sys-time=0.0
+"""
+
+import datetime
+import os
+import socket
+import sys
+
+import psutil
+
+
+POSIX = os.name == 'posix'
+
+
+def convert_bytes(n):
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.1f%s' % (value, s)
+    return "%sB" % n
+
+
+def print_(a, b):
+    if sys.stdout.isatty() and POSIX:
+        fmt = '\x1b[1;32m%-17s\x1b[0m %s' % (a, b)
+    else:
+        fmt = '%-15s %s' % (a, b)
+    # python 2/3 compatibility layer
+    sys.stdout.write(fmt + '\n')
+    sys.stdout.flush()
+
+
+def run(pid):
+    ACCESS_DENIED = ''
+    try:
+        p = psutil.Process(pid)
+        pinfo = p.as_dict(ad_value=ACCESS_DENIED)
+    except psutil.NoSuchProcess:
+        sys.exit(str(sys.exc_info()[1]))
+
+    try:
+        parent = p.parent()
+        if parent:
+            parent = '(%s)' % parent.name()
+        else:
+            parent = ''
+    except psutil.Error:
+        parent = ''
+    started = datetime.datetime.fromtimestamp(
+        pinfo['create_time']).strftime('%Y-%m-%d %H:%M')
+    io = pinfo.get('io_counters', ACCESS_DENIED)
+    mem = '%s%% (resident=%s, virtual=%s) ' % (
+        round(pinfo['memory_percent'], 1),
+        convert_bytes(pinfo['memory_info'].rss),
+        convert_bytes(pinfo['memory_info'].vms))
+    children = p.children()
+
+    print_('pid', pinfo['pid'])
+    print_('name', pinfo['name'])
+    print_('exe', pinfo['exe'])
+    print_('parent', '%s %s' % (pinfo['ppid'], parent))
+    print_('cmdline', ' '.join(pinfo['cmdline']))
+    print_('started', started)
+    print_('user', pinfo['username'])
+    if POSIX and pinfo['uids']:
+        print_('uids', 'real=%s, effective=%s, saved=%s' % pinfo['uids'])
+    if POSIX and pinfo['gids']:
+        print_('gids', 'real=%s, effective=%s, saved=%s' % pinfo['gids'])
+    if POSIX:
+        print_('terminal', pinfo['terminal'] or '')
+    if hasattr(p, 'cwd'):
+        print_('cwd', pinfo['cwd'])
+    print_('memory', mem)
+    print_('cpu', '%s%% (user=%s, system=%s)' % (
+        pinfo['cpu_percent'],
+        getattr(pinfo['cpu_times'], 'user', '?'),
+        getattr(pinfo['cpu_times'], 'system', '?')))
+    print_('status', pinfo['status'])
+    print_('niceness', pinfo['nice'])
+    print_('num threads', pinfo['num_threads'])
+    if io != ACCESS_DENIED:
+        print_('I/O', 'bytes-read=%s, bytes-written=%s' % (
+            convert_bytes(io.read_bytes),
+            convert_bytes(io.write_bytes)))
+    if children:
+        print_('children', '')
+        for child in children:
+            print_('', 'pid=%s name=%s' % (child.pid, child.name()))
+
+    if pinfo['open_files'] != ACCESS_DENIED:
+        print_('open files', '')
+        for file in pinfo['open_files']:
+            print_('', 'fd=%s %s ' % (file.fd, file.path))
+
+    if pinfo['threads']:
+        print_('running threads', '')
+        for thread in pinfo['threads']:
+            print_('', 'id=%s, user-time=%s, sys-time=%s' % (
+                thread.id, thread.user_time, thread.system_time))
+    if pinfo['connections'] not in (ACCESS_DENIED, []):
+        print_('open connections', '')
+        for conn in pinfo['connections']:
+            if conn.type == socket.SOCK_STREAM:
+                type = 'TCP'
+            elif conn.type == socket.SOCK_DGRAM:
+                type = 'UDP'
+            else:
+                type = 'UNIX'
+            lip, lport = conn.laddr
+            if not conn.raddr:
+                rip, rport = '*', '*'
+            else:
+                rip, rport = conn.raddr
+            print_('', '%s:%s -> %s:%s type=%s status=%s' % (
+                lip, lport, rip, rport, type, conn.status))
+
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+    if len(argv) == 1:
+        sys.exit(run(os.getpid()))
+    elif len(argv) == 2:
+        sys.exit(run(int(argv[1])))
+    else:
+        sys.exit('usage: %s [pid]' % __file__)
+
+if __name__ == '__main__':
+    sys.exit(main())


[18/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/LeveldbTimelineStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/LeveldbTimelineStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/LeveldbTimelineStore.java
new file mode 100644
index 0000000..edd4842
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/LeveldbTimelineStore.java
@@ -0,0 +1,1473 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.collections.map.LRUMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.WritableComparator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.fusesource.leveldbjni.JniDBFactory;
+import org.iq80.leveldb.DB;
+import org.iq80.leveldb.DBIterator;
+import org.iq80.leveldb.Options;
+import org.iq80.leveldb.ReadOptions;
+import org.iq80.leveldb.WriteBatch;
+import org.iq80.leveldb.WriteOptions;
+
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.GenericObjectMapper.readReverseOrderedLong;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.GenericObjectMapper.writeReverseOrderedLong;
+
+/**
+ * <p>An implementation of an application timeline store backed by leveldb.</p>
+ *
+ * <p>There are three sections of the db, the start time section,
+ * the entity section, and the indexed entity section.</p>
+ *
+ * <p>The start time section is used to retrieve the unique start time for
+ * a given entity. Its values each contain a start time while its keys are of
+ * the form:</p>
+ * <pre>
+ *   START_TIME_LOOKUP_PREFIX + entity type + entity id</pre>
+ *
+ * <p>The entity section is ordered by entity type, then entity start time
+ * descending, then entity ID. There are four sub-sections of the entity
+ * section: events, primary filters, related entities,
+ * and other info. The event entries have event info serialized into their
+ * values. The other info entries have values corresponding to the values of
+ * the other info name/value map for the entry (note the names are contained
+ * in the key). All other entries have empty values. The key structure is as
+ * follows:</p>
+ * <pre>
+ *   ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id
+ *
+ *   ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
+ *     EVENTS_COLUMN + reveventtimestamp + eventtype
+ *
+ *   ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
+ *     PRIMARY_FILTERS_COLUMN + name + value
+ *
+ *   ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
+ *     OTHER_INFO_COLUMN + name
+ *
+ *   ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
+ *     RELATED_ENTITIES_COLUMN + relatedentity type + relatedentity id
+ *
+ *   ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
+ *     INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN + relatedentity type +
+ *     relatedentity id</pre>
+ *
+ * <p>The indexed entity section contains a primary filter name and primary
+ * filter value as the prefix. Within a given name/value, entire entity
+ * entries are stored in the same format as described in the entity section
+ * above (below, "key" represents any one of the possible entity entry keys
+ * described above).</p>
+ * <pre>
+ *   INDEXED_ENTRY_PREFIX + primaryfilter name + primaryfilter value +
+ *     key</pre>
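+ *
+ * <p>As an illustrative sketch (names invented for the example), an entity
+ * of type "APP" with id "app_1" and start time <i>t</i> ends up under keys
+ * shaped like:</p>
+ * <pre>
+ *   "k" + "APP" + 0x0 + "app_1"                              (start time)
+ *   "e" + "APP" + 0x0 + writeReverseOrderedLong(t) + "app_1"  (entity)</pre>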
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class LeveldbTimelineStore extends AbstractService
+    implements TimelineStore {
+  private static final Log LOG = LogFactory
+      .getLog(LeveldbTimelineStore.class);
+
+  private static final String FILENAME = "leveldb-timeline-store.ldb";
+
+  private static final byte[] START_TIME_LOOKUP_PREFIX = "k".getBytes();
+  private static final byte[] ENTITY_ENTRY_PREFIX = "e".getBytes();
+  private static final byte[] INDEXED_ENTRY_PREFIX = "i".getBytes();
+
+  private static final byte[] EVENTS_COLUMN = "e".getBytes();
+  private static final byte[] PRIMARY_FILTERS_COLUMN = "f".getBytes();
+  private static final byte[] OTHER_INFO_COLUMN = "i".getBytes();
+  private static final byte[] RELATED_ENTITIES_COLUMN = "r".getBytes();
+  private static final byte[] INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN =
+      "z".getBytes();
+
+  private static final byte[] EMPTY_BYTES = new byte[0];
+
+  private Map<EntityIdentifier, StartAndInsertTime> startTimeWriteCache;
+  private Map<EntityIdentifier, Long> startTimeReadCache;
+
+  /**
+   * Per-entity locks are obtained when writing.
+   */
+  private final LockMap<EntityIdentifier> writeLocks =
+      new LockMap<EntityIdentifier>();
+
+  private final ReentrantReadWriteLock deleteLock =
+      new ReentrantReadWriteLock();
+
+  private DB db;
+
+  private Thread deletionThread;
+
+  public LeveldbTimelineStore() {
+    super(LeveldbTimelineStore.class.getName());
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  protected void serviceInit(Configuration conf) throws Exception {
+    Options options = new Options();
+    options.createIfMissing(true);
+    options.cacheSize(conf.getLong(
+        YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
+    JniDBFactory factory = new JniDBFactory();
+    String path = conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH);
+    File p = new File(path);
+    if (!p.exists()) {
+      if (!p.mkdirs()) {
+        throw new IOException("Couldn't create directory for leveldb " +
+            "timeline store " + path);
+      }
+    }
+    LOG.info("Using leveldb path " + path);
+    db = factory.open(new File(path, FILENAME), options);
+    startTimeWriteCache =
+        Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(
+            conf)));
+    startTimeReadCache =
+        Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(
+            conf)));
+
+    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, true)) {
+      deletionThread = new EntityDeletionThread(conf);
+      deletionThread.start();
+    }
+
+    super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    if (deletionThread != null) {
+      deletionThread.interrupt();
+      LOG.info("Waiting for deletion thread to complete its current action");
+      try {
+        deletionThread.join();
+      } catch (InterruptedException e) {
+        LOG.warn("Interrupted while waiting for deletion thread to complete," +
+            " closing db now", e);
+      }
+    }
+    IOUtils.cleanup(LOG, db);
+    super.serviceStop();
+  }
+
+  private static class StartAndInsertTime {
+    final long startTime;
+    final long insertTime;
+
+    public StartAndInsertTime(long startTime, long insertTime) {
+      this.startTime = startTime;
+      this.insertTime = insertTime;
+    }
+  }
+
+  private class EntityDeletionThread extends Thread {
+    private final long ttl;
+    private final long ttlInterval;
+
+    public EntityDeletionThread(Configuration conf) {
+      ttl  = conf.getLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS,
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS);
+      ttlInterval = conf.getLong(
+          YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
+      LOG.info("Starting deletion thread with ttl " + ttl + " and cycle " +
+          "interval " + ttlInterval);
+    }
+
+    @Override
+    public void run() {
+      while (true) {
+        long timestamp = System.currentTimeMillis() - ttl;
+        try {
+          discardOldEntities(timestamp);
+          Thread.sleep(ttlInterval);
+        } catch (IOException e) {
+          LOG.error(e);
+        } catch (InterruptedException e) {
+          LOG.info("Deletion thread received interrupt, exiting");
+          break;
+        }
+      }
+    }
+  }
+
+  private static class LockMap<K> {
+    private static class CountingReentrantLock<K> extends ReentrantLock {
+      private static final long serialVersionUID = 1L;
+      private int count;
+      private K key;
+
+      CountingReentrantLock(K key) {
+        super();
+        this.count = 0;
+        this.key = key;
+      }
+    }
+
+    private Map<K, CountingReentrantLock<K>> locks =
+        new HashMap<K, CountingReentrantLock<K>>();
+
+    synchronized CountingReentrantLock<K> getLock(K key) {
+      CountingReentrantLock<K> lock = locks.get(key);
+      if (lock == null) {
+        lock = new CountingReentrantLock<K>(key);
+        locks.put(key, lock);
+      }
+
+      lock.count++;
+      return lock;
+    }
+
+    synchronized void returnLock(CountingReentrantLock<K> lock) {
+      if (lock.count == 0) {
+        throw new IllegalStateException("Returned lock more times than it " +
+            "was retrieved");
+      }
+      lock.count--;
+
+      if (lock.count == 0) {
+        locks.remove(lock.key);
+      }
+    }
+  }
+
+  private static class KeyBuilder {
+    private static final int MAX_NUMBER_OF_KEY_ELEMENTS = 10;
+    private byte[][] b;
+    private boolean[] useSeparator;
+    private int index;
+    private int length;
+
+    public KeyBuilder(int size) {
+      b = new byte[size][];
+      useSeparator = new boolean[size];
+      index = 0;
+      length = 0;
+    }
+
+    public static KeyBuilder newInstance() {
+      return new KeyBuilder(MAX_NUMBER_OF_KEY_ELEMENTS);
+    }
+
+    public KeyBuilder add(String s) {
+      return add(s.getBytes(), true);
+    }
+
+    public KeyBuilder add(byte[] t) {
+      return add(t, false);
+    }
+
+    public KeyBuilder add(byte[] t, boolean sep) {
+      b[index] = t;
+      useSeparator[index] = sep;
+      length += t.length;
+      if (sep) {
+        length++;
+      }
+      index++;
+      return this;
+    }
+
+    public byte[] getBytes() throws IOException {
+      ByteArrayOutputStream baos = new ByteArrayOutputStream(length);
+      for (int i = 0; i < index; i++) {
+        baos.write(b[i]);
+        if (i < index-1 && useSeparator[i]) {
+          baos.write(0x0);
+        }
+      }
+      return baos.toByteArray();
+    }
+
+    public byte[] getBytesForLookup() throws IOException {
+      ByteArrayOutputStream baos = new ByteArrayOutputStream(length);
+      for (int i = 0; i < index; i++) {
+        baos.write(b[i]);
+        if (useSeparator[i]) {
+          baos.write(0x0);
+        }
+      }
+      return baos.toByteArray();
+    }
+  }
+
+  private static class KeyParser {
+    private final byte[] b;
+    private int offset;
+
+    public KeyParser(byte[] b, int offset) {
+      this.b = b;
+      this.offset = offset;
+    }
+
+    public String getNextString() throws IOException {
+      if (offset >= b.length) {
+        throw new IOException(
+            "tried to read nonexistent string from byte array");
+      }
+      int i = 0;
+      while (offset+i < b.length && b[offset+i] != 0x0) {
+        i++;
+      }
+      String s = new String(b, offset, i);
+      offset = offset + i + 1;
+      return s;
+    }
+
+    public long getNextLong() throws IOException {
+      if (offset+8 > b.length) {
+        throw new IOException("byte array ran out when trying to read long");
+      }
+      long l = readReverseOrderedLong(b, offset);
+      offset += 8;
+      return l;
+    }
+
+    public int getOffset() {
+      return offset;
+    }
+  }
+
+  @Override
+  public TimelineEntity getEntity(String entityId, String entityType,
+      EnumSet<Field> fields) throws IOException {
+    Long revStartTime = getStartTimeLong(entityId, entityType);
+    if (revStartTime == null) {
+      return null;
+    }
+    byte[] prefix = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
+        .add(entityType).add(writeReverseOrderedLong(revStartTime))
+        .add(entityId).getBytesForLookup();
+
+    DBIterator iterator = null;
+    try {
+      iterator = db.iterator();
+      iterator.seek(prefix);
+
+      return getEntity(entityId, entityType, revStartTime, fields, iterator,
+          prefix, prefix.length);
+    } finally {
+      IOUtils.cleanup(LOG, iterator);
+    }
+  }
+
+  /**
+   * Read entity from a db iterator.  If no information is found in the
+   * specified fields for this entity, return null.
+   */
+  private static TimelineEntity getEntity(String entityId, String entityType,
+      Long startTime, EnumSet<Field> fields, DBIterator iterator,
+      byte[] prefix, int prefixlen) throws IOException {
+    if (fields == null) {
+      fields = EnumSet.allOf(Field.class);
+    }
+
+    TimelineEntity entity = new TimelineEntity();
+    boolean events = false;
+    boolean lastEvent = false;
+    if (fields.contains(Field.EVENTS)) {
+      events = true;
+    } else if (fields.contains(Field.LAST_EVENT_ONLY)) {
+      lastEvent = true;
+    } else {
+      entity.setEvents(null);
+    }
+    boolean relatedEntities = false;
+    if (fields.contains(Field.RELATED_ENTITIES)) {
+      relatedEntities = true;
+    } else {
+      entity.setRelatedEntities(null);
+    }
+    boolean primaryFilters = false;
+    if (fields.contains(Field.PRIMARY_FILTERS)) {
+      primaryFilters = true;
+    } else {
+      entity.setPrimaryFilters(null);
+    }
+    boolean otherInfo = false;
+    if (fields.contains(Field.OTHER_INFO)) {
+      otherInfo = true;
+    } else {
+      entity.setOtherInfo(null);
+    }
+
+    // iterate through the entity's entry, parsing information if it is part
+    // of a requested field
+    for (; iterator.hasNext(); iterator.next()) {
+      byte[] key = iterator.peekNext().getKey();
+      if (!prefixMatches(prefix, prefixlen, key)) {
+        break;
+      }
+      if (key.length == prefixlen) {
+        continue;
+      }
+      if (key[prefixlen] == PRIMARY_FILTERS_COLUMN[0]) {
+        if (primaryFilters) {
+          addPrimaryFilter(entity, key,
+              prefixlen + PRIMARY_FILTERS_COLUMN.length);
+        }
+      } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
+        if (otherInfo) {
+          entity.addOtherInfo(parseRemainingKey(key,
+              prefixlen + OTHER_INFO_COLUMN.length),
+              GenericObjectMapper.read(iterator.peekNext().getValue()));
+        }
+      } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
+        if (relatedEntities) {
+          addRelatedEntity(entity, key,
+              prefixlen + RELATED_ENTITIES_COLUMN.length);
+        }
+      } else if (key[prefixlen] == EVENTS_COLUMN[0]) {
+        if (events || (lastEvent &&
+            entity.getEvents().size() == 0)) {
+          TimelineEvent event = getEntityEvent(null, key, prefixlen +
+              EVENTS_COLUMN.length, iterator.peekNext().getValue());
+          if (event != null) {
+            entity.addEvent(event);
+          }
+        }
+      } else {
+        if (key[prefixlen] !=
+            INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN[0]) {
+          LOG.warn(String.format("Found unexpected column for entity %s of " +
+              "type %s (0x%02x)", entityId, entityType, key[prefixlen]));
+        }
+      }
+    }
+
+    entity.setEntityId(entityId);
+    entity.setEntityType(entityType);
+    entity.setStartTime(startTime);
+
+    return entity;
+  }
+
+  @Override
+  public TimelineEvents getEntityTimelines(String entityType,
+      SortedSet<String> entityIds, Long limit, Long windowStart,
+      Long windowEnd, Set<String> eventType) throws IOException {
+    TimelineEvents events = new TimelineEvents();
+    if (entityIds == null || entityIds.isEmpty()) {
+      return events;
+    }
+    // create a lexicographically-ordered map from start time to entities
+    Map<byte[], List<EntityIdentifier>> startTimeMap = new TreeMap<byte[],
+        List<EntityIdentifier>>(new Comparator<byte[]>() {
+          @Override
+          public int compare(byte[] o1, byte[] o2) {
+            return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0,
+                o2.length);
+          }
+        });
+    DBIterator iterator = null;
+    try {
+      // look up start times for the specified entities
+      // skip entities with no start time
+      for (String entityId : entityIds) {
+        byte[] startTime = getStartTime(entityId, entityType);
+        if (startTime != null) {
+          List<EntityIdentifier> entities = startTimeMap.get(startTime);
+          if (entities == null) {
+            entities = new ArrayList<EntityIdentifier>();
+            startTimeMap.put(startTime, entities);
+          }
+          entities.add(new EntityIdentifier(entityId, entityType));
+        }
+      }
+      for (Entry<byte[], List<EntityIdentifier>> entry :
+          startTimeMap.entrySet()) {
+        // look up the events matching the given parameters (limit,
+        // start time, end time, event types) for entities whose start times
+        // were found and add the entities to the return list
+        byte[] revStartTime = entry.getKey();
+        for (EntityIdentifier entityIdentifier : entry.getValue()) {
+          EventsOfOneEntity entity = new EventsOfOneEntity();
+          entity.setEntityId(entityIdentifier.getId());
+          entity.setEntityType(entityType);
+          events.addEvent(entity);
+          KeyBuilder kb = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
+              .add(entityType).add(revStartTime).add(entityIdentifier.getId())
+              .add(EVENTS_COLUMN);
+          byte[] prefix = kb.getBytesForLookup();
+          if (windowEnd == null) {
+            windowEnd = Long.MAX_VALUE;
+          }
+          byte[] revts = writeReverseOrderedLong(windowEnd);
+          kb.add(revts);
+          byte[] first = kb.getBytesForLookup();
+          byte[] last = null;
+          if (windowStart != null) {
+            last = KeyBuilder.newInstance().add(prefix)
+                .add(writeReverseOrderedLong(windowStart)).getBytesForLookup();
+          }
+          if (limit == null) {
+            limit = DEFAULT_LIMIT;
+          }
+          // release any iterator left over from the previous entity
+          // before opening a new one, so iterators are not leaked
+          // across loop iterations (only the last one reaches the
+          // finally block)
+          IOUtils.cleanup(LOG, iterator);
+          iterator = db.iterator();
+          for (iterator.seek(first); entity.getEvents().size() < limit &&
+              iterator.hasNext(); iterator.next()) {
+            byte[] key = iterator.peekNext().getKey();
+            if (!prefixMatches(prefix, prefix.length, key) || (last != null &&
+                WritableComparator.compareBytes(key, 0, key.length, last, 0,
+                    last.length) > 0)) {
+              break;
+            }
+            TimelineEvent event = getEntityEvent(eventType, key, prefix.length,
+                iterator.peekNext().getValue());
+            if (event != null) {
+              entity.addEvent(event);
+            }
+          }
+        }
+      }
+    } finally {
+      IOUtils.cleanup(LOG, iterator);
+    }
+    return events;
+  }
+
+  /**
+   * Returns true if the byte array begins with the specified prefix.
+   */
+  private static boolean prefixMatches(byte[] prefix, int prefixlen,
+      byte[] b) {
+    if (b.length < prefixlen) {
+      return false;
+    }
+    return WritableComparator.compareBytes(prefix, 0, prefixlen, b, 0,
+        prefixlen) == 0;
+  }
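+
+  // Example: only the first prefixlen bytes of b are compared, so a key
+  // that merely extends the prefix still matches:
+  //   prefixMatches(new byte[]{0x1, 0x2}, 2, new byte[]{0x1, 0x2, 0x7})  -> true
+  //   prefixMatches(new byte[]{0x1, 0x2}, 2, new byte[]{0x1, 0x3, 0x7})  -> false
+  //   prefixMatches(new byte[]{0x1, 0x2}, 2, new byte[]{0x1})            -> false (too short)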
+
+  @Override
+  public TimelineEntities getEntities(String entityType,
+      Long limit, Long windowStart, Long windowEnd, String fromId, Long fromTs,
+      NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
+      EnumSet<Field> fields) throws IOException {
+    if (primaryFilter == null) {
+      // if no primary filter is specified, prefix the lookup with
+      // ENTITY_ENTRY_PREFIX
+      return getEntityByTime(ENTITY_ENTRY_PREFIX, entityType, limit,
+          windowStart, windowEnd, fromId, fromTs, secondaryFilters, fields);
+    } else {
+      // if a primary filter is specified, prefix the lookup with
+      // INDEXED_ENTRY_PREFIX + primaryFilterName + primaryFilterValue +
+      // ENTITY_ENTRY_PREFIX
+      byte[] base = KeyBuilder.newInstance().add(INDEXED_ENTRY_PREFIX)
+          .add(primaryFilter.getName())
+          .add(GenericObjectMapper.write(primaryFilter.getValue()), true)
+          .add(ENTITY_ENTRY_PREFIX).getBytesForLookup();
+      return getEntityByTime(base, entityType, limit, windowStart, windowEnd,
+          fromId, fromTs, secondaryFilters, fields);
+    }
+  }
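+
+  // Usage sketch (identifiers are hypothetical): fetch up to 50
+  // "MAPREDUCE_JOB" entities whose primary filter "user" equals "alice".
+  // Because the lookup is prefixed with INDEXED_ENTRY_PREFIX, only keys
+  // indexed under that filter are scanned:
+  //
+  //   TimelineEntities es = store.getEntities("MAPREDUCE_JOB", 50L, null,
+  //       null, null, null, new NameValuePair("user", "alice"), null, null);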
+
+  /**
+   * Retrieves a list of entities satisfying given parameters.
+   *
+   * @param base A byte array prefix for the lookup
+   * @param entityType The type of the entity
+   * @param limit A limit on the number of entities to return
+   * @param starttime The earliest entity start time to retrieve (exclusive)
+   * @param endtime The latest entity start time to retrieve (inclusive)
+   * @param fromId Retrieve entities starting with this entity
+   * @param fromTs Ignore entities with insert timestamp later than this ts
+   * @param secondaryFilters Filter pairs that the entities should match
+   * @param fields The set of fields to retrieve
+   * @return A list of entities
+   * @throws IOException
+   */
+  private TimelineEntities getEntityByTime(byte[] base,
+      String entityType, Long limit, Long starttime, Long endtime,
+      String fromId, Long fromTs, Collection<NameValuePair> secondaryFilters,
+      EnumSet<Field> fields) throws IOException {
+    DBIterator iterator = null;
+    try {
+      KeyBuilder kb = KeyBuilder.newInstance().add(base).add(entityType);
+      // only db keys matching the prefix (base + entity type) will be parsed
+      byte[] prefix = kb.getBytesForLookup();
+      if (endtime == null) {
+        // if end time is null, place no restriction on end time
+        endtime = Long.MAX_VALUE;
+      }
+      // construct a first key that will be seeked to using end time or fromId
+      byte[] first = null;
+      if (fromId != null) {
+        Long fromIdStartTime = getStartTimeLong(fromId, entityType);
+        if (fromIdStartTime == null) {
+          // no start time for provided id, so return empty entities
+          return new TimelineEntities();
+        }
+        if (fromIdStartTime <= endtime) {
+          // if provided id's start time falls before the end of the window,
+          // use it to construct the seek key
+          first = kb.add(writeReverseOrderedLong(fromIdStartTime))
+              .add(fromId).getBytesForLookup();
+        }
+      }
+      // if seek key wasn't constructed using fromId, construct it using end ts
+      if (first == null) {
+        first = kb.add(writeReverseOrderedLong(endtime)).getBytesForLookup();
+      }
+      byte[] last = null;
+      if (starttime != null) {
+        // if start time is not null, set a last key that will not be
+        // iterated past
+        last = KeyBuilder.newInstance().add(base).add(entityType)
+            .add(writeReverseOrderedLong(starttime)).getBytesForLookup();
+      }
+      if (limit == null) {
+        // if limit is not specified, use the default
+        limit = DEFAULT_LIMIT;
+      }
+
+      TimelineEntities entities = new TimelineEntities();
+      iterator = db.iterator();
+      iterator.seek(first);
+      // iterate until one of the following conditions is met: limit is
+      // reached, there are no more keys, the key prefix no longer matches,
+      // or a start time has been specified and reached/exceeded
+      while (entities.getEntities().size() < limit && iterator.hasNext()) {
+        byte[] key = iterator.peekNext().getKey();
+        if (!prefixMatches(prefix, prefix.length, key) || (last != null &&
+            WritableComparator.compareBytes(key, 0, key.length, last, 0,
+                last.length) > 0)) {
+          break;
+        }
+        // read the start time and entity id from the current key
+        KeyParser kp = new KeyParser(key, prefix.length);
+        Long startTime = kp.getNextLong();
+        String entityId = kp.getNextString();
+
+        if (fromTs != null) {
+          long insertTime = readReverseOrderedLong(iterator.peekNext()
+              .getValue(), 0);
+          if (insertTime > fromTs) {
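+            // the entity was inserted after fromTs; advance past all of
+            // its remaining keys (those sharing this key's entity-level
+            // prefix) without parsing them, then try the next entity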
+            byte[] firstKey = key;
+            while (iterator.hasNext() && prefixMatches(firstKey,
+                kp.getOffset(), key)) {
+              iterator.next();
+              key = iterator.peekNext().getKey();
+            }
+            continue;
+          }
+        }
+
+        // parse the entity that owns this key, iterating over all keys for
+        // the entity
+        TimelineEntity entity = getEntity(entityId, entityType, startTime,
+            fields, iterator, key, kp.getOffset());
+        // determine if the retrieved entity matches the provided secondary
+        // filters, and if so add it to the list of entities to return
+        boolean filterPassed = true;
+        if (secondaryFilters != null) {
+          for (NameValuePair filter : secondaryFilters) {
+            Object v = entity.getOtherInfo().get(filter.getName());
+            if (v == null) {
+              Set<Object> vs = entity.getPrimaryFilters()
+                  .get(filter.getName());
+              if (vs != null && !vs.contains(filter.getValue())) {
+                filterPassed = false;
+                break;
+              }
+            } else if (!v.equals(filter.getValue())) {
+              filterPassed = false;
+              break;
+            }
+          }
+        }
+        if (filterPassed) {
+          entities.addEntity(entity);
+        }
+      }
+      return entities;
+    } finally {
+      IOUtils.cleanup(LOG, iterator);
+    }
+  }
+
+  /**
+   * Put a single entity.  If there is an error, add a TimelinePutError to the
+   * given response.
+   */
+  private void put(TimelineEntity entity, TimelinePutResponse response) {
+    LockMap.CountingReentrantLock<EntityIdentifier> lock =
+        writeLocks.getLock(new EntityIdentifier(entity.getEntityId(),
+            entity.getEntityType()));
+    lock.lock();
+    WriteBatch writeBatch = null;
+    List<EntityIdentifier> relatedEntitiesWithoutStartTimes =
+        new ArrayList<EntityIdentifier>();
+    byte[] revStartTime = null;
+    try {
+      writeBatch = db.createWriteBatch();
+      List<TimelineEvent> events = entity.getEvents();
+      // look up the start time for the entity
+      StartAndInsertTime startAndInsertTime = getAndSetStartTime(
+          entity.getEntityId(), entity.getEntityType(),
+          entity.getStartTime(), events);
+      if (startAndInsertTime == null) {
+        // if no start time is found, add an error and return
+        TimelinePutError error = new TimelinePutError();
+        error.setEntityId(entity.getEntityId());
+        error.setEntityType(entity.getEntityType());
+        error.setErrorCode(TimelinePutError.NO_START_TIME);
+        response.addError(error);
+        return;
+      }
+      revStartTime = writeReverseOrderedLong(startAndInsertTime
+          .startTime);
+
+      Map<String, Set<Object>> primaryFilters = entity.getPrimaryFilters();
+
+      // write entity marker
+      byte[] markerKey = createEntityMarkerKey(entity.getEntityId(),
+          entity.getEntityType(), revStartTime);
+      byte[] markerValue = writeReverseOrderedLong(startAndInsertTime
+          .insertTime);
+      writeBatch.put(markerKey, markerValue);
+      writePrimaryFilterEntries(writeBatch, primaryFilters, markerKey,
+          markerValue);
+
+      // write event entries
+      if (events != null && !events.isEmpty()) {
+        for (TimelineEvent event : events) {
+          byte[] revts = writeReverseOrderedLong(event.getTimestamp());
+          byte[] key = createEntityEventKey(entity.getEntityId(),
+              entity.getEntityType(), revStartTime, revts,
+              event.getEventType());
+          byte[] value = GenericObjectMapper.write(event.getEventInfo());
+          writeBatch.put(key, value);
+          writePrimaryFilterEntries(writeBatch, primaryFilters, key, value);
+        }
+      }
+
+      // write related entity entries
+      Map<String, Set<String>> relatedEntities =
+          entity.getRelatedEntities();
+      if (relatedEntities != null && !relatedEntities.isEmpty()) {
+        for (Entry<String, Set<String>> relatedEntityList :
+            relatedEntities.entrySet()) {
+          String relatedEntityType = relatedEntityList.getKey();
+          for (String relatedEntityId : relatedEntityList.getValue()) {
+            // invisible "reverse" entries (entity -> related entity)
+            byte[] key = createReverseRelatedEntityKey(entity.getEntityId(),
+                entity.getEntityType(), revStartTime, relatedEntityId,
+                relatedEntityType);
+            writeBatch.put(key, EMPTY_BYTES);
+            // look up start time of related entity
+            byte[] relatedEntityStartTime = getStartTime(relatedEntityId,
+                relatedEntityType);
+            // delay writing the related entity if no start time is found
+            if (relatedEntityStartTime == null) {
+              relatedEntitiesWithoutStartTimes.add(
+                  new EntityIdentifier(relatedEntityId, relatedEntityType));
+              continue;
+            }
+            // write "forward" entry (related entity -> entity)
+            key = createRelatedEntityKey(relatedEntityId,
+                relatedEntityType, relatedEntityStartTime,
+                entity.getEntityId(), entity.getEntityType());
+            writeBatch.put(key, EMPTY_BYTES);
+          }
+        }
+      }
+
+      // write primary filter entries
+      if (primaryFilters != null && !primaryFilters.isEmpty()) {
+        for (Entry<String, Set<Object>> primaryFilter :
+            primaryFilters.entrySet()) {
+          for (Object primaryFilterValue : primaryFilter.getValue()) {
+            byte[] key = createPrimaryFilterKey(entity.getEntityId(),
+                entity.getEntityType(), revStartTime,
+                primaryFilter.getKey(), primaryFilterValue);
+            writeBatch.put(key, EMPTY_BYTES);
+            writePrimaryFilterEntries(writeBatch, primaryFilters, key,
+                EMPTY_BYTES);
+          }
+        }
+      }
+
+      // write other info entries
+      Map<String, Object> otherInfo = entity.getOtherInfo();
+      if (otherInfo != null && !otherInfo.isEmpty()) {
+        for (Entry<String, Object> i : otherInfo.entrySet()) {
+          byte[] key = createOtherInfoKey(entity.getEntityId(),
+              entity.getEntityType(), revStartTime, i.getKey());
+          byte[] value = GenericObjectMapper.write(i.getValue());
+          writeBatch.put(key, value);
+          writePrimaryFilterEntries(writeBatch, primaryFilters, key, value);
+        }
+      }
+      db.write(writeBatch);
+    } catch (IOException e) {
+      LOG.error("Error putting entity " + entity.getEntityId() +
+          " of type " + entity.getEntityType(), e);
+      TimelinePutError error = new TimelinePutError();
+      error.setEntityId(entity.getEntityId());
+      error.setEntityType(entity.getEntityType());
+      error.setErrorCode(TimelinePutError.IO_EXCEPTION);
+      response.addError(error);
+    } finally {
+      lock.unlock();
+      writeLocks.returnLock(lock);
+      IOUtils.cleanup(LOG, writeBatch);
+    }
+
+    for (EntityIdentifier relatedEntity : relatedEntitiesWithoutStartTimes) {
+      lock = writeLocks.getLock(relatedEntity);
+      lock.lock();
+      try {
+        StartAndInsertTime relatedEntityStartAndInsertTime =
+            getAndSetStartTime(relatedEntity.getId(), relatedEntity.getType(),
+            readReverseOrderedLong(revStartTime, 0), null);
+        if (relatedEntityStartAndInsertTime == null) {
+          throw new IOException("Error setting start time for related entity");
+        }
+        byte[] relatedEntityStartTime = writeReverseOrderedLong(
+            relatedEntityStartAndInsertTime.startTime);
+        db.put(createRelatedEntityKey(relatedEntity.getId(),
+            relatedEntity.getType(), relatedEntityStartTime,
+            entity.getEntityId(), entity.getEntityType()), EMPTY_BYTES);
+        db.put(createEntityMarkerKey(relatedEntity.getId(),
+            relatedEntity.getType(), relatedEntityStartTime),
+            writeReverseOrderedLong(relatedEntityStartAndInsertTime
+                .insertTime));
+      } catch (IOException e) {
+        LOG.error("Error putting related entity " + relatedEntity.getId() +
+            " of type " + relatedEntity.getType() + " for entity " +
+            entity.getEntityId() + " of type " + entity.getEntityType(), e);
+        TimelinePutError error = new TimelinePutError();
+        error.setEntityId(entity.getEntityId());
+        error.setEntityType(entity.getEntityType());
+        error.setErrorCode(TimelinePutError.IO_EXCEPTION);
+        response.addError(error);
+      } finally {
+        lock.unlock();
+        writeLocks.returnLock(lock);
+      }
+    }
+  }
+
+  /**
+   * For a given key / value pair that has been written to the db,
+   * write additional entries to the db for each primary filter.
+   */
+  private static void writePrimaryFilterEntries(WriteBatch writeBatch,
+      Map<String, Set<Object>> primaryFilters, byte[] key, byte[] value)
+      throws IOException {
+    if (primaryFilters != null && !primaryFilters.isEmpty()) {
+      for (Entry<String, Set<Object>> pf : primaryFilters.entrySet()) {
+        for (Object pfval : pf.getValue()) {
+          writeBatch.put(addPrimaryFilterToKey(pf.getKey(), pfval,
+              key), value);
+        }
+      }
+    }
+  }
+
+  @Override
+  public TimelinePutResponse put(TimelineEntities entities) {
+    try {
+      deleteLock.readLock().lock();
+      TimelinePutResponse response = new TimelinePutResponse();
+      for (TimelineEntity entity : entities.getEntities()) {
+        put(entity, response);
+      }
+      return response;
+    } finally {
+      deleteLock.readLock().unlock();
+    }
+  }
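+
+  // Usage sketch (identifiers are hypothetical): a minimal entity with a
+  // single event; the event timestamp doubles as the start time when no
+  // explicit start time is set:
+  //
+  //   TimelineEntity e = new TimelineEntity();
+  //   e.setEntityId("job_1");
+  //   e.setEntityType("MAPREDUCE_JOB");
+  //   TimelineEvent ev = new TimelineEvent();
+  //   ev.setEventType("SUBMITTED");
+  //   ev.setTimestamp(System.currentTimeMillis());
+  //   e.addEvent(ev);
+  //   TimelineEntities batch = new TimelineEntities();
+  //   batch.addEntity(e);
+  //   TimelinePutResponse resp = store.put(batch);  // carries per-entity errors, if any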
+
+  /**
+   * Get the unique start time for a given entity as a byte array that sorts
+   * the timestamps in reverse order (see {@link
+   * GenericObjectMapper#writeReverseOrderedLong(long)}).
+   *
+   * @param entityId The id of the entity
+   * @param entityType The type of the entity
+   * @return A byte array, null if not found
+   * @throws IOException
+   */
+  private byte[] getStartTime(String entityId, String entityType)
+      throws IOException {
+    Long l = getStartTimeLong(entityId, entityType);
+    return l == null ? null : writeReverseOrderedLong(l);
+  }
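+
+  // The reverse ordering means a larger timestamp encodes to a byte-wise
+  // smaller array, so a forward iteration over the db visits entities
+  // newest-first. For example:
+  //
+  //   byte[] a = writeReverseOrderedLong(200L);
+  //   byte[] b = writeReverseOrderedLong(100L);
+  //   // WritableComparator.compareBytes(a, 0, 8, b, 0, 8) < 0:
+  //   // the later time (200) sorts before the earlier one (100)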
+
+  /**
+   * Get the unique start time for a given entity as a Long.
+   *
+   * @param entityId The id of the entity
+   * @param entityType The type of the entity
+   * @return A Long, null if not found
+   * @throws IOException
+   */
+  private Long getStartTimeLong(String entityId, String entityType)
+      throws IOException {
+    EntityIdentifier entity = new EntityIdentifier(entityId, entityType);
+    // check the cache first, then fall back to the db
+    if (startTimeReadCache.containsKey(entity)) {
+      // found the start time in the cache
+      return startTimeReadCache.get(entity);
+    } else {
+      // try to look up the start time in the db
+      byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType());
+      byte[] v = db.get(b);
+      if (v == null) {
+        // did not find the start time in the db
+        return null;
+      } else {
+        // found the start time in the db
+        Long l = readReverseOrderedLong(v, 0);
+        startTimeReadCache.put(entity, l);
+        return l;
+      }
+    }
+  }
+
+  /**
+   * Get the unique start time for a given entity as a byte array that sorts
+   * the timestamps in reverse order (see {@link
+   * GenericObjectMapper#writeReverseOrderedLong(long)}). If the start time
+   * doesn't exist, set it based on the information provided. Should only be
+   * called when a lock has been obtained on the entity.
+   *
+   * @param entityId The id of the entity
+   * @param entityType The type of the entity
+   * @param startTime The start time of the entity, or null
+   * @param events A list of events for the entity, or null
+   * @return A StartAndInsertTime
+   * @throws IOException
+   */
+  private StartAndInsertTime getAndSetStartTime(String entityId,
+      String entityType, Long startTime, List<TimelineEvent> events)
+      throws IOException {
+    EntityIdentifier entity = new EntityIdentifier(entityId, entityType);
+    if (startTime == null) {
+      // start time is not provided, so try to look it up
+      if (startTimeWriteCache.containsKey(entity)) {
+        // found the start time in the cache
+        return startTimeWriteCache.get(entity);
+      } else {
+        if (events != null) {
+          // prepare a start time from events in case it is needed
+          Long min = Long.MAX_VALUE;
+          for (TimelineEvent e : events) {
+            if (min > e.getTimestamp()) {
+              min = e.getTimestamp();
+            }
+          }
+          startTime = min;
+        }
+        return checkStartTimeInDb(entity, startTime);
+      }
+    } else {
+      // start time is provided
+      if (startTimeWriteCache.containsKey(entity)) {
+        // always use start time from cache if it exists
+        return startTimeWriteCache.get(entity);
+      } else {
+        // check the provided start time matches the db
+        return checkStartTimeInDb(entity, startTime);
+      }
+    }
+  }
+
+  /**
+   * Checks db for start time and returns it if it exists.  If it doesn't
+   * exist, writes the suggested start time (if it is not null).  This is
+   * only called when the start time is not found in the cache,
+   * so it adds it back into the cache if it is found. Should only be called
+   * when a lock has been obtained on the entity.
+   */
+  private StartAndInsertTime checkStartTimeInDb(EntityIdentifier entity,
+      Long suggestedStartTime) throws IOException {
+    StartAndInsertTime startAndInsertTime = null;
+    // create lookup key for start time
+    byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType());
+    // retrieve value for key
+    byte[] v = db.get(b);
+    if (v == null) {
+      // start time doesn't exist in db
+      if (suggestedStartTime == null) {
+        return null;
+      }
+      startAndInsertTime = new StartAndInsertTime(suggestedStartTime,
+          System.currentTimeMillis());
+
+      // write suggested start time
+      v = new byte[16];
+      writeReverseOrderedLong(suggestedStartTime, v, 0);
+      writeReverseOrderedLong(startAndInsertTime.insertTime, v, 8);
+      WriteOptions writeOptions = new WriteOptions();
+      writeOptions.sync(true);
+      db.put(b, v, writeOptions);
+    } else {
+      // found start time in db, so ignore suggested start time
+      startAndInsertTime = new StartAndInsertTime(readReverseOrderedLong(v, 0),
+          readReverseOrderedLong(v, 8));
+    }
+    startTimeWriteCache.put(entity, startAndInsertTime);
+    startTimeReadCache.put(entity, startAndInsertTime.startTime);
+    return startAndInsertTime;
+  }
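+
+  // The stored value is 16 bytes, matching the two reads above: the
+  // reverse-ordered start time in bytes 0-7 and the reverse-ordered
+  // insert time in bytes 8-15:
+  //
+  //   long start  = readReverseOrderedLong(v, 0);
+  //   long insert = readReverseOrderedLong(v, 8);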
+
+  /**
+   * Creates a key for looking up the start time of a given entity,
+   * of the form START_TIME_LOOKUP_PREFIX + entity type + entity id.
+   */
+  private static byte[] createStartTimeLookupKey(String entityId,
+      String entityType) throws IOException {
+    return KeyBuilder.newInstance().add(START_TIME_LOOKUP_PREFIX)
+        .add(entityType).add(entityId).getBytes();
+  }
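+
+  // Sketch of the resulting layout (assuming KeyBuilder joins string
+  // components with a 0x0 separator byte, which the end-byte check in
+  // getEntityTypes below relies on):
+  //
+  //   START_TIME_LOOKUP_PREFIX 0x0 entityType 0x0 entityId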
+
+  /**
+   * Creates an entity marker, serializing ENTITY_ENTRY_PREFIX + entity type +
+   * revstarttime + entity id.
+   */
+  private static byte[] createEntityMarkerKey(String entityId,
+      String entityType, byte[] revStartTime) throws IOException {
+    return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
+        .add(entityType).add(revStartTime).add(entityId).getBytesForLookup();
+  }
+
+  /**
+   * Creates an index entry for the given key of the form
+   * INDEXED_ENTRY_PREFIX + primaryfiltername + primaryfiltervalue + key.
+   */
+  private static byte[] addPrimaryFilterToKey(String primaryFilterName,
+      Object primaryFilterValue, byte[] key) throws IOException {
+    return KeyBuilder.newInstance().add(INDEXED_ENTRY_PREFIX)
+        .add(primaryFilterName)
+        .add(GenericObjectMapper.write(primaryFilterValue), true).add(key)
+        .getBytes();
+  }
+
+  /**
+   * Creates an event key, serializing ENTITY_ENTRY_PREFIX + entity type +
+   * revstarttime + entity id + EVENTS_COLUMN + reveventtimestamp + event type.
+   */
+  private static byte[] createEntityEventKey(String entityId,
+      String entityType, byte[] revStartTime, byte[] revEventTimestamp,
+      String eventType) throws IOException {
+    return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
+        .add(entityType).add(revStartTime).add(entityId).add(EVENTS_COLUMN)
+        .add(revEventTimestamp).add(eventType).getBytes();
+  }
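+
+  // Example (identifiers are hypothetical): an event of type "SUBMITTED"
+  // at t=1000 for entity "job_1" of type "MAPREDUCE_JOB" started at
+  // t=900 serializes, schematically (rev = reverse-ordered long):
+  //
+  //   ENTITY_ENTRY_PREFIX "MAPREDUCE_JOB" rev(900) "job_1"
+  //       EVENTS_COLUMN rev(1000) "SUBMITTED"
+  //
+  // so an entity's events sort newest-first under the byte comparator.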
+
+  /**
+   * Creates an event object from the given key, offset, and value.  If the
+   * event type is not contained in the specified set of event types,
+   * returns null.
+   */
+  private static TimelineEvent getEntityEvent(Set<String> eventTypes,
+      byte[] key, int offset, byte[] value) throws IOException {
+    KeyParser kp = new KeyParser(key, offset);
+    long ts = kp.getNextLong();
+    String tstype = kp.getNextString();
+    if (eventTypes == null || eventTypes.contains(tstype)) {
+      TimelineEvent event = new TimelineEvent();
+      event.setTimestamp(ts);
+      event.setEventType(tstype);
+      Object o = GenericObjectMapper.read(value);
+      if (o == null) {
+        event.setEventInfo(null);
+      } else if (o instanceof Map) {
+        @SuppressWarnings("unchecked")
+        Map<String, Object> m = (Map<String, Object>) o;
+        event.setEventInfo(m);
+      } else {
+        throw new IOException("Couldn't deserialize event info map");
+      }
+      return event;
+    }
+    return null;
+  }
+
+  /**
+   * Creates a primary filter key, serializing ENTITY_ENTRY_PREFIX +
+   * entity type + revstarttime + entity id + PRIMARY_FILTERS_COLUMN + name +
+   * value.
+   */
+  private static byte[] createPrimaryFilterKey(String entityId,
+      String entityType, byte[] revStartTime, String name, Object value)
+      throws IOException {
+    return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType)
+        .add(revStartTime).add(entityId).add(PRIMARY_FILTERS_COLUMN).add(name)
+        .add(GenericObjectMapper.write(value)).getBytes();
+  }
+
+  /**
+   * Parses the primary filter from the given key at the given offset and
+   * adds it to the given entity.
+   */
+  private static void addPrimaryFilter(TimelineEntity entity, byte[] key,
+      int offset) throws IOException {
+    KeyParser kp = new KeyParser(key, offset);
+    String name = kp.getNextString();
+    Object value = GenericObjectMapper.read(key, kp.getOffset());
+    entity.addPrimaryFilter(name, value);
+  }
+
+  /**
+   * Creates an other info key, serializing ENTITY_ENTRY_PREFIX + entity type +
+   * revstarttime + entity id + OTHER_INFO_COLUMN + name.
+   */
+  private static byte[] createOtherInfoKey(String entityId, String entityType,
+      byte[] revStartTime, String name) throws IOException {
+    return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType)
+        .add(revStartTime).add(entityId).add(OTHER_INFO_COLUMN).add(name)
+        .getBytes();
+  }
+
+  /**
+   * Creates a string representation of the byte array from the given offset
+   * to the end of the array (for parsing other info keys).
+   */
+  private static String parseRemainingKey(byte[] b, int offset) {
+    return new String(b, offset, b.length - offset);
+  }
+
+  /**
+   * Creates a related entity key, serializing ENTITY_ENTRY_PREFIX +
+   * entity type + revstarttime + entity id + RELATED_ENTITIES_COLUMN +
+   * relatedentity type + relatedentity id.
+   */
+  private static byte[] createRelatedEntityKey(String entityId,
+      String entityType, byte[] revStartTime, String relatedEntityId,
+      String relatedEntityType) throws IOException {
+    return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType)
+        .add(revStartTime).add(entityId).add(RELATED_ENTITIES_COLUMN)
+        .add(relatedEntityType).add(relatedEntityId).getBytes();
+  }
+
+  /**
+   * Parses the related entity from the given key at the given offset and
+   * adds it to the given entity.
+   */
+  private static void addRelatedEntity(TimelineEntity entity, byte[] key,
+      int offset) throws IOException {
+    KeyParser kp = new KeyParser(key, offset);
+    String type = kp.getNextString();
+    String id = kp.getNextString();
+    entity.addRelatedEntity(type, id);
+  }
+
+  /**
+   * Creates a reverse related entity key, serializing ENTITY_ENTRY_PREFIX +
+   * entity type + revstarttime + entity id +
+   * INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN +
+   * relatedentity type + relatedentity id.
+   */
+  private static byte[] createReverseRelatedEntityKey(String entityId,
+      String entityType, byte[] revStartTime, String relatedEntityId,
+      String relatedEntityType) throws IOException {
+    return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType)
+        .add(revStartTime).add(entityId)
+        .add(INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN)
+        .add(relatedEntityType).add(relatedEntityId).getBytes();
+  }
+
+  /**
+   * Clears the cache to test reloading start times from leveldb (only for
+   * testing).
+   */
+  @VisibleForTesting
+  void clearStartTimeCache() {
+    startTimeWriteCache.clear();
+    startTimeReadCache.clear();
+  }
+
+  @VisibleForTesting
+  static int getStartTimeReadCacheSize(Configuration conf) {
+    return conf.getInt(
+        YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
+        YarnConfiguration.
+            DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
+  }
+
+  @VisibleForTesting
+  static int getStartTimeWriteCacheSize(Configuration conf) {
+    return conf.getInt(
+        YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
+        YarnConfiguration.
+            DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
+  }
+
+  // warning is suppressed to prevent Eclipse from flagging the unclosed resource
+  @SuppressWarnings("resource")
+  @VisibleForTesting
+  List<String> getEntityTypes() throws IOException {
+    DBIterator iterator = null;
+    try {
+      iterator = getDbIterator(false);
+      List<String> entityTypes = new ArrayList<String>();
+      iterator.seek(ENTITY_ENTRY_PREFIX);
+      while (iterator.hasNext()) {
+        byte[] key = iterator.peekNext().getKey();
+        if (key[0] != ENTITY_ENTRY_PREFIX[0]) {
+          break;
+        }
+        KeyParser kp = new KeyParser(key,
+            ENTITY_ENTRY_PREFIX.length);
+        String entityType = kp.getNextString();
+        entityTypes.add(entityType);
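+        // seek past every remaining key of this type: the lookup key ends
+        // in the 0x0 separator that follows the type string, so bumping
+        // that final byte to 0x1 yields the smallest key sorting after
+        // the entire type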
+        byte[] lookupKey = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
+            .add(entityType).getBytesForLookup();
+        if (lookupKey[lookupKey.length - 1] != 0x0) {
+          throw new IOException("Found unexpected end byte in lookup key");
+        }
+        lookupKey[lookupKey.length - 1] = 0x1;
+        iterator.seek(lookupKey);
+      }
+      return entityTypes;
+    } finally {
+      IOUtils.cleanup(LOG, iterator);
+    }
+  }
+
+  /**
+   * Finds all keys in the db that have a given prefix and deletes them on
+   * the given write batch.
+   */
+  private void deleteKeysWithPrefix(WriteBatch writeBatch, byte[] prefix,
+      DBIterator iterator) {
+    for (iterator.seek(prefix); iterator.hasNext(); iterator.next()) {
+      byte[] key = iterator.peekNext().getKey();
+      if (!prefixMatches(prefix, prefix.length, key)) {
+        break;
+      }
+      writeBatch.delete(key);
+    }
+  }
+
+  @VisibleForTesting
+  boolean deleteNextEntity(String entityType, byte[] reverseTimestamp,
+      DBIterator iterator, DBIterator pfIterator, boolean seeked)
+      throws IOException {
+    WriteBatch writeBatch = null;
+    try {
+      KeyBuilder kb = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
+          .add(entityType);
+      byte[] typePrefix = kb.getBytesForLookup();
+      kb.add(reverseTimestamp);
+      if (!seeked) {
+        iterator.seek(kb.getBytesForLookup());
+      }
+      if (!iterator.hasNext()) {
+        return false;
+      }
+      byte[] entityKey = iterator.peekNext().getKey();
+      if (!prefixMatches(typePrefix, typePrefix.length, entityKey)) {
+        return false;
+      }
+
+      // skip the 8-byte reverse start time and read the entity id
+      KeyParser kp = new KeyParser(entityKey, typePrefix.length + 8);
+      String entityId = kp.getNextString();
+      int prefixlen = kp.getOffset();
+      byte[] deletePrefix = new byte[prefixlen];
+      System.arraycopy(entityKey, 0, deletePrefix, 0, prefixlen);
+
+      writeBatch = db.createWriteBatch();
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Deleting entity type:" + entityType + " id:" + entityId);
+      }
+      // remove start time from cache and db
+      writeBatch.delete(createStartTimeLookupKey(entityId, entityType));
+      EntityIdentifier entityIdentifier =
+          new EntityIdentifier(entityId, entityType);
+      startTimeReadCache.remove(entityIdentifier);
+      startTimeWriteCache.remove(entityIdentifier);
+
+      // delete current entity
+      for (; iterator.hasNext(); iterator.next()) {
+        byte[] key = iterator.peekNext().getKey();
+        if (!prefixMatches(entityKey, prefixlen, key)) {
+          break;
+        }
+        writeBatch.delete(key);
+
+        if (key.length == prefixlen) {
+          continue;
+        }
+        if (key[prefixlen] == PRIMARY_FILTERS_COLUMN[0]) {
+          kp = new KeyParser(key,
+              prefixlen + PRIMARY_FILTERS_COLUMN.length);
+          String name = kp.getNextString();
+          Object value = GenericObjectMapper.read(key, kp.getOffset());
+          deleteKeysWithPrefix(writeBatch, addPrimaryFilterToKey(name, value,
+              deletePrefix), pfIterator);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Deleting entity type:" + entityType + " id:" +
+                entityId + " primary filter entry " + name + " " +
+                value);
+          }
+        } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
+          kp = new KeyParser(key,
+              prefixlen + RELATED_ENTITIES_COLUMN.length);
+          String type = kp.getNextString();
+          String id = kp.getNextString();
+          byte[] relatedEntityStartTime = getStartTime(id, type);
+          if (relatedEntityStartTime == null) {
+            LOG.warn("Found no start time for " +
+                "related entity " + id + " of type " + type + " while " +
+                "deleting " + entityId + " of type " + entityType);
+            continue;
+          }
+          writeBatch.delete(createReverseRelatedEntityKey(id, type,
+              relatedEntityStartTime, entityId, entityType));
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Deleting entity type:" + entityType + " id:" +
+                entityId + " from invisible reverse related entity " +
+                "entry of type:" + type + " id:" + id);
+          }
+        } else if (key[prefixlen] ==
+            INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN[0]) {
+          kp = new KeyParser(key, prefixlen +
+              INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN.length);
+          String type = kp.getNextString();
+          String id = kp.getNextString();
+          byte[] relatedEntityStartTime = getStartTime(id, type);
+          if (relatedEntityStartTime == null) {
+            LOG.warn("Found no start time for reverse " +
+                "related entity " + id + " of type " + type + " while " +
+                "deleting " + entityId + " of type " + entityType);
+            continue;
+          }
+          writeBatch.delete(createRelatedEntityKey(id, type,
+              relatedEntityStartTime, entityId, entityType));
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Deleting entity type:" + entityType + " id:" +
+                entityId + " from related entity entry of type:" +
+                type + " id:" + id);
+          }
+        }
+      }
+      WriteOptions writeOptions = new WriteOptions();
+      writeOptions.sync(true);
+      db.write(writeBatch, writeOptions);
+      return true;
+    } finally {
+      IOUtils.cleanup(LOG, writeBatch);
+    }
+  }
+
+  /**
+   * Discards entities with start timestamp less than or equal to the given
+   * timestamp.
+   */
+  @VisibleForTesting
+  void discardOldEntities(long timestamp)
+      throws IOException, InterruptedException {
+    byte[] reverseTimestamp = writeReverseOrderedLong(timestamp);
+    long totalCount = 0;
+    long t1 = System.currentTimeMillis();
+    try {
+      List<String> entityTypes = getEntityTypes();
+      for (String entityType : entityTypes) {
+        DBIterator iterator = null;
+        DBIterator pfIterator = null;
+        long typeCount = 0;
+        try {
+          deleteLock.writeLock().lock();
+          iterator = getDbIterator(false);
+          pfIterator = getDbIterator(false);
+
+          if (deletionThread != null && deletionThread.isInterrupted()) {
+            throw new InterruptedException();
+          }
+          boolean seeked = false;
+          while (deleteNextEntity(entityType, reverseTimestamp, iterator,
+              pfIterator, seeked)) {
+            typeCount++;
+            totalCount++;
+            seeked = true;
+            if (deletionThread != null && deletionThread.isInterrupted()) {
+              throw new InterruptedException();
+            }
+          }
+        } catch (IOException e) {
+          LOG.error("Got IOException while deleting entities for type " +
+              entityType + ", continuing to next type", e);
+        } finally {
+          IOUtils.cleanup(LOG, iterator, pfIterator);
+          deleteLock.writeLock().unlock();
+          if (typeCount > 0) {
+            LOG.info("Deleted " + typeCount + " entities of type " +
+                entityType);
+          }
+        }
+      }
+    } finally {
+      long t2 = System.currentTimeMillis();
+      LOG.info("Discarded " + totalCount + " entities for timestamp " +
+          timestamp + " and earlier in " + (t2 - t1) / 1000.0 + " seconds");
+    }
+  }
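+
+  // Example (the ttl value is hypothetical; the deletion thread that
+  // normally drives this is configured elsewhere): discard everything
+  // that started more than seven days ago:
+  //
+  //   long ttlMs = 7L * 24 * 60 * 60 * 1000;
+  //   store.discardOldEntities(System.currentTimeMillis() - ttlMs);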
+
+  @VisibleForTesting
+  DBIterator getDbIterator(boolean fillCache) {
+    ReadOptions readOptions = new ReadOptions();
+    readOptions.fillCache(fillCache);
+    return db.iterator(readOptions);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/MemoryTimelineStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/MemoryTimelineStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/MemoryTimelineStore.java
new file mode 100644
index 0000000..86ac1f8
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/MemoryTimelineStore.java
@@ -0,0 +1,360 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
+
+/**
+ * In-memory implementation of {@link TimelineStore}. This implementation is
+ * for test purposes only. Because all data is held in instance-level maps,
+ * creating more than one instance means reads and writes may go to different
+ * in-memory stores.
+ */
+@Private
+@Unstable
+public class MemoryTimelineStore
+    extends AbstractService implements TimelineStore {
+
+  private Map<EntityIdentifier, TimelineEntity> entities =
+      new HashMap<EntityIdentifier, TimelineEntity>();
+  private Map<EntityIdentifier, Long> entityInsertTimes =
+      new HashMap<EntityIdentifier, Long>();
+
+  public MemoryTimelineStore() {
+    super(MemoryTimelineStore.class.getName());
+  }
+
+  @Override
+  public TimelineEntities getEntities(String entityType, Long limit,
+      Long windowStart, Long windowEnd, String fromId, Long fromTs,
+      NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
+      EnumSet<Field> fields) {
+    if (limit == null) {
+      limit = DEFAULT_LIMIT;
+    }
+    if (windowStart == null) {
+      windowStart = Long.MIN_VALUE;
+    }
+    if (windowEnd == null) {
+      windowEnd = Long.MAX_VALUE;
+    }
+    if (fields == null) {
+      fields = EnumSet.allOf(Field.class);
+    }
+
+    Iterator<TimelineEntity> entityIterator = null;
+    if (fromId != null) {
+      TimelineEntity firstEntity = entities.get(new EntityIdentifier(fromId,
+          entityType));
+      if (firstEntity == null) {
+        return new TimelineEntities();
+      } else {
+        entityIterator = new TreeSet<TimelineEntity>(entities.values())
+            .tailSet(firstEntity, true).iterator();
+      }
+    }
+    if (entityIterator == null) {
+      // iterate in sorted (most recent start time first) order so the
+      // limit below keeps the right entities; a PriorityQueue's iterator
+      // makes no ordering guarantee
+      entityIterator = new TreeSet<TimelineEntity>(entities.values())
+          .iterator();
+    }
+
+    List<TimelineEntity> entitiesSelected = new ArrayList<TimelineEntity>();
+    while (entityIterator.hasNext()) {
+      TimelineEntity entity = entityIterator.next();
+      if (entitiesSelected.size() >= limit) {
+        break;
+      }
+      if (!entity.getEntityType().equals(entityType)) {
+        continue;
+      }
+      if (entity.getStartTime() <= windowStart) {
+        continue;
+      }
+      if (entity.getStartTime() > windowEnd) {
+        continue;
+      }
+      if (fromTs != null && entityInsertTimes.get(new EntityIdentifier(
+          entity.getEntityId(), entity.getEntityType())) > fromTs) {
+        continue;
+      }
+      if (primaryFilter != null &&
+          !matchPrimaryFilter(entity.getPrimaryFilters(), primaryFilter)) {
+        continue;
+      }
+      if (secondaryFilters != null) { // AND logic
+        boolean flag = true;
+        for (NameValuePair secondaryFilter : secondaryFilters) {
+          if (secondaryFilter != null && !matchPrimaryFilter(
+              entity.getPrimaryFilters(), secondaryFilter) &&
+              !matchFilter(entity.getOtherInfo(), secondaryFilter)) {
+            flag = false;
+            break;
+          }
+        }
+        if (!flag) {
+          continue;
+        }
+      }
+      entitiesSelected.add(entity);
+    }
+    List<TimelineEntity> entitiesToReturn = new ArrayList<TimelineEntity>();
+    for (TimelineEntity entitySelected : entitiesSelected) {
+      entitiesToReturn.add(maskFields(entitySelected, fields));
+    }
+    Collections.sort(entitiesToReturn);
+    TimelineEntities entitiesWrapper = new TimelineEntities();
+    entitiesWrapper.setEntities(entitiesToReturn);
+    return entitiesWrapper;
+  }
+
+  @Override
+  public TimelineEntity getEntity(String entityId, String entityType,
+      EnumSet<Field> fieldsToRetrieve) {
+    if (fieldsToRetrieve == null) {
+      fieldsToRetrieve = EnumSet.allOf(Field.class);
+    }
+    TimelineEntity entity =
+        entities.get(new EntityIdentifier(entityId, entityType));
+    if (entity == null) {
+      return null;
+    } else {
+      return maskFields(entity, fieldsToRetrieve);
+    }
+  }
+
+  @Override
+  public TimelineEvents getEntityTimelines(String entityType,
+      SortedSet<String> entityIds, Long limit, Long windowStart,
+      Long windowEnd,
+      Set<String> eventTypes) {
+    TimelineEvents allEvents = new TimelineEvents();
+    if (entityIds == null) {
+      return allEvents;
+    }
+    if (limit == null) {
+      limit = DEFAULT_LIMIT;
+    }
+    if (windowStart == null) {
+      windowStart = Long.MIN_VALUE;
+    }
+    if (windowEnd == null) {
+      windowEnd = Long.MAX_VALUE;
+    }
+    for (String entityId : entityIds) {
+      EntityIdentifier entityID = new EntityIdentifier(entityId, entityType);
+      TimelineEntity entity = entities.get(entityID);
+      if (entity == null) {
+        continue;
+      }
+      EventsOfOneEntity events = new EventsOfOneEntity();
+      events.setEntityId(entityId);
+      events.setEntityType(entityType);
+      for (TimelineEvent event : entity.getEvents()) {
+        if (events.getEvents().size() >= limit) {
+          break;
+        }
+        if (event.getTimestamp() <= windowStart) {
+          continue;
+        }
+        if (event.getTimestamp() > windowEnd) {
+          continue;
+        }
+        if (eventTypes != null && !eventTypes.contains(event.getEventType())) {
+          continue;
+        }
+        events.addEvent(event);
+      }
+      allEvents.addEvent(events);
+    }
+    return allEvents;
+  }
+
+  @Override
+  public TimelinePutResponse put(TimelineEntities data) {
+    TimelinePutResponse response = new TimelinePutResponse();
+    for (TimelineEntity entity : data.getEntities()) {
+      EntityIdentifier entityId =
+          new EntityIdentifier(entity.getEntityId(), entity.getEntityType());
+      // store entity info in memory
+      TimelineEntity existingEntity = entities.get(entityId);
+      if (existingEntity == null) {
+        existingEntity = new TimelineEntity();
+        existingEntity.setEntityId(entity.getEntityId());
+        existingEntity.setEntityType(entity.getEntityType());
+        existingEntity.setStartTime(entity.getStartTime());
+        entities.put(entityId, existingEntity);
+        entityInsertTimes.put(entityId, System.currentTimeMillis());
+      }
+      if (entity.getEvents() != null) {
+        if (existingEntity.getEvents() == null) {
+          existingEntity.setEvents(entity.getEvents());
+        } else {
+          existingEntity.addEvents(entity.getEvents());
+        }
+        Collections.sort(existingEntity.getEvents());
+      }
+      // check startTime
+      if (existingEntity.getStartTime() == null) {
+        if (existingEntity.getEvents() == null
+            || existingEntity.getEvents().isEmpty()) {
+          TimelinePutError error = new TimelinePutError();
+          error.setEntityId(entityId.getId());
+          error.setEntityType(entityId.getType());
+          error.setErrorCode(TimelinePutError.NO_START_TIME);
+          response.addError(error);
+          entities.remove(entityId);
+          entityInsertTimes.remove(entityId);
+          continue;
+        } else {
+          Long min = Long.MAX_VALUE;
+          for (TimelineEvent e : entity.getEvents()) {
+            if (min > e.getTimestamp()) {
+              min = e.getTimestamp();
+            }
+          }
+          existingEntity.setStartTime(min);
+        }
+      }
+      if (entity.getPrimaryFilters() != null) {
+        if (existingEntity.getPrimaryFilters() == null) {
+          existingEntity.setPrimaryFilters(new HashMap<String, Set<Object>>());
+        }
+        for (Entry<String, Set<Object>> pf :
+            entity.getPrimaryFilters().entrySet()) {
+          for (Object pfo : pf.getValue()) {
+            existingEntity.addPrimaryFilter(pf.getKey(), maybeConvert(pfo));
+          }
+        }
+      }
+      if (entity.getOtherInfo() != null) {
+        if (existingEntity.getOtherInfo() == null) {
+          existingEntity.setOtherInfo(new HashMap<String, Object>());
+        }
+        for (Entry<String, Object> info : entity.getOtherInfo().entrySet()) {
+          existingEntity.addOtherInfo(info.getKey(),
+              maybeConvert(info.getValue()));
+        }
+      }
+      // relate it to other entities
+      if (entity.getRelatedEntities() == null) {
+        continue;
+      }
+      for (Map.Entry<String, Set<String>> partRelatedEntities : entity
+          .getRelatedEntities().entrySet()) {
+        if (partRelatedEntities == null) {
+          continue;
+        }
+        for (String idStr : partRelatedEntities.getValue()) {
+          EntityIdentifier relatedEntityId =
+              new EntityIdentifier(idStr, partRelatedEntities.getKey());
+          TimelineEntity relatedEntity = entities.get(relatedEntityId);
+          if (relatedEntity != null) {
+            relatedEntity.addRelatedEntity(
+                existingEntity.getEntityType(), existingEntity.getEntityId());
+          } else {
+            relatedEntity = new TimelineEntity();
+            relatedEntity.setEntityId(relatedEntityId.getId());
+            relatedEntity.setEntityType(relatedEntityId.getType());
+            relatedEntity.setStartTime(existingEntity.getStartTime());
+            relatedEntity.addRelatedEntity(existingEntity.getEntityType(),
+                existingEntity.getEntityId());
+            entities.put(relatedEntityId, relatedEntity);
+            entityInsertTimes.put(relatedEntityId, System.currentTimeMillis());
+          }
+        }
+      }
+    }
+    return response;
+  }
+
+  private static TimelineEntity maskFields(
+      TimelineEntity entity, EnumSet<Field> fields) {
+    // Conceal the fields that are not going to be exposed
+    TimelineEntity entityToReturn = new TimelineEntity();
+    entityToReturn.setEntityId(entity.getEntityId());
+    entityToReturn.setEntityType(entity.getEntityType());
+    entityToReturn.setStartTime(entity.getStartTime());
+    entityToReturn.setEvents(fields.contains(Field.EVENTS) ?
+        entity.getEvents() : fields.contains(Field.LAST_EVENT_ONLY) ?
+            Arrays.asList(entity.getEvents().get(0)) : null);
+    entityToReturn.setRelatedEntities(fields.contains(Field.RELATED_ENTITIES) ?
+        entity.getRelatedEntities() : null);
+    entityToReturn.setPrimaryFilters(fields.contains(Field.PRIMARY_FILTERS) ?
+        entity.getPrimaryFilters() : null);
+    entityToReturn.setOtherInfo(fields.contains(Field.OTHER_INFO) ?
+        entity.getOtherInfo() : null);
+    return entityToReturn;
+  }
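+
+  // Note: EVENTS wins over LAST_EVENT_ONLY; with LAST_EVENT_ONLY alone,
+  // only element 0 of the event list is kept, which is the most recent
+  // event since put() keeps the list sorted newest-first. Fields absent
+  // from the set come back null.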
+
+  private static boolean matchFilter(Map<String, Object> tags,
+      NameValuePair filter) {
+    Object value = tags.get(filter.getName());
+    if (value == null) { // doesn't have the filter
+      return false;
+    } else if (!value.equals(filter.getValue())) { // doesn't match the filter
+      return false;
+    }
+    return true;
+  }
+
+  private static boolean matchPrimaryFilter(Map<String, Set<Object>> tags,
+      NameValuePair filter) {
+    Set<Object> value = tags.get(filter.getName());
+    if (value == null) { // doesn't have the filter
+      return false;
+    } else {
+      return value.contains(filter.getValue());
+    }
+  }
+
+  private static Object maybeConvert(Object o) {
+    if (o instanceof Long) {
+      Long l = (Long)o;
+      if (l >= Integer.MIN_VALUE && l <= Integer.MAX_VALUE) {
+        return l.intValue();
+      }
+    }
+    return o;
+  }
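+
+  // maybeConvert narrows Longs that fit in an int to Integer, presumably
+  // mirroring how small integers deserialize from JSON, so that stored
+  // and queried values compare equal either way. For example:
+  //   maybeConvert(42L)          -> Integer 42
+  //   maybeConvert(5000000000L)  -> unchanged (exceeds Integer.MAX_VALUE)
+  //   maybeConvert("abc")        -> unchanged (not a Long)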
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/NameValuePair.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/NameValuePair.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/NameValuePair.java
new file mode 100644
index 0000000..d8dabd2
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/NameValuePair.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A class holding a name and value pair, used for specifying filters in
+ * {@link TimelineReader}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class NameValuePair {
+  String name;
+  Object value;
+
+  public NameValuePair(String name, Object value) {
+    this.name = name;
+    this.value = value;
+  }
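+
+  // Example (values are hypothetical): a filter matching entities whose
+  // primary filters or other info map "user" to "alice":
+  //   NameValuePair filter = new NameValuePair("user", "alice");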
+
+  /**
+   * Get the name.
+   * @return The name.
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Get the value.
+   * @return The value.
+   */
+  public Object getValue() {
+    return value;
+  }
+
+  @Override
+  public String toString() {
+    return "{ name: " + name + ", value: " + value + " }";
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineReader.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineReader.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineReader.java
new file mode 100644
index 0000000..9ae9954
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineReader.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.Set;
+import java.util.SortedSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
+
+/**
+ * This interface is for retrieving timeline information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface TimelineReader {
+
+  /**
+   * Possible fields to retrieve for {@link #getEntities} and
+   * {@link #getEntity}.
+   */
+  enum Field {
+    EVENTS,
+    RELATED_ENTITIES,
+    PRIMARY_FILTERS,
+    OTHER_INFO,
+    LAST_EVENT_ONLY
+  }
+
+  /**
+   * Default limit for {@link #getEntities} and {@link #getEntityTimelines}.
+   */
+  final long DEFAULT_LIMIT = 100;
+
+  /**
+   * This method retrieves a list of entity information, {@link TimelineEntity},
+   * sorted by the starting timestamp for the entity, descending. The starting
+   * timestamp of an entity is a timestamp specified by the client. If it is not
+   * explicitly specified, it will be chosen by the store to be the earliest
+   * timestamp of the events received in the first put for the entity.
+   * 
+   * @param entityType
+   *          The type of entities to return (required).
+   * @param limit
+   *          A limit on the number of entities to return. If null, defaults to
+   *          {@link #DEFAULT_LIMIT}.
+   * @param windowStart
+   *          The earliest start timestamp to retrieve (exclusive). If null,
+   *          defaults to retrieving all entities until the limit is reached.
+   * @param windowEnd
+   *          The latest start timestamp to retrieve (inclusive). If null,
+   *          defaults to {@link Long#MAX_VALUE}.
+   * @param fromId
+   *          If fromId is not null, retrieve entities earlier than and
+   *          including the specified ID. If no start time is found for the
+   *          specified ID, an empty list of entities will be returned. The
+   *          windowEnd parameter will take precedence if the start time of this
+   *          entity falls later than windowEnd.
+   * @param fromTs
+   *          If fromTs is not null, ignore entities that were inserted into the
+   *          store after the given timestamp. The entity's insert timestamp
+   *          used for this comparison is the store's system time when the first
+   *          put for the entity was received (not the entity's start time).
+   * @param primaryFilter
+   *          Retrieves only entities that have the specified primary filter. If
+   *          null, retrieves all entities. This is an indexed retrieval, and no
+   *          entities that do not match the filter are scanned.
+   * @param secondaryFilters
+   *          Retrieves only entities that have exact matches for all the
+   *          specified filters in their primary filters or other info. This is
+   *          not an indexed retrieval, so all entities are scanned but only
+   *          those matching the filters are returned.
+   * @param fieldsToRetrieve
+   *          Specifies which fields of the entity object to retrieve (see
+   *          {@link Field}). If the set of fields contains
+   *          {@link Field#LAST_EVENT_ONLY} and not {@link Field#EVENTS}, the
+   *          most recent event for each entity is retrieved. If null, retrieves
+   *          all fields.
+   * @return A {@link TimelineEntities} object.
+   * @throws IOException
+   */
+  TimelineEntities getEntities(String entityType,
+      Long limit, Long windowStart, Long windowEnd, String fromId, Long fromTs,
+      NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
+      EnumSet<Field> fieldsToRetrieve) throws IOException;
+
+  /**
+   * This method retrieves the entity information for a given entity.
+   * 
+   * @param entityId
+   *          The entity whose information will be retrieved.
+   * @param entityType
+   *          The type of the entity.
+   * @param fieldsToRetrieve
+   *          Specifies which fields of the entity object to retrieve (see
+   *          {@link Field}). If the set of fields contains
+   *          {@link Field#LAST_EVENT_ONLY} and not {@link Field#EVENTS}, the
+   *          most recent event for the entity is retrieved. If null,
+   *          retrieves all fields.
+   * @return A {@link TimelineEntity} object.
+   * @throws IOException
+   */
+  TimelineEntity getEntity(String entityId, String entityType,
+      EnumSet<Field> fieldsToRetrieve) throws IOException;
+
+  /**
+   * This method retrieves the events for a list of entities all of the same
+   * entity type. The events for each entity are sorted in order of their
+   * timestamps, descending.
+   * 
+   * @param entityType
+   *          The type of entities to retrieve events for.
+   * @param entityIds
+   *          The entity IDs to retrieve events for.
+   * @param limit
+   *          A limit on the number of events to return for each entity. If
+   *          null, defaults to {@link #DEFAULT_LIMIT} events per entity.
+   * @param windowStart
+   *          If not null, retrieves only events later than the given time
+   *          (exclusive)
+   * @param windowEnd
+   *          If not null, retrieves only events earlier than the given time
+   *          (inclusive)
+   * @param eventTypes
+   *          Restricts the events returned to the given types. If null, events
+   *          of all types will be returned.
+   * @return A {@link TimelineEvents} object.
+   * @throws IOException
+   */
+  TimelineEvents getEntityTimelines(String entityType,
+      SortedSet<String> entityIds, Long limit, Long windowStart,
+      Long windowEnd, Set<String> eventTypes) throws IOException;
+}
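
A minimal usage sketch for this read interface (not part of the patch itself):
the reader reference, the "HOST_METRIC" entity type, and the filter values
below are all invented for illustration.

    // Hypothetical sketch: fetch the ten most recent entities of an
    // invented type that carry a given primary filter.
    static void printRecentEntities(TimelineReader reader) throws IOException {
      NameValuePair hostFilter =
          new NameValuePair("hostname", "host1.example.com");
      TimelineEntities entities = reader.getEntities(
          "HOST_METRIC",    // entityType (required)
          10L,              // limit; null falls back to DEFAULT_LIMIT
          null,             // windowStart: no lower bound on start time
          null,             // windowEnd: defaults to Long.MAX_VALUE
          null,             // fromId: start from the most recent entity
          null,             // fromTs: do not filter on insert time
          hostFilter,       // indexed lookup on one primary filter
          null,             // no secondary filters
          null);            // null retrieves all fields
      for (TimelineEntity entity : entities.getEntities()) {
        System.out.println(entity.getEntityId() + " started at "
            + entity.getStartTime());
      }
    }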

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineStore.java
new file mode 100644
index 0000000..6b50d83
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineStore.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.service.Service;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface TimelineStore extends
+    Service, TimelineReader, TimelineWriter {
+}
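
Because TimelineStore extends the Hadoop Service interface, an implementation
is expected to be driven through the standard service lifecycle before the
reader/writer methods are called. A minimal sketch; "SomeTimelineStore" is a
placeholder for a concrete store class, not a name defined by this patch.

    // Hypothetical lifecycle sketch.
    TimelineStore store = new SomeTimelineStore();
    store.init(new org.apache.hadoop.conf.Configuration());
    store.start();
    try {
      // put()/getEntities()/getEntity() are valid while the service runs
    } finally {
      store.stop();
    }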

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineWriter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineWriter.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineWriter.java
new file mode 100644
index 0000000..8f28d82
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/TimelineWriter.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+
+import java.io.IOException;
+
+/**
+ * This interface is for storing timeline information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface TimelineWriter {
+
+  /**
+   * Stores entity information to the timeline store. Any errors occurring for
+   * individual put request objects will be reported in the response.
+   * 
+   * @param data
+   *          A {@link TimelineEntities} object.
+   * @return A {@link TimelinePutResponse} object.
+   * @throws IOException
+   */
+  TimelinePutResponse put(TimelineEntities data) throws IOException;
+
+}
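
A short sketch of the write path against this interface; the entity ID and
type are invented, and "writer" stands for any TimelineWriter implementation.

    static void storeOneEntity(TimelineWriter writer) throws IOException {
      TimelineEntity entity = new TimelineEntity();
      entity.setEntityId("entity_0001");      // invented ID
      entity.setEntityType("HOST_METRIC");    // invented type
      entity.setStartTime(System.currentTimeMillis());

      TimelineEntities data = new TimelineEntities();
      data.addEntity(entity);

      TimelinePutResponse response = writer.put(data);
      // per the contract above, per-entity failures are reported in the
      // response rather than thrown
      for (TimelinePutResponse.TimelinePutError e : response.getErrors()) {
        System.err.println("put failed for " + e.getEntityId()
            + ", error code " + e.getErrorCode());
      }
    }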

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/package-info.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/package-info.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/package-info.java
new file mode 100644
index 0000000..970e868
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/package-info.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+import org.apache.hadoop.classification.InterfaceAudience;


[06/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.c
new file mode 100644
index 0000000..5a9f9c0
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.c
@@ -0,0 +1,2212 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * FreeBSD platform-specific module methods for _psutil_bsd
+ */
+
+
+#include <Python.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <paths.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/param.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#include <sys/file.h>
+#include <net/route.h>
+
+#include <sys/socket.h>
+#include <sys/socketvar.h>    // for struct xsocket
+#include <sys/un.h>
+#include <sys/unpcb.h>
+// for xinpcb struct
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/tcp_var.h>   // for struct xtcpcb
+#include <netinet/tcp_fsm.h>   // for TCP connection states
+#include <arpa/inet.h>         // for inet_ntop()
+
+#if __FreeBSD_version < 900000
+#include <utmp.h>         // system users
+#else
+#include <utmpx.h>
+#endif
+#include <devstat.h>      // get io counters
+#include <sys/vmmeter.h>  // needed for vmtotal struct
+#include <libutil.h>      // process open files, shared libs (kinfo_getvmmap)
+#include <sys/mount.h>
+
+#include <net/if.h>       // net io counters
+#include <net/if_dl.h>
+#include <net/route.h>
+
+#include <netinet/in.h>   // process open files/connections
+#include <sys/un.h>
+
+#include "_psutil_bsd.h"
+#include "_psutil_common.h"
+#include "arch/bsd/process_info.h"
+
+
+// convert a timeval struct to a double
+#define TV2DOUBLE(t)    ((t).tv_sec + (t).tv_usec / 1000000.0)
+
+
+/*
+ * Utility function which fills a kinfo_proc struct based on process pid
+ */
+static int
+psutil_kinfo_proc(const pid_t pid, struct kinfo_proc *proc)
+{
+    int mib[4];
+    size_t size;
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PID;
+    mib[3] = pid;
+
+    size = sizeof(struct kinfo_proc);
+
+    if (sysctl((int *)mib, 4, proc, &size, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return -1;
+    }
+
+    // sysctl stores 0 in the size if we can't find the process information.
+    if (size == 0) {
+        NoSuchProcess();
+        return -1;
+    }
+    return 0;
+}
+
+
+/*
+ * Return a Python list of all the PIDs running on the system.
+ */
+static PyObject *
+psutil_pids(PyObject *self, PyObject *args)
+{
+    kinfo_proc *proclist = NULL;
+    kinfo_proc *orig_address = NULL;
+    size_t num_processes;
+    size_t idx;
+    PyObject *retlist = PyList_New(0);
+    PyObject *pid = NULL;
+
+    if (retlist == NULL) {
+        return NULL;
+    }
+    if (psutil_get_proc_list(&proclist, &num_processes) != 0) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "failed to retrieve process list.");
+        goto error;
+    }
+
+    if (num_processes > 0) {
+        orig_address = proclist; // save so we can free it after we're done
+        for (idx = 0; idx < num_processes; idx++) {
+            pid = Py_BuildValue("i", proclist->ki_pid);
+            if (!pid)
+                goto error;
+            if (PyList_Append(retlist, pid))
+                goto error;
+            Py_DECREF(pid);
+            proclist++;
+        }
+        free(orig_address);
+    }
+
+    return retlist;
+
+error:
+    Py_XDECREF(pid);
+    Py_DECREF(retlist);
+    if (orig_address != NULL) {
+        free(orig_address);
+    }
+    return NULL;
+}
+
+
+/*
+ * Return a Python float indicating the system boot time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_boot_time(PyObject *self, PyObject *args)
+{
+    // fetch sysctl "kern.boottime"
+    static int request[2] = { CTL_KERN, KERN_BOOTTIME };
+    struct timeval boottime;
+    size_t len = sizeof(boottime);
+
+    if (sysctl(request, 2, &boottime, &len, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    return Py_BuildValue("d", (double)boottime.tv_sec);
+}
+
+
+/*
+ * Return process name from kinfo_proc as a Python string.
+ */
+static PyObject *
+psutil_proc_name(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("s", kp.ki_comm);
+}
+
+
+/*
+ * Return process pathname executable.
+ * Thanks to Robert N. M. Watson:
+ * http://fxr.googlebit.com/source/usr.bin/procstat/procstat_bin.c?v=8-CURRENT
+ */
+static PyObject *
+psutil_proc_exe(PyObject *self, PyObject *args)
+{
+    long pid;
+    char pathname[PATH_MAX];
+    int error;
+    int mib[4];
+    size_t size;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PATHNAME;
+    mib[3] = pid;
+
+    size = sizeof(pathname);
+    error = sysctl(mib, 4, pathname, &size, NULL, 0);
+    if (error == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    if (size == 0 || strlen(pathname) == 0) {
+        if (psutil_pid_exists(pid) == 0) {
+            return NoSuchProcess();
+        }
+        else {
+            strcpy(pathname, "");
+        }
+    }
+    return Py_BuildValue("s", pathname);
+}
+
+
+/*
+ * Return process cmdline as a Python list of cmdline arguments.
+ */
+static PyObject *
+psutil_proc_cmdline(PyObject *self, PyObject *args)
+{
+    long pid;
+    PyObject *arglist = NULL;
+
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+
+    // get the commandline, defined in arch/bsd/process_info.c
+    arglist = psutil_get_arg_list(pid);
+
+    // psutil_get_arg_list() returns NULL only if psutil_cmd_args
+    // failed with ESRCH (no process with that PID)
+    if (NULL == arglist) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    return Py_BuildValue("N", arglist);
+}
+
+
+/*
+ * Return process parent pid from kinfo_proc as a Python integer.
+ */
+static PyObject *
+psutil_proc_ppid(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("l", (long)kp.ki_ppid);
+}
+
+
+/*
+ * Return process status as a Python integer.
+ */
+static PyObject *
+psutil_proc_status(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("i", (int)kp.ki_stat);
+}
+
+
+/*
+ * Return process real, effective and saved user ids from kinfo_proc
+ * as a Python tuple.
+ */
+static PyObject *
+psutil_proc_uids(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("lll",
+                         (long)kp.ki_ruid,
+                         (long)kp.ki_uid,
+                         (long)kp.ki_svuid);
+}
+
+
+/*
+ * Return process real, effective and saved group ids from kinfo_proc
+ * as a Python tuple.
+ */
+static PyObject *
+psutil_proc_gids(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("lll",
+                         (long)kp.ki_rgid,
+                         (long)kp.ki_groups[0],
+                         (long)kp.ki_svgid);
+}
+
+
+/*
+ * Return the device number of the process controlling terminal (tty)
+ * from kinfo_proc as a Python integer.
+ */
+static PyObject *
+psutil_proc_tty_nr(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("i", kp.ki_tdev);
+}
+
+
+/*
+ * Return the number of context switches performed by process as a tuple.
+ */
+static PyObject *
+psutil_proc_num_ctx_switches(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("(ll)",
+                         kp.ki_rusage.ru_nvcsw,
+                         kp.ki_rusage.ru_nivcsw);
+}
+
+
+/*
+ * Return number of threads used by process as a Python integer.
+ */
+static PyObject *
+psutil_proc_num_threads(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("l", (long)kp.ki_numthreads);
+}
+
+
+/*
+ * Retrieves all threads used by process returning a list of tuples
+ * including thread id, user time and system time.
+ * Thanks to Robert N. M. Watson:
+ * http://fxr.googlebit.com/source/usr.bin/procstat/
+ *     procstat_threads.c?v=8-CURRENT
+ */
+static PyObject *
+psutil_proc_threads(PyObject *self, PyObject *args)
+{
+    long pid;
+    int mib[4];
+    struct kinfo_proc *kip = NULL;
+    struct kinfo_proc *kipp = NULL;
+    int error;
+    unsigned int i;
+    size_t size;
+    PyObject *retList = PyList_New(0);
+    PyObject *pyTuple = NULL;
+
+    if (retList == NULL)
+        return NULL;
+    if (! PyArg_ParseTuple(args, "l", &pid))
+        goto error;
+
+    // we need to re-query for thread information, so don't use *kipp
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PID | KERN_PROC_INC_THREAD;
+    mib[3] = pid;
+
+    size = 0;
+    error = sysctl(mib, 4, NULL, &size, NULL, 0);
+    if (error == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    if (size == 0) {
+        NoSuchProcess();
+        goto error;
+    }
+
+    kip = malloc(size);
+    if (kip == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    error = sysctl(mib, 4, kip, &size, NULL, 0);
+    if (error == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    if (size == 0) {
+        NoSuchProcess();
+        goto error;
+    }
+
+    for (i = 0; i < size / sizeof(*kipp); i++) {
+        kipp = &kip[i];
+        pyTuple = Py_BuildValue("Idd",
+                                kipp->ki_tid,
+                                TV2DOUBLE(kipp->ki_rusage.ru_utime),
+                                TV2DOUBLE(kipp->ki_rusage.ru_stime));
+        if (pyTuple == NULL)
+            goto error;
+        if (PyList_Append(retList, pyTuple))
+            goto error;
+        Py_DECREF(pyTuple);
+    }
+    free(kip);
+    return retList;
+
+error:
+    Py_XDECREF(pyTuple);
+    Py_DECREF(retList);
+    if (kip != NULL) {
+        free(kip);
+    }
+    return NULL;
+}
+
+
+/*
+ * Return a Python tuple (user_time, kernel_time)
+ */
+static PyObject *
+psutil_proc_cpu_times(PyObject *self, PyObject *args)
+{
+    long pid;
+    double user_t, sys_t;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    // convert the timeval structs to seconds expressed as doubles
+    user_t = TV2DOUBLE(kp.ki_rusage.ru_utime);
+    sys_t = TV2DOUBLE(kp.ki_rusage.ru_stime);
+    return Py_BuildValue("(dd)", user_t, sys_t);
+}
+
+
+/*
+ * Return the number of logical CPUs in the system.
+ * XXX this could be shared with OSX
+ */
+static PyObject *
+psutil_cpu_count_logical(PyObject *self, PyObject *args)
+{
+    int mib[2];
+    int ncpu;
+    size_t len;
+
+    mib[0] = CTL_HW;
+    mib[1] = HW_NCPU;
+    len = sizeof(ncpu);
+
+    if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1) {
+        // mimic os.cpu_count()
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+    else {
+        return Py_BuildValue("i", ncpu);
+    }
+}
+
+
+/*
+ * Return an XML string from which we'll determine the number of
+ * physical CPU cores in the system.
+ */
+static PyObject *
+psutil_cpu_count_phys(PyObject *self, PyObject *args)
+{
+    void *topology = NULL;
+    size_t size = 0;
+
+    if (sysctlbyname("kern.sched.topology_spec", NULL, &size, NULL, 0))
+        goto error;
+
+    topology = malloc(size);
+    if (!topology) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    if (sysctlbyname("kern.sched.topology_spec", topology, &size, NULL, 0))
+        goto error;
+
+    return Py_BuildValue("s", topology);
+
+error:
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * Return a Python float indicating the process create time expressed in
+ * seconds since the epoch.
+ */
+static PyObject *
+psutil_proc_create_time(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("d", TV2DOUBLE(kp.ki_start));
+}
+
+
+/*
+ * Return a Python tuple with the process I/O counters: the number of read
+ * and write blocks (byte counts are unavailable on FreeBSD, hence -1).
+ */
+static PyObject *
+psutil_proc_io_counters(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    // there's apparently no way to determine the byte counts, hence return -1.
+    return Py_BuildValue("(llll)",
+                         kp.ki_rusage.ru_inblock,
+                         kp.ki_rusage.ru_oublock,
+                         -1,
+                         -1);
+}
+
+
+/*
+ * Return extended memory info for a process as a Python tuple.
+ */
+static PyObject *
+psutil_proc_memory_info(PyObject *self, PyObject *args)
+{
+    long pid;
+    struct kinfo_proc kp;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        return NULL;
+    }
+    return Py_BuildValue("(lllll)",
+                         ptoa(kp.ki_rssize),    // rss
+                         (long)kp.ki_size,      // vms
+                         ptoa(kp.ki_tsize),     // text
+                         ptoa(kp.ki_dsize),     // data
+                         ptoa(kp.ki_ssize));    // stack
+}
+
+
+/*
+ * Return virtual memory usage statistics.
+ */
+static PyObject *
+psutil_virtual_mem(PyObject *self, PyObject *args)
+{
+    unsigned int   total, active, inactive, wired, cached, free;
+    size_t         size = sizeof(total);
+    struct vmtotal vm;
+    int            mib[] = {CTL_VM, VM_METER};
+    long           pagesize = getpagesize();
+#if __FreeBSD_version > 702101
+    long buffers;
+#else
+    int buffers;
+#endif
+    size_t buffers_size = sizeof(buffers);
+
+    if (sysctlbyname("vm.stats.vm.v_page_count", &total, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_active_count", &active, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_inactive_count",
+                     &inactive, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_wire_count", &wired, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_cache_count", &cached, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vm.stats.vm.v_free_count", &free, &size, NULL, 0))
+        goto error;
+    if (sysctlbyname("vfs.bufspace", &buffers, &buffers_size, NULL, 0))
+        goto error;
+
+    size = sizeof(vm);
+    if (sysctl(mib, 2, &vm, &size, NULL, 0) != 0)
+        goto error;
+
+    return Py_BuildValue("KKKKKKKK",
+        (unsigned long long) total    * pagesize,
+        (unsigned long long) free     * pagesize,
+        (unsigned long long) active   * pagesize,
+        (unsigned long long) inactive * pagesize,
+        (unsigned long long) wired    * pagesize,
+        (unsigned long long) cached   * pagesize,
+        (unsigned long long) buffers,
+        (unsigned long long) (vm.t_vmshr + vm.t_rmshr) * pagesize  // shared
+    );
+
+error:
+    PyErr_SetFromErrno(PyExc_OSError);
+    return NULL;
+}
+
+
+#ifndef _PATH_DEVNULL
+#define _PATH_DEVNULL "/dev/null"
+#endif
+
+/*
+ * Return swap memory stats (see 'swapinfo' cmdline tool)
+ */
+static PyObject *
+psutil_swap_mem(PyObject *self, PyObject *args)
+{
+    kvm_t *kd;
+    struct kvm_swap kvmsw[1];
+    unsigned int swapin, swapout, nodein, nodeout;
+    size_t size = sizeof(unsigned int);
+
+    kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open failed");
+    if (kd == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "kvm_open failed");
+        return NULL;
+    }
+
+    if (kvm_getswapinfo(kd, kvmsw, 1, 0) < 0) {
+        kvm_close(kd);
+        PyErr_SetString(PyExc_RuntimeError, "kvm_getswapinfo failed");
+        return NULL;
+    }
+
+    kvm_close(kd);
+
+    if (sysctlbyname("vm.stats.vm.v_swapin", &swapin, &size, NULL, 0) == -1)
+        goto sbn_error;
+    if (sysctlbyname("vm.stats.vm.v_swapout", &swapout, &size, NULL, 0) == -1)
+        goto sbn_error;
+    if (sysctlbyname("vm.stats.vm.v_vnodein", &nodein, &size, NULL, 0) == -1)
+        goto sbn_error;
+    if (sysctlbyname("vm.stats.vm.v_vnodeout", &nodeout, &size, NULL, 0) == -1)
+        goto sbn_error;
+
+    return Py_BuildValue("(iiiII)",
+                         kvmsw[0].ksw_total,                     // total
+                         kvmsw[0].ksw_used,                      // used
+                         kvmsw[0].ksw_total - kvmsw[0].ksw_used, // free
+                         swapin + swapout,                       // swap in
+                         nodein + nodeout);                      // swap out
+
+sbn_error:
+    PyErr_SetFromErrno(PyExc_OSError);
+    return NULL;
+}
+
+
+/*
+ * Return a Python tuple representing user, kernel and idle CPU times
+ */
+static PyObject *
+psutil_cpu_times(PyObject *self, PyObject *args)
+{
+    long cpu_time[CPUSTATES];
+    size_t size;
+
+    size = sizeof(cpu_time);
+
+    if (sysctlbyname("kern.cp_time", &cpu_time, &size, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+
+    return Py_BuildValue("(ddddd)",
+                         (double)cpu_time[CP_USER] / CLOCKS_PER_SEC,
+                         (double)cpu_time[CP_NICE] / CLOCKS_PER_SEC,
+                         (double)cpu_time[CP_SYS] / CLOCKS_PER_SEC,
+                         (double)cpu_time[CP_IDLE] / CLOCKS_PER_SEC,
+                         (double)cpu_time[CP_INTR] / CLOCKS_PER_SEC
+                        );
+}
+
+
+/*
+ * XXX
+ * These functions are only available on FreeBSD 8 and later.
+ * In the upper python layer we do various tricks to avoid crashing
+ * and/or to provide alternatives where possible.
+ */
+
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+/*
+ * Return files opened by process as a list of (path, fd) tuples
+ */
+static PyObject *
+psutil_proc_open_files(PyObject *self, PyObject *args)
+{
+    long pid;
+    int i, cnt;
+    struct kinfo_file *freep = NULL;
+    struct kinfo_file *kif;
+    struct kinfo_proc kipp;
+    PyObject *retList = PyList_New(0);
+    PyObject *tuple = NULL;
+
+    if (retList == NULL)
+        return NULL;
+    if (! PyArg_ParseTuple(args, "l", &pid))
+        goto error;
+    if (psutil_kinfo_proc(pid, &kipp) == -1)
+        goto error;
+
+    freep = kinfo_getfile(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        goto error;
+    }
+
+    for (i = 0; i < cnt; i++) {
+        kif = &freep[i];
+        if ((kif->kf_type == KF_TYPE_VNODE) &&
+                (kif->kf_vnode_type == KF_VTYPE_VREG))
+        {
+            tuple = Py_BuildValue("(si)", kif->kf_path, kif->kf_fd);
+            if (tuple == NULL)
+                goto error;
+            if (PyList_Append(retList, tuple))
+                goto error;
+            Py_DECREF(tuple);
+        }
+    }
+    free(freep);
+    return retList;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(retList);
+    if (freep != NULL)
+        free(freep);
+    return NULL;
+}
+
+
+/*
+ * Return the number of file descriptors opened by the process.
+ */
+static PyObject *
+psutil_proc_num_fds(PyObject *self, PyObject *args)
+{
+    long pid;
+    int cnt;
+
+    struct kinfo_file *freep;
+    struct kinfo_proc kipp;
+
+    if (! PyArg_ParseTuple(args, "l", &pid))
+        return NULL;
+    if (psutil_kinfo_proc(pid, &kipp) == -1)
+        return NULL;
+
+    freep = kinfo_getfile(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        return NULL;
+    }
+    free(freep);
+
+    return Py_BuildValue("i", cnt);
+}
+
+
+/*
+ * Return process current working directory.
+ */
+static PyObject *
+psutil_proc_cwd(PyObject *self, PyObject *args)
+{
+    long pid;
+    PyObject *path = NULL;
+    struct kinfo_file *freep = NULL;
+    struct kinfo_file *kif;
+    struct kinfo_proc kipp;
+
+    int i, cnt;
+
+    if (! PyArg_ParseTuple(args, "l", &pid))
+        goto error;
+    if (psutil_kinfo_proc(pid, &kipp) == -1)
+        goto error;
+
+    freep = kinfo_getfile(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        goto error;
+    }
+
+    for (i = 0; i < cnt; i++) {
+        kif = &freep[i];
+        if (kif->kf_fd == KF_FD_TYPE_CWD) {
+            path = Py_BuildValue("s", kif->kf_path);
+            if (!path)
+                goto error;
+            break;
+        }
+    }
+    /*
+     * For lower pids it seems we can't retrieve any information
+     * (lsof can't do it either).  Since this happens even as root
+     * we return an empty string instead of AccessDenied.
+     */
+    if (path == NULL) {
+        path = Py_BuildValue("s", "");
+    }
+    free(freep);
+    return path;
+
+error:
+    Py_XDECREF(path);
+    if (freep != NULL)
+        free(freep);
+    return NULL;
+}
+
+
+// The tcplist fetching and walking is borrowed from netstat/inet.c.
+static char *
+psutil_fetch_tcplist(void)
+{
+    char *buf;
+    size_t len;
+
+    for (;;) {
+        if (sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0) < 0) {
+            PyErr_SetFromErrno(PyExc_OSError);
+            return NULL;
+        }
+        buf = malloc(len);
+        if (buf == NULL) {
+            PyErr_NoMemory();
+            return NULL;
+        }
+        if (sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0) < 0) {
+            free(buf);
+            PyErr_SetFromErrno(PyExc_OSError);
+            return NULL;
+        }
+        return buf;
+    }
+}
+
+static int
+psutil_sockaddr_port(int family, struct sockaddr_storage *ss)
+{
+    struct sockaddr_in6 *sin6;
+    struct sockaddr_in *sin;
+
+    if (family == AF_INET) {
+        sin = (struct sockaddr_in *)ss;
+        return (sin->sin_port);
+    } else {
+        sin6 = (struct sockaddr_in6 *)ss;
+        return (sin6->sin6_port);
+    }
+}
+
+static void *
+psutil_sockaddr_addr(int family, struct sockaddr_storage *ss)
+{
+    struct sockaddr_in6 *sin6;
+    struct sockaddr_in *sin;
+
+    if (family == AF_INET) {
+        sin = (struct sockaddr_in *)ss;
+        return (&sin->sin_addr);
+    } else {
+        sin6 = (struct sockaddr_in6 *)ss;
+        return (&sin6->sin6_addr);
+    }
+}
+
+static socklen_t
+psutil_sockaddr_addrlen(int family)
+{
+    if (family == AF_INET)
+        return (sizeof(struct in_addr));
+    else
+        return (sizeof(struct in6_addr));
+}
+
+static int
+psutil_sockaddr_matches(int family, int port, void *pcb_addr,
+                        struct sockaddr_storage *ss)
+{
+    if (psutil_sockaddr_port(family, ss) != port)
+        return (0);
+    return (memcmp(psutil_sockaddr_addr(family, ss), pcb_addr,
+                   psutil_sockaddr_addrlen(family)) == 0);
+}
+
+static struct tcpcb *
+psutil_search_tcplist(char *buf, struct kinfo_file *kif)
+{
+    struct tcpcb *tp;
+    struct inpcb *inp;
+    struct xinpgen *xig, *oxig;
+    struct xsocket *so;
+
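+    // the pcblist buffer is a sequence of variable-length xinpgen records;
+    // skip the leading header and advance by xig_len until the trailer
+    // record (whose length is no larger than the header's) is reached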
+    oxig = xig = (struct xinpgen *)buf;
+    for (xig = (struct xinpgen *)((char *)xig + xig->xig_len);
+            xig->xig_len > sizeof(struct xinpgen);
+            xig = (struct xinpgen *)((char *)xig + xig->xig_len)) {
+        tp = &((struct xtcpcb *)xig)->xt_tp;
+        inp = &((struct xtcpcb *)xig)->xt_inp;
+        so = &((struct xtcpcb *)xig)->xt_socket;
+
+        if (so->so_type != kif->kf_sock_type ||
+                so->xso_family != kif->kf_sock_domain ||
+                so->xso_protocol != kif->kf_sock_protocol)
+            continue;
+
+        if (kif->kf_sock_domain == AF_INET) {
+            if (!psutil_sockaddr_matches(
+                    AF_INET, inp->inp_lport, &inp->inp_laddr,
+                    &kif->kf_sa_local))
+                continue;
+            if (!psutil_sockaddr_matches(
+                    AF_INET, inp->inp_fport, &inp->inp_faddr,
+                    &kif->kf_sa_peer))
+                continue;
+        } else {
+            if (!psutil_sockaddr_matches(
+                    AF_INET6, inp->inp_lport, &inp->in6p_laddr,
+                    &kif->kf_sa_local))
+                continue;
+            if (!psutil_sockaddr_matches(
+                    AF_INET6, inp->inp_fport, &inp->in6p_faddr,
+                    &kif->kf_sa_peer))
+                continue;
+        }
+
+        return (tp);
+    }
+    return NULL;
+}
+
+
+// a sentinel value for connections without an actual status
+static int PSUTIL_CONN_NONE = 128;
+
+/*
+ * Return connections opened by process.
+ */
+static PyObject *
+psutil_proc_connections(PyObject *self, PyObject *args)
+{
+    long pid;
+    int i, cnt;
+
+    struct kinfo_file *freep = NULL;
+    struct kinfo_file *kif;
+    char *tcplist = NULL;
+    struct tcpcb *tcp;
+
+    PyObject *retList = PyList_New(0);
+    PyObject *tuple = NULL;
+    PyObject *laddr = NULL;
+    PyObject *raddr = NULL;
+    PyObject *af_filter = NULL;
+    PyObject *type_filter = NULL;
+    PyObject *_family = NULL;
+    PyObject *_type = NULL;
+
+    if (retList == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "lOO", &pid, &af_filter, &type_filter)) {
+        goto error;
+    }
+    if (!PySequence_Check(af_filter) || !PySequence_Check(type_filter)) {
+        PyErr_SetString(PyExc_TypeError, "arg 2 or 3 is not a sequence");
+        goto error;
+    }
+
+    freep = kinfo_getfile(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        goto error;
+    }
+
+    tcplist = psutil_fetch_tcplist();
+    if (tcplist == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    for (i = 0; i < cnt; i++) {
+        int lport, rport, state;
+        char lip[200], rip[200];
+        char path[PATH_MAX];
+        int inseq;
+        tuple = NULL;
+        laddr = NULL;
+        raddr = NULL;
+
+        kif = &freep[i];
+        if (kif->kf_type == KF_TYPE_SOCKET)
+        {
+            // apply filters
+            _family = PyLong_FromLong((long)kif->kf_sock_domain);
+            inseq = PySequence_Contains(af_filter, _family);
+            Py_DECREF(_family);
+            if (inseq == 0) {
+                continue;
+            }
+            _type = PyLong_FromLong((long)kif->kf_sock_type);
+            inseq = PySequence_Contains(type_filter, _type);
+            Py_DECREF(_type);
+            if (inseq == 0) {
+                continue;
+            }
+
+            // IPv4 / IPv6 socket
+            if ((kif->kf_sock_domain == AF_INET) ||
+                    (kif->kf_sock_domain == AF_INET6)) {
+                // fill status
+                state = PSUTIL_CONN_NONE;
+                if (kif->kf_sock_type == SOCK_STREAM) {
+                    tcp = psutil_search_tcplist(tcplist, kif);
+                    if (tcp != NULL)
+                        state = (int)tcp->t_state;
+                }
+
+                // build addr and port
+                inet_ntop(
+                    kif->kf_sock_domain,
+                    psutil_sockaddr_addr(kif->kf_sock_domain,
+                                         &kif->kf_sa_local),
+                    lip,
+                    sizeof(lip));
+                inet_ntop(
+                    kif->kf_sock_domain,
+                    psutil_sockaddr_addr(kif->kf_sock_domain,
+                                         &kif->kf_sa_peer),
+                    rip,
+                    sizeof(rip));
+                lport = htons(psutil_sockaddr_port(kif->kf_sock_domain,
+                                                   &kif->kf_sa_local));
+                rport = htons(psutil_sockaddr_port(kif->kf_sock_domain,
+                                                   &kif->kf_sa_peer));
+
+                // construct python tuple/list
+                laddr = Py_BuildValue("(si)", lip, lport);
+                if (!laddr)
+                    goto error;
+                if (rport != 0) {
+                    raddr = Py_BuildValue("(si)", rip, rport);
+                }
+                else {
+                    raddr = Py_BuildValue("()");
+                }
+                if (!raddr)
+                    goto error;
+                tuple = Py_BuildValue("(iiiNNi)",
+                                      kif->kf_fd,
+                                      kif->kf_sock_domain,
+                                      kif->kf_sock_type,
+                                      laddr,
+                                      raddr,
+                                      state);
+                if (!tuple)
+                    goto error;
+                if (PyList_Append(retList, tuple))
+                    goto error;
+                Py_DECREF(tuple);
+            }
+            // UNIX socket
+            else if (kif->kf_sock_domain == AF_UNIX) {
+                struct sockaddr_un *sun;
+
+                sun = (struct sockaddr_un *)&kif->kf_sa_local;
+                snprintf(
+                    path, sizeof(path), "%.*s",
+                    (sun->sun_len - (sizeof(*sun) - sizeof(sun->sun_path))),
+                    sun->sun_path);
+
+                tuple = Py_BuildValue("(iiisOi)",
+                                      kif->kf_fd,
+                                      kif->kf_sock_domain,
+                                      kif->kf_sock_type,
+                                      path,
+                                      Py_None,
+                                      PSUTIL_CONN_NONE);
+                if (!tuple)
+                    goto error;
+                if (PyList_Append(retList, tuple))
+                    goto error;
+                Py_DECREF(tuple);
+                Py_INCREF(Py_None);
+            }
+        }
+    }
+    free(freep);
+    free(tcplist);
+    return retList;
+
+error:
+    Py_XDECREF(tuple);
+    Py_XDECREF(laddr);
+    Py_XDECREF(raddr);
+    Py_DECREF(retList);
+    if (freep != NULL)
+        free(freep);
+    if (tcplist != NULL)
+        free(tcplist);
+    return NULL;
+}
+
+
+/*
+ * Return a Python list of tuple representing per-cpu times
+ */
+static PyObject *
+psutil_per_cpu_times(PyObject *self, PyObject *args)
+{
+    static int maxcpus;
+    int mib[2];
+    int ncpu;
+    size_t len;
+    size_t size;
+    int i;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_cputime = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    // retrieve maxcpus value
+    size = sizeof(maxcpus);
+    if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
+        Py_DECREF(py_retlist);
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    long cpu_time[maxcpus][CPUSTATES];
+
+    // retrieve the number of cpus
+    mib[0] = CTL_HW;
+    mib[1] = HW_NCPU;
+    len = sizeof(ncpu);
+    if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    // per-cpu info
+    size = sizeof(cpu_time);
+    if (sysctlbyname("kern.cp_times", &cpu_time, &size, NULL, 0) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    for (i = 0; i < ncpu; i++) {
+        py_cputime = Py_BuildValue(
+            "(ddddd)",
+            (double)cpu_time[i][CP_USER] / CLOCKS_PER_SEC,
+            (double)cpu_time[i][CP_NICE] / CLOCKS_PER_SEC,
+            (double)cpu_time[i][CP_SYS] / CLOCKS_PER_SEC,
+            (double)cpu_time[i][CP_IDLE] / CLOCKS_PER_SEC,
+            (double)cpu_time[i][CP_INTR] / CLOCKS_PER_SEC);
+        if (!py_cputime)
+            goto error;
+        if (PyList_Append(py_retlist, py_cputime))
+            goto error;
+        Py_DECREF(py_cputime);
+    }
+
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_cputime);
+    Py_DECREF(py_retlist);
+    return NULL;
+}
+
+
+// remove spaces from string
+void remove_spaces(char *str) {
+    char *p1 = str;
+    char *p2 = str;
+    do
+        while (*p2 == ' ')
+            p2++;
+    while ((*p1++ = *p2++) != '\0');
+}
+
+
+/*
+ * Return a list of tuples for every process memory maps.
+ * 'procstat' cmdline utility has been used as an example.
+ */
+static PyObject *
+psutil_proc_memory_maps(PyObject *self, PyObject *args)
+{
+    long pid;
+    int ptrwidth;
+    int i, cnt;
+    char addr[40];   // wide enough for two "0x"-prefixed 64-bit addresses
+    char perms[4];
+    const char *path;
+    struct kinfo_proc kp;
+    struct kinfo_vmentry *freep = NULL;
+    struct kinfo_vmentry *kve;
+    ptrwidth = 2 * sizeof(void *);
+    PyObject *pytuple = NULL;
+    PyObject *retlist = PyList_New(0);
+
+    if (retlist == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        goto error;
+    }
+    if (psutil_kinfo_proc(pid, &kp) == -1) {
+        goto error;
+    }
+
+    freep = kinfo_getvmmap(pid, &cnt);
+    if (freep == NULL) {
+        psutil_raise_ad_or_nsp(pid);
+        goto error;
+    }
+    for (i = 0; i < cnt; i++) {
+        pytuple = NULL;
+        kve = &freep[i];
+        addr[0] = '\0';
+        perms[0] = '\0';
+        sprintf(addr, "%#*jx-%#*jx", ptrwidth, (uintmax_t)kve->kve_start,
+                ptrwidth, (uintmax_t)kve->kve_end);
+        remove_spaces(addr);
+        strlcat(perms, kve->kve_protection & KVME_PROT_READ ? "r" : "-",
+                sizeof(perms));
+        strlcat(perms, kve->kve_protection & KVME_PROT_WRITE ? "w" : "-",
+                sizeof(perms));
+        strlcat(perms, kve->kve_protection & KVME_PROT_EXEC ? "x" : "-",
+                sizeof(perms));
+
+        if (strlen(kve->kve_path) == 0) {
+            switch (kve->kve_type) {
+            case KVME_TYPE_NONE:
+                path = "[none]";
+                break;
+            case KVME_TYPE_DEFAULT:
+                path = "[default]";
+                break;
+            case KVME_TYPE_VNODE:
+                path = "[vnode]";
+                break;
+            case KVME_TYPE_SWAP:
+                path = "[swap]";
+                break;
+            case KVME_TYPE_DEVICE:
+                path = "[device]";
+                break;
+            case KVME_TYPE_PHYS:
+                path = "[phys]";
+                break;
+            case KVME_TYPE_DEAD:
+                path = "[dead]";
+                break;
+            case KVME_TYPE_SG:
+                path = "[sg]";
+                break;
+            case KVME_TYPE_UNKNOWN:
+                path = "[unknown]";
+                break;
+            default:
+                path = "[?]";
+                break;
+            }
+        }
+        else {
+            path = kve->kve_path;
+        }
+
+        pytuple = Py_BuildValue("sssiiii",
+            addr,                       // "start-end" address
+            perms,                      // "rwx" permissions
+            path,                       // path
+            kve->kve_resident,          // rss
+            kve->kve_private_resident,  // private
+            kve->kve_ref_count,         // ref count
+            kve->kve_shadow_count);     // shadow count
+        if (!pytuple)
+            goto error;
+        if (PyList_Append(retlist, pytuple))
+            goto error;
+        Py_DECREF(pytuple);
+    }
+    free(freep);
+    return retlist;
+
+error:
+    Py_XDECREF(pytuple);
+    Py_DECREF(retlist);
+    if (freep != NULL)
+        free(freep);
+    return NULL;
+}
+#endif
+
+
+/*
+ * Return a list of tuples including device, mount point and fs type
+ * for all partitions mounted on the system.
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    int num;
+    int i;
+    long len;
+    uint64_t flags;
+    char opts[200];
+    struct statfs *fs = NULL;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    // get the number of mount points
+    Py_BEGIN_ALLOW_THREADS
+    num = getfsstat(NULL, 0, MNT_NOWAIT);
+    Py_END_ALLOW_THREADS
+    if (num == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    len = sizeof(*fs) * num;
+    fs = malloc(len);
+    if (fs == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    Py_BEGIN_ALLOW_THREADS
+    num = getfsstat(fs, len, MNT_NOWAIT);
+    Py_END_ALLOW_THREADS
+    if (num == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    for (i = 0; i < num; i++) {
+        py_tuple = NULL;
+        opts[0] = 0;
+        flags = fs[i].f_flags;
+
+        // see sys/mount.h
+        if (flags & MNT_RDONLY)
+            strlcat(opts, "ro", sizeof(opts));
+        else
+            strlcat(opts, "rw", sizeof(opts));
+        if (flags & MNT_SYNCHRONOUS)
+            strlcat(opts, ",sync", sizeof(opts));
+        if (flags & MNT_NOEXEC)
+            strlcat(opts, ",noexec", sizeof(opts));
+        if (flags & MNT_NOSUID)
+            strlcat(opts, ",nosuid", sizeof(opts));
+        if (flags & MNT_UNION)
+            strlcat(opts, ",union", sizeof(opts));
+        if (flags & MNT_ASYNC)
+            strlcat(opts, ",async", sizeof(opts));
+        if (flags & MNT_SUIDDIR)
+            strlcat(opts, ",suiddir", sizeof(opts));
+        if (flags & MNT_SOFTDEP)
+            strlcat(opts, ",softdep", sizeof(opts));
+        if (flags & MNT_NOSYMFOLLOW)
+            strlcat(opts, ",nosymfollow", sizeof(opts));
+        if (flags & MNT_GJOURNAL)
+            strlcat(opts, ",gjournal", sizeof(opts));
+        if (flags & MNT_MULTILABEL)
+            strlcat(opts, ",multilabel", sizeof(opts));
+        if (flags & MNT_ACLS)
+            strlcat(opts, ",acls", sizeof(opts));
+        if (flags & MNT_NOATIME)
+            strlcat(opts, ",noatime", sizeof(opts));
+        if (flags & MNT_NOCLUSTERR)
+            strlcat(opts, ",noclusterr", sizeof(opts));
+        if (flags & MNT_NOCLUSTERW)
+            strlcat(opts, ",noclusterw", sizeof(opts));
+        if (flags & MNT_NFS4ACLS)
+            strlcat(opts, ",nfs4acls", sizeof(opts));
+
+        py_tuple = Py_BuildValue("(ssss)",
+                                 fs[i].f_mntfromname,  // device
+                                 fs[i].f_mntonname,    // mount point
+                                 fs[i].f_fstypename,   // fs type
+                                 opts);                // options
+        if (!py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+    }
+
+    free(fs);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    if (fs != NULL)
+        free(fs);
+    return NULL;
+}
+
+
+/*
+ * Return a Python list of named tuples with overall network I/O information
+ */
+static PyObject *
+psutil_net_io_counters(PyObject *self, PyObject *args)
+{
+    char *buf = NULL, *lim, *next;
+    struct if_msghdr *ifm;
+    int mib[6];
+    size_t len;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_ifc_info = NULL;
+    if (py_retdict == NULL)
+        return NULL;
+
+    mib[0] = CTL_NET;          // networking subsystem
+    mib[1] = PF_ROUTE;         // type of information
+    mib[2] = 0;                // protocol (IPPROTO_xxx)
+    mib[3] = 0;                // address family
+    mib[4] = NET_RT_IFLIST;   // operation
+    mib[5] = 0;
+
+    if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    buf = malloc(len);
+    if (buf == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    lim = buf + len;
+
+    for (next = buf; next < lim; ) {
+        py_ifc_info = NULL;
+        ifm = (struct if_msghdr *)next;
+        next += ifm->ifm_msglen;
+
+        if (ifm->ifm_type == RTM_IFINFO) {
+            struct if_msghdr *if2m = (struct if_msghdr *)ifm;
+            struct sockaddr_dl *sdl = (struct sockaddr_dl *)(if2m + 1);
+            char ifc_name[32];
+
+            strncpy(ifc_name, sdl->sdl_data, sdl->sdl_nlen);
+            ifc_name[sdl->sdl_nlen] = 0;
+            // XXX: ignore usbus interfaces:
+            // http://lists.freebsd.org/pipermail/freebsd-current/
+            //     2011-October/028752.html
+            // 'ifconfig -a' doesn't show them, nor do we.
+            if (strncmp(ifc_name, "usbus", 5) == 0) {
+                continue;
+            }
+
+            py_ifc_info = Py_BuildValue("(kkkkkkki)",
+                                        if2m->ifm_data.ifi_obytes,
+                                        if2m->ifm_data.ifi_ibytes,
+                                        if2m->ifm_data.ifi_opackets,
+                                        if2m->ifm_data.ifi_ipackets,
+                                        if2m->ifm_data.ifi_ierrors,
+                                        if2m->ifm_data.ifi_oerrors,
+                                        if2m->ifm_data.ifi_iqdrops,
+                                        0);  // dropout not supported
+            if (!py_ifc_info)
+                goto error;
+            if (PyDict_SetItemString(py_retdict, ifc_name, py_ifc_info))
+                goto error;
+            Py_DECREF(py_ifc_info);
+        }
+        else {
+            continue;
+        }
+    }
+
+    free(buf);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_ifc_info);
+    Py_DECREF(py_retdict);
+    if (buf != NULL)
+        free(buf);
+    return NULL;
+}
+
+
+/*
+ * Return a Python dict of tuples for disk I/O information
+ */
+static PyObject *
+psutil_disk_io_counters(PyObject *self, PyObject *args)
+{
+    int i;
+    struct statinfo stats;
+
+    // initialized so the error path can safely check and free stats.dinfo
+    stats.dinfo = NULL;
+
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_disk_info = NULL;
+    if (py_retdict == NULL)
+        return NULL;
+
+    if (devstat_checkversion(NULL) < 0) {
+        PyErr_Format(PyExc_RuntimeError, "devstat_checkversion() failed");
+        goto error;
+    }
+
+    stats.dinfo = (struct devinfo *)malloc(sizeof(struct devinfo));
+    if (stats.dinfo == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+    bzero(stats.dinfo, sizeof(struct devinfo));
+
+    if (devstat_getdevs(NULL, &stats) == -1) {
+        PyErr_Format(PyExc_RuntimeError, "devstat_getdevs() failed");
+        goto error;
+    }
+
+    for (i = 0; i < stats.dinfo->numdevs; i++) {
+        py_disk_info = NULL;
+        struct devstat current;
+        char disk_name[128];
+        current = stats.dinfo->devices[i];
+        snprintf(disk_name, sizeof(disk_name), "%s%d",
+                 current.device_name,
+                 current.unit_number);
+
+        py_disk_info = Py_BuildValue(
+            "(KKKKLL)",
+            current.operations[DEVSTAT_READ],   // number of reads
+            current.operations[DEVSTAT_WRITE],  // number of writes
+            current.bytes[DEVSTAT_READ],        // bytes read
+            current.bytes[DEVSTAT_WRITE],       // bytes written
+            (long long)devstat_compute_etime(
+                &current.duration[DEVSTAT_READ], NULL),  // r time
+            (long long)devstat_compute_etime(
+                &current.duration[DEVSTAT_WRITE], NULL));  // w time
+        if (!py_disk_info)
+            goto error;
+        if (PyDict_SetItemString(py_retdict, disk_name, py_disk_info))
+            goto error;
+        Py_DECREF(py_disk_info);
+    }
+
+    if (stats.dinfo->mem_ptr) {
+        free(stats.dinfo->mem_ptr);
+    }
+    free(stats.dinfo);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_disk_info);
+    Py_DECREF(py_retdict);
+    if (stats.dinfo != NULL)
+        free(stats.dinfo);
+    return NULL;
+}
+
+
+/*
+ * Return currently connected users as a list of tuples.
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    PyObject *ret_list = PyList_New(0);
+    PyObject *tuple = NULL;
+
+    if (ret_list == NULL)
+        return NULL;
+
+#if __FreeBSD_version < 900000
+    struct utmp ut;
+    FILE *fp;
+
+    fp = fopen(_PATH_UTMP, "r");
+    if (fp == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    while (fread(&ut, sizeof(ut), 1, fp) == 1) {
+        if (*ut.ut_name == '\0')
+            continue;
+        tuple = Py_BuildValue(
+            "(sssf)",
+            ut.ut_name,         // username
+            ut.ut_line,         // tty
+            ut.ut_host,         // hostname
+            (float)ut.ut_time);  // start time
+        if (!tuple) {
+            fclose(fp);
+            goto error;
+        }
+        if (PyList_Append(ret_list, tuple)) {
+            fclose(fp);
+            goto error;
+        }
+        Py_DECREF(tuple);
+    }
+
+    fclose(fp);
+#else
+    struct utmpx *utx;
+
+    while ((utx = getutxent()) != NULL) {
+        if (utx->ut_type != USER_PROCESS)
+            continue;
+        tuple = Py_BuildValue(
+            "(sssf)",
+            utx->ut_user,  // username
+            utx->ut_line,  // tty
+            utx->ut_host,  // hostname
+            (float)utx->ut_tv.tv_sec  // start time
+        );
+
+        if (!tuple) {
+            endutxent();
+            goto error;
+        }
+        if (PyList_Append(ret_list, tuple)) {
+            endutxent();
+            goto error;
+        }
+        Py_DECREF(tuple);
+    }
+
+    endutxent();
+#endif
+    return ret_list;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(ret_list);
+    return NULL;
+}
+
+
+
+/*
+ * System-wide open connections.
+ */
+
+#define HASHSIZE 1009
+static struct xfile *psutil_xfiles;
+static int psutil_nxfiles;
+
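+// The kernel's open-file table, fetched once per net_connections() call
+// via the kern.file sysctl. Each xfile entry records the owning PID
+// (xf_pid) and the kernel address of the underlying object (xf_data);
+// sockets are matched back to their PID by hashing that address modulo
+// HASHSIZE (see psutil_get_pid_from_sock below).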
+int
+psutil_populate_xfiles()
+{
+    size_t len;
+
+    if ((psutil_xfiles = malloc(len = sizeof *psutil_xfiles)) == NULL) {
+        PyErr_NoMemory();
+        return 0;
+    }
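+    // kern.file fails with ENOMEM while the buffer is too small to hold
+    // the whole table, so keep doubling it until the copy succeeds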
+    while (sysctlbyname("kern.file", psutil_xfiles, &len, 0, 0) == -1) {
+        if (errno != ENOMEM) {
+            PyErr_SetFromErrno(PyExc_OSError);
+            return 0;
+        }
+        len *= 2;
+        if ((psutil_xfiles = realloc(psutil_xfiles, len)) == NULL) {
+            PyErr_NoMemory();
+            return 0;
+        }
+    }
+    if (len > 0 && psutil_xfiles->xf_size != sizeof *psutil_xfiles) {
+        PyErr_Format(PyExc_RuntimeError, "struct xfile size mismatch");
+        return 0;
+    }
+    psutil_nxfiles = len / sizeof *psutil_xfiles;
+    return 1;
+}
+
+int
+psutil_get_pid_from_sock(int sock_hash)
+{
+    struct xfile *xf;
+    int hash, n;
+    for (xf = psutil_xfiles, n = 0; n < psutil_nxfiles; ++n, ++xf) {
+        if (xf->xf_data == NULL)
+            continue;
+        hash = (int)((uintptr_t)xf->xf_data % HASHSIZE);
+        if (sock_hash == hash) {
+            return xf->xf_pid;
+        }
+    }
+    return -1;
+}
+
+
+int psutil_gather_inet(int proto, PyObject *py_retlist)
+{
+    struct xinpgen *xig, *exig;
+    struct xinpcb *xip;
+    struct xtcpcb *xtp;
+    struct inpcb *inp;
+    struct xsocket *so;
+    struct sock *sock;
+    const char *varname;
+    size_t len, bufsize;
+    void *buf;
+    int hash, retry, vflag, type;
+
+    PyObject *tuple = NULL;
+    PyObject *laddr = NULL;
+    PyObject *raddr = NULL;
+
+    switch (proto) {
+    case IPPROTO_TCP:
+        varname = "net.inet.tcp.pcblist";
+        type = SOCK_STREAM;
+        break;
+    case IPPROTO_UDP:
+        varname = "net.inet.udp.pcblist";
+        type = SOCK_DGRAM;
+        break;
+    }
+
+    buf = NULL;
+    bufsize = 8192;
+    retry = 5;
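+    // The pcb list can change while it is being copied out; the kernel
+    // stamps a generation count at both ends of the buffer (xig/exig),
+    // so retry until the two match and we have a consistent snapshot.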
+    do {
+        for (;;) {
+            buf = realloc(buf, bufsize);
+            if (buf == NULL) {
+                PyErr_NoMemory();
+                goto error;
+            }
+            len = bufsize;
+            if (sysctlbyname(varname, buf, &len, NULL, 0) == 0)
+                break;
+            if (errno != ENOMEM) {
+                PyErr_SetFromErrno(PyExc_OSError);
+                goto error;
+            }
+            bufsize *= 2;
+        }
+        xig = (struct xinpgen *)buf;
+        exig = (struct xinpgen *)(void *)((char *)buf + len - sizeof *exig);
+        if (xig->xig_len != sizeof *xig || exig->xig_len != sizeof *exig) {
+            PyErr_Format(PyExc_RuntimeError, "struct xinpgen size mismatch");
+            goto error;
+        }
+    } while (xig->xig_gen != exig->xig_gen && retry--);
+
+
+    for (;;) {
+        xig = (struct xinpgen *)(void *)((char *)xig + xig->xig_len);
+        if (xig >= exig)
+            break;
+
+        switch (proto) {
+        case IPPROTO_TCP:
+            xtp = (struct xtcpcb *)xig;
+            if (xtp->xt_len != sizeof *xtp) {
+                PyErr_Format(PyExc_RuntimeError, "struct xtcpcb size mismatch");
+                goto error;
+            }
+            inp = &xtp->xt_inp;
+            so = &xtp->xt_socket;
+            break;
+        case IPPROTO_UDP:
+            xip = (struct xinpcb *)xig;
+            if (xip->xi_len != sizeof *xip) {
+                PyErr_Format(PyExc_RuntimeError, "struct xinpcb size mismatch");
+                goto error;
+            }
+            inp = &xip->xi_inp;
+            so = &xip->xi_socket;
+            break;
+        }
+
+        char lip[200], rip[200];
+        int family, lport, rport, pid, status;
+
+        hash = (int)((uintptr_t)so->xso_so % HASHSIZE);
+        pid = psutil_get_pid_from_sock(hash);
+        if (pid < 0)
+            continue;
+        lport = ntohs(inp->inp_lport);
+        rport = ntohs(inp->inp_fport);
+        // TCP connection state only applies to TCP sockets
+        status = (proto == IPPROTO_TCP) ? xtp->xt_tp.t_state : PSUTIL_CONN_NONE;
+
+        if (inp->inp_vflag & INP_IPV4) {
+            family = AF_INET;
+            inet_ntop(AF_INET, &inp->inp_laddr.s_addr, lip, sizeof(lip));
+            inet_ntop(AF_INET, &inp->inp_faddr.s_addr, rip, sizeof(rip));
+        }
+        else if (inp->inp_vflag & INP_IPV6) {
+            family = AF_INET6;
+            inet_ntop(AF_INET6, &inp->in6p_laddr.s6_addr, lip, sizeof(lip));
+            inet_ntop(AF_INET6, &inp->in6p_faddr.s6_addr, rip, sizeof(rip));
+        }
+        else {
+            // unknown address family; skip this entry
+            continue;
+        }
+
+        // construct python tuple/list
+        laddr = Py_BuildValue("(si)", lip, lport);
+        if (!laddr)
+            goto error;
+        if (rport != 0) {
+            raddr = Py_BuildValue("(si)", rip, rport);
+        }
+        else {
+            raddr = Py_BuildValue("()");
+        }
+        if (!raddr)
+            goto error;
+        tuple = Py_BuildValue("(iiiNNii)", -1, family, type, laddr, raddr,
+                                               status, pid);
+        if (!tuple)
+            goto error;
+        if (PyList_Append(py_retlist, tuple))
+            goto error;
+        Py_DECREF(tuple);
+    }
+
+    free(buf);
+    return 1;
+
+error:
+    Py_XDECREF(tuple);
+    Py_XDECREF(laddr);
+    Py_XDECREF(raddr);
+    free(buf);
+    return 0;
+}
+
+
+int psutil_gather_unix(int proto, PyObject *py_retlist)
+{
+    struct xunpgen *xug, *exug;
+    struct xunpcb *xup;
+    struct sock *sock;
+    const char *varname, *protoname;
+    size_t len, bufsize;
+    void *buf;
+    int hash, retry;
+    int family, lport, rport, pid;
+    struct sockaddr_un *sun;
+    char path[PATH_MAX];
+
+    PyObject *tuple = NULL;
+    PyObject *laddr = NULL;
+    PyObject *raddr = NULL;
+
+    switch (proto) {
+    case SOCK_STREAM:
+        varname = "net.local.stream.pcblist";
+        protoname = "stream";
+        break;
+    case SOCK_DGRAM:
+        varname = "net.local.dgram.pcblist";
+        protoname = "dgram";
+        break;
+    }
+
+    buf = NULL;
+    bufsize = 8192;
+    retry = 5;
+
+    do {
+        for (;;) {
+            buf = realloc(buf, bufsize);
+            if (buf == NULL) {
+                PyErr_NoMemory();
+                goto error;
+            }
+            len = bufsize;
+            if (sysctlbyname(varname, buf, &len, NULL, 0) == 0)
+                break;
+            if (errno != ENOMEM) {
+                PyErr_SetFromErrno(PyExc_OSError);
+                goto error;
+            }
+            bufsize *= 2;
+        }
+        xug = (struct xunpgen *)buf;
+        exug = (struct xunpgen *)(void *)
+            ((char *)buf + len - sizeof *exug);
+        if (xug->xug_len != sizeof *xug || exug->xug_len != sizeof *exug) {
+            PyErr_Format(PyExc_RuntimeError, "struct xinpgen size mismatch");
+            goto error;
+        }
+    } while (xug->xug_gen != exug->xug_gen && retry--);
+
+    for (;;) {
+        xug = (struct xunpgen *)(void *)((char *)xug + xug->xug_len);
+        if (xug >= exug)
+            break;
+        xup = (struct xunpcb *)xug;
+        if (xup->xu_len != sizeof *xup) {
+            PyErr_Format(PyExc_RuntimeError, "struct xunpcb size mismatch");
+            goto error;
+        }
+
+        hash = (int)((uintptr_t) xup->xu_socket.xso_so % HASHSIZE);
+        pid = psutil_get_pid_from_sock(hash);
+        if (pid < 0)
+            continue;
+
+        sun = (struct sockaddr_un *)&xup->xu_addr;
+        snprintf(path, sizeof(path), "%.*s",
+                 (sun->sun_len - (sizeof(*sun) - sizeof(sun->sun_path))),
+                 sun->sun_path);
+
+        tuple = Py_BuildValue("(iiisOii)", -1, AF_UNIX, proto, path, Py_None,
+                                               PSUTIL_CONN_NONE, pid);
+        if (!tuple)
+            goto error;
+        if (PyList_Append(py_retlist, tuple))
+            goto error;
+        Py_DECREF(tuple);
+        // note: Py_BuildValue's "O" already took its own reference to Py_None
+    }
+
+    free(buf);
+    return 1;
+
+error:
+    Py_XDECREF(tuple);
+    Py_XDECREF(laddr);
+    Py_XDECREF(raddr);
+    free(buf);
+    return 0;
+}
+
+
+/*
+ * Return system-wide open connections.
+ */
+static PyObject*
+psutil_net_connections(PyObject* self, PyObject* args)
+{
+    PyObject *py_retlist = PyList_New(0);
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    if (psutil_populate_xfiles() != 1)
+        goto error;
+
+    if (psutil_gather_inet(IPPROTO_TCP, py_retlist) == 0)
+        goto error;
+    if (psutil_gather_inet(IPPROTO_UDP, py_retlist) == 0)
+        goto error;
+    if (psutil_gather_unix(SOCK_STREAM, py_retlist) == 0)
+        goto error;
+    if (psutil_gather_unix(SOCK_DGRAM, py_retlist) == 0)
+        goto error;
+
+    free(psutil_xfiles);
+    return py_retlist;
+
+error:
+    Py_DECREF(py_retlist);
+    free(psutil_xfiles);
+    return NULL;
+}
+
+
+/*
+ * define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- per-process functions
+
+    {"proc_name", psutil_proc_name, METH_VARARGS,
+     "Return process name"},
+    {"proc_connections", psutil_proc_connections, METH_VARARGS,
+     "Return connections opened by process"},
+    {"proc_exe", psutil_proc_exe, METH_VARARGS,
+     "Return process pathname executable"},
+    {"proc_cmdline", psutil_proc_cmdline, METH_VARARGS,
+     "Return process cmdline as a list of cmdline arguments"},
+    {"proc_ppid", psutil_proc_ppid, METH_VARARGS,
+     "Return process ppid as an integer"},
+    {"proc_uids", psutil_proc_uids, METH_VARARGS,
+     "Return process real effective and saved user ids as a Python tuple"},
+    {"proc_gids", psutil_proc_gids, METH_VARARGS,
+     "Return process real effective and saved group ids as a Python tuple"},
+    {"proc_cpu_times", psutil_proc_cpu_times, METH_VARARGS,
+     "Return tuple of user/kern time for the given PID"},
+    {"proc_create_time", psutil_proc_create_time, METH_VARARGS,
+     "Return a float indicating the process create time expressed in "
+     "seconds since the epoch"},
+    {"proc_memory_info", psutil_proc_memory_info, METH_VARARGS,
+     "Return extended memory info for a process as a Python tuple."},
+    {"proc_num_threads", psutil_proc_num_threads, METH_VARARGS,
+     "Return number of threads used by process"},
+    {"proc_num_ctx_switches", psutil_proc_num_ctx_switches, METH_VARARGS,
+     "Return the number of context switches performed by process"},
+    {"proc_threads", psutil_proc_threads, METH_VARARGS,
+     "Return process threads"},
+    {"proc_status", psutil_proc_status, METH_VARARGS,
+     "Return process status as an integer"},
+    {"proc_io_counters", psutil_proc_io_counters, METH_VARARGS,
+     "Return process IO counters"},
+    {"proc_tty_nr", psutil_proc_tty_nr, METH_VARARGS,
+     "Return process tty (terminal) number"},
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+    {"proc_open_files", psutil_proc_open_files, METH_VARARGS,
+     "Return files opened by process as a list of (path, fd) tuples"},
+    {"proc_cwd", psutil_proc_cwd, METH_VARARGS,
+     "Return process current working directory."},
+    {"proc_memory_maps", psutil_proc_memory_maps, METH_VARARGS,
+     "Return a list of tuples for every process's memory map"},
+    {"proc_num_fds", psutil_proc_num_fds, METH_VARARGS,
+     "Return the number of file descriptors opened by this process"},
+#endif
+
+    // --- system-related functions
+
+    {"pids", psutil_pids, METH_VARARGS,
+     "Returns a list of PIDs currently running on the system"},
+    {"cpu_count_logical", psutil_cpu_count_logical, METH_VARARGS,
+     "Return number of logical CPUs on the system"},
+    {"cpu_count_phys", psutil_cpu_count_phys, METH_VARARGS,
+     "Return an XML string to determine the number physical CPUs."},
+    {"virtual_mem", psutil_virtual_mem, METH_VARARGS,
+     "Return system virtual memory usage statistics"},
+    {"swap_mem", psutil_swap_mem, METH_VARARGS,
+     "Return swap mem stats"},
+    {"cpu_times", psutil_cpu_times, METH_VARARGS,
+     "Return system cpu times as a tuple (user, system, nice, idle, irc)"},
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+    {"per_cpu_times", psutil_per_cpu_times, METH_VARARGS,
+     "Return system per-cpu times as a list of tuples"},
+#endif
+    {"boot_time", psutil_boot_time, METH_VARARGS,
+     "Return the system boot time expressed in seconds since the epoch."},
+    {"disk_partitions", psutil_disk_partitions, METH_VARARGS,
+     "Return a list of tuples including device, mount point and "
+     "fs type for all partitions mounted on the system."},
+    {"net_io_counters", psutil_net_io_counters, METH_VARARGS,
+     "Return dict of tuples of networks I/O information."},
+    {"disk_io_counters", psutil_disk_io_counters, METH_VARARGS,
+     "Return a Python dict of tuples for disk I/O information"},
+    {"users", psutil_users, METH_VARARGS,
+     "Return currently connected users as a list of tuples"},
+    {"net_connections", psutil_net_connections, METH_VARARGS,
+     "Return system-wide open connections."},
+
+    {NULL, NULL, 0, NULL}
+};
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_bsd_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_bsd_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+static struct PyModuleDef
+        moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_bsd",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_bsd_traverse,
+    psutil_bsd_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_bsd(void)
+
+#else
+#define INITERROR return
+
+void init_psutil_bsd(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_bsd", PsutilMethods);
+#endif
+    if (module == NULL) {
+        INITERROR;
+    }
+    // process status constants
+    PyModule_AddIntConstant(module, "SSTOP", SSTOP);
+    PyModule_AddIntConstant(module, "SSLEEP", SSLEEP);
+    PyModule_AddIntConstant(module, "SRUN", SRUN);
+    PyModule_AddIntConstant(module, "SIDL", SIDL);
+    PyModule_AddIntConstant(module, "SWAIT", SWAIT);
+    PyModule_AddIntConstant(module, "SLOCK", SLOCK);
+    PyModule_AddIntConstant(module, "SZOMB", SZOMB);
+    // connection status constants
+    PyModule_AddIntConstant(module, "TCPS_CLOSED", TCPS_CLOSED);
+    PyModule_AddIntConstant(module, "TCPS_CLOSING", TCPS_CLOSING);
+    PyModule_AddIntConstant(module, "TCPS_CLOSE_WAIT", TCPS_CLOSE_WAIT);
+    PyModule_AddIntConstant(module, "TCPS_LISTEN", TCPS_LISTEN);
+    PyModule_AddIntConstant(module, "TCPS_ESTABLISHED", TCPS_ESTABLISHED);
+    PyModule_AddIntConstant(module, "TCPS_SYN_SENT", TCPS_SYN_SENT);
+    PyModule_AddIntConstant(module, "TCPS_SYN_RECEIVED", TCPS_SYN_RECEIVED);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_1", TCPS_FIN_WAIT_1);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_2", TCPS_FIN_WAIT_2);
+    PyModule_AddIntConstant(module, "TCPS_LAST_ACK", TCPS_LAST_ACK);
+    PyModule_AddIntConstant(module, "TCPS_TIME_WAIT", TCPS_TIME_WAIT);
+    PyModule_AddIntConstant(module, "PSUTIL_CONN_NONE", PSUTIL_CONN_NONE);
+
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}
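
For reference, a minimal sketch of how the compiled extension above is
consumed from Python on FreeBSD. The module name and tuple layouts follow
the method table and Py_BuildValue calls in this file; the snippet is
illustrative only and not part of the patch.

    import _psutil_bsd  # the extension built from this file (FreeBSD only)

    # dict: interface name -> (bytes_sent, bytes_recv, packets_sent,
    #                          packets_recv, errin, errout, dropin, dropout)
    for name, counters in _psutil_bsd.net_io_counters().items():
        print(name, counters)

    # dict: disk name -> (reads, writes, read_bytes, write_bytes,
    #                     read_time, write_time)
    print(_psutil_bsd.disk_io_counters())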

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.h
new file mode 100644
index 0000000..2bc7c70
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_bsd.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+// --- per-process functions
+
+static PyObject* psutil_proc_cmdline(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_connections(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_create_time(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_exe(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_gids(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_info(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_maps(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_name(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_ctx_switches(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_fds(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_ppid(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_status(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_threads(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_tty_nr(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_uids(PyObject* self, PyObject* args);
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+static PyObject* psutil_proc_open_files(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cwd(PyObject* self, PyObject* args);
+#endif
+
+// --- system-related functions
+
+static PyObject* psutil_boot_time(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_logical(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_phys(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_net_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_pids(PyObject* self, PyObject* args);
+static PyObject* psutil_swap_mem(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);
+static PyObject* psutil_virtual_mem(PyObject* self, PyObject* args);
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+static PyObject* psutil_per_cpu_times(PyObject* self, PyObject* args);
+#endif
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.c
new file mode 100644
index 0000000..1c530d4
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Routines common to all platforms.
+ */
+
+#include <Python.h>
+
+
+/*
+ * Set OSError(errno=ESRCH, strerror="No such process") Python exception.
+ */
+PyObject *
+NoSuchProcess(void) {
+    PyObject *exc;
+    char *msg = strerror(ESRCH);
+    exc = PyObject_CallFunction(PyExc_OSError, "(is)", ESRCH, msg);
+    PyErr_SetObject(PyExc_OSError, exc);
+    Py_XDECREF(exc);
+    return NULL;
+}
+
+
+/*
+ * Set OSError(errno=EACCES, strerror="Permission denied") Python exception.
+ */
+PyObject *
+AccessDenied(void) {
+    PyObject *exc;
+    char *msg = strerror(EACCES);
+    exc = PyObject_CallFunction(PyExc_OSError, "(is)", EACCES, msg);
+    PyErr_SetObject(PyExc_OSError, exc);
+    Py_XDECREF(exc);
+    return NULL;
+}
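
For reference, a rough sketch of what these helpers look like from the
Python side: both set an OSError whose errno the pure-Python layer
translates into psutil.NoSuchProcess or psutil.AccessDenied. The function
name below is a placeholder, not a real API.

    import errno

    try:
        some_extension_call(pid)  # hypothetical call into a psutil C module
    except OSError as err:
        if err.errno == errno.ESRCH:
            pass  # the C side called NoSuchProcess()
        elif err.errno == errno.EACCES:
            pass  # the C side called AccessDenied()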

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.h
new file mode 100644
index 0000000..43021a7
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_common.h
@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+PyObject* AccessDenied(void);
+PyObject* NoSuchProcess(void);

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.c
new file mode 100644
index 0000000..1f17ae7
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Linux-specific functions.
+ */
+
+#ifndef _GNU_SOURCE
+    #define _GNU_SOURCE 1
+#endif
+#include <Python.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <mntent.h>
+#include <features.h>
+#include <utmp.h>
+#include <sched.h>
+#include <linux/version.h>
+#include <sys/syscall.h>
+#include <sys/sysinfo.h>
+
+#include "_psutil_linux.h"
+
+
+// Linux >= 2.6.13
+#define PSUTIL_HAVE_IOPRIO defined(__NR_ioprio_get) && defined(__NR_ioprio_set)
+
+// Linux >= 2.6.36 (supposedly) and glibc >= 2.13
+#define PSUTIL_HAVE_PRLIMIT \
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) && \
+    (__GLIBC__ >= 2 && __GLIBC_MINOR__ >= 13) && \
+    defined(__NR_prlimit64)
+
+#if PSUTIL_HAVE_PRLIMIT
+    #define _FILE_OFFSET_BITS 64
+    #include <time.h>
+    #include <sys/resource.h>
+#endif
+
+
+#if PSUTIL_HAVE_IOPRIO
+enum {
+    IOPRIO_WHO_PROCESS = 1,
+};
+
+static inline int
+ioprio_get(int which, int who)
+{
+    return syscall(__NR_ioprio_get, which, who);
+}
+
+static inline int
+ioprio_set(int which, int who, int ioprio)
+{
+    return syscall(__NR_ioprio_set, which, who, ioprio);
+}
+
+#define IOPRIO_CLASS_SHIFT 13
+#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+
+#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
+#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
+#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
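+// An ioprio value packs the scheduling class into the bits above
+// IOPRIO_CLASS_SHIFT and the class-relative priority (0-7) into the
+// low bits, e.g. IOPRIO_PRIO_VALUE(2, 4) == (2 << 13) | 4 (best-effort,
+// level 4).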
+
+
+/*
+ * Return a (ioclass, iodata) Python tuple representing process I/O priority.
+ */
+static PyObject *
+psutil_proc_ioprio_get(PyObject *self, PyObject *args)
+{
+    long pid;
+    int ioprio, ioclass, iodata;
+    if (! PyArg_ParseTuple(args, "l", &pid)) {
+        return NULL;
+    }
+    ioprio = ioprio_get(IOPRIO_WHO_PROCESS, pid);
+    if (ioprio == -1) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    ioclass = IOPRIO_PRIO_CLASS(ioprio);
+    iodata = IOPRIO_PRIO_DATA(ioprio);
+    return Py_BuildValue("ii", ioclass, iodata);
+}
+
+
+/*
+ * A wrapper around ioprio_set(); sets process I/O priority.
+ * ioclass can be either IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE
+ * or 0. iodata goes from 0 to 7 depending on ioclass specified.
+ */
+static PyObject *
+psutil_proc_ioprio_set(PyObject *self, PyObject *args)
+{
+    long pid;
+    int ioprio, ioclass, iodata;
+    int retval;
+
+    if (! PyArg_ParseTuple(args, "lii", &pid, &ioclass, &iodata)) {
+        return NULL;
+    }
+    ioprio = IOPRIO_PRIO_VALUE(ioclass, iodata);
+    retval = ioprio_set(IOPRIO_WHO_PROCESS, pid, ioprio);
+    if (retval == -1) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+#endif
+
+
+#if PSUTIL_HAVE_PRLIMIT
+/*
+ * A wrapper around prlimit(2); sets process resource limits.
+ * This can be used for both get and set, in which case extra
+ * 'soft' and 'hard' args must be provided.
+ */
+static PyObject *
+psutil_linux_prlimit(PyObject *self, PyObject *args)
+{
+    long pid;
+    int ret, resource;
+    struct rlimit old, new;
+    struct rlimit *newp = NULL;
+    PyObject *soft = NULL;
+    PyObject *hard = NULL;
+
+    if (! PyArg_ParseTuple(args, "li|OO", &pid, &resource, &soft, &hard)) {
+        return NULL;
+    }
+
+    // get
+    if (soft == NULL && hard == NULL) {
+        ret = prlimit(pid, resource, NULL, &old);
+        if (ret == -1)
+            return PyErr_SetFromErrno(PyExc_OSError);
+#if defined(PSUTIL_HAVE_LONG_LONG)
+        if (sizeof(old.rlim_cur) > sizeof(long)) {
+            return Py_BuildValue("LL",
+                                 (PY_LONG_LONG)old.rlim_cur,
+                                 (PY_LONG_LONG)old.rlim_max);
+        }
+#endif
+        return Py_BuildValue("ll", (long)old.rlim_cur, (long)old.rlim_max);
+    }
+
+    // set
+    else {
+#if defined(PSUTIL_HAVE_LARGEFILE_SUPPORT)
+        new.rlim_cur = PyLong_AsLongLong(soft);
+        if (new.rlim_cur == (rlim_t) - 1 && PyErr_Occurred())
+            return NULL;
+        new.rlim_max = PyLong_AsLongLong(hard);
+        if (new.rlim_max == (rlim_t) - 1 && PyErr_Occurred())
+            return NULL;
+#else
+        new.rlim_cur = PyLong_AsLong(soft);
+        if (new.rlim_cur == (rlim_t) - 1 && PyErr_Occurred())
+            return NULL;
+        new.rlim_max = PyLong_AsLong(hard);
+        if (new.rlim_max == (rlim_t) - 1 && PyErr_Occurred())
+            return NULL;
+#endif
+        newp = &new;
+        ret = prlimit(pid, resource, newp, &old);
+        if (ret == -1)
+            return PyErr_SetFromErrno(PyExc_OSError);
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+}
+#endif
+
+
+/*
+ * Return disk mounted partitions as a list of tuples including device,
+ * mount point and filesystem type
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    FILE *file = NULL;
+    struct mntent *entry;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    // MOUNTED constant comes from mntent.h and it's == '/etc/mtab'
+    Py_BEGIN_ALLOW_THREADS
+    file = setmntent(MOUNTED, "r");
+    Py_END_ALLOW_THREADS
+    if (file == NULL) {
+        PyErr_SetFromErrnoWithFilename(PyExc_OSError, MOUNTED);
+        goto error;
+    }
+
+    while ((entry = getmntent(file)) != NULL) {
+        py_tuple = Py_BuildValue("(ssss)",
+                                 entry->mnt_fsname,  // device
+                                 entry->mnt_dir,     // mount point
+                                 entry->mnt_type,    // fs type
+                                 entry->mnt_opts);   // options
+        if (! py_tuple)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+    }
+    endmntent(file);
+    return py_retlist;
+
+error:
+    if (file != NULL)
+        endmntent(file);
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    return NULL;
+}
+
+
+/*
+ * A wrapper around sysinfo(), return system memory usage statistics.
+ */
+static PyObject *
+psutil_linux_sysinfo(PyObject *self, PyObject *args)
+{
+    struct sysinfo info;
+    if (sysinfo(&info) != 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+
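+    // sysinfo() reports sizes in multiples of info.mem_unit bytes, so
+    // each field is scaled to plain bytes before being returned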
+    // note: boot time might also be determined from here
+    return Py_BuildValue(
+        "(KKKKKK)",
+        (unsigned long long)info.totalram  * info.mem_unit,   // total
+        (unsigned long long)info.freeram   * info.mem_unit,   // free
+        (unsigned long long)info.bufferram * info.mem_unit,   // buffer
+        (unsigned long long)info.sharedram * info.mem_unit,   // shared
+        (unsigned long long)info.totalswap * info.mem_unit,   // swap tot
+        (unsigned long long)info.freeswap  * info.mem_unit);  // swap free
+}
+
+
+/*
+ * Return process CPU affinity as a Python long (the bitmask)
+ */
+static PyObject *
+psutil_proc_cpu_affinity_get(PyObject *self, PyObject *args)
+{
+    unsigned long mask;
+    unsigned int len = sizeof(mask);
+    long pid;
+
+    if (!PyArg_ParseTuple(args, "i", &pid)) {
+        return NULL;
+    }
+    if (sched_getaffinity(pid, len, (cpu_set_t *)&mask) < 0) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
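+    // the returned value is a plain bitmask: bit i set means the process
+    // may be scheduled on CPU i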
+    return Py_BuildValue("l", mask);
+}
+
+
+/*
+ * Set process CPU affinity; expects a bitmask
+ */
+static PyObject *
+psutil_proc_cpu_affinity_set(PyObject *self, PyObject *args)
+{
+    cpu_set_t cpu_set;
+    size_t len;
+    long pid;
+    int i, seq_len;
+    PyObject *py_cpu_set;
+    PyObject *py_cpu_seq = NULL;
+
+    if (!PyArg_ParseTuple(args, "lO", &pid, &py_cpu_set)) {
+        goto error;
+    }
+
+    if (!PySequence_Check(py_cpu_set)) {
+        // does not work on Python 2.4
+        // PyErr_Format(PyExc_TypeError, "sequence argument expected, got %s",
+        //              Py_TYPE(py_cpu_set)->tp_name);
+        PyErr_Format(PyExc_TypeError, "sequence argument expected");
+        goto error;
+    }
+
+    py_cpu_seq = PySequence_Fast(py_cpu_set, "expected a sequence or integer");
+    if (!py_cpu_seq) {
+        goto error;
+    }
+    seq_len = PySequence_Fast_GET_SIZE(py_cpu_seq);
+    CPU_ZERO(&cpu_set);
+    for (i = 0; i < seq_len; i++) {
+        PyObject *item = PySequence_Fast_GET_ITEM(py_cpu_seq, i);
+#if PY_MAJOR_VERSION >= 3
+        long value = PyLong_AsLong(item);
+#else
+        long value = PyInt_AsLong(item);
+#endif
+        if (value == -1 && PyErr_Occurred()) {
+            goto error;
+        }
+        CPU_SET(value, &cpu_set);
+    }
+
+    len = sizeof(cpu_set);
+    if (sched_setaffinity(pid, len, &cpu_set)) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    Py_DECREF(py_cpu_seq);
+    Py_INCREF(Py_None);
+    return Py_None;
+
+error:
+    if (py_cpu_seq != NULL)
+        Py_DECREF(py_cpu_seq);
+    return NULL;
+}
+
+
+/*
+ * Return currently connected users as a list of tuples.
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    PyObject *ret_list = PyList_New(0);
+    PyObject *tuple = NULL;
+    PyObject *user_proc = NULL;
+    struct utmp *ut;
+
+    if (ret_list == NULL)
+        return NULL;
+    setutent();
+    while (NULL != (ut = getutent())) {
+        tuple = NULL;
+        user_proc = NULL;
+        if (ut->ut_type == USER_PROCESS)
+            user_proc = Py_True;
+        else
+            user_proc = Py_False;
+        tuple = Py_BuildValue(
+            "(sssfO)",
+            ut->ut_user,              // username
+            ut->ut_line,              // tty
+            ut->ut_host,              // hostname
+            (float)ut->ut_tv.tv_sec,  // tstamp
+            user_proc                 // (bool) user process
+        );
+        if (! tuple)
+            goto error;
+        if (PyList_Append(ret_list, tuple))
+            goto error;
+        Py_DECREF(tuple);
+    }
+    endutent();
+    return ret_list;
+
+error:
+    Py_XDECREF(tuple);
+    // user_proc is a borrowed Py_True/Py_False reference; no DECREF needed
+    Py_DECREF(ret_list);
+    endutent();
+    return NULL;
+}
+
+
+/*
+ * Define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- per-process functions
+
+#if PSUTIL_HAVE_IOPRIO
+    {"proc_ioprio_get", psutil_proc_ioprio_get, METH_VARARGS,
+     "Get process I/O priority"},
+    {"proc_ioprio_set", psutil_proc_ioprio_set, METH_VARARGS,
+     "Set process I/O priority"},
+#endif
+    {"proc_cpu_affinity_get", psutil_proc_cpu_affinity_get, METH_VARARGS,
+     "Return process CPU affinity as a Python long (the bitmask)."},
+    {"proc_cpu_affinity_set", psutil_proc_cpu_affinity_set, METH_VARARGS,
+     "Set process CPU affinity; expects a bitmask."},
+
+    // --- system related functions
+
+    {"disk_partitions", psutil_disk_partitions, METH_VARARGS,
+     "Return disk mounted partitions as a list of tuples including "
+     "device, mount point and filesystem type"},
+    {"users", psutil_users, METH_VARARGS,
+     "Return currently connected users as a list of tuples"},
+
+    // --- linux specific
+
+    {"linux_sysinfo", psutil_linux_sysinfo, METH_VARARGS,
+     "A wrapper around sysinfo(), return system memory usage statistics"},
+#if PSUTIL_HAVE_PRLIMIT
+    {"linux_prlimit", psutil_linux_prlimit, METH_VARARGS,
+     "Get or set process resource limits."},
+#endif
+
+
+    {NULL, NULL, 0, NULL}
+};
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_linux_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_linux_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+static struct PyModuleDef
+        moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_linux",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_linux_traverse,
+    psutil_linux_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_linux(void)
+
+#else
+#define INITERROR return
+
+void init_psutil_linux(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_linux", PsutilMethods);
+#endif
+
+    if (module == NULL) {
+        INITERROR;
+    }
+
+#if PSUTIL_HAVE_PRLIMIT
+    PyModule_AddIntConstant(module, "RLIM_INFINITY", RLIM_INFINITY);
+    PyModule_AddIntConstant(module, "RLIMIT_AS", RLIMIT_AS);
+    PyModule_AddIntConstant(module, "RLIMIT_CORE", RLIMIT_CORE);
+    PyModule_AddIntConstant(module, "RLIMIT_CPU", RLIMIT_CPU);
+    PyModule_AddIntConstant(module, "RLIMIT_DATA", RLIMIT_DATA);
+    PyModule_AddIntConstant(module, "RLIMIT_FSIZE", RLIMIT_FSIZE);
+    PyModule_AddIntConstant(module, "RLIMIT_LOCKS", RLIMIT_LOCKS);
+    PyModule_AddIntConstant(module, "RLIMIT_MEMLOCK", RLIMIT_MEMLOCK);
+    PyModule_AddIntConstant(module, "RLIMIT_NOFILE", RLIMIT_NOFILE);
+    PyModule_AddIntConstant(module, "RLIMIT_NPROC", RLIMIT_NPROC);
+    PyModule_AddIntConstant(module, "RLIMIT_RSS", RLIMIT_RSS);
+    PyModule_AddIntConstant(module, "RLIMIT_STACK", RLIMIT_STACK);
+#ifdef RLIMIT_MSGQUEUE
+    PyModule_AddIntConstant(module, "RLIMIT_MSGQUEUE", RLIMIT_MSGQUEUE);
+#endif
+#ifdef RLIMIT_NICE
+    PyModule_AddIntConstant(module, "RLIMIT_NICE", RLIMIT_NICE);
+#endif
+#ifdef RLIMIT_RTPRIO
+    PyModule_AddIntConstant(module, "RLIMIT_RTPRIO", RLIMIT_RTPRIO);
+#endif
+#ifdef RLIMIT_RTTIME
+    PyModule_AddIntConstant(module, "RLIMIT_RTTIME", RLIMIT_RTTIME);
+#endif
+#ifdef RLIMIT_SIGPENDING
+    PyModule_AddIntConstant(module, "RLIMIT_SIGPENDING", RLIMIT_SIGPENDING);
+#endif
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}
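
For reference, a rough sketch of how these Linux wrappers surface through
psutil's public API (psutil 2.x; RLIMIT_NOFILE is one of the constants
registered above). Illustrative only, not part of the patch.

    import psutil

    p = psutil.Process()  # current process
    soft, hard = p.rlimit(psutil.RLIMIT_NOFILE)    # linux_prlimit, get form
    p.rlimit(psutil.RLIMIT_NOFILE, (soft, hard))   # linux_prlimit, set form
    ioclass, iodata = p.ionice()                   # proc_ioprio_get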

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.h
new file mode 100644
index 0000000..04ffec3
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_linux.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+// process
+
+static PyObject* psutil_proc_cpu_affinity_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_affinity_set(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_ioprio_get(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_ioprio_set(PyObject* self, PyObject* args);
+
+// system
+
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_linux_sysinfo(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);


[13/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/__init__.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/__init__.py
new file mode 100644
index 0000000..3068b10
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/__init__.py
@@ -0,0 +1,1987 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""psutil is a cross-platform library for retrieving information on
+running processes and system utilization (CPU, memory, disks, network)
+in Python.
+"""
+
+from __future__ import division
+
+__author__ = "Giampaolo Rodola'"
+__version__ = "2.1.1"
+version_info = tuple([int(num) for num in __version__.split('.')])
+
+__all__ = [
+    # exceptions
+    "Error", "NoSuchProcess", "AccessDenied", "TimeoutExpired",
+    # constants
+    "version_info", "__version__",
+    "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
+    "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
+    "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
+    "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
+    "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
+    "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
+    # classes
+    "Process", "Popen",
+    # functions
+    "pid_exists", "pids", "process_iter", "wait_procs",             # proc
+    "virtual_memory", "swap_memory",                                # memory
+    "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count",   # cpu
+    "net_io_counters", "net_connections",                           # network
+    "disk_io_counters", "disk_partitions", "disk_usage",            # disk
+    "users", "boot_time",                                           # others
+]
+
+import sys
+import os
+import time
+import signal
+import warnings
+import errno
+import subprocess
+try:
+    import pwd
+except ImportError:
+    pwd = None
+
+from psutil._common import memoize
+from psutil._compat import property, callable, defaultdict
+from psutil._compat import (wraps as _wraps,
+                            PY3 as _PY3)
+from psutil._common import (deprecated_method as _deprecated_method,
+                            deprecated as _deprecated,
+                            sdiskio as _nt_sys_diskio,
+                            snetio as _nt_sys_netio)
+
+from psutil._common import (STATUS_RUNNING,
+                            STATUS_SLEEPING,
+                            STATUS_DISK_SLEEP,
+                            STATUS_STOPPED,
+                            STATUS_TRACING_STOP,
+                            STATUS_ZOMBIE,
+                            STATUS_DEAD,
+                            STATUS_WAKING,
+                            STATUS_LOCKED,
+                            STATUS_IDLE,  # bsd
+                            STATUS_WAITING)  # bsd
+
+from psutil._common import (CONN_ESTABLISHED,
+                            CONN_SYN_SENT,
+                            CONN_SYN_RECV,
+                            CONN_FIN_WAIT1,
+                            CONN_FIN_WAIT2,
+                            CONN_TIME_WAIT,
+                            CONN_CLOSE,
+                            CONN_CLOSE_WAIT,
+                            CONN_LAST_ACK,
+                            CONN_LISTEN,
+                            CONN_CLOSING,
+                            CONN_NONE)
+
+if sys.platform.startswith("linux"):
+    import psutil._pslinux as _psplatform
+    from psutil._pslinux import (phymem_buffers,
+                                 cached_phymem)
+
+    from psutil._pslinux import (IOPRIO_CLASS_NONE,
+                                 IOPRIO_CLASS_RT,
+                                 IOPRIO_CLASS_BE,
+                                 IOPRIO_CLASS_IDLE)
+    # Linux >= 2.6.36
+    if _psplatform.HAS_PRLIMIT:
+        from _psutil_linux import (RLIM_INFINITY,
+                                   RLIMIT_AS,
+                                   RLIMIT_CORE,
+                                   RLIMIT_CPU,
+                                   RLIMIT_DATA,
+                                   RLIMIT_FSIZE,
+                                   RLIMIT_LOCKS,
+                                   RLIMIT_MEMLOCK,
+                                   RLIMIT_NOFILE,
+                                   RLIMIT_NPROC,
+                                   RLIMIT_RSS,
+                                   RLIMIT_STACK)
+        # Kinda ugly but considerably faster than using hasattr() and
+        # setattr() against the module object (we are at import time:
+        # speed matters).
+        import _psutil_linux
+        try:
+            RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
+        except AttributeError:
+            pass
+        del _psutil_linux
+
+elif sys.platform.startswith("win32"):
+    import psutil._pswindows as _psplatform
+    from _psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS,
+                                 BELOW_NORMAL_PRIORITY_CLASS,
+                                 HIGH_PRIORITY_CLASS,
+                                 IDLE_PRIORITY_CLASS,
+                                 NORMAL_PRIORITY_CLASS,
+                                 REALTIME_PRIORITY_CLASS)
+    from psutil._pswindows import CONN_DELETE_TCB
+
+elif sys.platform.startswith("darwin"):
+    import psutil._psosx as _psplatform
+
+elif sys.platform.startswith("freebsd"):
+    import psutil._psbsd as _psplatform
+
+elif sys.platform.startswith("sunos"):
+    import psutil._pssunos as _psplatform
+    from psutil._pssunos import (CONN_IDLE,
+                                 CONN_BOUND)
+
+else:
+    raise NotImplementedError('platform %s is not supported' % sys.platform)
+
+__all__.extend(_psplatform.__extra__all__)
+
+
+_TOTAL_PHYMEM = None
+_POSIX = os.name == 'posix'
+_WINDOWS = os.name == 'nt'
+_timer = getattr(time, 'monotonic', time.time)
+
+
+# =====================================================================
+# --- exceptions
+# =====================================================================
+
+class Error(Exception):
+    """Base exception class. All other psutil exceptions inherit
+    from this one.
+    """
+
+
+class NoSuchProcess(Error):
+    """Exception raised when a process with a certain PID doesn't
+    or no longer exists (zombie).
+    """
+
+    def __init__(self, pid, name=None, msg=None):
+        Error.__init__(self)
+        self.pid = pid
+        self.name = name
+        self.msg = msg
+        if msg is None:
+            if name:
+                details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
+            else:
+                details = "(pid=%s)" % self.pid
+            self.msg = "process no longer exists " + details
+
+    def __str__(self):
+        return self.msg
+
+
+class AccessDenied(Error):
+    """Exception raised when permission to perform an action is denied."""
+
+    def __init__(self, pid=None, name=None, msg=None):
+        Error.__init__(self)
+        self.pid = pid
+        self.name = name
+        self.msg = msg
+        if msg is None:
+            if (pid is not None) and (name is not None):
+                self.msg = "(pid=%s, name=%s)" % (pid, repr(name))
+            elif (pid is not None):
+                self.msg = "(pid=%s)" % self.pid
+            else:
+                self.msg = ""
+
+    def __str__(self):
+        return self.msg
+
+
+class TimeoutExpired(Error):
+    """Raised on Process.wait(timeout) if timeout expires and process
+    is still alive.
+    """
+
+    def __init__(self, seconds, pid=None, name=None):
+        Error.__init__(self)
+        self.seconds = seconds
+        self.pid = pid
+        self.name = name
+        self.msg = "timeout after %s seconds" % seconds
+        if (pid is not None) and (name is not None):
+            self.msg += " (pid=%s, name=%s)" % (pid, repr(name))
+        elif (pid is not None):
+            self.msg += " (pid=%s)" % self.pid
+
+    def __str__(self):
+        return self.msg
+
+# push exception classes into platform specific module namespace
+_psplatform.NoSuchProcess = NoSuchProcess
+_psplatform.AccessDenied = AccessDenied
+_psplatform.TimeoutExpired = TimeoutExpired
+
+
+# =====================================================================
+# --- Process class
+# =====================================================================
+
+def _assert_pid_not_reused(fun):
+    """Decorator which raises NoSuchProcess in case a process is no
+    longer running or its PID has been reused.
+    """
+    @_wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        if not self.is_running():
+            raise NoSuchProcess(self.pid, self._name)
+        return fun(self, *args, **kwargs)
+    return wrapper
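+# Note: the methods listed in the Process docstring below as having their
+# identity pre-emptively checked (suspend(), kill(), nice() as a setter,
+# ...) are the ones wrapped with @_assert_pid_not_reused, so a recycled
+# PID raises NoSuchProcess instead of acting on an unrelated process.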
+
+
+class Process(object):
+    """Represents an OS process with the given PID.
+    If PID is omitted current process PID (os.getpid()) is used.
+    Raise NoSuchProcess if PID does not exist.
+
+    Note that most of the methods of this class do not make sure
+    the PID of the process being queried has been reused over time.
+    That means you might end up retrieving an information referring
+    to another process in case the original one this instance
+    refers to is gone in the meantime.
+
+    The only exceptions for which process identity is pre-emptively
+    checked and guaranteed are:
+
+     - parent()
+     - children()
+     - nice() (set)
+     - ionice() (set)
+     - rlimit() (set)
+     - cpu_affinity (set)
+     - suspend()
+     - resume()
+     - send_signal()
+     - terminate()
+     - kill()
+
+    To prevent this problem for all other methods you can:
+      - use is_running() before querying the process
+      - if you're continuously iterating over a set of Process
+        instances use process_iter() which pre-emptively checks
+        process identity for every yielded instance
+    """
+
+    def __init__(self, pid=None):
+        self._init(pid)
+
+    def _init(self, pid, _ignore_nsp=False):
+        if pid is None:
+            pid = os.getpid()
+        else:
+            if not _PY3 and not isinstance(pid, (int, long)):
+                raise TypeError('pid must be an integer (got %r)' % pid)
+            if pid < 0:
+                raise ValueError('pid must be a positive integer (got %s)'
+                                 % pid)
+        self._pid = pid
+        self._name = None
+        self._exe = None
+        self._create_time = None
+        self._gone = False
+        self._hash = None
+        # used for caching on Windows only (on POSIX ppid may change)
+        self._ppid = None
+        # platform-specific modules define an _psplatform.Process
+        # implementation class
+        self._proc = _psplatform.Process(pid)
+        self._last_sys_cpu_times = None
+        self._last_proc_cpu_times = None
+        # cache creation time for later use in is_running() method
+        try:
+            self.create_time()
+        except AccessDenied:
+            # we should never get here as AFAIK we're able to get
+            # process creation time on all platforms even as a
+            # limited user
+            pass
+        except NoSuchProcess:
+            if not _ignore_nsp:
+                msg = 'no process found with pid %s' % pid
+                raise NoSuchProcess(pid, None, msg)
+            else:
+                self._gone = True
+        # This pair is supposed to identify a Process instance
+        # uniquely over time (the PID alone is not enough as
+        # it might refer to a process whose PID has been reused).
+        # This will be used later in __eq__() and is_running().
+        self._ident = (self.pid, self._create_time)
+
+    def __str__(self):
+        try:
+            pid = self.pid
+            name = repr(self.name())
+        except NoSuchProcess:
+            details = "(pid=%s (terminated))" % self.pid
+        except AccessDenied:
+            details = "(pid=%s)" % (self.pid)
+        else:
+            details = "(pid=%s, name=%s)" % (pid, name)
+        return "%s.%s%s" % (self.__class__.__module__,
+                            self.__class__.__name__, details)
+
+    def __repr__(self):
+        return "<%s at %s>" % (self.__str__(), id(self))
+
+    def __eq__(self, other):
+        # Test for equality with another Process object based
+        # on PID and creation time.
+        if not isinstance(other, Process):
+            return NotImplemented
+        return self._ident == other._ident
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        if self._hash is None:
+            self._hash = hash(self._ident)
+        return self._hash
+
+    # --- utility methods
+
+    def as_dict(self, attrs=[], ad_value=None):
+        """Utility method returning process information as a
+        hashable dictionary.
+
+        If 'attrs' is specified it must be a list of strings
+        reflecting available Process class' attribute names
+        (e.g. ['cpu_times', 'name']) else all public (read
+        only) attributes are assumed.
+
+        'ad_value' is the value which gets assigned in case an
+        AccessDenied exception is raised when retrieving that
+        particular process information.
+        """
+        excluded_names = set(
+            ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
+             'is_running', 'as_dict', 'parent', 'children', 'rlimit'])
+        retdict = dict()
+        ls = set(attrs or [x for x in dir(self) if not x.startswith('get')])
+        for name in ls:
+            if name.startswith('_'):
+                continue
+            if name.startswith('set_'):
+                continue
+            if name.startswith('get_'):
+                msg = "%s() is deprecated; use %s() instead" % (name, name[4:])
+                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+                name = name[4:]
+                if name in ls:
+                    continue
+            if name == 'getcwd':
+                msg = "getcwd() is deprecated; use cwd() instead"
+                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+                name = 'cwd'
+                if name in ls:
+                    continue
+
+            if name in excluded_names:
+                continue
+            try:
+                attr = getattr(self, name)
+                if callable(attr):
+                    ret = attr()
+                else:
+                    ret = attr
+            except AccessDenied:
+                ret = ad_value
+            except NotImplementedError:
+                # in case of not implemented functionality (may happen
+                # on old or exotic systems) we want to crash only if
+                # the user explicitly asked for that particular attr
+                if attrs:
+                    raise
+                continue
+            retdict[name] = ret
+        return retdict
+
+    def parent(self):
+        """Return the parent process as a Process object pre-emptively
+        checking whether PID has been reused.
+        If no parent is known return None.
+        """
+        ppid = self.ppid()
+        if ppid is not None:
+            try:
+                parent = Process(ppid)
+                if parent.create_time() <= self.create_time():
+                    return parent
+                # ...else ppid has been reused by another process
+            except NoSuchProcess:
+                pass
+
+    def is_running(self):
+        """Return whether this process is running.
+        It also checks if PID has been reused by another process in
+        which case return False.
+        """
+        if self._gone:
+            return False
+        try:
+            # Checking if PID is alive is not enough as the PID might
+            # have been reused by another process: we also want to
+            # check process identity.
+            # Process identity / uniqueness over time is guaranteed by
+            # (PID + creation time) and that is verified in __eq__.
+            return self == Process(self.pid)
+        except NoSuchProcess:
+            self._gone = True
+            return False
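+
+    # Editor's note: hedged illustration of the identity check described
+    # above (not in the upstream source); two Process objects compare equal
+    # only if both PID and creation time match:
+    #
+    #   >>> p = Process(4074)              # PID is hypothetical
+    #   >>> p == Process(4074)             # same PID, same create_time
+    #   True
+    #   >>> # after PID 4074 is reused by a new process:
+    #   >>> p.is_running()
+    #   False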
+
+    # --- actual API
+
+    @property
+    def pid(self):
+        """The process PID."""
+        return self._pid
+
+    def ppid(self):
+        """The process parent PID.
+        On Windows the return value is cached after first call.
+        """
+        # On POSIX we don't want to cache the ppid as it may unexpectedly
+        # change to 1 (init) in case this process turns into a zombie:
+        # https://code.google.com/p/psutil/issues/detail?id=321
+        # http://stackoverflow.com/questions/356722/
+
+        # XXX should we check creation time here rather than in
+        # Process.parent()?
+        if _POSIX:
+            return self._proc.ppid()
+        else:
+            if self._ppid is None:
+                self._ppid = self._proc.ppid()
+            return self._ppid
+
+    def name(self):
+        """The process name. The return value is cached after first call."""
+        if self._name is None:
+            name = self._proc.name()
+            if _POSIX and len(name) >= 15:
+                # On UNIX the name gets truncated to the first 15 characters.
+                # If it matches the first part of the cmdline we return that
+                # one instead because it's usually more descriptive.
+                # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
+                try:
+                    cmdline = self.cmdline()
+                except AccessDenied:
+                    pass
+                else:
+                    if cmdline:
+                        extended_name = os.path.basename(cmdline[0])
+                        if extended_name.startswith(name):
+                            name = extended_name
+            self._proc._name = name
+            self._name = name
+        return self._name
+
+    def exe(self):
+        """The process executable as an absolute path.
+        May also be an empty string.
+        The return value is cached after first call.
+        """
+        def guess_it(fallback):
+            # try to guess exe from cmdline[0] in absence of a native
+            # exe representation
+            cmdline = self.cmdline()
+            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
+                exe = cmdline[0]  # the possible exe
+                # Attempt to guess only in case of an absolute path.
+                # It is not safe otherwise as the process might have
+                # changed cwd.
+                if (os.path.isabs(exe)
+                        and os.path.isfile(exe)
+                        and os.access(exe, os.X_OK)):
+                    return exe
+            if isinstance(fallback, AccessDenied):
+                raise fallback
+            return fallback
+
+        if self._exe is None:
+            try:
+                exe = self._proc.exe()
+            except AccessDenied:
+                err = sys.exc_info()[1]
+                return guess_it(fallback=err)
+            else:
+                if not exe:
+                    # underlying implementation can legitimately return an
+                    # empty string; if that's the case we don't want to
+                    # raise AD while guessing from the cmdline
+                    try:
+                        exe = guess_it(fallback=exe)
+                    except AccessDenied:
+                        pass
+                self._exe = exe
+        return self._exe
+
+    def cmdline(self):
+        """The command line this process has been called with."""
+        return self._proc.cmdline()
+
+    def status(self):
+        """The process current status as a STATUS_* constant."""
+        return self._proc.status()
+
+    def username(self):
+        """The name of the user that owns the process.
+        On UNIX this is calculated by using *real* process uid.
+        """
+        if _POSIX:
+            if pwd is None:
+                # might happen if python was installed from sources
+                raise ImportError(
+                    "requires pwd module shipped with standard python")
+            return pwd.getpwuid(self.uids().real).pw_name
+        else:
+            return self._proc.username()
+
+    def create_time(self):
+        """The process creation time as a floating point number
+        expressed in seconds since the epoch, in UTC.
+        The return value is cached after first call.
+        """
+        if self._create_time is None:
+            self._create_time = self._proc.create_time()
+        return self._create_time
+
+    def cwd(self):
+        """Process current working directory as an absolute path."""
+        return self._proc.cwd()
+
+    def nice(self, value=None):
+        """Get or set process niceness (priority)."""
+        if value is None:
+            return self._proc.nice_get()
+        else:
+            if not self.is_running():
+                raise NoSuchProcess(self.pid, self._name)
+            self._proc.nice_set(value)
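+
+    # Editor's note: illustrative get/set sketch for nice() (not part of
+    # upstream psutil); the niceness values shown assume a POSIX system:
+    #
+    #   >>> p = psutil.Process()
+    #   >>> p.nice()        # get current niceness
+    #   0
+    #   >>> p.nice(10)      # lower this process' priority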
+
+    if _POSIX:
+
+        def uids(self):
+            """Return process UIDs as a (real, effective, saved)
+            namedtuple.
+            """
+            return self._proc.uids()
+
+        def gids(self):
+            """Return process GIDs as a (real, effective, saved)
+            namedtuple.
+            """
+            return self._proc.gids()
+
+        def terminal(self):
+            """The terminal associated with this process, if any,
+            else None.
+            """
+            return self._proc.terminal()
+
+        def num_fds(self):
+            """Return the number of file descriptors opened by this
+            process (POSIX only).
+            """
+            return self._proc.num_fds()
+
+    # Linux, BSD and Windows only
+    if hasattr(_psplatform.Process, "io_counters"):
+
+        def io_counters(self):
+            """Return process I/O statistics as a
+            (read_count, write_count, read_bytes, write_bytes)
+            namedtuple.
+            Those are the number of read/write calls performed and the
+            amount of bytes read and written by the process.
+            """
+            return self._proc.io_counters()
+
+    # Linux and Windows >= Vista only
+    if hasattr(_psplatform.Process, "ionice_get"):
+
+        def ionice(self, ioclass=None, value=None):
+            """Get or set process I/O niceness (priority).
+
+            On Linux 'ioclass' is one of the IOPRIO_CLASS_* constants.
+            'value' is a number which goes from 0 to 7. The higher the
+            value, the lower the I/O priority of the process.
+
+            On Windows only 'ioclass' is used and it can be set to 2
+            (normal), 1 (low) or 0 (very low).
+
+            Available on Linux and Windows > Vista only.
+            """
+            if ioclass is None:
+                if value is not None:
+                    raise ValueError("'ioclass' must be specified")
+                return self._proc.ionice_get()
+            else:
+                return self._proc.ionice_set(ioclass, value)
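+
+        # Editor's note: a hedged example for Linux (not upstream code);
+        # IOPRIO_CLASS_IDLE is one of the module-level IOPRIO_CLASS_*
+        # constants mentioned in the docstring above:
+        #
+        #   >>> p = psutil.Process()
+        #   >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # do I/O only when idle
+        #   >>> p.ionice()                          # get current class/value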
+
+    # Linux only
+    if hasattr(_psplatform.Process, "rlimit"):
+
+        def rlimit(self, resource, limits=None):
+            """Get or set process resource limits as a (soft, hard)
+            tuple.
+
+            'resource' is one of the RLIMIT_* constants.
+            'limits' is supposed to be a (soft, hard) tuple.
+
+            See "man prlimit" for further info.
+            Available on Linux only.
+            """
+            if limits is None:
+                return self._proc.rlimit(resource)
+            else:
+                return self._proc.rlimit(resource, limits)
+
+    # Windows and Linux only
+    if hasattr(_psplatform.Process, "cpu_affinity_get"):
+
+        def cpu_affinity(self, cpus=None):
+            """Get or set process CPU affinity.
+            If specified 'cpus' must be a list of CPUs for which you
+            want to set the affinity (e.g. [0, 1]).
+            """
+            if cpus is None:
+                return self._proc.cpu_affinity_get()
+            else:
+                self._proc.cpu_affinity_set(cpus)
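+
+        # Editor's note: illustrative sketch (not upstream code); pins the
+        # process to the first two CPUs, then reads the affinity back:
+        #
+        #   >>> p = psutil.Process()
+        #   >>> p.cpu_affinity([0, 1])
+        #   >>> p.cpu_affinity()
+        #   [0, 1]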
+
+    if _WINDOWS:
+
+        def num_handles(self):
+            """Return the number of handles opened by this process
+            (Windows only).
+            """
+            return self._proc.num_handles()
+
+    def num_ctx_switches(self):
+        """Return the number of voluntary and involuntary context
+        switches performed by this process.
+        """
+        return self._proc.num_ctx_switches()
+
+    def num_threads(self):
+        """Return the number of threads used by this process."""
+        return self._proc.num_threads()
+
+    def threads(self):
+        """Return threads opened by process as a list of
+        (id, user_time, system_time) namedtuples representing
+        thread id and thread CPU times (user/system).
+        """
+        return self._proc.threads()
+
+    @_assert_pid_not_reused
+    def children(self, recursive=False):
+        """Return the children of this process as a list of Process
+        instances, pre-emptively checking whether PID has been reused.
+        If recursive is True return all of the parent's descendants.
+
+        Example (A == this process):
+
+         A ─┐
+            │
+            ├─ B (child) ─┐
+            │             └─ X (grandchild) ─┐
+            │                                └─ Y (great grandchild)
+            ├─ C (child)
+            └─ D (child)
+
+        >>> import psutil
+        >>> p = psutil.Process()
+        >>> p.children()
+        B, C, D
+        >>> p.children(recursive=True)
+        B, X, Y, C, D
+
+        Note that in the example above if process X disappears
+        process Y won't be listed as the reference to process A
+        is lost.
+        """
+        if hasattr(_psplatform, 'ppid_map'):
+            # Windows only: obtain a {pid:ppid, ...} dict for all running
+            # processes in one shot (faster).
+            ppid_map = _psplatform.ppid_map()
+        else:
+            ppid_map = None
+
+        ret = []
+        if not recursive:
+            if ppid_map is None:
+                # 'slow' version, common to all platforms except Windows
+                for p in process_iter():
+                    try:
+                        if p.ppid() == self.pid:
+                            # if child happens to be older than its parent
+                            # (self) it means child's PID has been reused
+                            if self.create_time() <= p.create_time():
+                                ret.append(p)
+                    except NoSuchProcess:
+                        pass
+            else:
+                # Windows only (faster)
+                for pid, ppid in ppid_map.items():
+                    if ppid == self.pid:
+                        try:
+                            child = Process(pid)
+                            # if child happens to be older than its parent
+                            # (self) it means child's PID has been reused
+                            if self.create_time() <= child.create_time():
+                                ret.append(child)
+                        except NoSuchProcess:
+                            pass
+        else:
+            # construct a dict where 'values' are all the processes
+            # having 'key' as their parent
+            table = defaultdict(list)
+            if ppid_map is None:
+                for p in process_iter():
+                    try:
+                        table[p.ppid()].append(p)
+                    except NoSuchProcess:
+                        pass
+            else:
+                for pid, ppid in ppid_map.items():
+                    try:
+                        p = Process(pid)
+                        table[ppid].append(p)
+                    except NoSuchProcess:
+                        pass
+            # At this point we have a mapping table where table[self.pid]
+            # are the current process' children.
+            # Below, we look for all descendants recursively, similarly
+            # to a recursive function call.
+            checkpids = [self.pid]
+            for pid in checkpids:
+                for child in table[pid]:
+                    try:
+                        # if child happens to be older than its parent
+                        # (self) it means child's PID has been reused
+                        intime = self.create_time() <= child.create_time()
+                    except NoSuchProcess:
+                        pass
+                    else:
+                        if intime:
+                            ret.append(child)
+                            if child.pid not in checkpids:
+                                checkpids.append(child.pid)
+        return ret
+
+    def cpu_percent(self, interval=None):
+        """Return a float representing the current process CPU
+        utilization as a percentage.
+
+        When interval is 0.0 or None (default) compares process times
+        to system CPU times elapsed since last call, returning
+        immediately (non-blocking). That means that the first time
+        this is called it will return a meaningless 0.0 value which
+        you should ignore.
+
+        When interval is > 0.0 compares process times to system CPU
+        times elapsed before and after the interval (blocking).
+
+        In this case it is recommended, for accuracy, that this
+        function be called with at least 0.1 seconds between calls.
+
+        Examples:
+
+          >>> import psutil
+          >>> p = psutil.Process(os.getpid())
+          >>> # blocking
+          >>> p.cpu_percent(interval=1)
+          2.0
+          >>> # non-blocking (percentage since last call)
+          >>> p.cpu_percent(interval=None)
+          2.9
+          >>>
+        """
+        blocking = interval is not None and interval > 0.0
+        num_cpus = cpu_count()
+        if _POSIX:
+            timer = lambda: _timer() * num_cpus
+        else:
+            timer = lambda: sum(cpu_times())
+        if blocking:
+            st1 = timer()
+            pt1 = self._proc.cpu_times()
+            time.sleep(interval)
+            st2 = timer()
+            pt2 = self._proc.cpu_times()
+        else:
+            st1 = self._last_sys_cpu_times
+            pt1 = self._last_proc_cpu_times
+            st2 = timer()
+            pt2 = self._proc.cpu_times()
+            if st1 is None or pt1 is None:
+                self._last_sys_cpu_times = st2
+                self._last_proc_cpu_times = pt2
+                return 0.0
+
+        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
+        delta_time = st2 - st1
+        # reset values for next call in case of interval == None
+        self._last_sys_cpu_times = st2
+        self._last_proc_cpu_times = pt2
+
+        try:
+            # The utilization split between all CPUs.
+            # Note: a percentage > 100 is legitimate as it can result
+            # from a process with multiple threads running on different
+            # CPU cores, see:
+            # http://stackoverflow.com/questions/1032357
+            # https://code.google.com/p/psutil/issues/detail?id=474
+            overall_percent = ((delta_proc / delta_time) * 100) * num_cpus
+        except ZeroDivisionError:
+            # interval was too low
+            return 0.0
+        else:
+            return round(overall_percent, 1)
+
+    def cpu_times(self):
+        """Return a (user, system) namedtuple representing  the
+        accumulated process time, in seconds.
+        This is the same as os.times() but per-process.
+        """
+        return self._proc.cpu_times()
+
+    def memory_info(self):
+        """Return a tuple representing RSS (Resident Set Size) and VMS
+        (Virtual Memory Size) in bytes.
+
+        On UNIX RSS and VMS are the same values shown by 'ps'.
+
+        On Windows RSS and VMS refer to "Mem Usage" and "VM Size"
+        columns of taskmgr.exe.
+        """
+        return self._proc.memory_info()
+
+    def memory_info_ex(self):
+        """Return a namedtuple with variable fields depending on the
+        platform representing extended memory information about
+        this process. All numbers are expressed in bytes.
+        """
+        return self._proc.memory_info_ex()
+
+    def memory_percent(self):
+        """Compare physical system memory to process resident memory
+        (RSS) and calculate process memory utilization as a percentage.
+        """
+        rss = self._proc.memory_info()[0]
+        # use cached value if available
+        total_phymem = _TOTAL_PHYMEM or virtual_memory().total
+        try:
+            return (rss / float(total_phymem)) * 100
+        except ZeroDivisionError:
+            return 0.0
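+
+    # Editor's note: the formula above, written out with hypothetical
+    # numbers (not part of the upstream source): a 100 MB RSS on a host
+    # with 8 GB of physical memory yields
+    # (104857600 / 8589934592.0) * 100 == ~1.2 percent.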
+
+    def memory_maps(self, grouped=True):
+        """Return process' mapped memory regions as a list of nameduples
+        whose fields are variable depending on the platform.
+
+        If 'grouped' is True the mapped regions with the same 'path'
+        are grouped together and the different memory fields are summed.
+
+        If 'grouped' is False every mapped region is shown as a single
+        entity and the namedtuple will also include the mapped region's
+        address space ('addr') and permission set ('perms').
+        """
+        it = self._proc.memory_maps()
+        if grouped:
+            d = {}
+            for tupl in it:
+                path = tupl[2]
+                nums = tupl[3:]
+                try:
+                    d[path] = map(lambda x, y: x + y, d[path], nums)
+                except KeyError:
+                    d[path] = nums
+            nt = _psplatform.pmmap_grouped
+            return [nt(path, *d[path]) for path in d]
+        else:
+            nt = _psplatform.pmmap_ext
+            return [nt(*x) for x in it]
+
+    def open_files(self):
+        """Return files opened by process as a list of
+        (path, fd) namedtuples including the absolute file name
+        and file descriptor number.
+        """
+        return self._proc.open_files()
+
+    def connections(self, kind='inet'):
+        """Return connections opened by process as a list of
+        (fd, family, type, laddr, raddr, status) namedtuples.
+        The 'kind' parameter filters for connections that match the
+        following criteria:
+
+        Kind Value      Connections using
+        inet            IPv4 and IPv6
+        inet4           IPv4
+        inet6           IPv6
+        tcp             TCP
+        tcp4            TCP over IPv4
+        tcp6            TCP over IPv6
+        udp             UDP
+        udp4            UDP over IPv4
+        udp6            UDP over IPv6
+        unix            UNIX socket (both UDP and TCP protocols)
+        all             the sum of all the possible families and protocols
+        """
+        return self._proc.connections(kind)
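+
+    # Editor's note: hedged usage sketch (not upstream code); filters the
+    # table above down to TCP-only connections. Output is illustrative and
+    # shows only the documented (fd, family, type, laddr, raddr, status)
+    # fields:
+    #
+    #   >>> p = psutil.Process()
+    #   >>> p.connections(kind='tcp')
+    #   [(10, 2, 1, ('127.0.0.1', 8440), (), 'LISTEN')]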
+
+    if _POSIX:
+        def _send_signal(self, sig):
+            try:
+                os.kill(self.pid, sig)
+            except OSError:
+                err = sys.exc_info()[1]
+                if err.errno == errno.ESRCH:
+                    self._gone = True
+                    raise NoSuchProcess(self.pid, self._name)
+                if err.errno == errno.EPERM:
+                    raise AccessDenied(self.pid, self._name)
+                raise
+
+    @_assert_pid_not_reused
+    def send_signal(self, sig):
+        """Send a signal to process pre-emptively checking whether
+        PID has been reused (see signal module constants).
+        On Windows only SIGTERM is valid and is treated as an alias
+        for kill().
+        """
+        if _POSIX:
+            self._send_signal(sig)
+        else:
+            if sig == signal.SIGTERM:
+                self._proc.kill()
+            else:
+                raise ValueError("only SIGTERM is supported on Windows")
+
+    @_assert_pid_not_reused
+    def suspend(self):
+        """Suspend process execution with SIGSTOP pre-emptively checking
+        whether PID has been reused.
+        On Windows this has the effect of suspending all process threads.
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGSTOP)
+        else:
+            self._proc.suspend()
+
+    @_assert_pid_not_reused
+    def resume(self):
+        """Resume process execution with SIGCONT pre-emptively checking
+        whether PID has been reused.
+        On Windows this has the effect of resuming all process threads.
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGCONT)
+        else:
+            self._proc.resume()
+
+    @_assert_pid_not_reused
+    def terminate(self):
+        """Terminate the process with SIGTERM pre-emptively checking
+        whether PID has been reused.
+        On Windows this is an alias for kill().
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGTERM)
+        else:
+            self._proc.kill()
+
+    @_assert_pid_not_reused
+    def kill(self):
+        """Kill the current process with SIGKILL pre-emptively checking
+        whether PID has been reused.
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGKILL)
+        else:
+            self._proc.kill()
+
+    def wait(self, timeout=None):
+        """Wait for process to terminate and, if process is a children
+        of os.getpid(), also return its exit code, else None.
+
+        If the process is already terminated immediately return None
+        instead of raising NoSuchProcess.
+
+        If timeout (in seconds) is specified and process is still alive
+        raise TimeoutExpired.
+
+        To wait for multiple Process(es) use psutil.wait_procs().
+        """
+        if timeout is not None and not timeout >= 0:
+            raise ValueError("timeout must be a positive integer")
+        return self._proc.wait(timeout)
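+
+    # Editor's note: illustrative sketch (not upstream code) of the
+    # timeout behavior documented above:
+    #
+    #   >>> p = psutil.Process(child_pid)     # child_pid is hypothetical
+    #   >>> try:
+    #   ...     p.wait(timeout=3)
+    #   ... except psutil.TimeoutExpired:
+    #   ...     print("still running after 3 seconds")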
+
+    # --- deprecated APIs
+
+    _locals = set(locals())
+
+    @_deprecated_method(replacement='children')
+    def get_children(self):
+        pass
+
+    @_deprecated_method(replacement='connections')
+    def get_connections(self):
+        pass
+
+    if "cpu_affinity" in _locals:
+        @_deprecated_method(replacement='cpu_affinity')
+        def get_cpu_affinity(self):
+            pass
+
+        @_deprecated_method(replacement='cpu_affinity')
+        def set_cpu_affinity(self, cpus):
+            pass
+
+    @_deprecated_method(replacement='cpu_percent')
+    def get_cpu_percent(self):
+        pass
+
+    @_deprecated_method(replacement='cpu_times')
+    def get_cpu_times(self):
+        pass
+
+    @_deprecated_method(replacement='cwd')
+    def getcwd(self):
+        pass
+
+    @_deprecated_method(replacement='memory_info_ex')
+    def get_ext_memory_info(self):
+        pass
+
+    if "io_counters" in _locals:
+        @_deprecated_method(replacement='io_counters')
+        def get_io_counters(self):
+            pass
+
+    if "ionice" in _locals:
+        @_deprecated_method(replacement='ionice')
+        def get_ionice(self):
+            pass
+
+        @_deprecated_method(replacement='ionice')
+        def set_ionice(self, ioclass, value=None):
+            pass
+
+    @_deprecated_method(replacement='memory_info')
+    def get_memory_info(self):
+        pass
+
+    @_deprecated_method(replacement='memory_maps')
+    def get_memory_maps(self):
+        pass
+
+    @_deprecated_method(replacement='memory_percent')
+    def get_memory_percent(self):
+        pass
+
+    @_deprecated_method(replacement='nice')
+    def get_nice(self):
+        pass
+
+    @_deprecated_method(replacement='num_ctx_switches')
+    def get_num_ctx_switches(self):
+        pass
+
+    if 'num_fds' in _locals:
+        @_deprecated_method(replacement='num_fds')
+        def get_num_fds(self):
+            pass
+
+    if 'num_handles' in _locals:
+        @_deprecated_method(replacement='num_handles')
+        def get_num_handles(self):
+            pass
+
+    @_deprecated_method(replacement='num_threads')
+    def get_num_threads(self):
+        pass
+
+    @_deprecated_method(replacement='open_files')
+    def get_open_files(self):
+        pass
+
+    if "rlimit" in _locals:
+        @_deprecated_method(replacement='rlimit')
+        def get_rlimit(self):
+            pass
+
+        @_deprecated_method(replacement='rlimit')
+        def set_rlimit(self, resource, limits):
+            pass
+
+    @_deprecated_method(replacement='threads')
+    def get_threads(self):
+        pass
+
+    @_deprecated_method(replacement='nice')
+    def set_nice(self, value):
+        pass
+
+    del _locals
+
+
+# =====================================================================
+# --- Popen class
+# =====================================================================
+
+class Popen(Process):
+    """A more convenient interface to stdlib subprocess module.
+    It starts a subprocess and deals with it exactly as when using
+    subprocess.Popen class but in addition also provides all the
+    properties and methods of psutil.Process class as a unified
+    interface:
+
+      >>> import psutil
+      >>> from subprocess import PIPE
+      >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
+      >>> p.name()
+      'python'
+      >>> p.uids()
+      user(real=1000, effective=1000, saved=1000)
+      >>> p.username()
+      'giampaolo'
+      >>> p.communicate()
+      ('hi\n', None)
+      >>> p.terminate()
+      >>> p.wait(timeout=2)
+      0
+      >>>
+
+    For method names common to both classes such as kill(), terminate()
+    and wait(), psutil.Process implementation takes precedence.
+
+    Unlike subprocess.Popen this class pre-emptively checks whether PID
+    has been reused on send_signal(), terminate() and kill() so that
+    you don't accidentally terminate another process, fixing
+    http://bugs.python.org/issue6973.
+
+    For a complete documentation refer to:
+    http://docs.python.org/library/subprocess.html
+    """
+
+    def __init__(self, *args, **kwargs):
+        # Explicitly avoid raising NoSuchProcess in case the process
+        # spawned by subprocess.Popen terminates too quickly, see:
+        # https://code.google.com/p/psutil/issues/detail?id=193
+        self.__subproc = subprocess.Popen(*args, **kwargs)
+        self._init(self.__subproc.pid, _ignore_nsp=True)
+
+    def __dir__(self):
+        return sorted(set(dir(Popen) + dir(subprocess.Popen)))
+
+    def __getattribute__(self, name):
+        try:
+            return object.__getattribute__(self, name)
+        except AttributeError:
+            try:
+                return object.__getattribute__(self.__subproc, name)
+            except AttributeError:
+                raise AttributeError("%s instance has no attribute '%s'"
+                                     % (self.__class__.__name__, name))
+
+    def wait(self, timeout=None):
+        if self.__subproc.returncode is not None:
+            return self.__subproc.returncode
+        ret = super(Popen, self).wait(timeout)
+        self.__subproc.returncode = ret
+        return ret
+
+
+# =====================================================================
+# --- system processes related functions
+# =====================================================================
+
+def pids():
+    """Return a list of current running PIDs."""
+    return _psplatform.pids()
+
+
+def pid_exists(pid):
+    """Return True if given PID exists in the current process list.
+    This is faster than doing "pid in psutil.pids()" and
+    should be preferred.
+    """
+    if pid < 0:
+        return False
+    elif pid == 0 and _POSIX:
+        # On POSIX we use os.kill() to determine PID existence.
+        # According to "man 2 kill" PID 0 has a special meaning
+        # though: it refers to <<every process in the process
+        # group of the calling process>> and that is not what we want
+        # to do here.
+        return pid in pids()
+    else:
+        return _psplatform.pid_exists(pid)
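+
+# Editor's note: a minimal usage sketch for pid_exists() (not part of the
+# upstream source); the PID is hypothetical:
+#
+#   >>> psutil.pid_exists(4074)
+#   True
+#   >>> psutil.pid_exists(-1)   # negative PIDs are always rejected
+#   False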
+
+
+_pmap = {}
+
+def process_iter():
+    """Return a generator yielding a Process instance for all
+    running processes.
+
+    Every new Process instance is only created once and then cached
+    into an internal table which is updated every time this is used.
+
+    Cached Process instances are checked for identity so that you're
+    safe in case a PID has been reused by another process, in which
+    case the cached instance is updated.
+
+    The sorting order in which processes are yielded is based on
+    their PIDs.
+    """
+    def add(pid):
+        proc = Process(pid)
+        _pmap[proc.pid] = proc
+        return proc
+
+    def remove(pid):
+        _pmap.pop(pid, None)
+
+    a = set(pids())
+    b = set(_pmap.keys())
+    new_pids = a - b
+    gone_pids = b - a
+
+    for pid in gone_pids:
+        remove(pid)
+    for pid, proc in sorted(list(_pmap.items()) +
+                            list(dict.fromkeys(new_pids).items())):
+        try:
+            if proc is None:  # new process
+                yield add(pid)
+            else:
+                # use is_running() to check whether PID has been reused by
+                # another process in which case yield a new Process instance
+                if proc.is_running():
+                    yield proc
+                else:
+                    yield add(pid)
+        except NoSuchProcess:
+            remove(pid)
+        except AccessDenied:
+            # Process creation time can't be determined hence there's
+            # no way to tell whether the pid of the cached process
+            # has been reused. Just return the cached version.
+            yield proc
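+
+# Editor's note: hedged iteration sketch (not upstream code) combining
+# process_iter() with as_dict() so AccessDenied is handled per-attribute:
+#
+#   >>> for proc in psutil.process_iter():
+#   ...     info = proc.as_dict(attrs=['pid', 'name'], ad_value=None)
+#   ...     print(info)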
+
+
+def wait_procs(procs, timeout=None, callback=None):
+    """Convenience function which waits for a list of processes to
+    terminate.
+
+    Return a (gone, alive) tuple indicating which processes
+    are gone and which ones are still alive.
+
+    The gone ones will have a new 'returncode' attribute indicating
+    process exit status (may be None).
+
+    'callback' is a function which gets called every time a process
+    terminates (a Process instance is passed as callback argument).
+
+    Function will return as soon as all processes terminate or when
+    timeout occurs.
+
+    Typical use case is:
+
+     - send SIGTERM to a list of processes
+     - give them some time to terminate
+     - send SIGKILL to those ones which are still alive
+
+    Example:
+
+    >>> def on_terminate(proc):
+    ...     print("process {} terminated".format(proc))
+    ...
+    >>> for p in procs:
+    ...    p.terminate()
+    ...
+    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
+    >>> for p in alive:
+    ...     p.kill()
+    """
+    def check_gone(proc, timeout):
+        try:
+            returncode = proc.wait(timeout=timeout)
+        except TimeoutExpired:
+            pass
+        else:
+            if returncode is not None or not proc.is_running():
+                proc.returncode = returncode
+                gone.add(proc)
+                if callback is not None:
+                    callback(proc)
+
+    if timeout is not None and not timeout >= 0:
+        msg = "timeout must be a positive integer, got %s" % timeout
+        raise ValueError(msg)
+    gone = set()
+    alive = set(procs)
+    if callback is not None and not callable(callback):
+        raise TypeError("callback %r is not a callable" % callable)
+    if timeout is not None:
+        deadline = _timer() + timeout
+
+    while alive:
+        if timeout is not None and timeout <= 0:
+            break
+        for proc in alive:
+            # Make sure that every complete iteration (all processes)
+            # will last max 1 sec.
+            # We do this because we don't want to wait too long on a
+            # single process: in case it terminates too late other
+            # processes may disappear in the meantime and their PID
+            # reused.
+            max_timeout = 1.0 / len(alive)
+            if timeout is not None:
+                timeout = min((deadline - _timer()), max_timeout)
+                if timeout <= 0:
+                    break
+                check_gone(proc, timeout)
+            else:
+                check_gone(proc, max_timeout)
+        alive = alive - gone
+
+    if alive:
+        # Last attempt over processes survived so far.
+        # timeout == 0 won't make this function wait any further.
+        for proc in alive:
+            check_gone(proc, 0)
+        alive = alive - gone
+
+    return (list(gone), list(alive))
+
+
+# =====================================================================
+# --- CPU related functions
+# =====================================================================
+
+@memoize
+def cpu_count(logical=True):
+    """Return the number of logical CPUs in the system (same as
+    os.cpu_count() in Python 3.4).
+
+    If logical is False return the number of physical cores only
+    (hyper thread CPUs are excluded).
+
+    Return None if undetermined.
+
+    The return value is cached after first call.
+    If desired, the cache can be cleared like this:
+
+    >>> psutil.cpu_count.cache_clear()
+    """
+    if logical:
+        return _psplatform.cpu_count_logical()
+    else:
+        return _psplatform.cpu_count_physical()
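+
+# Editor's note: illustrative sketch (not upstream code); on a 2-core
+# machine with hyper-threading one might see:
+#
+#   >>> psutil.cpu_count()                # logical
+#   4
+#   >>> psutil.cpu_count(logical=False)   # physical cores only
+#   2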
+
+
+def cpu_times(percpu=False):
+    """Return system-wide CPU times as a namedtuple.
+    Every CPU time represents the seconds the CPU has spent in the given mode.
+    The namedtuple's fields availability varies depending on the platform:
+     - user
+     - system
+     - idle
+     - nice (UNIX)
+     - iowait (Linux)
+     - irq (Linux, FreeBSD)
+     - softirq (Linux)
+     - steal (Linux >= 2.6.11)
+     - guest (Linux >= 2.6.24)
+     - guest_nice (Linux >= 3.2.0)
+
+    When percpu is True return a list of namedtuples for each CPU.
+    First element of the list refers to first CPU, second element
+    to second CPU and so on.
+    The order of the list is consistent across calls.
+    """
+    if not percpu:
+        return _psplatform.cpu_times()
+    else:
+        return _psplatform.per_cpu_times()
+
+
+_last_cpu_times = cpu_times()
+_last_per_cpu_times = cpu_times(percpu=True)
+
+def cpu_percent(interval=None, percpu=False):
+    """Return a float representing the current system-wide CPU
+    utilization as a percentage.
+
+    When interval is > 0.0 compares system CPU times elapsed before
+    and after the interval (blocking).
+
+    When interval is 0.0 or None compares system CPU times elapsed
+    since last call or module import, returning immediately (non
+    blocking). That means the first time this is called it will
+    return a meaningless 0.0 value which you should ignore.
+    In this case it is recommended, for accuracy, that this function
+    be called with at least 0.1 seconds between calls.
+
+    When percpu is True returns a list of floats representing the
+    utilization as a percentage for each CPU.
+    First element of the list refers to first CPU, second element
+    to second CPU and so on.
+    The order of the list is consistent across calls.
+
+    Examples:
+
+      >>> # blocking, system-wide
+      >>> psutil.cpu_percent(interval=1)
+      2.0
+      >>>
+      >>> # blocking, per-cpu
+      >>> psutil.cpu_percent(interval=1, percpu=True)
+      [2.0, 1.0]
+      >>>
+      >>> # non-blocking (percentage since last call)
+      >>> psutil.cpu_percent(interval=None)
+      2.9
+      >>>
+    """
+    global _last_cpu_times
+    global _last_per_cpu_times
+    blocking = interval is not None and interval > 0.0
+
+    def calculate(t1, t2):
+        t1_all = sum(t1)
+        t1_busy = t1_all - t1.idle
+
+        t2_all = sum(t2)
+        t2_busy = t2_all - t2.idle
+
+        # this usually indicates a float precision issue
+        if t2_busy <= t1_busy:
+            return 0.0
+
+        busy_delta = t2_busy - t1_busy
+        all_delta = t2_all - t1_all
+        busy_perc = (busy_delta / all_delta) * 100
+        return round(busy_perc, 1)
+
+    # system-wide usage
+    if not percpu:
+        if blocking:
+            t1 = cpu_times()
+            time.sleep(interval)
+        else:
+            t1 = _last_cpu_times
+        _last_cpu_times = cpu_times()
+        return calculate(t1, _last_cpu_times)
+    # per-cpu usage
+    else:
+        ret = []
+        if blocking:
+            tot1 = cpu_times(percpu=True)
+            time.sleep(interval)
+        else:
+            tot1 = _last_per_cpu_times
+        _last_per_cpu_times = cpu_times(percpu=True)
+        for t1, t2 in zip(tot1, _last_per_cpu_times):
+            ret.append(calculate(t1, t2))
+        return ret
+
+
+# Use separate global vars for cpu_times_percent() so that it's
+# independent from cpu_percent() and they can both be used within
+# the same program.
+_last_cpu_times_2 = _last_cpu_times
+_last_per_cpu_times_2 = _last_per_cpu_times
+
+def cpu_times_percent(interval=None, percpu=False):
+    """Same as cpu_percent() but provides utilization percentages
+    for each specific CPU time as is returned by cpu_times().
+    For instance, on Linux we'll get:
+
+      >>> cpu_times_percent()
+      cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
+                 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+      >>>
+
+    interval and percpu arguments have the same meaning as in
+    cpu_percent().
+    """
+    global _last_cpu_times_2
+    global _last_per_cpu_times_2
+    blocking = interval is not None and interval > 0.0
+
+    def calculate(t1, t2):
+        nums = []
+        all_delta = sum(t2) - sum(t1)
+        for field in t1._fields:
+            field_delta = getattr(t2, field) - getattr(t1, field)
+            try:
+                field_perc = (100 * field_delta) / all_delta
+            except ZeroDivisionError:
+                field_perc = 0.0
+            field_perc = round(field_perc, 1)
+            if _WINDOWS:
+                # XXX
+                # Work around:
+                # https://code.google.com/p/psutil/issues/detail?id=392
+                # CPU times are always supposed to increase over time
+                # or at least remain the same and that's because time
+                # cannot go backwards.
+                # Surprisingly sometimes this might not be the case on
+                # Windows where 'system' CPU time can be smaller
+                # compared to the previous call, resulting in corrupted
+                # percentages (< 0 or > 100).
+                # I really don't know what to do about that except
+                # forcing the value to 0 or 100.
+                if field_perc > 100.0:
+                    field_perc = 100.0
+                elif field_perc < 0.0:
+                    field_perc = 0.0
+            nums.append(field_perc)
+        return _psplatform.scputimes(*nums)
+
+    # system-wide usage
+    if not percpu:
+        if blocking:
+            t1 = cpu_times()
+            time.sleep(interval)
+        else:
+            t1 = _last_cpu_times_2
+        _last_cpu_times_2 = cpu_times()
+        return calculate(t1, _last_cpu_times_2)
+    # per-cpu usage
+    else:
+        ret = []
+        if blocking:
+            tot1 = cpu_times(percpu=True)
+            time.sleep(interval)
+        else:
+            tot1 = _last_per_cpu_times_2
+        _last_per_cpu_times_2 = cpu_times(percpu=True)
+        for t1, t2 in zip(tot1, _last_per_cpu_times_2):
+            ret.append(calculate(t1, t2))
+        return ret
+
+
+# =====================================================================
+# --- system memory related functions
+# =====================================================================
+
+def virtual_memory():
+    """Return statistics about system memory usage as a namedtuple
+    including the following fields, expressed in bytes:
+
+     - total:
+       total physical memory available.
+
+     - available:
+       the actual amount of available memory that can be given
+       instantly to processes that request more memory in bytes; this
+       is calculated by summing different memory values depending on
+       the platform (e.g. free + buffers + cached on Linux) and it is
+       supposed to be used to monitor actual memory usage in a
+       cross-platform fashion.
+
+     - percent:
+       the percentage usage calculated as (total - available) / total * 100
+
+     - used:
+       memory used, calculated differently depending on the platform and
+       designed for informational purposes only:
+        OSX: active + inactive + wired
+        BSD: active + wired + cached
+        LINUX: total - free
+
+     - free:
+       memory not being used at all (zeroed) that is readily available;
+       note that this doesn't reflect the actual memory available
+       (use 'available' instead)
+
+    Platform-specific fields:
+
+     - active (UNIX):
+       memory currently in use or very recently used, and so it is in RAM.
+
+     - inactive (UNIX):
+       memory that is marked as not used.
+
+     - buffers (BSD, Linux):
+       cache for things like file system metadata.
+
+     - cached (BSD, OSX):
+       cache for various things.
+
+     - wired (OSX, BSD):
+       memory that is marked to always stay in RAM. It is never moved to disk.
+
+     - shared (BSD):
+       memory that may be simultaneously accessed by multiple processes.
+
+    The sum of 'used' and 'available' does not necessarily equal total.
+    On Windows 'available' and 'free' are the same.
+    """
+    global _TOTAL_PHYMEM
+    ret = _psplatform.virtual_memory()
+    # cached for later use in Process.memory_percent()
+    _TOTAL_PHYMEM = ret.total
+    return ret
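+
+# Editor's note: hedged example (not upstream code) showing the 'percent'
+# relationship documented above; the number shown is hypothetical:
+#
+#   >>> mem = psutil.virtual_memory()
+#   >>> (mem.total - mem.available) / float(mem.total) * 100  # ~= mem.percent
+#   48.1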
+
+
+def swap_memory():
+    """Return system swap memory statistics as a namedtuple including
+    the following fields:
+
+     - total:   total swap memory in bytes
+     - used:    used swap memory in bytes
+     - free:    free swap memory in bytes
+     - percent: the percentage usage
+     - sin:     no. of bytes the system has swapped in from disk (cumulative)
+     - sout:    no. of bytes the system has swapped out from disk (cumulative)
+
+    'sin' and 'sout' on Windows are meaningless and always set to 0.
+    """
+    return _psplatform.swap_memory()
+
+
+# =====================================================================
+# --- disks/partitions related functions
+# =====================================================================
+
+def disk_usage(path):
+    """Return disk usage statistics about the given path as a namedtuple
+    including total, used and free space expressed in bytes plus the
+    percentage usage.
+    """
+    return _psplatform.disk_usage(path)
+
+
+def disk_partitions(all=False):
+    """Return mounted partitions as a list of
+    (device, mountpoint, fstype, opts) namedtuple.
+    'opts' field is a comma-separated string of mount options,
+    which may vary depending on the platform.
+
+    If "all" parameter is False return physical devices only and ignore
+    all others.
+    """
+    return _psplatform.disk_partitions(all)
+
+
+def disk_io_counters(perdisk=False):
+    """Return system disk I/O statistics as a namedtuple including
+    the following fields:
+
+     - read_count:  number of reads
+     - write_count: number of writes
+     - read_bytes:  number of bytes read
+     - write_bytes: number of bytes written
+     - read_time:   time spent reading from disk (in milliseconds)
+     - write_time:  time spent writing to disk (in milliseconds)
+
+    If perdisk is True return the same information for every
+    physical disk installed on the system as a dictionary
+    with partition names as the keys and the namedtuple
+    described above as the values.
+
+    On recent Windows versions the 'diskperf -y' command may need to
+    be executed first, otherwise this function won't find any disks.
+    """
+    rawdict = _psplatform.disk_io_counters()
+    if not rawdict:
+        raise RuntimeError("couldn't find any physical disk")
+    if perdisk:
+        for disk, fields in rawdict.items():
+            rawdict[disk] = _nt_sys_diskio(*fields)
+        return rawdict
+    else:
+        return _nt_sys_diskio(*[sum(x) for x in zip(*rawdict.values())])
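+
+# Editor's note: illustrative perdisk usage (not upstream code); disk
+# names vary by platform and the value shown is hypothetical:
+#
+#   >>> psutil.disk_io_counters(perdisk=True)['sda1'].read_bytes
+#   113714176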
+
+
+# =====================================================================
+# --- network related functions
+# =====================================================================
+
+def net_io_counters(pernic=False):
+    """Return network I/O statistics as a namedtuple including
+    the following fields:
+
+     - bytes_sent:   number of bytes sent
+     - bytes_recv:   number of bytes received
+     - packets_sent: number of packets sent
+     - packets_recv: number of packets received
+     - errin:        total number of errors while receiving
+     - errout:       total number of errors while sending
+     - dropin:       total number of incoming packets which were dropped
+     - dropout:      total number of outgoing packets which were dropped
+                     (always 0 on OSX and BSD)
+
+    If pernic is True return the same information for every
+    network interface installed on the system as a dictionary
+    with network interface names as the keys and the namedtuple
+    described above as the values.
+    """
+    rawdict = _psplatform.net_io_counters()
+    if not rawdict:
+        raise RuntimeError("couldn't find any network interface")
+    if pernic:
+        for nic, fields in rawdict.items():
+            rawdict[nic] = _nt_sys_netio(*fields)
+        return rawdict
+    else:
+        return _nt_sys_netio(*[sum(x) for x in zip(*rawdict.values())])
+
+
+def net_connections(kind='inet'):
+    """Return system-wide connections as a list of
+    (fd, family, type, laddr, raddr, status, pid) namedtuples.
+    In case of limited privileges 'fd' and 'pid' may be set to -1
+    and None respectively.
+    The 'kind' parameter filters for connections that fit the
+    following criteria:
+
+    Kind Value      Connections using
+    inet            IPv4 and IPv6
+    inet4           IPv4
+    inet6           IPv6
+    tcp             TCP
+    tcp4            TCP over IPv4
+    tcp6            TCP over IPv6
+    udp             UDP
+    udp4            UDP over IPv4
+    udp6            UDP over IPv6
+    unix            UNIX socket (both UDP and TCP protocols)
+    all             the sum of all the possible families and protocols
+    """
+    return _psplatform.net_connections(kind)
+
+# =====================================================================
+# --- other system related functions
+# =====================================================================
+
+def boot_time():
+    """Return the system boot time expressed in seconds since the epoch.
+    This is also available as psutil.BOOT_TIME.
+    """
+    # Note: we are not caching this because it is subject to
+    # system clock updates.
+    return _psplatform.boot_time()
+
+
+def users():
+    """Return users currently connected on the system as a list of
+    namedtuples including the following fields:
+
+     - user: the name of the user
+     - terminal: the tty or pseudo-tty associated with the user, if any.
+     - host: the host name associated with the entry, if any.
+     - started: the creation time as a floating point number expressed in
+       seconds since the epoch.
+    """
+    return _psplatform.users()
+
+
+# =====================================================================
+# --- deprecated functions
+# =====================================================================
+
+@_deprecated(replacement="psutil.pids()")
+def get_pid_list():
+    return pids()
+
+
+@_deprecated(replacement="list(process_iter())")
+def get_process_list():
+    return list(process_iter())
+
+
+@_deprecated(replacement="psutil.users()")
+def get_users():
+    return users()
+
+
+@_deprecated(replacement="psutil.virtual_memory()")
+def phymem_usage():
+    """Return the amount of total, used and free physical memory
+    on the system in bytes plus the percentage usage.
+    Deprecated; use psutil.virtual_memory() instead.
+    """
+    return virtual_memory()
+
+
+@_deprecated(replacement="psutil.swap_memory()")
+def virtmem_usage():
+    return swap_memory()
+
+
+@_deprecated(replacement="psutil.phymem_usage().free")
+def avail_phymem():
+    return phymem_usage().free
+
+
+@_deprecated(replacement="psutil.phymem_usage().used")
+def used_phymem():
+    return phymem_usage().used
+
+
+@_deprecated(replacement="psutil.virtmem_usage().total")
+def total_virtmem():
+    return virtmem_usage().total
+
+
+@_deprecated(replacement="psutil.virtmem_usage().used")
+def used_virtmem():
+    return virtmem_usage().used
+
+
+@_deprecated(replacement="psutil.virtmem_usage().free")
+def avail_virtmem():
+    return virtmem_usage().free
+
+
+@_deprecated(replacement="psutil.net_io_counters()")
+def network_io_counters(pernic=False):
+    return net_io_counters(pernic)
+
+
+def test():
+    """List info of all currently running processes emulating ps aux
+    output.
+    """
+    import datetime
+    from psutil._compat import print_
+
+    today_day = datetime.date.today()
+    templ = "%-10s %5s %4s %4s %7s %7s %-13s %5s %7s  %s"
+    attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',
+             'create_time', 'memory_info']
+    if _POSIX:
+        attrs.append('uids')
+        attrs.append('terminal')
+    print_(templ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "TTY",
+                    "START", "TIME", "COMMAND"))
+    for p in process_iter():
+        try:
+            pinfo = p.as_dict(attrs, ad_value='')
+        except NoSuchProcess:
+            pass
+        else:
+            if pinfo['create_time']:
+                ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])
+                if ctime.date() == today_day:
+                    ctime = ctime.strftime("%H:%M")
+                else:
+                    ctime = ctime.strftime("%b%d")
+            else:
+                ctime = ''
+            cputime = time.strftime("%M:%S",
+                                    time.localtime(sum(pinfo['cpu_times'])))
+            try:
+                user = p.username()
+            except KeyError:
+                if _POSIX:
+                    if pinfo['uids']:
+                        user = str(pinfo['uids'].real)
+                    else:
+                        user = ''
+                else:
+                    raise
+            except Error:
+                user = ''
+            if _WINDOWS and '\\' in user:
+                user = user.split('\\')[1]
+            vms = pinfo['memory_info'] and \
+                int(pinfo['memory_info'].vms / 1024) or '?'
+            rss = pinfo['memory_info'] and \
+                int(pinfo['memory_info'].rss / 1024) or '?'
+            memp = pinfo['memory_percent'] and \
+                round(pinfo['memory_percent'], 1) or '?'
+            print_(templ % (user[:10],
+                            pinfo['pid'],
+                            pinfo['cpu_percent'],
+                            memp,
+                            vms,
+                            rss,
+                            pinfo.get('terminal', '') or '?',
+                            ctime,
+                            cputime,
+                            pinfo['name'].strip() or '?'))
+
+
+def _replace_module():
+    """Dirty hack to replace the module object in order to access
+    deprecated module constants, see:
+    http://www.dr-josiah.com/2013/12/properties-on-python-modules.html
+    """
+    class ModuleWrapper(object):
+
+        def __repr__(self):
+            return repr(self._module)
+        __str__ = __repr__
+
+        @property
+        def NUM_CPUS(self):
+            msg = "NUM_CPUS constant is deprecated; use cpu_count() instead"
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return cpu_count()
+
+        @property
+        def BOOT_TIME(self):
+            msg = "BOOT_TIME constant is deprecated; use boot_time() instead"
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return boot_time()
+
+        @property
+        def TOTAL_PHYMEM(self):
+            msg = "TOTAL_PHYMEM constant is deprecated; " \
+                  "use virtual_memory().total instead"
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return virtual_memory().total
+
+    mod = ModuleWrapper()
+    mod.__dict__ = globals()
+    mod._module = sys.modules[__name__]
+    sys.modules[__name__] = mod
+
+
+_replace_module()
+del property, memoize, division, _replace_module
+if sys.version_info < (3, 0):
+    del num
+
+if __name__ == "__main__":
+    test()

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_common.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_common.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_common.py
new file mode 100644
index 0000000..3d2f27c
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_common.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common objects shared by all _ps* modules."""
+
+from __future__ import division
+import errno
+import os
+import socket
+import stat
+import sys
+import warnings
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading
+
+from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
+
+from psutil._compat import namedtuple, wraps
+
+# --- constants
+
+AF_INET6 = getattr(socket, 'AF_INET6', None)
+AF_UNIX = getattr(socket, 'AF_UNIX', None)
+
+STATUS_RUNNING = "running"
+STATUS_SLEEPING = "sleeping"
+STATUS_DISK_SLEEP = "disk-sleep"
+STATUS_STOPPED = "stopped"
+STATUS_TRACING_STOP = "tracing-stop"
+STATUS_ZOMBIE = "zombie"
+STATUS_DEAD = "dead"
+STATUS_WAKE_KILL = "wake-kill"
+STATUS_WAKING = "waking"
+STATUS_IDLE = "idle"  # BSD
+STATUS_LOCKED = "locked"  # BSD
+STATUS_WAITING = "waiting"  # BSD
+
+CONN_ESTABLISHED = "ESTABLISHED"
+CONN_SYN_SENT = "SYN_SENT"
+CONN_SYN_RECV = "SYN_RECV"
+CONN_FIN_WAIT1 = "FIN_WAIT1"
+CONN_FIN_WAIT2 = "FIN_WAIT2"
+CONN_TIME_WAIT = "TIME_WAIT"
+CONN_CLOSE = "CLOSE"
+CONN_CLOSE_WAIT = "CLOSE_WAIT"
+CONN_LAST_ACK = "LAST_ACK"
+CONN_LISTEN = "LISTEN"
+CONN_CLOSING = "CLOSING"
+CONN_NONE = "NONE"
+
+
+# --- functions
+
+def usage_percent(used, total, _round=None):
+    """Calculate percentage usage of 'used' against 'total'."""
+    try:
+        ret = (used / total) * 100
+    except ZeroDivisionError:
+        ret = 0
+    if _round is not None:
+        return round(ret, _round)
+    else:
+        return ret
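+
+# Editor's note: a worked example of usage_percent() (not part of the
+# upstream source); true division is in effect via the __future__ import:
+#
+#   >>> usage_percent(50, 200, _round=1)   # (50 / 200) * 100
+#   25.0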
+
+
+def memoize(fun):
+    """A simple memoize decorator for functions supporting (hashable)
+    positional arguments.
+    It also provides a cache_clear() function for clearing the cache:
+
+    >>> @memoize
+    ... def foo():
+    ...     return 1
+    ...
+    >>> foo()
+    1
+    >>> foo.cache_clear()
+    >>>
+    """
+    @wraps(fun)
+    def wrapper(*args, **kwargs):
+        key = (args, frozenset(sorted(kwargs.items())))
+        lock.acquire()
+        try:
+            try:
+                return cache[key]
+            except KeyError:
+                ret = cache[key] = fun(*args, **kwargs)
+        finally:
+            lock.release()
+        return ret
+
+    def cache_clear():
+        """Clear cache."""
+        lock.acquire()
+        try:
+            cache.clear()
+        finally:
+            lock.release()
+
+    lock = threading.RLock()
+    cache = {}
+    wrapper.cache_clear = cache_clear
+    return wrapper
+
+
+# http://code.activestate.com/recipes/577819-deprecated-decorator/
+def deprecated(replacement=None):
+    """A decorator which can be used to mark functions as deprecated."""
+    def outer(fun):
+        msg = "psutil.%s is deprecated" % fun.__name__
+        if replacement is not None:
+            msg += "; use %s instead" % replacement
+        if fun.__doc__ is None:
+            fun.__doc__ = msg
+
+        @wraps(fun)
+        def inner(*args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return fun(*args, **kwargs)
+
+        return inner
+    return outer
+
+
+def deprecated_method(replacement):
+    """A decorator which can be used to mark a method as deprecated
+    'replcement' is the method name which will be called instead.
+    """
+    def outer(fun):
+        msg = "%s() is deprecated; use %s() instead" % (
+            fun.__name__, replacement)
+        if fun.__doc__ is None:
+            fun.__doc__ = msg
+
+        @wraps(fun)
+        def inner(self, *args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return getattr(self, replacement)(*args, **kwargs)
+        return inner
+    return outer
+
+
+def isfile_strict(path):
+    """Same as os.path.isfile() but does not swallow EACCES / EPERM
+    exceptions, see:
+    http://mail.python.org/pipermail/python-dev/2012-June/120787.html
+    """
+    try:
+        st = os.stat(path)
+    except OSError:
+        err = sys.exc_info()[1]
+        if err.errno in (errno.EPERM, errno.EACCES):
+            raise
+        return False
+    else:
+        return stat.S_ISREG(st.st_mode)
+
+
+# --- Process.connections() 'kind' parameter mapping
+
+conn_tmap = {
+    "all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
+    "tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]),
+    "tcp4": ([AF_INET], [SOCK_STREAM]),
+    "udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]),
+    "udp4": ([AF_INET], [SOCK_DGRAM]),
+    "inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
+    "inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),
+    "inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
+}
+
+if AF_INET6 is not None:
+    conn_tmap.update({
+        "tcp6": ([AF_INET6], [SOCK_STREAM]),
+        "udp6": ([AF_INET6], [SOCK_DGRAM]),
+    })
+
+if AF_UNIX is not None:
+    conn_tmap.update({
+        "unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
+    })
+
+del AF_INET, AF_INET6, AF_UNIX, SOCK_STREAM, SOCK_DGRAM, socket
+
+
+# --- namedtuples for psutil.* system-related functions
+
+# psutil.swap_memory()
+sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',
+                             'sout'])
+# psutil.disk_usage()
+sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent'])
+# psutil.disk_io_counters()
+sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+                                 'read_bytes', 'write_bytes',
+                                 'read_time', 'write_time'])
+# psutil.disk_partitions()
+sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts'])
+# psutil.net_io_counters()
+snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',
+                               'packets_sent', 'packets_recv',
+                               'errin', 'errout',
+                               'dropin', 'dropout'])
+# psutil.users()
+suser = namedtuple('suser', ['name', 'terminal', 'host', 'started'])
+# psutil.net_connections()
+sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',
+                             'status', 'pid'])
+
+
+# --- namedtuples for psutil.Process methods
+
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms'])
+# psutil.Process.cpu_times()
+pcputimes = namedtuple('pcputimes', ['user', 'system'])
+# psutil.Process.open_files()
+popenfile = namedtuple('popenfile', ['path', 'fd'])
+# psutil.Process.threads()
+pthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])
+# psutil.Process.uids()
+puids = namedtuple('puids', ['real', 'effective', 'saved'])
+# psutil.Process.gids()
+pgids = namedtuple('pgids', ['real', 'effective', 'saved'])
+# psutil.Process.io_counters()
+pio = namedtuple('pio', ['read_count', 'write_count',
+                         'read_bytes', 'write_bytes'])
+# psutil.Process.ionice()
+pionice = namedtuple('pionice', ['ioclass', 'value'])
+# psutil.Process.ctx_switches()
+pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])
+
+
+# --- misc
+
+# backward compatibility layer for Process.connections() ntuple
+class pconn(
+    namedtuple('pconn',
+               ['fd', 'family', 'type', 'laddr', 'raddr', 'status'])):
+    __slots__ = ()
+
+    @property
+    def local_address(self):
+        warnings.warn("'local_address' field is deprecated; use 'laddr'"
+                      "instead", category=DeprecationWarning, stacklevel=2)
+        return self.laddr
+
+    @property
+    def remote_address(self):
+        warnings.warn("'remote_address' field is deprecated; use 'raddr'"
+                      "instead", category=DeprecationWarning, stacklevel=2)
+        return self.raddr


[20/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
new file mode 100644
index 0000000..3f174ef
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.commons.io.FilenameUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.conf.YarnConfig;
+import java.io.IOException;
+import java.net.URL;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
+
+public class HBaseTimelineMetricStore extends AbstractService
+  implements TimelineMetricStore {
+
+  static final Log LOG = LogFactory.getLog(HBaseTimelineMetricStore.class);
+  static final String HBASE_CONF = "hbase-site.xml";
+  static final String DEFAULT_CHECKPOINT_LOCATION = "/tmp";
+  static final String AGGREGATOR_CHECKPOINT_FILE =
+    "timeline-metrics-aggregator-checkpoint";
+  static final String MINUTE_AGGREGATE_ROLLUP_CHECKPOINT_FILE =
+    "timeline-metrics-minute-aggregator-checkpoint";
+  static final String HOURLY_AGGREGATE_ROLLUP_CHECKPOINT_FILE =
+    "timeline-metrics-hourly-aggregator-checkpoint";
+  static final String HOURLY_ROLLUP_CHECKPOINT_FILE =
+    "timeline-metrics-hourly-checkpoint";
+  private PhoenixHBaseAccessor hBaseAccessor;
+
+  /**
+   * Construct the service.
+   */
+  public HBaseTimelineMetricStore() {
+    super(HBaseTimelineMetricStore.class.getName());
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    URL resUrl = getClass().getClassLoader().getResource(HBASE_CONF);
+    LOG.info("Found hbase site configuration: " + resUrl);
+    Configuration hbaseConf;
+    if (resUrl != null) {
+      hbaseConf = new Configuration(true);
+      hbaseConf.addResource(resUrl.toURI().toURL());
+      hBaseAccessor = new PhoenixHBaseAccessor(hbaseConf);
+      hBaseAccessor.initMetricSchema();
+
+      String checkpointLocation = FilenameUtils.concat(conf.get(
+        YarnConfig.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
+        DEFAULT_CHECKPOINT_LOCATION), AGGREGATOR_CHECKPOINT_FILE);
+
+      // Start the cluster aggregator
+      TimelineMetricClusterAggregator clusterAggregator =
+        new TimelineMetricClusterAggregator(hBaseAccessor, checkpointLocation);
+      Thread aggregatorThread = new Thread(clusterAggregator);
+      aggregatorThread.start();
+
+      // Start the hourly cluster aggregator
+      String clusterAggregatorHourlyCheckpoint = FilenameUtils.concat(conf.get(
+        YarnConfig.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
+        DEFAULT_CHECKPOINT_LOCATION), HOURLY_AGGREGATE_ROLLUP_CHECKPOINT_FILE);
+
+      TimelineMetricClusterAggregatorHourly clusterAggregatorHourly = new
+        TimelineMetricClusterAggregatorHourly(hBaseAccessor,
+        clusterAggregatorHourlyCheckpoint);
+      Thread rollupAggregatorThread = new Thread(clusterAggregatorHourly);
+      rollupAggregatorThread.start();
+
+      // Start the 5 minute aggregator
+      String minuteCheckpoint = FilenameUtils.concat(conf.get(
+        YarnConfig.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
+        DEFAULT_CHECKPOINT_LOCATION), MINUTE_AGGREGATE_ROLLUP_CHECKPOINT_FILE);
+      TimelineMetricAggregatorMinute minuteAggregator = new
+        TimelineMetricAggregatorMinute(hBaseAccessor, minuteCheckpoint);
+
+      Thread minuteAggregatorThread = new Thread(minuteAggregator);
+      minuteAggregatorThread.start();
+
+      // Start hourly host aggregator
+      String hostAggregatorHourlyCheckpoint = FilenameUtils.concat(conf.get(
+        YarnConfig.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
+        DEFAULT_CHECKPOINT_LOCATION), HOURLY_ROLLUP_CHECKPOINT_FILE);
+
+      TimelineMetricAggregatorHourly aggregatorHourly = new
+        TimelineMetricAggregatorHourly(hBaseAccessor, hostAggregatorHourlyCheckpoint);
+      Thread aggregatorHourlyThread = new Thread(aggregatorHourly);
+      aggregatorHourlyThread.start();
+
+    } else {
+      throw new IllegalStateException("Unable to initialize the metrics " +
+        "subsystem. No hbase-site.xml found on the classpath.");
+    }
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    super.serviceStop();
+  }
+
+  @Override
+  public TimelineMetrics getTimelineMetrics(List<String> metricNames,
+                                            String hostname, String applicationId, String instanceId,
+                                            Long startTime, Long endTime, Integer limit,
+                                            boolean groupedByHosts) throws SQLException, IOException {
+
+    Condition condition = new Condition(metricNames, hostname, applicationId,
+      instanceId, startTime, endTime, limit, groupedByHosts);
+
+    if (hostname == null) {
+      return hBaseAccessor.getAggregateMetricRecords(condition);
+    }
+
+    return hBaseAccessor.getMetricRecords(condition);
+  }
+
+  @Override
+  public TimelineMetric getTimelineMetric(String metricName, String hostname,
+                                          String applicationId, String instanceId, Long startTime,
+                                          Long endTime, Integer limit)
+    throws SQLException, IOException {
+
+    TimelineMetrics metrics = hBaseAccessor.getMetricRecords(
+      new Condition(Collections.singletonList(metricName), hostname,
+        applicationId, instanceId, startTime, endTime, limit, true)
+    );
+
+    TimelineMetric metric = new TimelineMetric();
+    List<TimelineMetric> metricList = metrics.getMetrics();
+
+    if (metricList != null && !metricList.isEmpty()) {
+      metric.setMetricName(metricList.get(0).getMetricName());
+      metric.setAppId(metricList.get(0).getAppId());
+      metric.setInstanceId(metricList.get(0).getInstanceId());
+      metric.setHostName(metricList.get(0).getHostName());
+      // Assumption that metrics are ordered by start time
+      metric.setStartTime(metricList.get(0).getStartTime());
+      Map<Long, Double> metricRecords = new HashMap<Long, Double>();
+      for (TimelineMetric timelineMetric : metricList) {
+        metricRecords.putAll(timelineMetric.getMetricValues());
+      }
+      metric.setMetricValues(metricRecords);
+    }
+
+    return metric;
+  }
+
+
+  @Override
+  public TimelinePutResponse putMetrics(TimelineMetrics metrics)
+    throws SQLException, IOException {
+
+    // Errors are reported via the SQLException thrown by the accessor
+    TimelinePutResponse response = new TimelinePutResponse();
+
+    hBaseAccessor.insertMetricRecords(metrics);
+
+    return response;
+  }
+}
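
A minimal usage sketch for orientation (not part of the patch): how a caller
could exercise this store, using the classes added above. The metric, host,
and app names are hypothetical, and the sketch assumes hbase-site.xml is on
the classpath, since serviceInit() fails fast without it. Exception handling
is elided.

  Configuration conf = new Configuration();
  HBaseTimelineMetricStore store = new HBaseTimelineMetricStore();
  store.init(conf);    // AbstractService.init() invokes serviceInit()
  store.start();

  TimelineMetric metric = new TimelineMetric();
  metric.setMetricName("cpu_user");           // hypothetical metric name
  metric.setHostName("host1.example.com");    // hypothetical host
  metric.setAppId("HOST");                    // hypothetical app id
  metric.setStartTime(System.currentTimeMillis());
  Map<Long, Double> values = new HashMap<Long, Double>();
  values.put(System.currentTimeMillis(), 12.5);
  metric.setMetricValues(values);

  TimelineMetrics metrics = new TimelineMetrics();
  metrics.getMetrics().add(metric);
  store.putMetrics(metrics);    // upserts a row into METRIC_RECORD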

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
new file mode 100644
index 0000000..d39b1cb
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -0,0 +1,522 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtilsExt;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.AbstractTimelineAggregator.MetricClusterAggregate;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.AbstractTimelineAggregator.MetricHostAggregate;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_HOURLY_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_MINUTE_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.UPSERT_CLUSTER_AGGREGATE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.UPSERT_AGGREGATE_RECORD_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.UPSERT_METRICS_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricClusterAggregator.TimelineClusterMetric;
+
+/**
+ * Provides a facade over the Phoenix API to access HBase schema
+ */
+public class PhoenixHBaseAccessor {
+
+  private final Configuration conf;
+  static final Log LOG = LogFactory.getLog(PhoenixHBaseAccessor.class);
+  private static final String connectionUrl = "jdbc:phoenix:%s:%s:%s";
+
+  private static final String ZOOKEEPER_CLIENT_PORT =
+    "hbase.zookeeper.property.clientPort";
+  private static final String ZOOKEEPER_QUORUM = "hbase.zookeeper.quorum";
+  private static final String ZNODE_PARENT = "zookeeper.znode.parent";
+  static final int PHOENIX_MAX_MUTATION_STATE_SIZE = 50000;
+
+  public PhoenixHBaseAccessor(Configuration conf) {
+    this.conf = conf;
+    try {
+      Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
+    } catch (ClassNotFoundException e) {
+      LOG.error("Phoenix client jar not found in the classpath.");
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * Get a JDBC connection to the HBase store. Assumes the hbase
+   * configuration is present on the classpath and has been loaded by the
+   * caller into the Configuration object. Phoenix caches the underlying
+   * HConnection to the HBase cluster.
+   *
+   * @return a {@link java.sql.Connection} to the metric store, or null if
+   *         the connection attempt failed
+   */
+  protected Connection getConnection() {
+    Connection connection = null;
+    String zookeeperClientPort = conf.getTrimmed(ZOOKEEPER_CLIENT_PORT, "2181");
+    String zookeeperQuorum = conf.getTrimmed(ZOOKEEPER_QUORUM);
+    String znodeParent = conf.getTrimmed(ZNODE_PARENT, "/hbase");
+
+    if (zookeeperQuorum == null || zookeeperQuorum.isEmpty()) {
+      throw new IllegalStateException("Unable to find Zookeeper quorum to " +
+        "access HBase store using Phoenix.");
+    }
+
+    String url = String.format(connectionUrl, zookeeperQuorum,
+      zookeeperClientPort, znodeParent);
+
+    LOG.debug("Metric store connection url: " + url);
+
+    try {
+      connection = DriverManager.getConnection(url);
+    } catch (SQLException e) {
+      LOG.warn("Unable to connect to HBase store using Phoenix.", e);
+    }
+
+    return connection;
+  }
+
+  @SuppressWarnings("unchecked")
+  static TimelineMetric getTimelineMetricFromResultSet(ResultSet rs)
+      throws SQLException, IOException {
+    TimelineMetric metric = new TimelineMetric();
+    metric.setMetricName(rs.getString("METRIC_NAME"));
+    metric.setAppId(rs.getString("APP_ID"));
+    metric.setInstanceId(rs.getString("INSTANCE_ID"));
+    metric.setHostName(rs.getString("HOSTNAME"));
+    metric.setTimestamp(rs.getLong("TIMESTAMP"));
+    metric.setStartTime(rs.getLong("START_TIME"));
+    metric.setType(rs.getString("UNITS"));
+    metric.setMetricValues(
+      (Map<Long, Double>) TimelineUtilsExt.readMetricFromJSON(
+        rs.getString("METRICS")));
+    return metric;
+  }
+
+  static TimelineMetric getTimelineMetricKeyFromResultSet(ResultSet rs)
+      throws SQLException, IOException {
+    TimelineMetric metric = new TimelineMetric();
+    metric.setMetricName(rs.getString("METRIC_NAME"));
+    metric.setAppId(rs.getString("APP_ID"));
+    metric.setInstanceId(rs.getString("INSTANCE_ID"));
+    metric.setHostName(rs.getString("HOSTNAME"));
+    metric.setTimestamp(rs.getLong("TIMESTAMP"));
+    metric.setType(rs.getString("UNITS"));
+    return metric;
+  }
+
+  static MetricHostAggregate getMetricHostAggregateFromResultSet(ResultSet rs)
+      throws SQLException {
+    MetricHostAggregate metricHostAggregate = new MetricHostAggregate();
+    metricHostAggregate.setSum(rs.getDouble("METRIC_AVG"));
+    metricHostAggregate.setMax(rs.getDouble("METRIC_MAX"));
+    metricHostAggregate.setMin(rs.getDouble("METRIC_MIN"));
+    metricHostAggregate.setDeviation(0.0);
+    return metricHostAggregate;
+  }
+
+
+  protected void initMetricSchema() {
+    Connection conn = getConnection();
+    Statement stmt = null;
+
+    try {
+      LOG.info("Initializing metrics schema...");
+      stmt = conn.createStatement();
+      stmt.executeUpdate(CREATE_METRICS_TABLE_SQL);
+      stmt.executeUpdate(CREATE_METRICS_AGGREGATE_HOURLY_TABLE_SQL);
+      stmt.executeUpdate(CREATE_METRICS_AGGREGATE_MINUTE_TABLE_SQL);
+      stmt.executeUpdate(CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL);
+      stmt.executeUpdate(CREATE_METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_SQL);
+      conn.commit();
+    } catch (SQLException sql) {
+      LOG.warn("Error creating Metrics Schema in HBase using Phoenix.", sql);
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+    }
+  }
+
+  public void insertMetricRecords(TimelineMetrics metrics)
+      throws SQLException, IOException {
+
+    List<TimelineMetric> timelineMetrics = metrics.getMetrics();
+    if (timelineMetrics == null || timelineMetrics.isEmpty()) {
+      LOG.debug("Empty metrics insert request.");
+      return;
+    }
+
+    Connection conn = getConnection();
+    PreparedStatement stmt = null;
+    long currentTime = System.currentTimeMillis();
+
+    try {
+      stmt = conn.prepareStatement(UPSERT_METRICS_SQL);
+
+      for (TimelineMetric metric : timelineMetrics) {
+        stmt.clearParameters();
+
+        LOG.trace("host: " + metric.getHostName() + ", " +
+          "values: " + metric.getMetricValues());
+        Double[] aggregates = calculateAggregates(metric.getMetricValues());
+
+        stmt.setString(1, metric.getMetricName());
+        stmt.setString(2, metric.getHostName());
+        stmt.setString(3, metric.getAppId());
+        stmt.setString(4, metric.getInstanceId());
+        stmt.setLong(5, currentTime);
+        stmt.setLong(6, metric.getStartTime());
+        stmt.setString(7, metric.getType());
+        stmt.setDouble(8, aggregates[0]);
+        stmt.setDouble(9, aggregates[1]);
+        stmt.setDouble(10, aggregates[2]);
+        stmt.setString(11,
+          TimelineUtils.dumpTimelineRecordtoJSON(metric.getMetricValues()));
+
+        try {
+          stmt.executeUpdate();
+        } catch (SQLException sql) {
+          LOG.error(sql);
+        }
+      }
+
+      conn.commit();
+
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+  }
+
+  private Double[] calculateAggregates(Map<Long, Double> metricValues) {
+    Double[] values = new Double[3];
+    Double max = Double.MIN_VALUE;
+    Double min = Double.MAX_VALUE;
+    Double sum = 0.0;
+    int valueCount = 0;
+    if (metricValues != null && !metricValues.isEmpty()) {
+      for (Double value : metricValues.values()) {
+        // TODO: Some nulls in data - need to investigate null values from host
+        if (value != null) {
+          if (value > max) {
+            max = value;
+          }
+          if (value < min) {
+            min = value;
+          }
+          sum += value;
+          valueCount++;
+        }
+      }
+    }
+    values[0] = max != Double.MIN_VALUE ? max : 0.0;
+    values[1] = min != Double.MAX_VALUE ? min : 0.0;
+    // Average only the non-null samples so dropped values do not skew it
+    values[2] = valueCount > 0 ? sum / valueCount : 0.0;
+    return values;
+  }
+
+  @SuppressWarnings("unchecked")
+  public TimelineMetrics getMetricRecords(final Condition condition)
+      throws SQLException, IOException {
+
+    if (condition.isEmpty()) {
+      throw new SQLException("No filter criteria specified.");
+    }
+
+    Connection conn = getConnection();
+    PreparedStatement stmt = null;
+    TimelineMetrics metrics = new TimelineMetrics();
+
+    try {
+      stmt = PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);
+
+      ResultSet rs = stmt.executeQuery();
+
+      while (rs.next()) {
+        TimelineMetric metric = getTimelineMetricFromResultSet(rs);
+
+        if (condition.isGrouped()) {
+          metrics.addOrMergeTimelineMetric(metric);
+        } else {
+          metrics.getMetrics().add(metric);
+        }
+      }
+
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+    return metrics;
+  }
+
+  public void saveHostAggregateRecords(Map<TimelineMetric,
+      MetricHostAggregate> hostAggregateMap, String phoenixTableName)
+      throws SQLException {
+
+    if (hostAggregateMap != null && !hostAggregateMap.isEmpty()) {
+      Connection conn = getConnection();
+      PreparedStatement stmt = null;
+
+      long start = System.currentTimeMillis();
+      int rowCount = 0;
+
+      try {
+        stmt = conn.prepareStatement(
+          String.format(UPSERT_AGGREGATE_RECORD_SQL, phoenixTableName));
+
+        for (Map.Entry<TimelineMetric, MetricHostAggregate> metricAggregate :
+            hostAggregateMap.entrySet()) {
+
+          TimelineMetric metric = metricAggregate.getKey();
+          MetricHostAggregate hostAggregate = metricAggregate.getValue();
+
+          rowCount++;
+          stmt.clearParameters();
+          stmt.setString(1, metric.getMetricName());
+          stmt.setString(2, metric.getHostName());
+          stmt.setString(3, metric.getAppId());
+          stmt.setString(4, metric.getInstanceId());
+          stmt.setLong(5, metric.getTimestamp());
+          stmt.setString(6, metric.getType());
+          stmt.setDouble(7, hostAggregate.getSum());
+          stmt.setDouble(8, hostAggregate.getMax());
+          stmt.setDouble(9, hostAggregate.getMin());
+
+          try {
+            stmt.executeUpdate();
+          } catch (SQLException sql) {
+            LOG.error(sql);
+          }
+
+          if (rowCount >= PHOENIX_MAX_MUTATION_STATE_SIZE - 1) {
+            conn.commit();
+            rowCount = 0;
+          }
+
+        }
+
+        conn.commit();
+
+      } finally {
+        if (stmt != null) {
+          try {
+            stmt.close();
+          } catch (SQLException e) {
+            // Ignore
+          }
+        }
+        if (conn != null) {
+          try {
+            conn.close();
+          } catch (SQLException sql) {
+            // Ignore
+          }
+        }
+      }
+
+      long end = System.currentTimeMillis();
+
+      if ((end - start) > 60000L) {
+        LOG.info("Time to save map: " + (end - start) + ", " +
+          "thread = " + Thread.currentThread().getName());
+      }
+    }
+  }
+
+  /**
+   * Save Metric aggregate records.
+   * @throws SQLException
+   */
+  public void saveClusterAggregateRecords(Map<TimelineClusterMetric,
+      MetricClusterAggregate> records) throws SQLException {
+    if (records == null || records.isEmpty()) {
+      LOG.debug("Empty aggregate records.");
+      return;
+    }
+
+    long start = System.currentTimeMillis();
+
+    Connection conn = getConnection();
+    PreparedStatement stmt = null;
+    try {
+      stmt = conn.prepareStatement(UPSERT_CLUSTER_AGGREGATE_SQL);
+      int rowCount = 0;
+
+      for (Map.Entry<TimelineClusterMetric, MetricClusterAggregate>
+          aggregateEntry : records.entrySet()) {
+        TimelineClusterMetric clusterMetric = aggregateEntry.getKey();
+        MetricClusterAggregate aggregate = aggregateEntry.getValue();
+
+        LOG.trace("clusterMetric = " + clusterMetric + ", " +
+          "aggregate = " + aggregate);
+
+        rowCount++;
+        stmt.clearParameters();
+        stmt.setString(1, clusterMetric.getMetricName());
+        stmt.setString(2, clusterMetric.getAppId());
+        stmt.setString(3, clusterMetric.getInstanceId());
+        stmt.setLong(4, clusterMetric.getTimestamp());
+        stmt.setString(5, clusterMetric.getType());
+        stmt.setDouble(6, aggregate.getSum());
+        stmt.setInt(7, aggregate.getNumberOfHosts());
+        stmt.setDouble(8, aggregate.getMax());
+        stmt.setDouble(9, aggregate.getMin());
+
+        try {
+          stmt.executeUpdate();
+        } catch (SQLException sql) {
+          LOG.error(sql);
+        }
+
+        if (rowCount >= PHOENIX_MAX_MUTATION_STATE_SIZE - 1) {
+          conn.commit();
+          rowCount = 0;
+        }
+      }
+
+      conn.commit();
+
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+    long end = System.currentTimeMillis();
+    if ((end - start) > 60000L) {
+      LOG.info("Time to save: " + (end - start) + ", " +
+        "thread = " + Thread.currentThread().getName());
+    }
+  }
+
+
+  public TimelineMetrics getAggregateMetricRecords(final Condition condition)
+      throws SQLException {
+
+    if (condition.isEmpty()) {
+      throw new SQLException("No filter criteria specified.");
+    }
+
+    Connection conn = getConnection();
+    PreparedStatement stmt = null;
+    TimelineMetrics metrics = new TimelineMetrics();
+
+    try {
+      stmt = PhoenixTransactSQL.prepareGetAggregateSqlStmt(conn, condition);
+
+      ResultSet rs = stmt.executeQuery();
+
+      while (rs.next()) {
+        TimelineMetric metric = new TimelineMetric();
+        metric.setMetricName(rs.getString("METRIC_NAME"));
+        metric.setAppId(rs.getString("APP_ID"));
+        metric.setInstanceId(rs.getString("INSTANCE_ID"));
+        metric.setTimestamp(rs.getLong("TIMESTAMP"));
+        metric.setStartTime(rs.getLong("TIMESTAMP"));
+        Map<Long, Double> valueMap = new HashMap<Long, Double>();
+        valueMap.put(rs.getLong("TIMESTAMP"), rs.getDouble("METRIC_SUM") /
+                                              rs.getInt("HOSTS_COUNT"));
+        metric.setMetricValues(valueMap);
+
+        if (condition.isGrouped()) {
+          metrics.addOrMergeTimelineMetric(metric);
+        } else {
+          metrics.getMetrics().add(metric);
+        }
+      }
+
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+    LOG.info("Aggregate records size: " + metrics.getMetrics().size());
+    return metrics;
+  }
+}
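
As a worked illustration (not part of the patch) of the URL getConnection()
assembles: with a hypothetical ZooKeeper quorum and the default client port
and znode parent read above, the format string expands as follows.

  String url = String.format("jdbc:phoenix:%s:%s:%s",
      "zk1.example.com,zk2.example.com,zk3.example.com",  // hbase.zookeeper.quorum
      "2181",                                             // default clientPort
      "/hbase");                                          // default znode.parent
  // url = "jdbc:phoenix:zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase"
  Connection conn = DriverManager.getConnection(url);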

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixTransactSQL.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixTransactSQL.java
new file mode 100644
index 0000000..60a5673
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixTransactSQL.java
@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.List;
+
+/**
+ * Encapsulate all metrics related SQL queries.
+ */
+public class PhoenixTransactSQL {
+
+  static final Log LOG = LogFactory.getLog(PhoenixTransactSQL.class);
+  // TODO: Configurable TTL values
+  /**
+   * Create table to store individual metric records.
+   */
+  public static final String CREATE_METRICS_TABLE_SQL = "CREATE TABLE IF NOT " +
+    "EXISTS METRIC_RECORD (METRIC_NAME VARCHAR, HOSTNAME VARCHAR, " +
+    "APP_ID VARCHAR, INSTANCE_ID VARCHAR, TIMESTAMP UNSIGNED_LONG NOT NULL, " +
+    "START_TIME UNSIGNED_LONG, UNITS CHAR(20), " +
+    "METRIC_AVG DOUBLE, METRIC_MAX DOUBLE, METRIC_MIN DOUBLE, " +
+    "METRICS VARCHAR CONSTRAINT pk " +
+    "PRIMARY KEY (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, TIMESTAMP)) " +
+    "IMMUTABLE_ROWS=true, TTL=86400";
+
+  public static final String CREATE_METRICS_AGGREGATE_HOURLY_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS METRIC_RECORD_HOURLY " +
+    "(METRIC_NAME VARCHAR, HOSTNAME VARCHAR, " +
+    "APP_ID VARCHAR, INSTANCE_ID VARCHAR, TIMESTAMP UNSIGNED_LONG NOT NULL, " +
+    "UNITS CHAR(20), METRIC_AVG DOUBLE, METRIC_MAX DOUBLE," +
+    "METRIC_MIN DOUBLE CONSTRAINT pk " +
+    "PRIMARY KEY (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, TIMESTAMP)) " +
+    "IMMUTABLE_ROWS=true, TTL=2592000";
+
+  public static final String CREATE_METRICS_AGGREGATE_MINUTE_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS METRIC_RECORD_MINUTE " +
+    "(METRIC_NAME VARCHAR, HOSTNAME VARCHAR, " +
+    "APP_ID VARCHAR, INSTANCE_ID VARCHAR, TIMESTAMP UNSIGNED_LONG NOT NULL, " +
+    "UNITS CHAR(20), METRIC_AVG DOUBLE, METRIC_MAX DOUBLE," +
+    "METRIC_MIN DOUBLE CONSTRAINT pk " +
+    "PRIMARY KEY (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, TIMESTAMP)) " +
+    "IMMUTABLE_ROWS=true, TTL=604800";
+
+  public static final String CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS METRIC_AGGREGATE " +
+    "(METRIC_NAME VARCHAR, APP_ID VARCHAR, INSTANCE_ID VARCHAR, " +
+    "TIMESTAMP UNSIGNED_LONG NOT NULL, UNITS CHAR(20), METRIC_SUM DOUBLE, " +
+    "HOSTS_COUNT UNSIGNED_INT, METRIC_MAX DOUBLE, METRIC_MIN DOUBLE " +
+    "CONSTRAINT pk PRIMARY KEY (METRIC_NAME, APP_ID, INSTANCE_ID, TIMESTAMP)) " +
+    "IMMUTABLE_ROWS=true, TTL=2592000";
+
+  public static final String CREATE_METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS METRIC_AGGREGATE_HOURLY " +
+    "(METRIC_NAME VARCHAR, APP_ID VARCHAR, INSTANCE_ID VARCHAR, " +
+    "TIMESTAMP UNSIGNED_LONG NOT NULL, UNITS CHAR(20), METRIC_AVG DOUBLE, " +
+    "METRIC_MAX DOUBLE, METRIC_MIN DOUBLE " +
+    "CONSTRAINT pk PRIMARY KEY (METRIC_NAME, APP_ID, INSTANCE_ID, TIMESTAMP)) " +
+    "IMMUTABLE_ROWS=true, TTL=31536000";
+
+  /**
+   * Insert into metric records table.
+   */
+  public static final String UPSERT_METRICS_SQL = "UPSERT INTO METRIC_RECORD " +
+    "(METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, TIMESTAMP, START_TIME, " +
+    "UNITS, METRIC_AVG, METRIC_MAX, METRIC_MIN, METRICS) VALUES " +
+    "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+
+  public static final String UPSERT_CLUSTER_AGGREGATE_SQL = "UPSERT INTO " +
+    "METRIC_AGGREGATE (METRIC_NAME, APP_ID, INSTANCE_ID, TIMESTAMP, " +
+    "UNITS, METRIC_SUM, HOSTS_COUNT, METRIC_MAX, METRIC_MIN) " +
+    "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
+
+  public static final String UPSERT_AGGREGATE_RECORD_SQL = "UPSERT INTO " +
+    "%s (METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, " +
+    "TIMESTAMP, UNITS, METRIC_AVG, METRIC_MAX, METRIC_MIN) " +
+    "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
+
+  /**
+   * Retrieve a set of rows from metrics records table.
+   */
+  public static final String GET_METRIC_SQL = "SELECT METRIC_NAME, " +
+    "HOSTNAME, APP_ID, INSTANCE_ID, TIMESTAMP, START_TIME, UNITS, METRIC_AVG, " +
+    "METRIC_MAX, METRIC_MIN, METRICS FROM METRIC_RECORD";
+
+  public static final String GET_METRIC_AGGREGATE_ONLY_SQL = "SELECT " +
+    "METRIC_NAME, HOSTNAME, APP_ID, INSTANCE_ID, TIMESTAMP, " +
+    "UNITS, METRIC_AVG, METRIC_MAX, METRIC_MIN FROM %s";
+
+  public static final String GET_CLUSTER_AGGREGATE_SQL =
+    "SELECT METRIC_NAME, APP_ID, " +
+    "INSTANCE_ID, TIMESTAMP, METRIC_SUM, HOSTS_COUNT, METRIC_MAX, " +
+    "METRIC_MIN FROM METRIC_AGGREGATE";
+
+  /**
+   * Default result limit: one day of data at 4 data points per minute
+   * (4 * 60 * 24 = 5760 rows).
+   */
+  public static final Integer DEFAULT_RESULT_LIMIT = 5760;
+  public static final String METRICS_RECORD_TABLE_NAME =
+    "METRIC_RECORD";
+  public static final String METRICS_AGGREGATE_MINUTE_TABLE_NAME =
+    "METRIC_RECORD_MINUTE";
+  public static final String METRICS_AGGREGATE_HOURLY_TABLE_NAME =
+    "METRIC_RECORD_HOURLY";
+
+  public static PreparedStatement prepareGetMetricsSqlStmt(
+      Connection connection, Condition condition) throws SQLException {
+
+    if (condition.isEmpty()) {
+      throw new IllegalArgumentException("Condition is empty.");
+    }
+    String stmtStr = GET_METRIC_SQL;
+    if (condition.getStatement() != null) {
+      stmtStr = condition.getStatement();
+    }
+
+    StringBuilder sb = new StringBuilder(stmtStr);
+    sb.append(" WHERE ");
+    sb.append(condition.getConditionClause());
+    sb.append(" ORDER BY METRIC_NAME, TIMESTAMP");
+    if (condition.getLimit() != null) {
+      sb.append(" LIMIT ").append(condition.getLimit());
+    }
+
+    LOG.debug("SQL: " + sb.toString() + ", condition: " + condition);
+    PreparedStatement stmt = connection.prepareStatement(sb.toString());
+    int pos = 1;
+    if (condition.getMetricNames() != null) {
+      for ( ; pos <= condition.getMetricNames().size(); pos++) {
+        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
+      }
+    }
+    if (condition.getHostname() != null) {
+      stmt.setString(pos++, condition.getHostname());
+    }
+    // TODO: Upper case all strings on POST
+    if (condition.getAppId() != null) {
+      stmt.setString(pos++, condition.getAppId().toLowerCase());
+    }
+    if (condition.getInstanceId() != null) {
+      stmt.setString(pos++, condition.getInstanceId());
+    }
+    if (condition.getStartTime() != null) {
+      stmt.setLong(pos++, condition.getStartTime());
+    }
+    if (condition.getEndTime() != null) {
+      stmt.setLong(pos, condition.getEndTime());
+    }
+    if (condition.getFetchSize() != null) {
+      stmt.setFetchSize(condition.getFetchSize());
+    }
+
+    return stmt;
+  }
+
+  public static PreparedStatement prepareGetAggregateSqlStmt(
+      Connection connection, Condition condition) throws SQLException {
+
+    if (condition.isEmpty()) {
+      throw new IllegalArgumentException("Condition is empty.");
+    }
+
+    StringBuilder sb = new StringBuilder(GET_CLUSTER_AGGREGATE_SQL);
+    sb.append(" WHERE ");
+    sb.append(condition.getConditionClause());
+    sb.append(" ORDER BY METRIC_NAME, TIMESTAMP");
+    if (condition.getLimit() != null) {
+      sb.append(" LIMIT ").append(condition.getLimit());
+    }
+
+    LOG.debug("SQL => " + sb.toString() + ", condition => " + condition);
+    PreparedStatement stmt = connection.prepareStatement(sb.toString());
+    int pos = 1;
+    if (condition.getMetricNames() != null) {
+      for ( ; pos <= condition.getMetricNames().size(); pos++) {
+        stmt.setString(pos, condition.getMetricNames().get(pos - 1));
+      }
+    }
+    // TODO: Upper case all strings on POST
+    if (condition.getAppId() != null) {
+      stmt.setString(pos++, condition.getAppId().toLowerCase());
+    }
+    if (condition.getInstanceId() != null) {
+      stmt.setString(pos++, condition.getInstanceId());
+    }
+    if (condition.getStartTime() != null) {
+      stmt.setLong(pos++, condition.getStartTime());
+    }
+    if (condition.getEndTime() != null) {
+      stmt.setLong(pos, condition.getEndTime());
+    }
+
+    return stmt;
+  }
+
+  static class Condition {
+    List<String> metricNames;
+    String hostname;
+    String appId;
+    String instanceId;
+    Long startTime;
+    Long endTime;
+    Integer limit;
+    boolean grouped;
+    boolean noLimit = false;
+    Integer fetchSize;
+    String statement;
+
+    Condition(List<String> metricNames, String hostname, String appId,
+              String instanceId, Long startTime, Long endTime, Integer limit,
+              boolean grouped) {
+      this.metricNames = metricNames;
+      this.hostname = hostname;
+      this.appId = appId;
+      this.instanceId = instanceId;
+      this.startTime = startTime;
+      this.endTime = endTime;
+      this.limit = limit;
+      this.grouped = grouped;
+    }
+
+    String getStatement() {
+      return statement;
+    }
+
+    void setStatement(String statement) {
+      this.statement = statement;
+    }
+
+    List<String> getMetricNames() {
+      return metricNames == null || metricNames.isEmpty() ? null : metricNames;
+    }
+
+    String getMetricsClause() {
+      StringBuilder sb = new StringBuilder("(");
+      if (metricNames != null) {
+        for (String name : metricNames) {
+          if (sb.length() != 1) {
+            sb.append(", ");
+          }
+          sb.append("?");
+        }
+        sb.append(")");
+        return sb.toString();
+      } else {
+        return null;
+      }
+    }
+
+    String getConditionClause() {
+      // Only append " AND" between predicates that are actually present,
+      // so an absent trailing predicate cannot leave a dangling conjunction.
+      StringBuilder sb = new StringBuilder();
+      boolean appendConjunction = false;
+
+      if (getMetricNames() != null) {
+        sb.append("METRIC_NAME IN ");
+        sb.append(getMetricsClause());
+        appendConjunction = true;
+      }
+      if (getHostname() != null) {
+        if (appendConjunction) {
+          sb.append(" AND");
+        }
+        sb.append(" HOSTNAME = ?");
+        appendConjunction = true;
+      }
+      if (getAppId() != null) {
+        if (appendConjunction) {
+          sb.append(" AND");
+        }
+        sb.append(" APP_ID = ?");
+        appendConjunction = true;
+      }
+      if (getInstanceId() != null) {
+        if (appendConjunction) {
+          sb.append(" AND");
+        }
+        sb.append(" INSTANCE_ID = ?");
+        appendConjunction = true;
+      }
+      if (getStartTime() != null) {
+        if (appendConjunction) {
+          sb.append(" AND");
+        }
+        sb.append(" TIMESTAMP >= ?");
+        appendConjunction = true;
+      }
+      if (getEndTime() != null) {
+        if (appendConjunction) {
+          sb.append(" AND");
+        }
+        sb.append(" TIMESTAMP < ?");
+      }
+      return sb.toString();
+    }
+
+    String getHostname() {
+      return hostname == null || hostname.isEmpty() ? null : hostname;
+    }
+
+    String getAppId() {
+      return appId == null || appId.isEmpty() ? null : appId;
+    }
+
+    String getInstanceId() {
+      return instanceId == null || instanceId.isEmpty() ? null : instanceId;
+    }
+
+    /**
+     * Convert seconds to millis when a caller passed epoch seconds.
+     */
+    Long getStartTime() {
+      if (startTime == null) {
+        return null;
+      }
+      if (startTime < 9999999999L) {
+        return startTime * 1000;
+      }
+      return startTime;
+    }
+
+    Long getEndTime() {
+      if (endTime == null) {
+        return null;
+      }
+      if (endTime < 9999999999L) {
+        return endTime * 1000;
+      }
+      return endTime;
+    }
+
+    void setNoLimit() {
+      this.noLimit = true;
+    }
+
+    Integer getLimit() {
+      if (noLimit) {
+        return null;
+      }
+      return limit == null ? DEFAULT_RESULT_LIMIT : limit;
+    }
+
+    boolean isGrouped() {
+      return grouped;
+    }
+
+    boolean isEmpty() {
+      return (metricNames == null || metricNames.isEmpty())
+        && (hostname == null || hostname.isEmpty())
+        && (appId == null || appId.isEmpty())
+        && (instanceId == null || instanceId.isEmpty())
+        && startTime == null
+        && endTime == null;
+    }
+
+    Integer getFetchSize() {
+      return fetchSize;
+    }
+
+    void setFetchSize(Integer fetchSize) {
+      this.fetchSize = fetchSize;
+    }
+
+    @Override
+    public String toString() {
+      return "Condition{" +
+        "metricNames=" + metricNames +
+        ", hostname='" + hostname + '\'' +
+        ", appId='" + appId + '\'' +
+        ", instanceId='" + instanceId + '\'' +
+        ", startTime=" + startTime +
+        ", endTime=" + endTime +
+        ", limit=" + limit +
+        ", grouped=" + grouped +
+        ", noLimit=" + noLimit +
+        '}';
+    }
+  }
+}
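
To make the statement construction concrete, a hedged sketch (not part of the
patch) of what prepareGetMetricsSqlStmt() produces for a two-metric,
single-host query; the metric, host, and app names are hypothetical.

  long endMillis = System.currentTimeMillis();
  long startMillis = endMillis - 3600000L;      // last hour
  Condition condition = new Condition(
      Arrays.asList("cpu_user", "cpu_system"),  // METRIC_NAME IN (?, ?)
      "host1.example.com",                      // HOSTNAME = ?
      "HOST",                                   // APP_ID = ? (lower-cased on bind)
      null,                                     // no INSTANCE_ID predicate
      startMillis, endMillis,                   // TIMESTAMP >= ? AND TIMESTAMP < ?
      null, false);                             // default LIMIT 5760, ungrouped
  PreparedStatement stmt =
      PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);
  // Generated SQL:
  //   SELECT ... FROM METRIC_RECORD
  //   WHERE METRIC_NAME IN (?, ?) AND HOSTNAME = ? AND APP_ID = ?
  //     AND TIMESTAMP >= ? AND TIMESTAMP < ?
  //   ORDER BY METRIC_NAME, TIMESTAMP LIMIT 5760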

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorHourly.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorHourly.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorHourly.java
new file mode 100644
index 0000000..50a4f63
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorHourly.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_METRIC_AGGREGATE_ONLY_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_HOURLY_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+
+public class TimelineMetricAggregatorHourly extends AbstractTimelineAggregator {
+  static final Long SLEEP_INTERVAL = 3600000L; // 1 hour
+  static final Long CHECKPOINT_CUT_OFF_INTERVAL = SLEEP_INTERVAL * 2;
+  static final Integer RESULTSET_FETCH_SIZE = 1000;
+  private static final Log LOG = LogFactory.getLog(TimelineMetricAggregatorHourly.class);
+
+  public TimelineMetricAggregatorHourly(PhoenixHBaseAccessor hBaseAccessor,
+                                        String checkpointLocation) {
+    super(hBaseAccessor, checkpointLocation);
+  }
+
+  @Override
+  protected boolean doWork(long startTime, long endTime) {
+    LOG.info("Start aggregation cycle @ " + new Date());
+
+    boolean success = true;
+    Condition condition = new Condition(null, null, null, null, startTime,
+                                        endTime, null, true);
+    condition.setNoLimit();
+    condition.setFetchSize(RESULTSET_FETCH_SIZE);
+    condition.setStatement(String.format(GET_METRIC_AGGREGATE_ONLY_SQL,
+      METRICS_AGGREGATE_MINUTE_TABLE_NAME));
+
+    Connection conn = null;
+    PreparedStatement stmt = null;
+
+    try {
+      conn = hBaseAccessor.getConnection();
+      stmt = PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);
+
+      ResultSet rs = stmt.executeQuery();
+      TimelineMetric existingMetric = null;
+      MetricHostAggregate hostAggregate = null;
+      Map<TimelineMetric, MetricHostAggregate> hostAggregateMap =
+        new HashMap<TimelineMetric, MetricHostAggregate>();
+
+      while (rs.next()) {
+        TimelineMetric currentMetric =
+          PhoenixHBaseAccessor.getTimelineMetricKeyFromResultSet(rs);
+        MetricHostAggregate currentHostAggregate =
+          PhoenixHBaseAccessor.getMetricHostAggregateFromResultSet(rs);
+
+        if (existingMetric == null) {
+          // First row
+          existingMetric = currentMetric;
+          hostAggregate = new MetricHostAggregate();
+          hostAggregateMap.put(currentMetric, hostAggregate);
+        }
+
+        if (existingMetric.equalsExceptTime(currentMetric)) {
+          // Recalculate totals with current metric
+          hostAggregate.updateAggregates(currentHostAggregate);
+
+        } else {
+          // Switched over to a new metric - save existing
+          hostAggregate = new MetricHostAggregate();
+          hostAggregate.updateAggregates(currentHostAggregate);
+          hostAggregateMap.put(currentMetric, hostAggregate);
+          existingMetric = currentMetric;
+        }
+      }
+
+      LOG.info("Saving " + hostAggregateMap.size() + " metric aggregates.");
+
+      hBaseAccessor.saveHostAggregateRecords(hostAggregateMap,
+        METRICS_AGGREGATE_HOURLY_TABLE_NAME);
+
+    } catch (SQLException e) {
+      LOG.error("Exception during aggregating metrics.", e);
+      success = false;
+    } catch (IOException e) {
+      LOG.error("Exception during aggregating metrics.", e);
+      success = false;
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+
+    LOG.info("End aggregation cycle @ " + new Date());
+    return success;
+  }
+
+  @Override
+  protected Long getSleepInterval() {
+    return SLEEP_INTERVAL;
+  }
+
+  @Override
+  protected Long getCheckpointCutOffInterval() {
+    return CHECKPOINT_CUT_OFF_INTERVAL;
+  }
+}
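
Both host aggregators depend on the ORDER BY METRIC_NAME, TIMESTAMP clause:
rows for one metric key arrive contiguously, so a single pass over the result
set can fold them into per-metric aggregates. A simplified sketch of that pass
(Row and sortedRows are hypothetical stand-ins for the JDBC result set):

  Map<TimelineMetric, MetricHostAggregate> out =
      new HashMap<TimelineMetric, MetricHostAggregate>();
  TimelineMetric prev = null;
  MetricHostAggregate agg = null;
  for (Row row : sortedRows) {
    TimelineMetric key = row.key();
    if (prev == null || !prev.equalsExceptTime(key)) {
      agg = new MetricHostAggregate();       // a new metric group starts here
      out.put(key, agg);
      prev = key;
    }
    agg.updateAggregates(row.aggregate());   // merge min/max/sum into the group
  }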

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorMinute.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorMinute.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorMinute.java
new file mode 100644
index 0000000..a3909cf
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricAggregatorMinute.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_METRIC_AGGREGATE_ONLY_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.prepareGetMetricsSqlStmt;
+
+public class TimelineMetricAggregatorMinute extends AbstractTimelineAggregator {
+  static final Long SLEEP_INTERVAL = 300000L; // 5 mins
+  static final Long CHECKPOINT_CUT_OFF_INTERVAL = SLEEP_INTERVAL * 4;
+  private static final Log LOG = LogFactory.getLog(TimelineMetricAggregatorMinute.class);
+
+  public TimelineMetricAggregatorMinute(PhoenixHBaseAccessor hBaseAccessor,
+                                        String checkpointLocation) {
+    super(hBaseAccessor, checkpointLocation);
+  }
+
+  @Override
+  protected boolean doWork(long startTime, long endTime) {
+    LOG.info("Start aggregation cycle @ " + new Date());
+
+    boolean success = true;
+    Condition condition = new Condition(null, null, null, null, startTime,
+                                        endTime, null, true);
+    condition.setNoLimit();
+    condition.setFetchSize(RESULTSET_FETCH_SIZE);
+    condition.setStatement(String.format(GET_METRIC_AGGREGATE_ONLY_SQL,
+      METRICS_RECORD_TABLE_NAME));
+
+    Connection conn = null;
+    PreparedStatement stmt = null;
+
+    try {
+      conn = hBaseAccessor.getConnection();
+      stmt = prepareGetMetricsSqlStmt(conn, condition);
+      LOG.info("Query issued @: " + new Date());
+      ResultSet rs = stmt.executeQuery();
+      LOG.info("Query returned @: " + new Date());
+      TimelineMetric existingMetric = null;
+      MetricHostAggregate hostAggregate = null;
+
+      Map<TimelineMetric, MetricHostAggregate> hostAggregateMap =
+        new HashMap<TimelineMetric, MetricHostAggregate>();
+
+      while (rs.next()) {
+        TimelineMetric currentMetric =
+          PhoenixHBaseAccessor.getTimelineMetricKeyFromResultSet(rs);
+        MetricHostAggregate currentHostAggregate =
+          PhoenixHBaseAccessor.getMetricHostAggregateFromResultSet(rs);
+
+        if (existingMetric == null) {
+          // First row
+          existingMetric = currentMetric;
+          hostAggregate = new MetricHostAggregate();
+          hostAggregateMap.put(currentMetric, hostAggregate);
+        }
+
+        if (existingMetric.equalsExceptTime(currentMetric)) {
+          // Recalculate totals with current metric
+          hostAggregate.updateAggregates(currentHostAggregate);
+
+        } else {
+          // Switched over to a new metric - start a new aggregate
+          hostAggregate = new MetricHostAggregate();
+          hostAggregate.updateAggregates(currentHostAggregate);
+          hostAggregateMap.put(currentMetric, hostAggregate);
+          existingMetric = currentMetric;
+        }
+      }
+
+      LOG.info("Saving " + hostAggregateMap.size() + " metric aggregates.");
+
+      hBaseAccessor.saveHostAggregateRecords(hostAggregateMap,
+        METRICS_AGGREGATE_MINUTE_TABLE_NAME);
+
+    } catch (SQLException e) {
+      LOG.error("Exception while aggregating metrics.", e);
+      success = false;
+    } catch (IOException e) {
+      LOG.error("Exception while aggregating metrics.", e);
+      success = false;
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+
+    LOG.info("End aggregation cycle @ " + new Date());
+    return success;
+  }
+
+  @Override
+  protected Long getSleepInterval() {
+    return SLEEP_INTERVAL;
+  }
+
+  @Override
+  protected Long getCheckpointCutOffInterval() {
+    return CHECKPOINT_CUT_OFF_INTERVAL;
+  }
+}
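
For reference, the merge loop in doWork() above relies on the result set
being ordered by the metric key, so rows for one metric arrive as a
contiguous run. A minimal standalone sketch of that pattern, with a
simplified stand-in for MetricHostAggregate (the class and field names
below are illustrative, not part of this patch):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SortedRunMergeSketch {
      // Simplified stand-in for MetricHostAggregate: sum/min/max/count.
      static class Aggregate {
        double sum, max = -Double.MAX_VALUE, min = Double.MAX_VALUE;
        long count;
        void update(double v) {
          sum += v;
          count++;
          if (v > max) max = v;
          if (v < min) min = v;
        }
      }

      // rows are (metricKey, value) pairs pre-sorted by metricKey, the way
      // the SQL result set above is; one Aggregate accumulates each run.
      static Map<String, Aggregate> merge(String[][] sortedRows) {
        Map<String, Aggregate> out = new LinkedHashMap<String, Aggregate>();
        String currentKey = null;
        Aggregate current = null;
        for (String[] row : sortedRows) {
          if (!row[0].equals(currentKey)) { // switched to a new metric run
            currentKey = row[0];
            current = new Aggregate();
            out.put(currentKey, current);
          }
          current.update(Double.parseDouble(row[1]));
        }
        return out;
      }
    }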

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java
new file mode 100644
index 0000000..10b2d70
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregator.java
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.Condition;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.GET_METRIC_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.prepareGetMetricsSqlStmt;
+
+/**
+ * Aggregates a metric across all hosts in the cluster.
+ */
+public class TimelineMetricClusterAggregator extends AbstractTimelineAggregator {
+  public static final long WAKE_UP_INTERVAL = 120000; // 2 minutes
+  public static final int TIME_SLICE_INTERVAL = 15000; // 15 seconds
+  private static final Log LOG = LogFactory.getLog(TimelineMetricClusterAggregator.class);
+
+  public TimelineMetricClusterAggregator(PhoenixHBaseAccessor hBaseAccessor,
+                                         String checkpointLocation) {
+    super(hBaseAccessor, checkpointLocation);
+  }
+
+  /**
+   * Read metrics written during the time interval and save the sum and total
+   * in the aggregate table.
+   *
+   * @param startTime Sample start time
+   * @param endTime Sample end time
+   * @return true if the aggregation cycle completed successfully
+   */
+  @Override
+  protected boolean doWork(long startTime, long endTime) {
+    LOG.info("Start aggregation cycle @ " + new Date());
+
+    boolean success = true;
+    Condition condition = new Condition(null, null, null, null, startTime,
+                                        endTime, null, true);
+    condition.setFetchSize(RESULTSET_FETCH_SIZE);
+    condition.setNoLimit();
+    condition.setStatement(GET_METRIC_SQL);
+
+    Connection conn = null;
+    PreparedStatement stmt = null;
+
+    try {
+      conn = hBaseAccessor.getConnection();
+      stmt = prepareGetMetricsSqlStmt(conn, condition);
+      LOG.info("Query issued @: " + new Date());
+      ResultSet rs = stmt.executeQuery();
+      LOG.info("Query returned @: " + new Date());
+      Map<TimelineClusterMetric, MetricClusterAggregate> aggregateClusterMetrics =
+        new HashMap<TimelineClusterMetric, MetricClusterAggregate>();
+      List<Long[]> timeSlices = new ArrayList<Long[]>();
+      // Create time slices
+      long sliceStartTime = startTime;
+      while (sliceStartTime < endTime) {
+        timeSlices.add(new Long[] { sliceStartTime, sliceStartTime + TIME_SLICE_INTERVAL });
+        sliceStartTime += TIME_SLICE_INTERVAL;
+      }
+
+      while (rs.next()) {
+        TimelineMetric metric =
+          PhoenixHBaseAccessor.getTimelineMetricFromResultSet(rs);
+
+        Map<TimelineClusterMetric, Double> clusterMetrics =
+          sliceFromTimelineMetric(metric, timeSlices);
+
+        if (clusterMetrics != null && !clusterMetrics.isEmpty()) {
+          for (Map.Entry<TimelineClusterMetric, Double> clusterMetricEntry :
+              clusterMetrics.entrySet()) {
+            TimelineClusterMetric clusterMetric = clusterMetricEntry.getKey();
+            MetricClusterAggregate aggregate = aggregateClusterMetrics.get(clusterMetric);
+            Double avgValue = clusterMetricEntry.getValue();
+
+            if (aggregate == null) {
+              aggregate = new MetricClusterAggregate(avgValue, 1, null,
+                avgValue, avgValue);
+              aggregateClusterMetrics.put(clusterMetric, aggregate);
+            } else {
+              aggregate.updateSum(avgValue);
+              aggregate.updateNumberOfHosts(1);
+              aggregate.updateMax(avgValue);
+              aggregate.updateMin(avgValue);
+            }
+          }
+        }
+      }
+      LOG.info("Saving " + aggregateClusterMetrics.size() + " metric aggregates.");
+
+      hBaseAccessor.saveClusterAggregateRecords(aggregateClusterMetrics);
+
+      LOG.info("End aggregation cycle @ " + new Date());
+
+    } catch (SQLException e) {
+      LOG.error("Exception while aggregating metrics.", e);
+      success = false;
+    } catch (IOException e) {
+      LOG.error("Exception while aggregating metrics.", e);
+      success = false;
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+
+    return success;
+  }
+
+  @Override
+  protected Long getSleepInterval() {
+    return WAKE_UP_INTERVAL;
+  }
+
+  @Override
+  protected Long getCheckpointCutOffInterval() {
+    return 600000L; // 10 minutes
+  }
+
+  private Map<TimelineClusterMetric, Double> sliceFromTimelineMetric(
+        TimelineMetric timelineMetric, List<Long[]> timeSlices) {
+
+    if (timelineMetric.getMetricValues().isEmpty()) {
+      return null;
+    }
+
+    Map<TimelineClusterMetric, Double> timelineClusterMetricMap =
+      new HashMap<TimelineClusterMetric, Double>();
+
+    for (Map.Entry<Long, Double> metric : timelineMetric.getMetricValues().entrySet()) {
+      // TODO: investigate null values - pre filter
+      if (metric.getValue() == null) {
+        continue;
+      }
+      Long timestamp = getSliceTimeForMetric(timeSlices,
+                       Long.parseLong(metric.getKey().toString()));
+      if (timestamp != -1) {
+        // Metric is within desired time range
+        TimelineClusterMetric clusterMetric = new TimelineClusterMetric(
+          timelineMetric.getMetricName(), timelineMetric.getAppId(),
+          timelineMetric.getInstanceId(), timestamp, timelineMetric.getType());
+
+        if (!timelineClusterMetricMap.containsKey(clusterMetric)) {
+          timelineClusterMetricMap.put(clusterMetric, metric.getValue());
+        } else {
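+          // Another datapoint fell into the same slice;
+          // average it with the running value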
+          Double oldValue = timelineClusterMetricMap.get(clusterMetric);
+          Double newValue = (oldValue + metric.getValue()) / 2;
+          timelineClusterMetricMap.put(clusterMetric, newValue);
+        }
+      }
+    }
+
+    return timelineClusterMetricMap;
+  }
+
+  /**
+   * Return the beginning of the time slice into which the metric fits,
+   * or -1 if the timestamp falls outside all slices.
+   */
+  private Long getSliceTimeForMetric(List<Long[]> timeSlices, Long timestamp) {
+    for (Long[] timeSlice : timeSlices) {
+      if (timestamp >= timeSlice[0] && timestamp < timeSlice[1]) {
+        return timeSlice[0];
+      }
+    }
+    return -1L;
+  }
+
+  public static class TimelineClusterMetric {
+    private String metricName;
+    private String appId;
+    private String instanceId;
+    private long timestamp;
+    private String type;
+
+    TimelineClusterMetric(String metricName, String appId, String instanceId,
+                          long timestamp, String type) {
+      this.metricName = metricName;
+      this.appId = appId;
+      this.instanceId = instanceId;
+      this.timestamp = timestamp;
+      this.type = type;
+    }
+
+    String getMetricName() {
+      return metricName;
+    }
+
+    String getAppId() {
+      return appId;
+    }
+
+    String getInstanceId() {
+      return instanceId;
+    }
+
+    long getTimestamp() {
+      return timestamp;
+    }
+
+    String getType() {
+      return type;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      TimelineClusterMetric that = (TimelineClusterMetric) o;
+
+      if (timestamp != that.timestamp) return false;
+      if (appId != null ? !appId.equals(that.appId) : that.appId != null)
+        return false;
+      if (instanceId != null ? !instanceId.equals(that.instanceId) : that.instanceId != null)
+        return false;
+      if (!metricName.equals(that.metricName)) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = metricName.hashCode();
+      result = 31 * result + (appId != null ? appId.hashCode() : 0);
+      result = 31 * result + (instanceId != null ? instanceId.hashCode() : 0);
+      result = 31 * result + (int) (timestamp ^ (timestamp >>> 32));
+      return result;
+    }
+
+    @Override
+    public String toString() {
+      return "TimelineClusterMetric{" +
+        "metricName='" + metricName + '\'' +
+        ", appId='" + appId + '\'' +
+        ", instanceId='" + instanceId + '\'' +
+        ", timestamp=" + timestamp +
+        '}';
+    }
+  }
+}
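
The essential move in this aggregator is snapping each host-level
datapoint onto a fixed 15-second grid before combining values across
hosts. The slicing step in isolation, as a runnable sketch (long[]
instead of Long[] for brevity; the constant mirrors the class above):

    import java.util.ArrayList;
    import java.util.List;

    public class TimeSliceSketch {
      static final int TIME_SLICE_INTERVAL = 15000; // 15 seconds

      // Build [t, t + 15s) windows covering [startTime, endTime).
      static List<long[]> createSlices(long startTime, long endTime) {
        List<long[]> slices = new ArrayList<long[]>();
        for (long t = startTime; t < endTime; t += TIME_SLICE_INTERVAL) {
          slices.add(new long[] { t, t + TIME_SLICE_INTERVAL });
        }
        return slices;
      }

      // Return the slice start a timestamp falls into, or -1 if it is
      // outside every slice, mirroring getSliceTimeForMetric() above.
      static long sliceFor(List<long[]> slices, long timestamp) {
        for (long[] slice : slices) {
          if (timestamp >= slice[0] && timestamp < slice[1]) {
            return slice[0];
          }
        }
        return -1L;
      }
    }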

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java
new file mode 100644
index 0000000..1caf809
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricClusterAggregatorHourly.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class TimelineMetricClusterAggregatorHourly extends AbstractTimelineAggregator {
+  private static final Log LOG = LogFactory.getLog(TimelineMetricClusterAggregatorHourly.class);
+  public static final long SLEEP_INTERVAL = 3600000;
+
+  public TimelineMetricClusterAggregatorHourly(PhoenixHBaseAccessor hBaseAccessor,
+                                               String checkpointLocation) {
+    super(hBaseAccessor, checkpointLocation);
+  }
+
+  @Override
+  protected boolean doWork(long startTime, long endTime) {
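+    // Hourly cluster aggregation is not implemented yet in this prototype.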
+    return false;
+  }
+
+  @Override
+  protected Long getSleepInterval() {
+    return SLEEP_INTERVAL;
+  }
+
+  @Override
+  protected Long getCheckpointCutOffInterval() {
+    return 7200000L; // 2 hours
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
new file mode 100644
index 0000000..5224450
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
+
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.List;
+
+public interface TimelineMetricStore {
+  /**
+   * This method retrieves metrics stored by the Timeline store.
+   *
+   * @param metricNames Names of the metrics, e.g.: cpu_user
+   * @param hostname Name of the host where the metric originated from
+   * @param applicationId Id of the application to which this metric belongs
+   * @param instanceId Application instance id.
+   * @param startTime Start timestamp
+   * @param endTime End timestamp
+   * @param limit Override default result limit
+   * @param groupedByHosts Group {@link TimelineMetric} by metric name, hostname,
+   *                app id and instance id
+   *
+   * @return {@link TimelineMetrics} matching the filter criteria
+   * @throws java.sql.SQLException
+   */
+  TimelineMetrics getTimelineMetrics(List<String> metricNames, String hostname,
+      String applicationId, String instanceId, Long startTime,
+      Long endTime, Integer limit, boolean groupedByHosts)
+    throws SQLException, IOException;
+
+  /**
+   * Return all records for a single metric satisfying the filter criteria.
+   * @return {@link TimelineMetric}
+   */
+  TimelineMetric getTimelineMetric(String metricName, String hostname,
+      String applicationId, String instanceId, Long startTime,
+      Long endTime, Integer limit)
+      throws SQLException, IOException;
+
+  /**
+   * Stores metric information to the timeline store. Any errors occurring for
+   * individual put request objects will be reported in the response.
+   *
+   * @param metrics A {@link TimelineMetrics} instance.
+   * @return A {@link org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse}.
+   * @throws SQLException
+   * @throws IOException
+   */
+  TimelinePutResponse putMetrics(TimelineMetrics metrics)
+    throws SQLException, IOException;
+}
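
A hypothetical caller of this interface writes a batch and reads one
metric back filtered by host and time range. In the sketch below the
metric name, hostname, and app id are illustrative; the batch is assumed
to be populated elsewhere, and the class is assumed to sit in the same
package as the interface:

    import java.util.Arrays;
    import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;

    public class MetricStoreUsageSketch {
      // Write a pre-built batch, then read back cpu_user for one host
      // over the last hour.
      static TimelineMetrics roundTrip(TimelineMetricStore store,
          TimelineMetrics batch) throws Exception {
        store.putMetrics(batch);
        long now = System.currentTimeMillis();
        return store.getTimelineMetrics(
            Arrays.asList("cpu_user"),  // metricNames
            "host1.example.com",        // hostname (illustrative)
            "HOST",                     // applicationId (illustrative)
            null,                       // instanceId
            now - 3600000L, now,        // startTime, endTime
            null,                       // limit: use the store default
            false);                     // groupedByHosts
      }
    }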

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java
new file mode 100644
index 0000000..7ba51af
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when
+ * <code>RMAppAttempt</code> finishes, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ApplicationAttemptFinishData {
+
+  @Public
+  @Unstable
+  public static ApplicationAttemptFinishData newInstance(
+      ApplicationAttemptId appAttemptId, String diagnosticsInfo,
+      String trackingURL, FinalApplicationStatus finalApplicationStatus,
+      YarnApplicationAttemptState yarnApplicationAttemptState) {
+    ApplicationAttemptFinishData appAttemptFD =
+        Records.newRecord(ApplicationAttemptFinishData.class);
+    appAttemptFD.setApplicationAttemptId(appAttemptId);
+    appAttemptFD.setDiagnosticsInfo(diagnosticsInfo);
+    appAttemptFD.setTrackingURL(trackingURL);
+    appAttemptFD.setFinalApplicationStatus(finalApplicationStatus);
+    appAttemptFD.setYarnApplicationAttemptState(yarnApplicationAttemptState);
+    return appAttemptFD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ApplicationAttemptId getApplicationAttemptId();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationAttemptId(
+      ApplicationAttemptId applicationAttemptId);
+
+  @Public
+  @Unstable
+  public abstract String getTrackingURL();
+
+  @Public
+  @Unstable
+  public abstract void setTrackingURL(String trackingURL);
+
+  @Public
+  @Unstable
+  public abstract String getDiagnosticsInfo();
+
+  @Public
+  @Unstable
+  public abstract void setDiagnosticsInfo(String diagnosticsInfo);
+
+  @Public
+  @Unstable
+  public abstract FinalApplicationStatus getFinalApplicationStatus();
+
+  @Public
+  @Unstable
+  public abstract void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus);
+
+  @Public
+  @Unstable
+  public abstract YarnApplicationAttemptState getYarnApplicationAttemptState();
+
+  @Public
+  @Unstable
+  public abstract void setYarnApplicationAttemptState(
+      YarnApplicationAttemptState yarnApplicationAttemptState);
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java
new file mode 100644
index 0000000..b759ab1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+
+/**
+ * The class contains all the fields that are stored persistently for
+ * <code>RMAppAttempt</code>.
+ */
+@Public
+@Unstable
+public class ApplicationAttemptHistoryData {
+
+  private ApplicationAttemptId applicationAttemptId;
+
+  private String host;
+
+  private int rpcPort;
+
+  private String trackingURL;
+
+  private String diagnosticsInfo;
+
+  private FinalApplicationStatus finalApplicationStatus;
+
+  private ContainerId masterContainerId;
+
+  private YarnApplicationAttemptState yarnApplicationAttemptState;
+
+  @Public
+  @Unstable
+  public static ApplicationAttemptHistoryData newInstance(
+      ApplicationAttemptId appAttemptId, String host, int rpcPort,
+      ContainerId masterContainerId, String diagnosticsInfo,
+      String trackingURL, FinalApplicationStatus finalApplicationStatus,
+      YarnApplicationAttemptState yarnApplicationAttemptState) {
+    ApplicationAttemptHistoryData appAttemptHD =
+        new ApplicationAttemptHistoryData();
+    appAttemptHD.setApplicationAttemptId(appAttemptId);
+    appAttemptHD.setHost(host);
+    appAttemptHD.setRPCPort(rpcPort);
+    appAttemptHD.setMasterContainerId(masterContainerId);
+    appAttemptHD.setDiagnosticsInfo(diagnosticsInfo);
+    appAttemptHD.setTrackingURL(trackingURL);
+    appAttemptHD.setFinalApplicationStatus(finalApplicationStatus);
+    appAttemptHD.setYarnApplicationAttemptState(yarnApplicationAttemptState);
+    return appAttemptHD;
+  }
+
+  @Public
+  @Unstable
+  public ApplicationAttemptId getApplicationAttemptId() {
+    return applicationAttemptId;
+  }
+
+  @Public
+  @Unstable
+  public void setApplicationAttemptId(
+      ApplicationAttemptId applicationAttemptId) {
+    this.applicationAttemptId = applicationAttemptId;
+  }
+
+  @Public
+  @Unstable
+  public String getHost() {
+    return host;
+  }
+
+  @Public
+  @Unstable
+  public void setHost(String host) {
+    this.host = host;
+  }
+
+  @Public
+  @Unstable
+  public int getRPCPort() {
+    return rpcPort;
+  }
+
+  @Public
+  @Unstable
+  public void setRPCPort(int rpcPort) {
+    this.rpcPort = rpcPort;
+  }
+
+  @Public
+  @Unstable
+  public String getTrackingURL() {
+    return trackingURL;
+  }
+
+  @Public
+  @Unstable
+  public void setTrackingURL(String trackingURL) {
+    this.trackingURL = trackingURL;
+  }
+
+  @Public
+  @Unstable
+  public String getDiagnosticsInfo() {
+    return diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    this.diagnosticsInfo = diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public FinalApplicationStatus getFinalApplicationStatus() {
+    return finalApplicationStatus;
+  }
+
+  @Public
+  @Unstable
+  public void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus) {
+    this.finalApplicationStatus = finalApplicationStatus;
+  }
+
+  @Public
+  @Unstable
+  public ContainerId getMasterContainerId() {
+    return masterContainerId;
+  }
+
+  @Public
+  @Unstable
+  public void setMasterContainerId(ContainerId masterContainerId) {
+    this.masterContainerId = masterContainerId;
+  }
+
+  @Public
+  @Unstable
+  public YarnApplicationAttemptState getYarnApplicationAttemptState() {
+    return yarnApplicationAttemptState;
+  }
+
+  @Public
+  @Unstable
+  public void setYarnApplicationAttemptState(
+      YarnApplicationAttemptState yarnApplicationAttemptState) {
+    this.yarnApplicationAttemptState = yarnApplicationAttemptState;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java
new file mode 100644
index 0000000..7ca43fa
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when
+ * <code>RMAppAttempt</code> starts, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ApplicationAttemptStartData {
+
+  @Public
+  @Unstable
+  public static ApplicationAttemptStartData newInstance(
+      ApplicationAttemptId appAttemptId, String host, int rpcPort,
+      ContainerId masterContainerId) {
+    ApplicationAttemptStartData appAttemptSD =
+        Records.newRecord(ApplicationAttemptStartData.class);
+    appAttemptSD.setApplicationAttemptId(appAttemptId);
+    appAttemptSD.setHost(host);
+    appAttemptSD.setRPCPort(rpcPort);
+    appAttemptSD.setMasterContainerId(masterContainerId);
+    return appAttemptSD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ApplicationAttemptId getApplicationAttemptId();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationAttemptId(
+      ApplicationAttemptId applicationAttemptId);
+
+  @Public
+  @Unstable
+  public abstract String getHost();
+
+  @Public
+  @Unstable
+  public abstract void setHost(String host);
+
+  @Public
+  @Unstable
+  public abstract int getRPCPort();
+
+  @Public
+  @Unstable
+  public abstract void setRPCPort(int rpcPort);
+
+  @Public
+  @Unstable
+  public abstract ContainerId getMasterContainerId();
+
+  @Public
+  @Unstable
+  public abstract void setMasterContainerId(ContainerId masterContainerId);
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java
new file mode 100644
index 0000000..997fa6c
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when <code>RMApp</code>
+ * finishes, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ApplicationFinishData {
+
+  @Public
+  @Unstable
+  public static ApplicationFinishData newInstance(ApplicationId applicationId,
+      long finishTime, String diagnosticsInfo,
+      FinalApplicationStatus finalApplicationStatus,
+      YarnApplicationState yarnApplicationState) {
+    ApplicationFinishData appFD =
+        Records.newRecord(ApplicationFinishData.class);
+    appFD.setApplicationId(applicationId);
+    appFD.setFinishTime(finishTime);
+    appFD.setDiagnosticsInfo(diagnosticsInfo);
+    appFD.setFinalApplicationStatus(finalApplicationStatus);
+    appFD.setYarnApplicationState(yarnApplicationState);
+    return appFD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ApplicationId getApplicationId();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationId(ApplicationId applicationId);
+
+  @Public
+  @Unstable
+  public abstract long getFinishTime();
+
+  @Public
+  @Unstable
+  public abstract void setFinishTime(long finishTime);
+
+  @Public
+  @Unstable
+  public abstract String getDiagnosticsInfo();
+
+  @Public
+  @Unstable
+  public abstract void setDiagnosticsInfo(String diagnosticsInfo);
+
+  @Public
+  @Unstable
+  public abstract FinalApplicationStatus getFinalApplicationStatus();
+
+  @Public
+  @Unstable
+  public abstract void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus);
+
+  @Public
+  @Unstable
+  public abstract YarnApplicationState getYarnApplicationState();
+
+  @Public
+  @Unstable
+  public abstract void setYarnApplicationState(
+      YarnApplicationState yarnApplicationState);
+
+}
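
All of these record classes share the static newInstance factory
convention shown above. Persisting a finished application, for example,
could look like this (the id, diagnostics, and states are illustrative
values, and Records.newRecord resolves the PB implementation at runtime):

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
    import org.apache.hadoop.yarn.api.records.YarnApplicationState;

    public class RecordFactorySketch {
      static ApplicationFinishData finishedApp() {
        ApplicationId appId = ApplicationId.newInstance(
            System.currentTimeMillis(), 1);   // clusterTimestamp, id
        return ApplicationFinishData.newInstance(
            appId,
            System.currentTimeMillis(),       // finish time
            "Completed successfully",         // diagnostics info
            FinalApplicationStatus.SUCCEEDED,
            YarnApplicationState.FINISHED);
      }
    }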


[22/22] git commit: AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
AMBARI-5707. Metrics system prototype implementation. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/865d187e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/865d187e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/865d187e

Branch: refs/heads/branch-metrics-dev
Commit: 865d187e33ea028fcf34541b2f634f887db5aa7c
Parents: 802df76
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Mon Sep 22 11:01:05 2014 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Mon Sep 22 11:01:05 2014 -0700

----------------------------------------------------------------------
 .../ambari-metrics-hadoop-sink/pom.xml          |  132 +
 .../src/main/assemblies/empty.xml               |   21 +
 .../src/main/assemblies/sink.xml                |   34 +
 .../timeline/AbstractTimelineMetricsSink.java   |  101 +
 .../metrics2/sink/timeline/TimelineMetric.java  |  170 +
 .../metrics2/sink/timeline/TimelineMetrics.java |  102 +
 .../sink/timeline/TimelineMetricsCache.java     |  128 +
 .../sink/timeline/TimelineMetricsSink.java      |  211 ++
 .../pom.xml                                     |  248 ++
 .../src/main/assemblies/ats.xml                 |   34 +
 .../src/main/assemblies/empty.xml               |   21 +
 .../main/conf/hbase-site-metrics-service.xml    |   72 +
 .../org/apache/hadoop/yarn/conf/YarnConfig.java |   26 +
 .../ApplicationHistoryClientService.java        |  211 ++
 .../ApplicationHistoryManager.java              |   28 +
 .../ApplicationHistoryManagerImpl.java          |  243 ++
 .../ApplicationHistoryReader.java               |  117 +
 .../ApplicationHistoryServer.java               |  190 +
 .../ApplicationHistoryStore.java                |   37 +
 .../ApplicationHistoryWriter.java               |  112 +
 .../FileSystemApplicationHistoryStore.java      |  784 +++++
 .../MemoryApplicationHistoryStore.java          |  274 ++
 .../NullApplicationHistoryStore.java            |  127 +
 .../timeline/AbstractTimelineAggregator.java    |  294 ++
 .../timeline/HBaseTimelineMetricStore.java      |  185 +
 .../metrics/timeline/PhoenixHBaseAccessor.java  |  522 +++
 .../metrics/timeline/PhoenixTransactSQL.java    |  398 +++
 .../TimelineMetricAggregatorHourly.java         |  142 +
 .../TimelineMetricAggregatorMinute.java         |  142 +
 .../TimelineMetricClusterAggregator.java        |  259 ++
 .../TimelineMetricClusterAggregatorHourly.java  |   48 +
 .../metrics/timeline/TimelineMetricStore.java   |   70 +
 .../records/ApplicationAttemptFinishData.java   |   95 +
 .../records/ApplicationAttemptHistoryData.java  |  171 +
 .../records/ApplicationAttemptStartData.java    |   82 +
 .../records/ApplicationFinishData.java          |   94 +
 .../records/ApplicationHistoryData.java         |  213 ++
 .../records/ApplicationStartData.java           |  106 +
 .../records/ContainerFinishData.java            |   90 +
 .../records/ContainerHistoryData.java           |  182 +
 .../records/ContainerStartData.java             |   92 +
 .../pb/ApplicationAttemptFinishDataPBImpl.java  |  239 ++
 .../pb/ApplicationAttemptStartDataPBImpl.java   |  208 ++
 .../impl/pb/ApplicationFinishDataPBImpl.java    |  226 ++
 .../impl/pb/ApplicationStartDataPBImpl.java     |  229 ++
 .../impl/pb/ContainerFinishDataPBImpl.java      |  204 ++
 .../impl/pb/ContainerStartDataPBImpl.java       |  258 ++
 .../timeline/EntityIdentifier.java              |  100 +
 .../timeline/GenericObjectMapper.java           |  135 +
 .../timeline/LeveldbTimelineStore.java          | 1473 ++++++++
 .../timeline/MemoryTimelineStore.java           |  360 ++
 .../timeline/NameValuePair.java                 |   59 +
 .../timeline/TimelineReader.java                |  155 +
 .../timeline/TimelineStore.java                 |   29 +
 .../timeline/TimelineWriter.java                |   46 +
 .../timeline/package-info.java                  |   20 +
 .../webapp/AHSController.java                   |   55 +
 .../webapp/AHSLogsPage.java                     |   55 +
 .../webapp/AHSView.java                         |   90 +
 .../webapp/AHSWebApp.java                       |   63 +
 .../webapp/AHSWebServices.java                  |  162 +
 .../webapp/AppAttemptPage.java                  |   69 +
 .../webapp/AppPage.java                         |   71 +
 .../webapp/ContainerPage.java                   |   41 +
 .../webapp/JAXBContextResolver.java             |   64 +
 .../webapp/NavBlock.java                        |   51 +
 .../webapp/TimelineWebServices.java             |  504 +++
 .../yarn/util/timeline/TimelineUtilsExt.java    |   39 +
 .../ApplicationHistoryStoreTestUtils.java       |   84 +
 .../TestApplicationHistoryClientService.java    |  206 ++
 .../TestApplicationHistoryManagerImpl.java      |   74 +
 .../TestApplicationHistoryServer.java           |   77 +
 .../TestFileSystemApplicationHistoryStore.java  |  233 ++
 .../TestMemoryApplicationHistoryStore.java      |  204 ++
 .../timeline/TestPhoenixTransactSQL.java        |   69 +
 .../timeline/TestTimelineMetricStore.java       |   81 +
 .../timeline/TestGenericObjectMapper.java       |  102 +
 .../timeline/TestLeveldbTimelineStore.java      |  253 ++
 .../timeline/TestMemoryTimelineStore.java       |   83 +
 .../timeline/TimelineStoreTestUtils.java        |  789 +++++
 .../webapp/TestAHSWebApp.java                   |  182 +
 .../webapp/TestAHSWebServices.java              |  303 ++
 .../webapp/TestTimelineWebServices.java         |  391 +++
 .../ambari-metrics-host-monitoring/pom.xml      |  152 +
 .../src/main/python/__init__.py                 |   21 +
 .../src/main/python/core/__init__.py            |   34 +
 .../main/python/core/application_metric_map.py  |  127 +
 .../src/main/python/core/config_reader.py       |  127 +
 .../src/main/python/core/controller.py          |  126 +
 .../src/main/python/core/emitter.py             |   90 +
 .../src/main/python/core/event_definition.py    |   85 +
 .../src/main/python/core/host_info.py           |  187 +
 .../src/main/python/core/metric_collector.py    |   91 +
 .../src/main/python/main.py                     |   49 +
 .../src/main/python/psutil/LICENSE              |   27 +
 .../src/main/python/psutil/MANIFEST.in          |   14 +
 .../src/main/python/psutil/Makefile             |   77 +
 .../src/main/python/psutil/README               |  270 ++
 .../src/main/python/psutil/build.out            |  137 +
 .../src/main/python/psutil/build.py             |   57 +
 .../psutil/__init__.py                          | 1987 +++++++++++
 .../lib.macosx-10.8-intel-2.7/psutil/_common.py |  258 ++
 .../lib.macosx-10.8-intel-2.7/psutil/_compat.py |  433 +++
 .../lib.macosx-10.8-intel-2.7/psutil/_psbsd.py  |  389 +++
 .../psutil/_pslinux.py                          | 1225 +++++++
 .../lib.macosx-10.8-intel-2.7/psutil/_psosx.py  |  341 ++
 .../psutil/_psposix.py                          |  157 +
 .../psutil/_pssunos.py                          |  533 +++
 .../psutil/_pswindows.py                        |  485 +++
 .../src/main/python/psutil/docs/Makefile        |  177 +
 .../src/main/python/psutil/docs/README          |   15 +
 .../python/psutil/docs/_static/copybutton.js    |   57 +
 .../main/python/psutil/docs/_static/sidebar.js  |  161 +
 .../python/psutil/docs/_template/globaltoc.html |   12 +
 .../psutil/docs/_template/indexcontent.html     |    4 +
 .../psutil/docs/_template/indexsidebar.html     |   16 +
 .../main/python/psutil/docs/_template/page.html |   66 +
 .../_themes/pydoctheme/static/pydoctheme.css    |  187 +
 .../psutil/docs/_themes/pydoctheme/theme.conf   |   23 +
 .../src/main/python/psutil/docs/conf.py         |  253 ++
 .../src/main/python/psutil/docs/index.rst       | 1247 +++++++
 .../src/main/python/psutil/docs/make.bat        |  242 ++
 .../main/python/psutil/examples/disk_usage.py   |   63 +
 .../src/main/python/psutil/examples/free.py     |   42 +
 .../src/main/python/psutil/examples/iotop.py    |  178 +
 .../src/main/python/psutil/examples/killall.py  |   32 +
 .../src/main/python/psutil/examples/meminfo.py  |   69 +
 .../src/main/python/psutil/examples/netstat.py  |   65 +
 .../src/main/python/psutil/examples/nettop.py   |  165 +
 .../src/main/python/psutil/examples/pmap.py     |   58 +
 .../python/psutil/examples/process_detail.py    |  162 +
 .../src/main/python/psutil/examples/top.py      |  232 ++
 .../src/main/python/psutil/examples/who.py      |   34 +
 .../src/main/python/psutil/make.bat             |  176 +
 .../src/main/python/psutil/psutil/__init__.py   | 1987 +++++++++++
 .../src/main/python/psutil/psutil/_common.py    |  258 ++
 .../src/main/python/psutil/psutil/_compat.py    |  433 +++
 .../src/main/python/psutil/psutil/_psbsd.py     |  389 +++
 .../src/main/python/psutil/psutil/_pslinux.py   | 1225 +++++++
 .../src/main/python/psutil/psutil/_psosx.py     |  341 ++
 .../src/main/python/psutil/psutil/_psposix.py   |  157 +
 .../src/main/python/psutil/psutil/_pssunos.py   |  533 +++
 .../src/main/python/psutil/psutil/_psutil_bsd.c | 2212 ++++++++++++
 .../src/main/python/psutil/psutil/_psutil_bsd.h |   51 +
 .../main/python/psutil/psutil/_psutil_common.c  |   37 +
 .../main/python/psutil/psutil/_psutil_common.h  |   10 +
 .../main/python/psutil/psutil/_psutil_linux.c   |  510 +++
 .../main/python/psutil/psutil/_psutil_linux.h   |   20 +
 .../src/main/python/psutil/psutil/_psutil_osx.c | 1881 ++++++++++
 .../src/main/python/psutil/psutil/_psutil_osx.h |   41 +
 .../main/python/psutil/psutil/_psutil_posix.c   |  128 +
 .../main/python/psutil/psutil/_psutil_posix.h   |   10 +
 .../main/python/psutil/psutil/_psutil_sunos.c   | 1290 +++++++
 .../main/python/psutil/psutil/_psutil_sunos.h   |   27 +
 .../main/python/psutil/psutil/_psutil_windows.c | 3241 ++++++++++++++++++
 .../main/python/psutil/psutil/_psutil_windows.h |   70 +
 .../src/main/python/psutil/psutil/_pswindows.py |  485 +++
 .../psutil/psutil/arch/bsd/process_info.c       |  285 ++
 .../psutil/psutil/arch/bsd/process_info.h       |   15 +
 .../psutil/psutil/arch/osx/process_info.c       |  293 ++
 .../psutil/psutil/arch/osx/process_info.h       |   16 +
 .../python/psutil/psutil/arch/windows/glpi.h    |   41 +
 .../psutil/psutil/arch/windows/ntextapi.h       |  287 ++
 .../psutil/arch/windows/process_handles.c       |  336 ++
 .../psutil/arch/windows/process_handles.h       |   10 +
 .../psutil/psutil/arch/windows/process_info.c   |  443 +++
 .../psutil/psutil/arch/windows/process_info.h   |   17 +
 .../psutil/psutil/arch/windows/security.c       |  238 ++
 .../psutil/psutil/arch/windows/security.h       |   17 +
 .../src/main/python/psutil/setup.py             |  198 ++
 ambari-metrics/pom.xml                          |  149 +
 pom.xml                                         |    7 +
 172 files changed, 43647 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml b/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
new file mode 100644
index 0000000..310a53f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                             http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>ambari-metrics</artifactId>
+    <groupId>org.apache.ambari</groupId>
+    <version>1.3.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>ambari-metrics-hadoop-sink</artifactId>
+  <packaging>jar</packaging>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptors>
+            <descriptor>src/main/assemblies/sink.xml</descriptor>
+          </descriptors>
+          <tarLongFileMode>gnu</tarLongFileMode>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.8</version>
+        <executions>
+          <execution>
+            <id>parse-version</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>parse-version</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>regex-property</id>
+            <goals>
+              <goal>regex-property</goal>
+            </goals>
+            <configuration>
+              <name>ambariVersion</name>
+              <value>${project.version}</value>
+              <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*</regex>
+              <replacement>$1.$2.$3</replacement>
+              <failIfNoMatch>false</failIfNoMatch>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>2.4.0</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+      <version>3.2.1</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+      <version>1.6</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+    </dependency>
+  </dependencies>
+
+</project>
\ No newline at end of file
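
The regex-property execution above derives a three-part ambariVersion
from the full Maven version string; the substitution it configures is
equivalent to this Java fragment:

    String version = "1.3.0-SNAPSHOT";  // ${project.version} in this pom
    String ambariVersion = version.replaceAll(
        "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\.|-).*", "$1.$2.$3");
    // ambariVersion is now "1.3.0"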

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/empty.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/empty.xml b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/empty.xml
new file mode 100644
index 0000000..35738b1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/empty.xml
@@ -0,0 +1,21 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+    <id>empty</id>
+    <formats/>
+</assembly>

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/sink.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/sink.xml b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/sink.xml
new file mode 100644
index 0000000..21a6b36
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/assemblies/sink.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly>
+  <!--This 'dist' id is not appended to the produced bundle because we do this:
+    http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers
+  -->
+  <id>dist</id>
+  <formats>
+    <format>dir</format>
+    <format>tar.gz</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <files>
+    <file>
+      <source>${project.build.directory}/${artifact.artifactId}-${artifact.version}.jar</source>
+      <outputDirectory>ambari-metrics-${project.version}/lib/ambari-metrics</outputDirectory>
+    </file>
+  </files>
+</assembly>

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
new file mode 100644
index 0000000..2c42274
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.util.Servers;
+import org.apache.hadoop.net.DNS;
+import java.net.SocketAddress;
+import java.net.UnknownHostException;
+import java.util.List;
+
+public abstract class AbstractTimelineMetricsSink implements MetricsSink {
+
+  public final Log LOG = LogFactory.getLog(this.getClass());
+
+  private SubsetConfiguration conf;
+  private String hostName = "UNKNOWN.example.com";
+  private String serviceName = "";
+  private static final String COLLECTOR_HOST_PROPERTY = "collector";
+  private static final int DEFAULT_PORT = 8188;
+
+  private List<? extends SocketAddress> metricsServers;
+  private String collectorUri;
+
+  @Override
+  public void init(SubsetConfiguration conf) {
+    this.conf = conf;
+    LOG.info("Initializing Timeline metrics sink.");
+
+    // Prefer an explicitly configured hostname; fall back to DNS lookup.
+    if (conf.getString("slave.host.name") != null) {
+      hostName = conf.getString("slave.host.name");
+    } else {
+      try {
+        hostName = DNS.getDefaultHost(
+          conf.getString("dfs.datanode.dns.interface", "default"),
+          conf.getString("dfs.datanode.dns.nameserver", "default"));
+      } catch (UnknownHostException uhe) {
+        LOG.error(uhe);
+        hostName = "UNKNOWN.example.com";
+      }
+    }
+
+    serviceName = getFirstConfigPrefix(conf);
+
+    // Load collector configs
+    metricsServers = Servers.parse(conf.getString(COLLECTOR_HOST_PROPERTY),
+      DEFAULT_PORT);
+
+    if (metricsServers == null || metricsServers.isEmpty()) {
+      LOG.error("No Metric collector configured.");
+    } else {
+      collectorUri = "http://" + conf.getString(COLLECTOR_HOST_PROPERTY).trim()
+        + "/ws/v1/timeline/metrics";
+    }
+  }
+
+  protected String getHostName() {
+    return hostName;
+  }
+
+  protected String getServiceName() {
+    return serviceName;
+  }
+
+  private String getFirstConfigPrefix(SubsetConfiguration conf) {
+    while (conf.getParent() instanceof SubsetConfiguration) {
+      conf = (SubsetConfiguration) conf.getParent();
+    }
+    return conf.getPrefix();
+  }
+
+  protected SocketAddress getServerSocketAddress() {
+    if (metricsServers != null && !metricsServers.isEmpty()) {
+      return metricsServers.get(0);
+    }
+    return null;
+  }
+
+  protected String getCollectorUri() {
+    return collectorUri;
+  }
+}
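For context, a concrete sink only has to supply putMetrics() and flush() on top of this base class. A minimal sketch (the class name and log-only body below are illustrative, not part of this patch):

    public class LoggingTimelineSink extends AbstractTimelineMetricsSink {
      @Override
      public void putMetrics(org.apache.hadoop.metrics2.MetricsRecord record) {
        // A real sink would convert the record into TimelineMetrics and
        // POST it to getCollectorUri(); this sketch only logs the target.
        LOG.info("Would send " + record.name() + " to " + getCollectorUri());
      }

      @Override
      public void flush() {
        // No buffering in this sketch.
      }
    }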

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
new file mode 100644
index 0000000..1b35d92
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.Map;
+import java.util.TreeMap;
+
+@XmlRootElement(name = "metric")
+@XmlAccessorType(XmlAccessType.NONE)
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class TimelineMetric implements Comparable<TimelineMetric> {
+
+  private String metricName;
+  private String appId;
+  private String instanceId;
+  private String hostName;
+  private long timestamp;
+  private long startTime;
+  private String type;
+  private Map<Long, Double> metricValues = new TreeMap<Long, Double>();
+
+  @XmlElement(name = "metricname")
+  public String getMetricName() {
+    return metricName;
+  }
+
+  public void setMetricName(String metricName) {
+    this.metricName = metricName;
+  }
+
+  @XmlElement(name = "appid")
+  public String getAppId() {
+    return appId;
+  }
+
+  public void setAppId(String appId) {
+    this.appId = appId;
+  }
+
+  @XmlElement(name = "instanceid")
+  public String getInstanceId() {
+    return instanceId;
+  }
+
+  public void setInstanceId(String instanceId) {
+    this.instanceId = instanceId;
+  }
+
+  @XmlElement(name = "hostname")
+  public String getHostName() {
+    return hostName;
+  }
+
+  public void setHostName(String hostName) {
+    this.hostName = hostName;
+  }
+
+  @XmlElement(name = "timestamp")
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  public void setTimestamp(long timestamp) {
+    this.timestamp = timestamp;
+  }
+
+  @XmlElement(name = "starttime")
+  public long getStartTime() {
+    return startTime;
+  }
+
+  public void setStartTime(long startTime) {
+    this.startTime = startTime;
+  }
+
+  @XmlElement(name = "type")
+  public String getType() {
+    return type;
+  }
+
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  @XmlElement(name = "metrics")
+  public Map<Long, Double> getMetricValues() {
+    return metricValues;
+  }
+
+  public void setMetricValues(Map<Long, Double> metricValues) {
+    this.metricValues = metricValues;
+  }
+
+  public void addMetricValues(Map<Long, Double> metricValues) {
+    this.metricValues.putAll(metricValues);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    TimelineMetric metric = (TimelineMetric) o;
+
+    if (!metricName.equals(metric.metricName)) return false;
+    if (hostName != null ? !hostName.equals(metric.hostName) : metric.hostName != null)
+      return false;
+    if (!appId.equals(metric.appId)) return false;
+    if (instanceId != null ? !instanceId.equals(metric.instanceId) : metric.instanceId != null)
+      return false;
+    if (timestamp != metric.timestamp) return false;
+    if (startTime != metric.startTime) return false;
+
+    return true;
+  }
+
+  public boolean equalsExceptTime(TimelineMetric metric) {
+    if (!metricName.equals(metric.metricName)) return false;
+    if (hostName != null ? !hostName.equals(metric.hostName) : metric.hostName != null)
+      return false;
+    if (!appId.equals(metric.appId)) return false;
+    if (instanceId != null ? !instanceId.equals(metric.instanceId) : metric.instanceId != null)
+      return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = metricName.hashCode();
+    result = 31 * result + appId.hashCode();
+    result = 31 * result + (instanceId != null ? instanceId.hashCode() : 0);
+    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
+    result = 31 * result + (int) (timestamp ^ (timestamp >>> 32));
+    return result;
+  }
+
+  @Override
+  public int compareTo(TimelineMetric other) {
+    if (timestamp > other.timestamp) {
+      return -1;
+    } else if (timestamp < other.timestamp) {
+      return 1;
+    } else {
+      return metricName.compareTo(other.metricName);
+    }
+  }
+}
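For reference, populating one of these beans by hand looks like the following sketch (the metric name and host are placeholders):

    TimelineMetric metric = new TimelineMetric();
    metric.setMetricName("dfs.datanode.BlocksRead"); // placeholder name
    metric.setAppId("datanode");
    metric.setHostName("host1.example.com");
    long now = System.currentTimeMillis();
    metric.setStartTime(now);
    metric.setTimestamp(now);
    // Values are keyed by timestamp; the TreeMap keeps them time-ordered.
    metric.getMetricValues().put(now, 42.0);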

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetrics.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetrics.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetrics.java
new file mode 100644
index 0000000..a6c925a
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetrics.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * The class that hosts a list of timeline metrics.
+ */
+@XmlRootElement(name = "metrics")
+@XmlAccessorType(XmlAccessType.NONE)
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class TimelineMetrics {
+
+  private List<TimelineMetric> allMetrics = new ArrayList<TimelineMetric>();
+
+  public TimelineMetrics() {}
+
+  @XmlElement(name = "metrics")
+  public List<TimelineMetric> getMetrics() {
+    return allMetrics;
+  }
+
+  public void setMetrics(List<TimelineMetric> allMetrics) {
+    this.allMetrics = allMetrics;
+  }
+
+  private boolean isEqualTimelineMetrics(TimelineMetric metric1,
+                                         TimelineMetric metric2) {
+
+    if (!metric1.getMetricName().equals(metric2.getMetricName())) {
+      return false;
+    }
+
+    // Use early returns so a hostName mismatch is not masked by a
+    // subsequent appId comparison.
+    if (metric1.getHostName() != null
+        && !metric1.getHostName().equals(metric2.getHostName())) {
+      return false;
+    }
+
+    if (metric1.getAppId() != null
+        && !metric1.getAppId().equals(metric2.getAppId())) {
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
+   * Merge with existing TimelineMetric if everything except startTime is
+   * the same.
+   * @param metric {@link TimelineMetric}
+   */
+  public void addOrMergeTimelineMetric(TimelineMetric metric) {
+    TimelineMetric metricToMerge = null;
+
+    if (!allMetrics.isEmpty()) {
+      for (TimelineMetric timelineMetric : allMetrics) {
+        if (timelineMetric.equalsExceptTime(metric)) {
+          metricToMerge = timelineMetric;
+          break;
+        }
+      }
+    }
+
+    if (metricToMerge != null) {
+      metricToMerge.addMetricValues(metric.getMetricValues());
+      if (metricToMerge.getTimestamp() > metric.getTimestamp()) {
+        metricToMerge.setTimestamp(metric.getTimestamp());
+      }
+      if (metricToMerge.getStartTime() > metric.getStartTime()) {
+        metricToMerge.setStartTime(metric.getStartTime());
+      }
+    } else {
+      allMetrics.add(metric);
+    }
+  }
+}
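To illustrate the merge semantics above: two metrics that are equal except for time collapse into one entry whose value map is the union and whose startTime keeps the earlier value. A sketch with placeholder names and times:

    TimelineMetrics collection = new TimelineMetrics();

    TimelineMetric first = new TimelineMetric();
    first.setMetricName("cpu.user");   // placeholder
    first.setAppId("datanode");
    first.setStartTime(1000L);
    first.getMetricValues().put(1000L, 1.0);

    TimelineMetric second = new TimelineMetric();
    second.setMetricName("cpu.user");
    second.setAppId("datanode");
    second.setStartTime(2000L);
    second.getMetricValues().put(2000L, 2.0);

    collection.addOrMergeTimelineMetric(first);
    collection.addOrMergeTimelineMetric(second);
    // collection.getMetrics().size() == 1; the single entry now holds
    // both values and keeps the earlier startTime (1000L).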

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsCache.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsCache.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsCache.java
new file mode 100644
index 0000000..36aaec2
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsCache.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class TimelineMetricsCache {
+
+  private final TimelineMetricHolder timelineMetricCache = new TimelineMetricHolder();
+  private static final Log LOG = LogFactory.getLog(TimelineMetricsCache.class);
+  static final int MAX_RECS_PER_NAME_DEFAULT = 10000;
+  static final int MAX_EVICTION_TIME_MILLIS = 59000; // ~ 1 min
+  private final int maxRecsPerName;
+  private final int maxEvictionTimeInMillis;
+
+  TimelineMetricsCache(int maxRecsPerName, int maxEvictionTimeInMillis) {
+    this.maxRecsPerName = maxRecsPerName;
+    this.maxEvictionTimeInMillis = maxEvictionTimeInMillis;
+  }
+
+  class TimelineMetricWrapper {
+    private long timeDiff = -1;
+    private long oldestTimestamp = -1;
+    private TimelineMetric timelineMetric;
+
+    TimelineMetricWrapper(TimelineMetric timelineMetric) {
+      this.timelineMetric = timelineMetric;
+      this.oldestTimestamp = timelineMetric.getStartTime();
+    }
+
+    private void updateTimeDiff(long timestamp) {
+      if (oldestTimestamp != -1 && timestamp > oldestTimestamp) {
+        timeDiff = timestamp - oldestTimestamp;
+      } else {
+        oldestTimestamp = timestamp;
+      }
+    }
+
+    public void putMetric(TimelineMetric metric) {
+      this.timelineMetric.addMetricValues(metric.getMetricValues());
+      updateTimeDiff(metric.getStartTime());
+    }
+
+    public long getTimeDiff() {
+      return timeDiff;
+    }
+
+    public TimelineMetric getTimelineMetric() {
+      return timelineMetric;
+    }
+  }
+
+  // TODO: Change to ConcurrentHashMap with weighted eviction
+  class TimelineMetricHolder extends LinkedHashMap<String, TimelineMetricWrapper> {
+    private static final long serialVersionUID = 1L;
+    private boolean gotOverflow = false;
+
+    @Override
+    protected boolean removeEldestEntry(Map.Entry<String, TimelineMetricWrapper> eldest) {
+      boolean overflow = size() > maxRecsPerName;
+      if (overflow && !gotOverflow) {
+        LOG.warn("Metrics cache overflow at "+ size() +" for "+ eldest);
+        gotOverflow = true;
+      }
+      return overflow;
+    }
+
+    public TimelineMetric evict(String metricName) {
+      TimelineMetricWrapper metricWrapper = this.get(metricName);
+
+      if (metricWrapper == null
+        || metricWrapper.getTimeDiff() < maxEvictionTimeInMillis) {
+        return null;
+      }
+
+      TimelineMetric timelineMetric = metricWrapper.getTimelineMetric();
+      this.remove(metricName);
+
+      return timelineMetric;
+    }
+
+    public void put(String metricName, TimelineMetric timelineMetric) {
+
+      TimelineMetricWrapper metric = this.get(metricName);
+      if (metric == null) {
+        this.put(metricName, new TimelineMetricWrapper(timelineMetric));
+      } else {
+        metric.putMetric(timelineMetric);
+      }
+    }
+  }
+
+  public TimelineMetric getTimelineMetric(String metricName) {
+    if (timelineMetricCache.containsKey(metricName)) {
+      return timelineMetricCache.evict(metricName);
+    }
+
+    return null;
+  }
+
+  public void putTimelineMetric(TimelineMetric timelineMetric) {
+    timelineMetricCache.put(timelineMetric.getMetricName(), timelineMetric);
+  }
+}
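The intended call pattern is put-then-evict per metric name; getTimelineMetric() returns null until the values buffered for a name span more than the eviction window. A sketch (same-package access assumed, since the constructor is package-private):

    TimelineMetricsCache cache = new TimelineMetricsCache(
        TimelineMetricsCache.MAX_RECS_PER_NAME_DEFAULT,
        TimelineMetricsCache.MAX_EVICTION_TIME_MILLIS);

    TimelineMetric m = new TimelineMetric();
    m.setMetricName("jvm.memHeapUsedM"); // placeholder
    m.setStartTime(System.currentTimeMillis());
    m.getMetricValues().put(m.getStartTime(), 128.0);

    cache.putTimelineMetric(m);
    // Null here: a single datapoint has no time spread yet. Once puts
    // for this name cover more than ~59s, the accumulated metric is
    // returned and removed from the cache.
    TimelineMetric ready = cache.getTimelineMetric("jvm.memHeapUsedM");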

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsSink.java
new file mode 100644
index 0000000..a843428
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricsSink.java
@@ -0,0 +1,211 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.httpclient.HttpClient;
+import org.apache.commons.httpclient.methods.PostMethod;
+import org.apache.commons.httpclient.methods.StringRequestEntity;
+import org.apache.commons.lang.ClassUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsException;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.impl.MsInfo;
+import org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricsCache;
+import org.codehaus.jackson.map.AnnotationIntrospector;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;
+import java.io.IOException;
+import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class TimelineMetricsSink extends AbstractTimelineMetricsSink {
+  private static ObjectMapper mapper;
+  private Map<String, Set<String>> useTagsMap = new HashMap<String, Set<String>>();
+  private static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix.";
+  private static final String MAX_METRIC_ROW_CACHE_SIZE = "maxRowCacheSize";
+  private static final String METRICS_SEND_INTERVAL = "sendInterval";
+  protected HttpClient httpClient = new HttpClient();
+  private TimelineMetricsCache metricsCache;
+
+  static {
+    mapper = new ObjectMapper();
+    AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
+    mapper.setAnnotationIntrospector(introspector);
+    mapper.getSerializationConfig()
+      .setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
+  }
+
+  @Override
+  public void init(SubsetConfiguration conf) {
+    super.init(conf);
+
+    int maxRowCacheSize = conf.getInt(MAX_METRIC_ROW_CACHE_SIZE,
+      TimelineMetricsCache.MAX_RECS_PER_NAME_DEFAULT);
+    int metricsSendInterval = conf.getInt(METRICS_SEND_INTERVAL,
+      TimelineMetricsCache.MAX_EVICTION_TIME_MILLIS); // ~ 1 min
+    metricsCache = new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval);
+
+    conf.setListDelimiter(',');
+    Iterator<String> it = (Iterator<String>) conf.getKeys();
+    while (it.hasNext()) {
+      String propertyName = it.next();
+      if (propertyName != null && propertyName.startsWith(TAGS_FOR_PREFIX_PROPERTY_PREFIX)) {
+        String contextName = propertyName.substring(TAGS_FOR_PREFIX_PROPERTY_PREFIX.length());
+        String[] tags = conf.getStringArray(propertyName);
+        boolean useAllTags = false;
+        Set<String> set = null;
+        if (tags.length > 0) {
+          set = new HashSet<String>();
+          for (String tag : tags) {
+            tag = tag.trim();
+            useAllTags |= tag.equals("*");
+            if (tag.length() > 0) {
+              set.add(tag);
+            }
+          }
+          if (useAllTags) {
+            set = null;
+          }
+        }
+        useTagsMap.put(contextName, set);
+      }
+    }
+  }
+
+  @Override
+  public void putMetrics(MetricsRecord record) {
+    try {
+      String recordName = record.name();
+      String contextName = record.context();
+
+      StringBuilder sb = new StringBuilder();
+      sb.append(contextName);
+      sb.append('.');
+      sb.append(recordName);
+
+      appendPrefix(record, sb);
+      sb.append(".");
+      int sbBaseLen = sb.length();
+
+      Collection<AbstractMetric> metrics =
+        (Collection<AbstractMetric>) record.metrics();
+
+      List<TimelineMetric> metricList = new ArrayList<TimelineMetric>();
+
+      for (AbstractMetric metric : metrics) {
+        sb.append(metric.name());
+        String name = sb.toString();
+        TimelineMetric timelineMetric = new TimelineMetric();
+        timelineMetric.setMetricName(name);
+        timelineMetric.setHostName(getHostName());
+        timelineMetric.setAppId(getServiceName());
+        timelineMetric.setStartTime(record.timestamp());
+        timelineMetric.setType(ClassUtils.getShortCanonicalName(
+          metric.value(), "Number"));
+        timelineMetric.getMetricValues().put(record.timestamp(),
+          metric.value().doubleValue());
+        // Put intermediate values into the cache until it is time to send
+        metricsCache.putTimelineMetric(timelineMetric);
+
+        // Retrieve all values from cache if it is time to send
+        TimelineMetric cachedMetric = metricsCache.getTimelineMetric(name);
+
+        if (cachedMetric != null) {
+          metricList.add(cachedMetric);
+        }
+
+        sb.setLength(sbBaseLen);
+      }
+
+      TimelineMetrics timelineMetrics = new TimelineMetrics();
+      timelineMetrics.setMetrics(metricList);
+
+      if (!metricList.isEmpty()) {
+        emitMetrics(timelineMetrics);
+      }
+
+    } catch (IOException io) {
+      throw new MetricsException("Failed to putMetrics", io);
+    }
+  }
+
+  private void emitMetrics(TimelineMetrics metrics) throws IOException {
+    String jsonData = mapper.writeValueAsString(metrics);
+
+    SocketAddress socketAddress = getServerSocketAddress();
+
+    if (socketAddress != null) {
+      StringRequestEntity requestEntity = new StringRequestEntity(
+        jsonData, "application/json", "UTF-8");
+
+      PostMethod postMethod = new PostMethod(getCollectorUri());
+      postMethod.setRequestEntity(requestEntity);
+      int statusCode = httpClient.executeMethod(postMethod);
+      if (statusCode != 200) {
+        LOG.warn("Unable to POST metrics to collector, " + getCollectorUri()
+          + ", statusCode = " + statusCode);
+      }
+    }
+  }
+
+  // Taken as is from Ganglia30 implementation
+  @InterfaceAudience.Private
+  public void appendPrefix(MetricsRecord record, StringBuilder sb) {
+    String contextName = record.context();
+    Collection<MetricsTag> tags = record.tags();
+    if (useTagsMap.containsKey(contextName)) {
+      Set<String> useTags = useTagsMap.get(contextName);
+      for (MetricsTag t : tags) {
+        if (useTags == null || useTags.contains(t.name())) {
+
+          // the context is always skipped here because it is always added
+
+          // the hostname is always skipped to avoid case-mismatches
+          // from different DNSes.
+
+          if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) {
+            sb.append('.').append(t.name()).append('=').append(t.value());
+          }
+        }
+      }
+    }
+  }
+
+  @Override
+  public void flush() {
+    // TODO: Buffering implementation
+  }
+
+}
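This sink is wired in through the standard metrics2 configuration. A hedged example of the relevant hadoop-metrics2.properties entries (the "datanode" prefix and "timeline" instance name are illustrative; "collector", "sendInterval" and "maxRowCacheSize" are the property names read above, and 8188 is the default collector port):

    datanode.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.TimelineMetricsSink
    datanode.sink.timeline.collector=localhost:8188
    datanode.sink.timeline.sendInterval=59000
    datanode.sink.timeline.maxRowCacheSize=10000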

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/pom.xml b/ambari-metrics/ambari-metrics-hadoop-timelineservice/pom.xml
new file mode 100644
index 0000000..e1f11c9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/pom.xml
@@ -0,0 +1,248 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>ambari-metrics</artifactId>
+    <groupId>org.apache.ambari</groupId>
+    <version>1.3.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>ambari-metrics-hadoop-timelineservice</artifactId>
+  <version>1.3.0-SNAPSHOT</version>
+  <name>ambari-metrics-hadoop-timelineservice</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
+    <protobuf.version>2.5.0</protobuf.version>
+    <hadoop.version>2.4.0</hadoop.version>
+  </properties>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${project.build.directory}/lib</outputDirectory>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptors>
+            <descriptor>src/main/assemblies/ats.xml</descriptor>
+          </descriptors>
+          <tarLongFileMode>gnu</tarLongFileMode>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>none</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <artifactId>ambari-metrics-hadoop-sink</artifactId>
+      <groupId>org.apache.ambari</groupId>
+      <version>1.3.0-SNAPSHOT</version>
+    </dependency>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1-jetty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <version>1.8.5</version>
+      <scope>test</scope>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-servlet</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <version>${protobuf.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-json</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.contribs</groupId>
+      <artifactId>jersey-guice</artifactId>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+      <version>${hadoop.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>javax.xml.bind</groupId>
+      <artifactId>jaxb-api</artifactId>
+      <version>2.2.2</version>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jettison</groupId>
+      <artifactId>jettison</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+      <version>3.2.1</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.fusesource.leveldbjni</groupId>
+      <artifactId>leveldbjni-all</artifactId>
+      <version>1.8</version>
+    </dependency>
+
+  </dependencies>
+</project>
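Note that the 'build-tarball' execution above is bound to phase 'none', so the ats.xml assembly is not produced by a normal build and has to be requested explicitly. Assuming the plugin-level descriptor configuration is picked up from the command line, something like:

    mvn package assembly:single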

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/assemblies/ats.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/assemblies/ats.xml b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/assemblies/ats.xml
new file mode 100644
index 0000000..21a6b36
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/assemblies/ats.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly>
+  <!--This 'dist' id is not appended to the produced bundle because we do this:
+    http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers
+  -->
+  <id>dist</id>
+  <formats>
+    <format>dir</format>
+    <format>tar.gz</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <files>
+    <file>
+      <source>${project.build.directory}/${artifact.artifactId}-${artifact.version}.jar</source>
+      <outputDirectory>ambari-metrics-${project.version}/lib/ambari-metrics</outputDirectory>
+    </file>
+  </files>
+</assembly>

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/assemblies/empty.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/assemblies/empty.xml b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/assemblies/empty.xml
new file mode 100644
index 0000000..35738b1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/assemblies/empty.xml
@@ -0,0 +1,21 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+    <id>empty</id>
+    <formats/>
+</assembly>

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/conf/hbase-site-metrics-service.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/conf/hbase-site-metrics-service.xml b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/conf/hbase-site-metrics-service.xml
new file mode 100644
index 0000000..4c85581
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/conf/hbase-site-metrics-service.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>file:///grid/0/hbase</value>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/grid/0/hbase-tmp</value>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.master.wait.on.regionservers.mintostart</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+  </property>
+  <property>
+    <name>phoenix.query.spoolThresholdBytes</name>
+    <value>12582912</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.dataDir</name>
+    <value>/grid/0/zookeeper</value>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>10000</value>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.6</value>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.2</value>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.1</value>
+  </property>
+  <property>
+    <name>phoenix.groupby.maxCacheSize</name>
+    <value>307200000</value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/conf/YarnConfig.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/conf/YarnConfig.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/conf/YarnConfig.java
new file mode 100644
index 0000000..fe5a553
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/conf/YarnConfig.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.conf;
+
+public class YarnConfig extends YarnConfiguration {
+  public static final String TIMELINE_METRICS_SERVICE_PREFIX =
+        TIMELINE_SERVICE_PREFIX + "metrics.";
+
+  public static final String TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR =
+        TIMELINE_METRICS_SERVICE_PREFIX + "aggregator.checkpoint.dir";
+}
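Reading the new key back is a one-liner; the default path below is purely illustrative, since the patch itself defines no default:

    Configuration conf = new YarnConfiguration();
    String checkpointDir = conf.get(
        YarnConfig.TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
        "/tmp/timeline-metrics-checkpoint"); // placeholder default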

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
new file mode 100644
index 0000000..e15198b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
@@ -0,0 +1,211 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+
+public class ApplicationHistoryClientService extends AbstractService {
+  private static final Log LOG = LogFactory
+    .getLog(ApplicationHistoryClientService.class);
+  private ApplicationHistoryManager history;
+  private ApplicationHistoryProtocol protocolHandler;
+  private Server server;
+  private InetSocketAddress bindAddress;
+
+  public ApplicationHistoryClientService(ApplicationHistoryManager history) {
+    super("ApplicationHistoryClientService");
+    this.history = history;
+    this.protocolHandler = new ApplicationHSClientProtocolHandler();
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    Configuration conf = getConfig();
+    YarnRPC rpc = YarnRPC.create(conf);
+    InetSocketAddress address =
+        conf.getSocketAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
+
+    server =
+        rpc.getServer(ApplicationHistoryProtocol.class, protocolHandler,
+          address, conf, null, conf.getInt(
+            YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT));
+
+    server.start();
+    this.bindAddress =
+        conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
+          server.getListenerAddress());
+    LOG.info("Instantiated ApplicationHistoryClientService at "
+        + this.bindAddress);
+
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    if (server != null) {
+      server.stop();
+    }
+    super.serviceStop();
+  }
+
+  @Private
+  public ApplicationHistoryProtocol getClientHandler() {
+    return this.protocolHandler;
+  }
+
+  @Private
+  public InetSocketAddress getBindAddress() {
+    return this.bindAddress;
+  }
+
+  private class ApplicationHSClientProtocolHandler implements
+      ApplicationHistoryProtocol {
+
+    @Override
+    public CancelDelegationTokenResponse cancelDelegationToken(
+        CancelDelegationTokenRequest request) throws YarnException, IOException {
+      // TODO Auto-generated method stub
+      return null;
+    }
+
+    @Override
+    public GetApplicationAttemptReportResponse getApplicationAttemptReport(
+        GetApplicationAttemptReportRequest request) throws YarnException,
+        IOException {
+      try {
+        GetApplicationAttemptReportResponse response =
+            GetApplicationAttemptReportResponse.newInstance(history
+              .getApplicationAttempt(request.getApplicationAttemptId()));
+        return response;
+      } catch (IOException e) {
+        throw new ApplicationAttemptNotFoundException(e.getMessage());
+      }
+    }
+
+    @Override
+    public GetApplicationAttemptsResponse getApplicationAttempts(
+        GetApplicationAttemptsRequest request) throws YarnException,
+        IOException {
+      GetApplicationAttemptsResponse response =
+          GetApplicationAttemptsResponse
+            .newInstance(new ArrayList<ApplicationAttemptReport>(history
+              .getApplicationAttempts(request.getApplicationId()).values()));
+      return response;
+    }
+
+    @Override
+    public GetApplicationReportResponse getApplicationReport(
+        GetApplicationReportRequest request) throws YarnException, IOException {
+      try {
+        ApplicationId applicationId = request.getApplicationId();
+        GetApplicationReportResponse response =
+            GetApplicationReportResponse.newInstance(history
+              .getApplication(applicationId));
+        return response;
+      } catch (IOException e) {
+        throw new ApplicationNotFoundException(e.getMessage());
+      }
+    }
+
+    @Override
+    public GetApplicationsResponse getApplications(
+        GetApplicationsRequest request) throws YarnException, IOException {
+      GetApplicationsResponse response =
+          GetApplicationsResponse.newInstance(new ArrayList<ApplicationReport>(
+            history.getAllApplications().values()));
+      return response;
+    }
+
+    @Override
+    public GetContainerReportResponse getContainerReport(
+        GetContainerReportRequest request) throws YarnException, IOException {
+      try {
+        GetContainerReportResponse response =
+            GetContainerReportResponse.newInstance(history.getContainer(request
+              .getContainerId()));
+        return response;
+      } catch (IOException e) {
+        throw new ContainerNotFoundException(e.getMessage());
+      }
+    }
+
+    @Override
+    public GetContainersResponse getContainers(GetContainersRequest request)
+        throws YarnException, IOException {
+      GetContainersResponse response =
+          GetContainersResponse.newInstance(new ArrayList<ContainerReport>(
+            history.getContainers(request.getApplicationAttemptId()).values()));
+      return response;
+    }
+
+    @Override
+    public GetDelegationTokenResponse getDelegationToken(
+        GetDelegationTokenRequest request) throws YarnException, IOException {
+      // TODO Auto-generated method stub
+      return null;
+    }
+
+    @Override
+    public RenewDelegationTokenResponse renewDelegationToken(
+        RenewDelegationTokenRequest request) throws YarnException, IOException {
+      // TODO Auto-generated method stub
+      return null;
+    }
+  }
+}
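A client reaches this service over the same RPC plumbing used in serviceStart(); a sketch (the application id below is a placeholder):

    YarnConfiguration conf = new YarnConfiguration();
    YarnRPC rpc = YarnRPC.create(conf);
    InetSocketAddress address =
        conf.getSocketAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
    ApplicationHistoryProtocol history = (ApplicationHistoryProtocol)
        rpc.getProxy(ApplicationHistoryProtocol.class, address, conf);
    GetApplicationReportResponse report = history.getApplicationReport(
        GetApplicationReportRequest.newInstance(
            ApplicationId.newInstance(System.currentTimeMillis(), 1)));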

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
new file mode 100644
index 0000000..db25d29
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.server.api.ApplicationContext;
+
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public interface ApplicationHistoryManager extends ApplicationContext {
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
new file mode 100644
index 0000000..b56a595
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
@@ -0,0 +1,243 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class ApplicationHistoryManagerImpl extends AbstractService implements
+    ApplicationHistoryManager {
+  private static final Log LOG = LogFactory
+    .getLog(ApplicationHistoryManagerImpl.class);
+  private static final String UNAVAILABLE = "N/A";
+
+  private ApplicationHistoryStore historyStore;
+  private String serverHttpAddress;
+
+  public ApplicationHistoryManagerImpl() {
+    super(ApplicationHistoryManagerImpl.class.getName());
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    LOG.info("ApplicationHistory Init");
+    historyStore = createApplicationHistoryStore(conf);
+    historyStore.init(conf);
+    serverHttpAddress = WebAppUtils.getHttpSchemePrefix(conf) +
+        WebAppUtils.getAHSWebAppURLWithoutScheme(conf);
+    super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    LOG.info("Starting ApplicationHistory");
+    historyStore.start();
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    LOG.info("Stopping ApplicationHistory");
+    historyStore.stop();
+    super.serviceStop();
+  }
+
+  protected ApplicationHistoryStore createApplicationHistoryStore(
+      Configuration conf) {
+    return ReflectionUtils.newInstance(conf.getClass(
+      YarnConfiguration.APPLICATION_HISTORY_STORE,
+      FileSystemApplicationHistoryStore.class,
+      ApplicationHistoryStore.class), conf);
+  }
+
+  @Override
+  public ContainerReport getAMContainer(ApplicationAttemptId appAttemptId)
+      throws IOException {
+    ApplicationReport app =
+        getApplication(appAttemptId.getApplicationId());
+    return convertToContainerReport(historyStore.getAMContainer(appAttemptId),
+        app == null ? null : app.getUser());
+  }
+
+  @Override
+  public Map<ApplicationId, ApplicationReport> getAllApplications()
+      throws IOException {
+    Map<ApplicationId, ApplicationHistoryData> histData =
+        historyStore.getAllApplications();
+    HashMap<ApplicationId, ApplicationReport> applicationsReport =
+        new HashMap<ApplicationId, ApplicationReport>();
+    for (Entry<ApplicationId, ApplicationHistoryData> entry : histData
+      .entrySet()) {
+      applicationsReport.put(entry.getKey(),
+        convertToApplicationReport(entry.getValue()));
+    }
+    return applicationsReport;
+  }
+
+  @Override
+  public ApplicationReport getApplication(ApplicationId appId)
+      throws IOException {
+    return convertToApplicationReport(historyStore.getApplication(appId));
+  }
+
+  private ApplicationReport convertToApplicationReport(
+      ApplicationHistoryData appHistory) throws IOException {
+    ApplicationAttemptId currentApplicationAttemptId = null;
+    String trackingUrl = UNAVAILABLE;
+    String host = UNAVAILABLE;
+    int rpcPort = -1;
+
+    ApplicationAttemptHistoryData lastAttempt =
+        getLastAttempt(appHistory.getApplicationId());
+    if (lastAttempt != null) {
+      currentApplicationAttemptId = lastAttempt.getApplicationAttemptId();
+      trackingUrl = lastAttempt.getTrackingURL();
+      host = lastAttempt.getHost();
+      rpcPort = lastAttempt.getRPCPort();
+    }
+    return ApplicationReport.newInstance(appHistory.getApplicationId(),
+      currentApplicationAttemptId, appHistory.getUser(), appHistory.getQueue(),
+      appHistory.getApplicationName(), host, rpcPort, null,
+      appHistory.getYarnApplicationState(), appHistory.getDiagnosticsInfo(),
+      trackingUrl, appHistory.getStartTime(), appHistory.getFinishTime(),
+      appHistory.getFinalApplicationStatus(), null, "", 100,
+      appHistory.getApplicationType(), null);
+  }
+
+  private ApplicationAttemptHistoryData getLastAttempt(ApplicationId appId)
+      throws IOException {
+    Map<ApplicationAttemptId, ApplicationAttemptHistoryData> attempts =
+        historyStore.getApplicationAttempts(appId);
+    ApplicationAttemptId prevMaxAttemptId = null;
+    for (ApplicationAttemptId attemptId : attempts.keySet()) {
+      if (prevMaxAttemptId == null) {
+        prevMaxAttemptId = attemptId;
+      } else {
+        if (prevMaxAttemptId.getAttemptId() < attemptId.getAttemptId()) {
+          prevMaxAttemptId = attemptId;
+        }
+      }
+    }
+    return attempts.get(prevMaxAttemptId);
+  }
+
+  private ApplicationAttemptReport convertToApplicationAttemptReport(
+      ApplicationAttemptHistoryData appAttemptHistory) {
+    return ApplicationAttemptReport.newInstance(
+      appAttemptHistory.getApplicationAttemptId(), appAttemptHistory.getHost(),
+      appAttemptHistory.getRPCPort(), appAttemptHistory.getTrackingURL(),
+      appAttemptHistory.getDiagnosticsInfo(),
+      appAttemptHistory.getYarnApplicationAttemptState(),
+      appAttemptHistory.getMasterContainerId());
+  }
+
+  @Override
+  public ApplicationAttemptReport getApplicationAttempt(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    return convertToApplicationAttemptReport(historyStore
+      .getApplicationAttempt(appAttemptId));
+  }
+
+  @Override
+  public Map<ApplicationAttemptId, ApplicationAttemptReport>
+      getApplicationAttempts(ApplicationId appId) throws IOException {
+    Map<ApplicationAttemptId, ApplicationAttemptHistoryData> histData =
+        historyStore.getApplicationAttempts(appId);
+    HashMap<ApplicationAttemptId, ApplicationAttemptReport> applicationAttemptsReport =
+        new HashMap<ApplicationAttemptId, ApplicationAttemptReport>();
+    for (Entry<ApplicationAttemptId, ApplicationAttemptHistoryData> entry : histData
+      .entrySet()) {
+      applicationAttemptsReport.put(entry.getKey(),
+        convertToApplicationAttemptReport(entry.getValue()));
+    }
+    return applicationAttemptsReport;
+  }
+
+  @Override
+  public ContainerReport getContainer(ContainerId containerId)
+      throws IOException {
+    ApplicationReport app =
+        getApplication(containerId.getApplicationAttemptId().getApplicationId());
+    return convertToContainerReport(historyStore.getContainer(containerId),
+        app == null ? null : app.getUser());
+  }
+
+  private ContainerReport convertToContainerReport(
+      ContainerHistoryData containerHistory, String user) {
+    // If the container has the aggregated log, add the server root url
+    String logUrl = WebAppUtils.getAggregatedLogURL(
+        serverHttpAddress,
+        containerHistory.getAssignedNode().toString(),
+        containerHistory.getContainerId().toString(),
+        containerHistory.getContainerId().toString(),
+        user);
+    return ContainerReport.newInstance(containerHistory.getContainerId(),
+      containerHistory.getAllocatedResource(),
+      containerHistory.getAssignedNode(), containerHistory.getPriority(),
+      containerHistory.getStartTime(), containerHistory.getFinishTime(),
+      containerHistory.getDiagnosticsInfo(), logUrl,
+      containerHistory.getContainerExitStatus(),
+      containerHistory.getContainerState());
+  }
+
+  @Override
+  public Map<ContainerId, ContainerReport> getContainers(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    ApplicationReport app =
+        getApplication(appAttemptId.getApplicationId());
+    Map<ContainerId, ContainerHistoryData> histData =
+        historyStore.getContainers(appAttemptId);
+    HashMap<ContainerId, ContainerReport> containersReport =
+        new HashMap<ContainerId, ContainerReport>();
+    for (Entry<ContainerId, ContainerHistoryData> entry : histData.entrySet()) {
+      containersReport.put(entry.getKey(),
+        convertToContainerReport(entry.getValue(),
+            app == null ? null : app.getUser()));
+    }
+    return containersReport;
+  }
+
+  @Private
+  @VisibleForTesting
+  public ApplicationHistoryStore getHistoryStore() {
+    return this.historyStore;
+  }
+}


[07/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psosx.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psosx.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psosx.py
new file mode 100644
index 0000000..8953867
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psosx.py
@@ -0,0 +1,341 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""OSX platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import conn_tmap, usage_percent, isfile_strict
+from psutil._compat import namedtuple, wraps
+import _psutil_osx as cext
+import _psutil_posix
+
+
+__extra__all__ = []
+
+# --- constants
+
+PAGESIZE = os.sysconf("SC_PAGE_SIZE")
+
+# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
+TCP_STATUSES = {
+    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.TCPS_CLOSED: _common.CONN_CLOSE,
+    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.TCPS_LISTEN: _common.CONN_LISTEN,
+    cext.TCPS_CLOSING: _common.CONN_CLOSING,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PROC_STATUSES = {
+    cext.SIDL: _common.STATUS_IDLE,
+    cext.SRUN: _common.STATUS_RUNNING,
+    cext.SSLEEP: _common.STATUS_SLEEPING,
+    cext.SSTOP: _common.STATUS_STOPPED,
+    cext.SZOMB: _common.STATUS_ZOMBIE,
+}
+
+scputimes = namedtuple('scputimes', ['user', 'nice', 'system', 'idle'])
+
+svmem = namedtuple(
+    'svmem', ['total', 'available', 'percent', 'used', 'free',
+              'active', 'inactive', 'wired'])
+
+pextmem = namedtuple('pextmem', ['rss', 'vms', 'pfaults', 'pageins'])
+
+pmmap_grouped = namedtuple(
+    'pmmap_grouped',
+    'path rss private swapped dirtied ref_count shadow_depth')
+
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+
+# --- functions
+
+def virtual_memory():
+    """System virtual memory as a namedtuple."""
+    total, active, inactive, wired, free = cext.virtual_mem()
+    avail = inactive + free
+    used = active + inactive + wired
+    percent = usage_percent((total - avail), total, _round=1)
+    return svmem(total, avail, percent, used, free,
+                 active, inactive, wired)
+
+
+def swap_memory():
+    """Swap system memory as a (total, used, free, sin, sout) tuple."""
+    total, used, free, sin, sout = cext.swap_mem()
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent, sin, sout)
+
+
+def cpu_times():
+    """Return system CPU times as a namedtuple."""
+    user, nice, system, idle = cext.cpu_times()
+    return scputimes(user, nice, system, idle)
+
+
+def per_cpu_times():
+    """Return system CPU times as a named tuple"""
+    ret = []
+    for cpu_t in cext.per_cpu_times():
+        user, nice, system, idle = cpu_t
+        item = scputimes(user, nice, system, idle)
+        ret.append(item)
+    return ret
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    return cext.cpu_count_logical()
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    return cext.cpu_count_phys()
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def disk_partitions(all=False):
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            if not os.path.isabs(device) or not os.path.exists(device):
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+def users():
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp = item
+        if tty == '~':
+            continue  # reboot or shutdown
+        if not tstamp:
+            continue
+        nt = _common.suser(user, tty or None, hostname or None, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def net_connections(kind='inet'):
+    # Note: on OSX this will fail with AccessDenied unless
+    # the process is owned by root.
+    ret = []
+    for pid in pids():
+        try:
+            cons = Process(pid).connections(kind)
+        except NoSuchProcess:
+            continue
+        else:
+            if cons:
+                for c in cons:
+                    c = list(c) + [pid]
+                    ret.append(_common.sconn(*c))
+    return ret
+
+
+pids = cext.pids
+pid_exists = _psposix.pid_exists
+disk_usage = _psposix.disk_usage
+net_io_counters = cext.net_io_counters
+disk_io_counters = cext.disk_io_counters
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError exceptions into
+    NoSuchProcess and AccessDenied.
+    """
+    @wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except OSError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            err = sys.exc_info()[1]
+            if err.errno == errno.ESRCH:
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        return cext.proc_name(self.pid)
+
+    @wrap_exceptions
+    def exe(self):
+        return cext.proc_exe(self.pid)
+
+    @wrap_exceptions
+    def cmdline(self):
+        if not pid_exists(self.pid):
+            raise NoSuchProcess(self.pid, self._name)
+        return cext.proc_cmdline(self.pid)
+
+    @wrap_exceptions
+    def ppid(self):
+        return cext.proc_ppid(self.pid)
+
+    @wrap_exceptions
+    def cwd(self):
+        return cext.proc_cwd(self.pid)
+
+    @wrap_exceptions
+    def uids(self):
+        real, effective, saved = cext.proc_uids(self.pid)
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        real, effective, saved = cext.proc_gids(self.pid)
+        return _common.pgids(real, effective, saved)
+
+    @wrap_exceptions
+    def terminal(self):
+        tty_nr = cext.proc_tty_nr(self.pid)
+        tmap = _psposix._get_terminal_map()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
+    @wrap_exceptions
+    def memory_info(self):
+        rss, vms = cext.proc_memory_info(self.pid)[:2]
+        return _common.pmem(rss, vms)
+
+    @wrap_exceptions
+    def memory_info_ex(self):
+        rss, vms, pfaults, pageins = cext.proc_memory_info(self.pid)
+        return pextmem(rss, vms, pfaults * PAGESIZE, pageins * PAGESIZE)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        user, system = cext.proc_cpu_times(self.pid)
+        return _common.pcputimes(user, system)
+
+    @wrap_exceptions
+    def create_time(self):
+        return cext.proc_create_time(self.pid)
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_num_threads(self.pid)
+
+    @wrap_exceptions
+    def open_files(self):
+        if self.pid == 0:
+            return []
+        files = []
+        rawlist = cext.proc_open_files(self.pid)
+        for path, fd in rawlist:
+            if isfile_strict(path):
+                ntuple = _common.popenfile(path, fd)
+                files.append(ntuple)
+        return files
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        if kind not in conn_tmap:
+            raise ValueError("invalid %r kind argument; choose between %s"
+                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
+        families, types = conn_tmap[kind]
+        rawlist = cext.proc_connections(self.pid, families, types)
+        ret = []
+        for item in rawlist:
+            fd, fam, type_, laddr, raddr, status = item
+            status = TCP_STATUSES[status]
+            nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
+            ret.append(nt)
+        return ret
+
+    @wrap_exceptions
+    def num_fds(self):
+        if self.pid == 0:
+            return 0
+        return cext.proc_num_fds(self.pid)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def nice_get(self):
+        return _psutil_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def status(self):
+        code = cext.proc_status(self.pid)
+        # XXX is '?' legit? (we're not supposed to return it anyway)
+        return PROC_STATUSES.get(code, '?')
+
+    @wrap_exceptions
+    def threads(self):
+        rawlist = cext.proc_threads(self.pid)
+        retlist = []
+        for thread_id, utime, stime in rawlist:
+            ntuple = _common.pthread(thread_id, utime, stime)
+            retlist.append(ntuple)
+        return retlist
+
+    @wrap_exceptions
+    def memory_maps(self):
+        return cext.proc_memory_maps(self.pid)

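For orientation, a minimal usage sketch for the OS X module above (not part of the patch, and assuming the vendored package is importable as psutil): these functions are normally reached through psutil's public API rather than imported directly.

    import os
    import psutil  # the vendored package this module belongs to

    # System memory comes from virtual_memory() above; the 'wired'
    # field is OS X-specific (see the svmem namedtuple).
    mem = psutil.virtual_memory()
    print(mem.total, mem.available, mem.wired)

    # Per-process queries go through the Process wrapper class.
    p = psutil.Process(os.getpid())
    print(p.name(), p.num_threads(), p.cpu_times())
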
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psposix.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psposix.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psposix.py
new file mode 100644
index 0000000..1188c9f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psposix.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Routines common to all posix systems."""
+
+import errno
+import glob
+import os
+import sys
+import time
+
+from psutil._common import sdiskusage, usage_percent, memoize
+from psutil._compat import PY3, unicode
+
+
+class TimeoutExpired(Exception):
+    pass
+
+
+def pid_exists(pid):
+    """Check whether pid exists in the current process table."""
+    if pid == 0:
+        # According to "man 2 kill" PID 0 has a special meaning:
+        # it refers to <<every process in the process group of the
+        # calling process>> so we don't want to go any further.
+        # If we get here it means this UNIX platform *does* have
+        # a process with id 0.
+        return True
+    try:
+        os.kill(pid, 0)
+    except OSError:
+        err = sys.exc_info()[1]
+        if err.errno == errno.ESRCH:
+            # ESRCH == No such process
+            return False
+        elif err.errno == errno.EPERM:
+            # EPERM clearly means there's a process to deny access to
+            return True
+        else:
+            # According to "man 2 kill" possible error values are
+            # (EINVAL, EPERM, ESRCH) therefore we should never get
+            # here. If we do let's be explicit in considering this
+            # an error.
+            raise err
+    else:
+        return True
+
+
+def wait_pid(pid, timeout=None):
+    """Wait for process with pid 'pid' to terminate and return its
+    exit status code as an integer.
+
+    If pid is not a children of os.getpid() (current process) just
+    waits until the process disappears and return None.
+
+    If pid does not exist at all return None immediately.
+
+    Raise TimeoutExpired on timeout expired.
+    """
+    def check_timeout(delay):
+        if timeout is not None:
+            if timer() >= stop_at:
+                raise TimeoutExpired()
+        time.sleep(delay)
+        return min(delay * 2, 0.04)
+
+    timer = getattr(time, 'monotonic', time.time)
+    if timeout is not None:
+        waitcall = lambda: os.waitpid(pid, os.WNOHANG)
+        stop_at = timer() + timeout
+    else:
+        waitcall = lambda: os.waitpid(pid, 0)
+
+    delay = 0.0001
+    while 1:
+        try:
+            retpid, status = waitcall()
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.EINTR:
+                delay = check_timeout(delay)
+                continue
+            elif err.errno == errno.ECHILD:
+                # This has two meanings:
+                # - pid is not a child of os.getpid() in which case
+                #   we keep polling until it's gone
+                # - pid never existed in the first place
+                # In both cases we'll eventually return None as we
+                # can't determine its exit status code.
+                while 1:
+                    if pid_exists(pid):
+                        delay = check_timeout(delay)
+                    else:
+                        return
+            else:
+                raise
+        else:
+            if retpid == 0:
+                # WNOHANG was used, pid is still running
+                delay = check_timeout(delay)
+                continue
+            # process exited due to a signal; return the integer of
+            # that signal
+            if os.WIFSIGNALED(status):
+                return os.WTERMSIG(status)
+            # process exited using exit(2) system call; return the
+            # integer exit(2) system call has been called with
+            elif os.WIFEXITED(status):
+                return os.WEXITSTATUS(status)
+            else:
+                # should never happen
+                raise RuntimeError("unknown process exit status")
+
+
+def disk_usage(path):
+    """Return disk usage associated with path."""
+    try:
+        st = os.statvfs(path)
+    except UnicodeEncodeError:
+        if not PY3 and isinstance(path, unicode):
+            # this is a bug with os.statvfs() and unicode on
+            # Python 2, see:
+            # - https://code.google.com/p/psutil/issues/detail?id=416
+            # - http://bugs.python.org/issue18695
+            try:
+                path = path.encode(sys.getfilesystemencoding())
+            except UnicodeEncodeError:
+                pass
+            st = os.statvfs(path)
+        else:
+            raise
+    free = (st.f_bavail * st.f_frsize)
+    total = (st.f_blocks * st.f_frsize)
+    used = (st.f_blocks - st.f_bfree) * st.f_frsize
+    percent = usage_percent(used, total, _round=1)
+    # NB: the percentage is about 5% lower than what's shown by df
+    # due to reserved blocks that we are currently not considering:
+    # http://goo.gl/sWGbH
+    return sdiskusage(total, used, free, percent)
+
+
+@memoize
+def _get_terminal_map():
+    ret = {}
+    ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
+    for name in ls:
+        assert name not in ret
+        try:
+            ret[os.stat(name).st_rdev] = name
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno != errno.ENOENT:
+                raise
+    return ret

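A short sketch of the wait_pid()/pid_exists() semantics documented above, under the same importability assumption (POSIX only): wait_pid() polls with an exponentially growing delay, capped at 0.04 seconds, and raises the module-level TimeoutExpired once the deadline passes.

    import subprocess
    from psutil import _psposix

    child = subprocess.Popen(['sleep', '1'])
    assert _psposix.pid_exists(child.pid)

    try:
        # Returns the exit status (0 here), or the signal number if
        # the child was killed by a signal.
        print(_psposix.wait_pid(child.pid, timeout=5))
    except _psposix.TimeoutExpired:
        print('child still running after 5 seconds')
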
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pssunos.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pssunos.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pssunos.py
new file mode 100644
index 0000000..bc18427
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pssunos.py
@@ -0,0 +1,533 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Sun OS Solaris platform implementation."""
+
+import errno
+import os
+import socket
+import subprocess
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import (conn_tmap, usage_percent, isfile_strict)
+from psutil._compat import namedtuple, PY3
+import _psutil_posix
+import _psutil_sunos as cext
+
+
+__extra__all__ = ["CONN_IDLE", "CONN_BOUND"]
+
+PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
+
+CONN_IDLE = "IDLE"
+CONN_BOUND = "BOUND"
+
+PROC_STATUSES = {
+    cext.SSLEEP: _common.STATUS_SLEEPING,
+    cext.SRUN: _common.STATUS_RUNNING,
+    cext.SZOMB: _common.STATUS_ZOMBIE,
+    cext.SSTOP: _common.STATUS_STOPPED,
+    cext.SIDL: _common.STATUS_IDLE,
+    cext.SONPROC: _common.STATUS_RUNNING,  # same as run
+    cext.SWAIT: _common.STATUS_WAITING,
+}
+
+TCP_STATUSES = {
+    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+    cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
+    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+    cext.TCPS_CLOSED: _common.CONN_CLOSE,
+    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+    cext.TCPS_LISTEN: _common.CONN_LISTEN,
+    cext.TCPS_CLOSING: _common.CONN_CLOSING,
+    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+    cext.TCPS_IDLE: CONN_IDLE,  # sunos specific
+    cext.TCPS_BOUND: CONN_BOUND,  # sunos specific
+}
+
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+pextmem = namedtuple('pextmem', ['rss', 'vms'])
+pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked'])
+pmmap_ext = namedtuple(
+    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+# set later from __init__.py
+NoSuchProcess = None
+AccessDenied = None
+TimeoutExpired = None
+
+# --- functions
+
+disk_io_counters = cext.disk_io_counters
+net_io_counters = cext.net_io_counters
+disk_usage = _psposix.disk_usage
+
+
+def virtual_memory():
+    # we could have done this with kstat, but imho this is good enough
+    total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
+    # note: there's no difference on Solaris
+    free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return svmem(total, avail, percent, used, free)
+
+
+def swap_memory():
+    sin, sout = cext.swap_mem()
+    # XXX
+    # we are supposed to get total/free by doing so:
+    # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
+    #     usr/src/cmd/swap/swap.c
+    # ...nevertheless I can't manage to obtain the same numbers as the
+    # 'swap' cmdline utility, so let's parse its output (sigh!)
+    p = subprocess.Popen(['swap', '-l', '-k'], stdout=subprocess.PIPE)
+    stdout, stderr = p.communicate()
+    if PY3:
+        stdout = stdout.decode(sys.stdout.encoding)
+    if p.returncode != 0:
+        raise RuntimeError("'swap -l -k' failed (retcode=%s)" % p.returncode)
+
+    lines = stdout.strip().split('\n')[1:]
+    if not lines:
+        raise RuntimeError('no swap device(s) configured')
+    total = free = 0
+    for line in lines:
+        line = line.split()
+        t, f = line[-2:]
+        t = t.replace('K', '')
+        f = f.replace('K', '')
+        total += int(int(t) * 1024)
+        free += int(int(f) * 1024)
+    used = total - free
+    percent = usage_percent(used, total, _round=1)
+    return _common.sswap(total, used, free, percent,
+                         sin * PAGE_SIZE, sout * PAGE_SIZE)
+
+
+def pids():
+    """Returns a list of PIDs currently running on the system."""
+    return [int(x) for x in os.listdir('/proc') if x.isdigit()]
+
+
+def pid_exists(pid):
+    """Check for the existence of a unix pid."""
+    return _psposix.pid_exists(pid)
+
+
+def cpu_times():
+    """Return system-wide CPU times as a named tuple"""
+    ret = cext.per_cpu_times()
+    return scputimes(*[sum(x) for x in zip(*ret)])
+
+
+def per_cpu_times():
+    """Return system per-CPU times as a list of named tuples"""
+    ret = cext.per_cpu_times()
+    return [scputimes(*x) for x in ret]
+
+
+def cpu_count_logical():
+    """Return the number of logical CPUs in the system."""
+    try:
+        return os.sysconf("SC_NPROCESSORS_ONLN")
+    except ValueError:
+        # mimic os.cpu_count() behavior
+        return None
+
+
+def cpu_count_physical():
+    """Return the number of physical CPUs in the system."""
+    return cext.cpu_count_phys()
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    localhost = (':0.0', ':0')
+    for item in rawlist:
+        user, tty, hostname, tstamp, user_process = item
+        # note: the underlying C function includes entries about
+        # system boot, run level and others.  We might want
+        # to use them in the future.
+        if not user_process:
+            continue
+        if hostname in localhost:
+            hostname = 'localhost'
+        nt = _common.suser(user, tty, hostname, tstamp)
+        retlist.append(nt)
+    return retlist
+
+
+def disk_partitions(all=False):
+    """Return system disk partitions."""
+    # TODO - the filtering logic should be better checked so that
+    # it tries to reflect 'df' as much as possible
+    retlist = []
+    partitions = cext.disk_partitions()
+    for partition in partitions:
+        device, mountpoint, fstype, opts = partition
+        if device == 'none':
+            device = ''
+        if not all:
+            # Unlike, say, Linux, we don't have a list of common fs
+            # types, so the best we can do, AFAIK, is to filter out
+            # filesystems with a total size of 0.
+            if not disk_usage(mountpoint).total:
+                continue
+        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
+        retlist.append(ntuple)
+    return retlist
+
+
+def net_connections(kind, _pid=-1):
+    """Return socket connections.  If pid == -1 return system-wide
+    connections (as opposed to connections opened by one process only).
+    Only INET sockets are returned (UNIX are not).
+    """
+    cmap = _common.conn_tmap.copy()
+    if _pid == -1:
+        cmap.pop('unix', 0)
+    if kind not in cmap:
+        raise ValueError("invalid %r kind argument; choose between %s"
+                         % (kind, ', '.join([repr(x) for x in cmap])))
+    families, types = _common.conn_tmap[kind]
+    rawlist = cext.net_connections(_pid, families, types)
+    ret = []
+    for item in rawlist:
+        fd, fam, type_, laddr, raddr, status, pid = item
+        if fam not in families:
+            continue
+        if type_ not in types:
+            continue
+        status = TCP_STATUSES[status]
+        if _pid == -1:
+            nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
+        else:
+            nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
+        ret.append(nt)
+    return ret
+
+
+def wrap_exceptions(fun):
+    """Call callable into a try/except clause and translate ENOENT,
+    EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
+    """
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except EnvironmentError:
+            # support for private module import
+            if NoSuchProcess is None or AccessDenied is None:
+                raise
+            # ENOENT (no such file or directory) gets raised on open().
+            # ESRCH (no such process) can get raised on read() if
+            # process is gone in meantime.
+            err = sys.exc_info()[1]
+            if err.errno in (errno.ENOENT, errno.ESRCH):
+                raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+    return wrapper
+
+
+class Process(object):
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        # note: max len == 15
+        return cext.proc_name_and_args(self.pid)[0]
+
+    @wrap_exceptions
+    def exe(self):
+        # The exe will be guessed later from cmdline, but we invoke
+        # cmdline() explicitly here in order to get an AccessDenied
+        # exception if the user does not have enough privileges.
+        self.cmdline()
+        return ""
+
+    @wrap_exceptions
+    def cmdline(self):
+        return cext.proc_name_and_args(self.pid)[1].split(' ')
+
+    @wrap_exceptions
+    def create_time(self):
+        return cext.proc_basic_info(self.pid)[3]
+
+    @wrap_exceptions
+    def num_threads(self):
+        return cext.proc_basic_info(self.pid)[5]
+
+    @wrap_exceptions
+    def nice_get(self):
+        # For some reason getpriority(3) returns ESRCH (no such process)
+        # for certain low-pid processes, no matter what (even as root).
+        # The process actually exists though, as it has a name,
+        # creation time, etc.
+        # The best thing we can do here appears to be raising
+        # AccessDenied.
+        # Note: tested on Solaris 11; on OpenSolaris 5 everything is fine.
+        try:
+            return _psutil_posix.getpriority(self.pid)
+        except EnvironmentError:
+            err = sys.exc_info()[1]
+            if err.errno in (errno.ENOENT, errno.ESRCH):
+                if pid_exists(self.pid):
+                    raise AccessDenied(self.pid, self._name)
+            raise
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        if self.pid in (2, 3):
+            # Special case PIDs: internally setpriority(3) returns ESRCH
+            # (no such process), no matter what.
+            # The process actually exists though, as it has a name,
+            # creation time, etc.
+            raise AccessDenied(self.pid, self._name)
+        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def ppid(self):
+        return cext.proc_basic_info(self.pid)[0]
+
+    @wrap_exceptions
+    def uids(self):
+        real, effective, saved, _, _, _ = cext.proc_cred(self.pid)
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        _, _, _, real, effective, saved = cext.proc_cred(self.pid)
+        return _common.pgids(real, effective, saved)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        user, system = cext.proc_cpu_times(self.pid)
+        return _common.pcputimes(user, system)
+
+    @wrap_exceptions
+    def terminal(self):
+        hit_enoent = False
+        # note: do not pass the value through wrap_exceptions(),
+        # which is a decorator and expects a callable
+        tty = cext.proc_basic_info(self.pid)[0]
+        if tty != cext.PRNODEV:
+            for x in (0, 1, 2, 255):
+                try:
+                    return os.readlink('/proc/%d/path/%d' % (self.pid, x))
+                except OSError:
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+
+    @wrap_exceptions
+    def cwd(self):
+        # /proc/PID/path/cwd may not be resolved by readlink() even if
+        # it exists (ls shows it). If that's the case and the process
+        # is still alive return None (we can return None also on BSD).
+        # Reference: http://goo.gl/55XgO
+        try:
+            return os.readlink("/proc/%s/path/cwd" % self.pid)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.ENOENT:
+                os.stat("/proc/%s" % self.pid)
+                return None
+            raise
+
+    @wrap_exceptions
+    def memory_info(self):
+        ret = cext.proc_basic_info(self.pid)
+        rss, vms = ret[1] * 1024, ret[2] * 1024
+        return _common.pmem(rss, vms)
+
+    # it seems Solaris uses rss and vms only
+    memory_info_ex = memory_info
+
+    @wrap_exceptions
+    def status(self):
+        code = cext.proc_basic_info(self.pid)[6]
+        # XXX is '?' legit? (we're not supposed to return it anyway)
+        return PROC_STATUSES.get(code, '?')
+
+    @wrap_exceptions
+    def threads(self):
+        ret = []
+        tids = os.listdir('/proc/%d/lwp' % self.pid)
+        hit_enoent = False
+        for tid in tids:
+            tid = int(tid)
+            try:
+                utime, stime = cext.query_process_thread(
+                    self.pid, tid)
+            except EnvironmentError:
+                # ENOENT == thread gone in meantime
+                err = sys.exc_info()[1]
+                if err.errno == errno.ENOENT:
+                    hit_enoent = True
+                    continue
+                raise
+            else:
+                nt = _common.pthread(tid, utime, stime)
+                ret.append(nt)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return ret
+
+    @wrap_exceptions
+    def open_files(self):
+        retlist = []
+        hit_enoent = False
+        pathdir = '/proc/%d/path' % self.pid
+        for fd in os.listdir('/proc/%d/fd' % self.pid):
+            path = os.path.join(pathdir, fd)
+            if os.path.islink(path):
+                try:
+                    file = os.readlink(path)
+                except OSError:
+                    # ENOENT == file which is gone in the meantime
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+                else:
+                    if isfile_strict(file):
+                        retlist.append(_common.popenfile(file, int(fd)))
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    def _get_unix_sockets(self, pid):
+        """Get UNIX sockets used by process by parsing 'pfiles' output."""
+        # TODO: rewrite this in C (...but the damn netstat source code
+        # does not include this part! Argh!!)
+        cmd = "pfiles %s" % pid
+        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        if PY3:
+            stdout, stderr = [x.decode(sys.stdout.encoding)
+                              for x in (stdout, stderr)]
+        if p.returncode != 0:
+            if 'permission denied' in stderr.lower():
+                raise AccessDenied(self.pid, self._name)
+            if 'no such process' in stderr.lower():
+                raise NoSuchProcess(self.pid, self._name)
+            raise RuntimeError("%r command error\n%s" % (cmd, stderr))
+
+        lines = stdout.split('\n')[2:]
+        for i, line in enumerate(lines):
+            line = line.lstrip()
+            if line.startswith('sockname: AF_UNIX'):
+                path = line.split(' ', 2)[2]
+                type = lines[i - 2].strip()
+                if type == 'SOCK_STREAM':
+                    type = socket.SOCK_STREAM
+                elif type == 'SOCK_DGRAM':
+                    type = socket.SOCK_DGRAM
+                else:
+                    type = -1
+                yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
+
+    @wrap_exceptions
+    def connections(self, kind='inet'):
+        ret = net_connections(kind, _pid=self.pid)
+        # The underlying C implementation retrieves all OS connections
+        # and filters them by PID.  At this point we can't tell whether
+        # an empty list means there were no connections for the process
+        # or the process is no longer active, so we force NSP in case
+        # the PID is no longer there.
+        if not ret:
+            os.stat('/proc/%s' % self.pid)  # will raise NSP if process is gone
+
+        # UNIX sockets
+        if kind in ('all', 'unix'):
+            ret.extend([_common.pconn(*conn) for conn in
+                        self._get_unix_sockets(self.pid)])
+        return ret
+
+    nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
+    nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
+
+    @wrap_exceptions
+    def memory_maps(self):
+        def toaddr(start, end):
+            return '%s-%s' % (hex(start)[2:].strip('L'),
+                              hex(end)[2:].strip('L'))
+
+        retlist = []
+        rawlist = cext.proc_memory_maps(self.pid)
+        hit_enoent = False
+        for item in rawlist:
+            addr, addrsize, perm, name, rss, anon, locked = item
+            addr = toaddr(addr, addrsize)
+            if not name.startswith('['):
+                try:
+                    name = os.readlink('/proc/%s/path/%s' % (self.pid, name))
+                except OSError:
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        # sometimes the link may not be resolved by
+                        # readlink() even if it exists (ls shows it).
+                        # If that's the case we just return the
+                        # unresolved link path.
+                        # This seems an inconsistency with /proc similar
+                        # to: http://goo.gl/55XgO
+                        name = '/proc/%s/path/%s' % (self.pid, name)
+                        hit_enoent = True
+                    else:
+                        raise
+            retlist.append((addr, perm, name, rss, anon, locked))
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    @wrap_exceptions
+    def num_fds(self):
+        return len(os.listdir("/proc/%s/fd" % self.pid))
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)

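A sketch of the Solaris-specific surface above, assuming a Solaris host with the _psutil_sunos C extension built: the IDLE and BOUND TCP states have no portable equivalent, which is why the module exports them through __extra__all__.

    from psutil import _pssunos

    # System-wide sockets; per the docstring, only INET sockets are
    # returned at the system-wide level (UNIX sockets are not).
    for conn in _pssunos.net_connections(kind='tcp'):
        if conn.status in (_pssunos.CONN_IDLE, _pssunos.CONN_BOUND):
            print(conn.pid, conn.laddr, conn.status)
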

[19/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java
new file mode 100644
index 0000000..b7d16f3
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+
+/**
+ * The class contains all the fields that are stored persistently for
+ * <code>RMApp</code>.
+ */
+@Public
+@Unstable
+public class ApplicationHistoryData {
+
+  private ApplicationId applicationId;
+
+  private String applicationName;
+
+  private String applicationType;
+
+  private String user;
+
+  private String queue;
+
+  private long submitTime;
+
+  private long startTime;
+
+  private long finishTime;
+
+  private String diagnosticsInfo;
+
+  private FinalApplicationStatus finalApplicationStatus;
+
+  private YarnApplicationState yarnApplicationState;
+
+  @Public
+  @Unstable
+  public static ApplicationHistoryData newInstance(ApplicationId applicationId,
+      String applicationName, String applicationType, String queue,
+      String user, long submitTime, long startTime, long finishTime,
+      String diagnosticsInfo, FinalApplicationStatus finalApplicationStatus,
+      YarnApplicationState yarnApplicationState) {
+    ApplicationHistoryData appHD = new ApplicationHistoryData();
+    appHD.setApplicationId(applicationId);
+    appHD.setApplicationName(applicationName);
+    appHD.setApplicationType(applicationType);
+    appHD.setQueue(queue);
+    appHD.setUser(user);
+    appHD.setSubmitTime(submitTime);
+    appHD.setStartTime(startTime);
+    appHD.setFinishTime(finishTime);
+    appHD.setDiagnosticsInfo(diagnosticsInfo);
+    appHD.setFinalApplicationStatus(finalApplicationStatus);
+    appHD.setYarnApplicationState(yarnApplicationState);
+    return appHD;
+  }
+
+  @Public
+  @Unstable
+  public ApplicationId getApplicationId() {
+    return applicationId;
+  }
+
+  @Public
+  @Unstable
+  public void setApplicationId(ApplicationId applicationId) {
+    this.applicationId = applicationId;
+  }
+
+  @Public
+  @Unstable
+  public String getApplicationName() {
+    return applicationName;
+  }
+
+  @Public
+  @Unstable
+  public void setApplicationName(String applicationName) {
+    this.applicationName = applicationName;
+  }
+
+  @Public
+  @Unstable
+  public String getApplicationType() {
+    return applicationType;
+  }
+
+  @Public
+  @Unstable
+  public void setApplicationType(String applicationType) {
+    this.applicationType = applicationType;
+  }
+
+  @Public
+  @Unstable
+  public String getUser() {
+    return user;
+  }
+
+  @Public
+  @Unstable
+  public void setUser(String user) {
+    this.user = user;
+  }
+
+  @Public
+  @Unstable
+  public String getQueue() {
+    return queue;
+  }
+
+  @Public
+  @Unstable
+  public void setQueue(String queue) {
+    this.queue = queue;
+  }
+
+  @Public
+  @Unstable
+  public long getSubmitTime() {
+    return submitTime;
+  }
+
+  @Public
+  @Unstable
+  public void setSubmitTime(long submitTime) {
+    this.submitTime = submitTime;
+  }
+
+  @Public
+  @Unstable
+  public long getStartTime() {
+    return startTime;
+  }
+
+  @Public
+  @Unstable
+  public void setStartTime(long startTime) {
+    this.startTime = startTime;
+  }
+
+  @Public
+  @Unstable
+  public long getFinishTime() {
+    return finishTime;
+  }
+
+  @Public
+  @Unstable
+  public void setFinishTime(long finishTime) {
+    this.finishTime = finishTime;
+  }
+
+  @Public
+  @Unstable
+  public String getDiagnosticsInfo() {
+    return diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    this.diagnosticsInfo = diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public FinalApplicationStatus getFinalApplicationStatus() {
+    return finalApplicationStatus;
+  }
+
+  @Public
+  @Unstable
+  public void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus) {
+    this.finalApplicationStatus = finalApplicationStatus;
+  }
+
+  @Public
+  @Unstable
+  public YarnApplicationState getYarnApplicationState() {
+    return this.yarnApplicationState;
+  }
+
+  @Public
+  @Unstable
+  public void
+      setYarnApplicationState(YarnApplicationState yarnApplicationState) {
+    this.yarnApplicationState = yarnApplicationState;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java
new file mode 100644
index 0000000..6bc1323
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when <code>RMApp</code>
+ * starts, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ApplicationStartData {
+
+  @Public
+  @Unstable
+  public static ApplicationStartData newInstance(ApplicationId applicationId,
+      String applicationName, String applicationType, String queue,
+      String user, long submitTime, long startTime) {
+    ApplicationStartData appSD = Records.newRecord(ApplicationStartData.class);
+    appSD.setApplicationId(applicationId);
+    appSD.setApplicationName(applicationName);
+    appSD.setApplicationType(applicationType);
+    appSD.setQueue(queue);
+    appSD.setUser(user);
+    appSD.setSubmitTime(submitTime);
+    appSD.setStartTime(startTime);
+    return appSD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ApplicationId getApplicationId();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationId(ApplicationId applicationId);
+
+  @Public
+  @Unstable
+  public abstract String getApplicationName();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationName(String applicationName);
+
+  @Public
+  @Unstable
+  public abstract String getApplicationType();
+
+  @Public
+  @Unstable
+  public abstract void setApplicationType(String applicationType);
+
+  @Public
+  @Unstable
+  public abstract String getUser();
+
+  @Public
+  @Unstable
+  public abstract void setUser(String user);
+
+  @Public
+  @Unstable
+  public abstract String getQueue();
+
+  @Public
+  @Unstable
+  public abstract void setQueue(String queue);
+
+  @Public
+  @Unstable
+  public abstract long getSubmitTime();
+
+  @Public
+  @Unstable
+  public abstract void setSubmitTime(long submitTime);
+
+  @Public
+  @Unstable
+  public abstract long getStartTime();
+
+  @Public
+  @Unstable
+  public abstract void setStartTime(long startTime);
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java
new file mode 100644
index 0000000..5eb9ddb
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when
+ * <code>RMContainer</code> finishes, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ContainerFinishData {
+
+  @Public
+  @Unstable
+  public static ContainerFinishData newInstance(ContainerId containerId,
+      long finishTime, String diagnosticsInfo, int containerExitStatus,
+      ContainerState containerState) {
+    ContainerFinishData containerFD =
+        Records.newRecord(ContainerFinishData.class);
+    containerFD.setContainerId(containerId);
+    containerFD.setFinishTime(finishTime);
+    containerFD.setDiagnosticsInfo(diagnosticsInfo);
+    containerFD.setContainerExitStatus(containerExitStatus);
+    containerFD.setContainerState(containerState);
+    return containerFD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ContainerId getContainerId();
+
+  @Public
+  @Unstable
+  public abstract void setContainerId(ContainerId containerId);
+
+  @Public
+  @Unstable
+  public abstract long getFinishTime();
+
+  @Public
+  @Unstable
+  public abstract void setFinishTime(long finishTime);
+
+  @Public
+  @Unstable
+  public abstract String getDiagnosticsInfo();
+
+  @Public
+  @Unstable
+  public abstract void setDiagnosticsInfo(String diagnosticsInfo);
+
+  @Public
+  @Unstable
+  public abstract int getContainerExitStatus();
+
+  @Public
+  @Unstable
+  public abstract void setContainerExitStatus(int containerExitStatus);
+
+  @Public
+  @Unstable
+  public abstract ContainerState getContainerState();
+
+  @Public
+  @Unstable
+  public abstract void setContainerState(ContainerState containerState);
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java
new file mode 100644
index 0000000..e606185
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * The class contains all the fields that are stored persistently for
+ * <code>RMContainer</code>.
+ */
+@Public
+@Unstable
+public class ContainerHistoryData {
+
+  private ContainerId containerId;
+
+  private Resource allocatedResource;
+
+  private NodeId assignedNode;
+
+  private Priority priority;
+
+  private long startTime;
+
+  private long finishTime;
+
+  private String diagnosticsInfo;
+
+  private int containerExitStatus;
+
+  private ContainerState containerState;
+
+  @Public
+  @Unstable
+  public static ContainerHistoryData newInstance(ContainerId containerId,
+      Resource allocatedResource, NodeId assignedNode, Priority priority,
+      long startTime, long finishTime, String diagnosticsInfo,
+      int containerExitStatus, ContainerState containerState) {
+    ContainerHistoryData containerHD = new ContainerHistoryData();
+    containerHD.setContainerId(containerId);
+    containerHD.setAllocatedResource(allocatedResource);
+    containerHD.setAssignedNode(assignedNode);
+    containerHD.setPriority(priority);
+    containerHD.setStartTime(startTime);
+    containerHD.setFinishTime(finishTime);
+    containerHD.setDiagnosticsInfo(diagnosticsInfo);
+    containerHD.setContainerExitStatus(containerExitStatus);
+    containerHD.setContainerState(containerState);
+    return containerHD;
+  }
+
+  @Public
+  @Unstable
+  public ContainerId getContainerId() {
+    return containerId;
+  }
+
+  @Public
+  @Unstable
+  public void setContainerId(ContainerId containerId) {
+    this.containerId = containerId;
+  }
+
+  @Public
+  @Unstable
+  public Resource getAllocatedResource() {
+    return allocatedResource;
+  }
+
+  @Public
+  @Unstable
+  public void setAllocatedResource(Resource resource) {
+    this.allocatedResource = resource;
+  }
+
+  @Public
+  @Unstable
+  public NodeId getAssignedNode() {
+    return assignedNode;
+  }
+
+  @Public
+  @Unstable
+  public void setAssignedNode(NodeId nodeId) {
+    this.assignedNode = nodeId;
+  }
+
+  @Public
+  @Unstable
+  public Priority getPriority() {
+    return priority;
+  }
+
+  @Public
+  @Unstable
+  public void setPriority(Priority priority) {
+    this.priority = priority;
+  }
+
+  @Public
+  @Unstable
+  public long getStartTime() {
+    return startTime;
+  }
+
+  @Public
+  @Unstable
+  public void setStartTime(long startTime) {
+    this.startTime = startTime;
+  }
+
+  @Public
+  @Unstable
+  public long getFinishTime() {
+    return finishTime;
+  }
+
+  @Public
+  @Unstable
+  public void setFinishTime(long finishTime) {
+    this.finishTime = finishTime;
+  }
+
+  @Public
+  @Unstable
+  public String getDiagnosticsInfo() {
+    return diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    this.diagnosticsInfo = diagnosticsInfo;
+  }
+
+  @Public
+  @Unstable
+  public int getContainerExitStatus() {
+    return containerExitStatus;
+  }
+
+  @Public
+  @Unstable
+  public void setContainerExitStatus(int containerExitStatus) {
+    this.containerExitStatus = containerExitStatus;
+  }
+
+  @Public
+  @Unstable
+  public ContainerState getContainerState() {
+    return containerState;
+  }
+
+  @Public
+  @Unstable
+  public void setContainerState(ContainerState containerState) {
+    this.containerState = containerState;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java
new file mode 100644
index 0000000..0c6dd81
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * The class contains the fields that can be determined when
+ * <code>RMContainer</code> starts, and that need to be stored persistently.
+ */
+@Public
+@Unstable
+public abstract class ContainerStartData {
+
+  @Public
+  @Unstable
+  public static ContainerStartData newInstance(ContainerId containerId,
+      Resource allocatedResource, NodeId assignedNode, Priority priority,
+      long startTime) {
+    ContainerStartData containerSD =
+        Records.newRecord(ContainerStartData.class);
+    containerSD.setContainerId(containerId);
+    containerSD.setAllocatedResource(allocatedResource);
+    containerSD.setAssignedNode(assignedNode);
+    containerSD.setPriority(priority);
+    containerSD.setStartTime(startTime);
+    return containerSD;
+  }
+
+  @Public
+  @Unstable
+  public abstract ContainerId getContainerId();
+
+  @Public
+  @Unstable
+  public abstract void setContainerId(ContainerId containerId);
+
+  @Public
+  @Unstable
+  public abstract Resource getAllocatedResource();
+
+  @Public
+  @Unstable
+  public abstract void setAllocatedResource(Resource resource);
+
+  @Public
+  @Unstable
+  public abstract NodeId getAssignedNode();
+
+  @Public
+  @Unstable
+  public abstract void setAssignedNode(NodeId nodeId);
+
+  @Public
+  @Unstable
+  public abstract Priority getPriority();
+
+  @Public
+  @Unstable
+  public abstract void setPriority(Priority priority);
+
+  @Public
+  @Unstable
+  public abstract long getStartTime();
+
+  @Public
+  @Unstable
+  public abstract void setStartTime(long startTime);
+
+}
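
Note that ContainerHistoryData is a plain in-memory bean, while
ContainerStartData and ContainerFinishData are Records-backed abstract
classes that get persisted individually. A hypothetical assembler sketch,
assuming a start and a finish record for the same container have been read
back from a store (the helper class and method are illustrative, not part
of the patch; a store-side reader would perform an equivalent merge):

import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;

public final class ContainerHistoryAssembler {

  // Combines the persisted start and finish records into the single
  // per-container history view defined above.
  static ContainerHistoryData merge(ContainerStartData start,
      ContainerFinishData finish) {
    return ContainerHistoryData.newInstance(
        start.getContainerId(),
        start.getAllocatedResource(),
        start.getAssignedNode(),
        start.getPriority(),
        start.getStartTime(),
        finish.getFinishTime(),
        finish.getDiagnosticsInfo(),
        finish.getContainerExitStatus(),
        finish.getContainerState());
  }
}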

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java
new file mode 100644
index 0000000..945c12f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+
+import com.google.protobuf.TextFormat;
+
+public class ApplicationAttemptFinishDataPBImpl extends
+    ApplicationAttemptFinishData {
+
+  ApplicationAttemptFinishDataProto proto = ApplicationAttemptFinishDataProto
+    .getDefaultInstance();
+  ApplicationAttemptFinishDataProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public ApplicationAttemptFinishDataPBImpl() {
+    builder = ApplicationAttemptFinishDataProto.newBuilder();
+  }
+
+  public ApplicationAttemptFinishDataPBImpl(
+      ApplicationAttemptFinishDataProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private ApplicationAttemptId applicationAttemptId;
+
+  @Override
+  public ApplicationAttemptId getApplicationAttemptId() {
+    if (this.applicationAttemptId != null) {
+      return this.applicationAttemptId;
+    }
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasApplicationAttemptId()) {
+      return null;
+    }
+    this.applicationAttemptId =
+        convertFromProtoFormat(p.getApplicationAttemptId());
+    return this.applicationAttemptId;
+  }
+
+  @Override
+  public void
+      setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
+    maybeInitBuilder();
+    if (applicationAttemptId == null) {
+      builder.clearApplicationAttemptId();
+    }
+    this.applicationAttemptId = applicationAttemptId;
+  }
+
+  @Override
+  public String getTrackingURL() {
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasTrackingUrl()) {
+      return null;
+    }
+    return p.getTrackingUrl();
+  }
+
+  @Override
+  public void setTrackingURL(String trackingURL) {
+    maybeInitBuilder();
+    if (trackingURL == null) {
+      builder.clearTrackingUrl();
+      return;
+    }
+    builder.setTrackingUrl(trackingURL);
+  }
+
+  @Override
+  public String getDiagnosticsInfo() {
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasDiagnosticsInfo()) {
+      return null;
+    }
+    return p.getDiagnosticsInfo();
+  }
+
+  @Override
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    maybeInitBuilder();
+    if (diagnosticsInfo == null) {
+      builder.clearDiagnosticsInfo();
+      return;
+    }
+    builder.setDiagnosticsInfo(diagnosticsInfo);
+  }
+
+  @Override
+  public FinalApplicationStatus getFinalApplicationStatus() {
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasFinalApplicationStatus()) {
+      return null;
+    }
+    return convertFromProtoFormat(p.getFinalApplicationStatus());
+  }
+
+  @Override
+  public void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus) {
+    maybeInitBuilder();
+    if (finalApplicationStatus == null) {
+      builder.clearFinalApplicationStatus();
+      return;
+    }
+    builder
+      .setFinalApplicationStatus(convertToProtoFormat(finalApplicationStatus));
+  }
+
+  @Override
+  public YarnApplicationAttemptState getYarnApplicationAttemptState() {
+    ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasYarnApplicationAttemptState()) {
+      return null;
+    }
+    return convertFromProtoFormat(p.getYarnApplicationAttemptState());
+  }
+
+  @Override
+  public void setYarnApplicationAttemptState(YarnApplicationAttemptState state) {
+    maybeInitBuilder();
+    if (state == null) {
+      builder.clearYarnApplicationAttemptState();
+      return;
+    }
+    builder.setYarnApplicationAttemptState(convertToProtoFormat(state));
+  }
+
+  public ApplicationAttemptFinishDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.applicationAttemptId != null
+        && !((ApplicationAttemptIdPBImpl) this.applicationAttemptId).getProto()
+          .equals(builder.getApplicationAttemptId())) {
+      builder
+        .setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ApplicationAttemptFinishDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private ApplicationAttemptIdPBImpl convertFromProtoFormat(
+      ApplicationAttemptIdProto applicationAttemptId) {
+    return new ApplicationAttemptIdPBImpl(applicationAttemptId);
+  }
+
+  private ApplicationAttemptIdProto convertToProtoFormat(
+      ApplicationAttemptId applicationAttemptId) {
+    return ((ApplicationAttemptIdPBImpl) applicationAttemptId).getProto();
+  }
+
+  private FinalApplicationStatus convertFromProtoFormat(
+      FinalApplicationStatusProto finalApplicationStatus) {
+    return ProtoUtils.convertFromProtoFormat(finalApplicationStatus);
+  }
+
+  private FinalApplicationStatusProto convertToProtoFormat(
+      FinalApplicationStatus finalApplicationStatus) {
+    return ProtoUtils.convertToProtoFormat(finalApplicationStatus);
+  }
+
+  private YarnApplicationAttemptStateProto convertToProtoFormat(
+      YarnApplicationAttemptState state) {
+    return ProtoUtils.convertToProtoFormat(state);
+  }
+
+  private YarnApplicationAttemptState convertFromProtoFormat(
+      YarnApplicationAttemptStateProto yarnApplicationAttemptState) {
+    return ProtoUtils.convertFromProtoFormat(yarnApplicationAttemptState);
+  }
+
+}
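
All of the *PBImpl classes below share the same lazy proto/builder pattern:
an instance either wraps a frozen proto (viaProto == true) or a mutable
builder; every setter first calls maybeInitBuilder() to fork the proto into
a builder, object-valued fields are cached in plain Java fields, and
getProto() runs mergeLocalToProto() to fold those caches back into the
builder before freezing the result. A minimal round-trip sketch, assuming
the generated ApplicationHistoryServerProtos are on the classpath (class
name and id values are illustrative):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptFinishDataPBImpl;

public class PBImplRoundTrip {
  public static void main(String[] args) throws Exception {
    ApplicationAttemptFinishDataPBImpl data =
        new ApplicationAttemptFinishDataPBImpl();
    data.setApplicationAttemptId(ApplicationAttemptId.newInstance(
        ApplicationId.newInstance(1411412531000L, 7), 1));
    data.setDiagnosticsInfo("attempt finished cleanly");
    data.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);

    // getProto() merges the cached ApplicationAttemptId into the builder,
    // builds, and flips the impl to viaProto; the bytes are what a store
    // would persist.
    byte[] bytes = data.getProto().toByteArray();

    // Reading back: wrap the parsed proto; getters convert lazily.
    ApplicationAttemptFinishDataPBImpl restored =
        new ApplicationAttemptFinishDataPBImpl(
            ApplicationAttemptFinishDataProto.parseFrom(bytes));
    System.out.println(restored.getFinalApplicationStatus());
  }
}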

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java
new file mode 100644
index 0000000..1f67fc7
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java
@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+
+import com.google.protobuf.TextFormat;
+
+public class ApplicationAttemptStartDataPBImpl extends
+    ApplicationAttemptStartData {
+
+  ApplicationAttemptStartDataProto proto = ApplicationAttemptStartDataProto
+    .getDefaultInstance();
+  ApplicationAttemptStartDataProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public ApplicationAttemptStartDataPBImpl() {
+    builder = ApplicationAttemptStartDataProto.newBuilder();
+  }
+
+  public ApplicationAttemptStartDataPBImpl(
+      ApplicationAttemptStartDataProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private ApplicationAttemptId applicationAttemptId;
+  private ContainerId masterContainerId;
+
+  @Override
+  public ApplicationAttemptId getApplicationAttemptId() {
+    if (this.applicationAttemptId != null) {
+      return this.applicationAttemptId;
+    }
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasApplicationAttemptId()) {
+      return null;
+    }
+    this.applicationAttemptId =
+        convertFromProtoFormat(p.getApplicationAttemptId());
+    return this.applicationAttemptId;
+  }
+
+  @Override
+  public void
+      setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
+    maybeInitBuilder();
+    if (applicationAttemptId == null) {
+      builder.clearApplicationAttemptId();
+    }
+    this.applicationAttemptId = applicationAttemptId;
+  }
+
+  @Override
+  public String getHost() {
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasHost()) {
+      return null;
+    }
+    return p.getHost();
+  }
+
+  @Override
+  public void setHost(String host) {
+    maybeInitBuilder();
+    if (host == null) {
+      builder.clearHost();
+      return;
+    }
+    builder.setHost(host);
+  }
+
+  @Override
+  public int getRPCPort() {
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getRpcPort();
+  }
+
+  @Override
+  public void setRPCPort(int rpcPort) {
+    maybeInitBuilder();
+    builder.setRpcPort(rpcPort);
+  }
+
+  @Override
+  public ContainerId getMasterContainerId() {
+    if (this.masterContainerId != null) {
+      return this.masterContainerId;
+    }
+    ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasMasterContainerId()) {
+      return null;
+    }
+    this.masterContainerId = convertFromProtoFormat(p.getMasterContainerId());
+    return this.masterContainerId;
+  }
+
+  @Override
+  public void setMasterContainerId(ContainerId masterContainerId) {
+    maybeInitBuilder();
+    if (masterContainerId == null) {
+      builder.clearMasterContainerId();
+    }
+    this.masterContainerId = masterContainerId;
+  }
+
+  public ApplicationAttemptStartDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.applicationAttemptId != null
+        && !((ApplicationAttemptIdPBImpl) this.applicationAttemptId).getProto()
+          .equals(builder.getApplicationAttemptId())) {
+      builder
+        .setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
+    }
+    if (this.masterContainerId != null
+        && !((ContainerIdPBImpl) this.masterContainerId).getProto().equals(
+          builder.getMasterContainerId())) {
+      builder
+        .setMasterContainerId(convertToProtoFormat(this.masterContainerId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ApplicationAttemptStartDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private ApplicationAttemptIdPBImpl convertFromProtoFormat(
+      ApplicationAttemptIdProto applicationAttemptId) {
+    return new ApplicationAttemptIdPBImpl(applicationAttemptId);
+  }
+
+  private ApplicationAttemptIdProto convertToProtoFormat(
+      ApplicationAttemptId applicationAttemptId) {
+    return ((ApplicationAttemptIdPBImpl) applicationAttemptId).getProto();
+  }
+
+  private ContainerIdPBImpl
+      convertFromProtoFormat(ContainerIdProto containerId) {
+    return new ContainerIdPBImpl(containerId);
+  }
+
+  private ContainerIdProto convertToProtoFormat(ContainerId masterContainerId) {
+    return ((ContainerIdPBImpl) masterContainerId).getProto();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationFinishDataPBImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationFinishDataPBImpl.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationFinishDataPBImpl.java
new file mode 100644
index 0000000..337426d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationFinishDataPBImpl.java
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+
+import com.google.protobuf.TextFormat;
+
+public class ApplicationFinishDataPBImpl extends ApplicationFinishData {
+
+  ApplicationFinishDataProto proto = ApplicationFinishDataProto
+    .getDefaultInstance();
+  ApplicationFinishDataProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private ApplicationId applicationId;
+
+  public ApplicationFinishDataPBImpl() {
+    builder = ApplicationFinishDataProto.newBuilder();
+  }
+
+  public ApplicationFinishDataPBImpl(ApplicationFinishDataProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  @Override
+  public ApplicationId getApplicationId() {
+    if (this.applicationId != null) {
+      return this.applicationId;
+    }
+    ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasApplicationId()) {
+      return null;
+    }
+    this.applicationId = convertFromProtoFormat(p.getApplicationId());
+    return this.applicationId;
+  }
+
+  @Override
+  public void setApplicationId(ApplicationId applicationId) {
+    maybeInitBuilder();
+    if (applicationId == null) {
+      builder.clearApplicationId();
+    }
+    this.applicationId = applicationId;
+  }
+
+  @Override
+  public long getFinishTime() {
+    ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getFinishTime();
+  }
+
+  @Override
+  public void setFinishTime(long finishTime) {
+    maybeInitBuilder();
+    builder.setFinishTime(finishTime);
+  }
+
+  @Override
+  public String getDiagnosticsInfo() {
+    ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasDiagnosticsInfo()) {
+      return null;
+    }
+    return p.getDiagnosticsInfo();
+  }
+
+  @Override
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    maybeInitBuilder();
+    if (diagnosticsInfo == null) {
+      builder.clearDiagnosticsInfo();
+      return;
+    }
+    builder.setDiagnosticsInfo(diagnosticsInfo);
+  }
+
+  @Override
+  public FinalApplicationStatus getFinalApplicationStatus() {
+    ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasFinalApplicationStatus()) {
+      return null;
+    }
+    return convertFromProtoFormat(p.getFinalApplicationStatus());
+  }
+
+  @Override
+  public void setFinalApplicationStatus(
+      FinalApplicationStatus finalApplicationStatus) {
+    maybeInitBuilder();
+    if (finalApplicationStatus == null) {
+      builder.clearFinalApplicationStatus();
+      return;
+    }
+    builder
+      .setFinalApplicationStatus(convertToProtoFormat(finalApplicationStatus));
+  }
+
+  @Override
+  public YarnApplicationState getYarnApplicationState() {
+    ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasYarnApplicationState()) {
+      return null;
+    }
+    return convertFromProtoFormat(p.getYarnApplicationState());
+  }
+
+  @Override
+  public void setYarnApplicationState(YarnApplicationState state) {
+    maybeInitBuilder();
+    if (state == null) {
+      builder.clearYarnApplicationState();
+      return;
+    }
+    builder.setYarnApplicationState(convertToProtoFormat(state));
+  }
+
+  public ApplicationFinishDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.applicationId != null
+        && !((ApplicationIdPBImpl) this.applicationId).getProto().equals(
+          builder.getApplicationId())) {
+      builder.setApplicationId(convertToProtoFormat(this.applicationId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ApplicationFinishDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private ApplicationIdProto convertToProtoFormat(ApplicationId applicationId) {
+    return ((ApplicationIdPBImpl) applicationId).getProto();
+  }
+
+  private ApplicationIdPBImpl convertFromProtoFormat(
+      ApplicationIdProto applicationId) {
+    return new ApplicationIdPBImpl(applicationId);
+  }
+
+  private FinalApplicationStatus convertFromProtoFormat(
+      FinalApplicationStatusProto finalApplicationStatus) {
+    return ProtoUtils.convertFromProtoFormat(finalApplicationStatus);
+  }
+
+  private FinalApplicationStatusProto convertToProtoFormat(
+      FinalApplicationStatus finalApplicationStatus) {
+    return ProtoUtils.convertToProtoFormat(finalApplicationStatus);
+  }
+
+  private YarnApplicationStateProto convertToProtoFormat(
+      YarnApplicationState state) {
+    return ProtoUtils.convertToProtoFormat(state);
+  }
+
+  private YarnApplicationState convertFromProtoFormat(
+      YarnApplicationStateProto yarnApplicationState) {
+    return ProtoUtils.convertFromProtoFormat(yarnApplicationState);
+  }
+
+}
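
One consequence of this pattern: equals() and hashCode() delegate to
getProto(), so two impls compare equal whenever their merged protos carry
the same field values, regardless of whether each was built through setters
or parsed from bytes. A short sketch (class name is illustrative):

import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationFinishDataPBImpl;

public class ProtoEquality {
  public static void main(String[] args) {
    ApplicationFinishDataPBImpl a = new ApplicationFinishDataPBImpl();
    ApplicationFinishDataPBImpl b = new ApplicationFinishDataPBImpl();
    a.setFinishTime(42L);
    b.setFinishTime(42L);
    // Both equals() and hashCode() are computed on the merged protos.
    System.out.println(a.equals(b) && a.hashCode() == b.hashCode());  // true
  }
}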

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationStartDataPBImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationStartDataPBImpl.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationStartDataPBImpl.java
new file mode 100644
index 0000000..56f7aff
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationStartDataPBImpl.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+
+import com.google.protobuf.TextFormat;
+
+public class ApplicationStartDataPBImpl extends ApplicationStartData {
+
+  ApplicationStartDataProto proto = ApplicationStartDataProto
+    .getDefaultInstance();
+  ApplicationStartDataProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private ApplicationId applicationId;
+
+  public ApplicationStartDataPBImpl() {
+    builder = ApplicationStartDataProto.newBuilder();
+  }
+
+  public ApplicationStartDataPBImpl(ApplicationStartDataProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  @Override
+  public ApplicationId getApplicationId() {
+    if (this.applicationId != null) {
+      return this.applicationId;
+    }
+    ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasApplicationId()) {
+      return null;
+    }
+    this.applicationId = convertFromProtoFormat(p.getApplicationId());
+    return this.applicationId;
+  }
+
+  @Override
+  public void setApplicationId(ApplicationId applicationId) {
+    maybeInitBuilder();
+    if (applicationId == null) {
+      builder.clearApplicationId();
+    }
+    this.applicationId = applicationId;
+  }
+
+  @Override
+  public String getApplicationName() {
+    ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasApplicationName()) {
+      return null;
+    }
+    return p.getApplicationName();
+  }
+
+  @Override
+  public void setApplicationName(String applicationName) {
+    maybeInitBuilder();
+    if (applicationName == null) {
+      builder.clearApplicationName();
+      return;
+    }
+    builder.setApplicationName(applicationName);
+  }
+
+  @Override
+  public String getApplicationType() {
+    ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasApplicationType()) {
+      return null;
+    }
+    return p.getApplicationType();
+  }
+
+  @Override
+  public void setApplicationType(String applicationType) {
+    maybeInitBuilder();
+    if (applicationType == null) {
+      builder.clearApplicationType();
+      return;
+    }
+    builder.setApplicationType(applicationType);
+  }
+
+  @Override
+  public String getUser() {
+    ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasUser()) {
+      return null;
+    }
+    return p.getUser();
+  }
+
+  @Override
+  public void setUser(String user) {
+    maybeInitBuilder();
+    if (user == null) {
+      builder.clearUser();
+      return;
+    }
+    builder.setUser(user);
+  }
+
+  @Override
+  public String getQueue() {
+    ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasQueue()) {
+      return null;
+    }
+    return p.getQueue();
+  }
+
+  @Override
+  public void setQueue(String queue) {
+    maybeInitBuilder();
+    if (queue == null) {
+      builder.clearQueue();
+      return;
+    }
+    builder.setQueue(queue);
+  }
+
+  @Override
+  public long getSubmitTime() {
+    ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getSubmitTime();
+  }
+
+  @Override
+  public void setSubmitTime(long submitTime) {
+    maybeInitBuilder();
+    builder.setSubmitTime(submitTime);
+  }
+
+  @Override
+  public long getStartTime() {
+    ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getStartTime();
+  }
+
+  @Override
+  public void setStartTime(long startTime) {
+    maybeInitBuilder();
+    builder.setStartTime(startTime);
+  }
+
+  public ApplicationStartDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.applicationId != null
+        && !((ApplicationIdPBImpl) this.applicationId).getProto().equals(
+          builder.getApplicationId())) {
+      builder.setApplicationId(convertToProtoFormat(this.applicationId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ApplicationStartDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private ApplicationIdProto convertToProtoFormat(ApplicationId applicationId) {
+    return ((ApplicationIdPBImpl) applicationId).getProto();
+  }
+
+  private ApplicationIdPBImpl convertFromProtoFormat(
+      ApplicationIdProto applicationId) {
+    return new ApplicationIdPBImpl(applicationId);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerFinishDataPBImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerFinishDataPBImpl.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerFinishDataPBImpl.java
new file mode 100644
index 0000000..8bc01e0
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerFinishDataPBImpl.java
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+
+import com.google.protobuf.TextFormat;
+
+public class ContainerFinishDataPBImpl extends ContainerFinishData {
+
+  ContainerFinishDataProto proto = ContainerFinishDataProto
+    .getDefaultInstance();
+  ContainerFinishDataProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private ContainerId containerId;
+
+  public ContainerFinishDataPBImpl() {
+    builder = ContainerFinishDataProto.newBuilder();
+  }
+
+  public ContainerFinishDataPBImpl(ContainerFinishDataProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  @Override
+  public ContainerId getContainerId() {
+    if (this.containerId != null) {
+      return this.containerId;
+    }
+    ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasContainerId()) {
+      return null;
+    }
+    this.containerId = convertFromProtoFormat(p.getContainerId());
+    return this.containerId;
+  }
+
+  @Override
+  public void setContainerId(ContainerId containerId) {
+    maybeInitBuilder();
+    if (containerId == null) {
+      builder.clearContainerId();
+    }
+    this.containerId = containerId;
+  }
+
+  @Override
+  public long getFinishTime() {
+    ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getFinishTime();
+  }
+
+  @Override
+  public void setFinishTime(long finishTime) {
+    maybeInitBuilder();
+    builder.setFinishTime(finishTime);
+  }
+
+  @Override
+  public String getDiagnosticsInfo() {
+    ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasDiagnosticsInfo()) {
+      return null;
+    }
+    return p.getDiagnosticsInfo();
+  }
+
+  @Override
+  public void setDiagnosticsInfo(String diagnosticsInfo) {
+    maybeInitBuilder();
+    if (diagnosticsInfo == null) {
+      builder.clearDiagnosticsInfo();
+      return;
+    }
+    builder.setDiagnosticsInfo(diagnosticsInfo);
+  }
+
+  @Override
+  public int getContainerExitStatus() {
+    ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getContainerExitStatus();
+  }
+
+  @Override
+  public void setContainerExitStatus(int containerExitStatus) {
+    maybeInitBuilder();
+    builder.setContainerExitStatus(containerExitStatus);
+  }
+
+  @Override
+  public ContainerState getContainerState() {
+    ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasContainerState()) {
+      return null;
+    }
+    return convertFromProtoFormat(p.getContainerState());
+  }
+
+  @Override
+  public void setContainerState(ContainerState state) {
+    maybeInitBuilder();
+    if (state == null) {
+      builder.clearContainerState();
+      return;
+    }
+    builder.setContainerState(convertToProtoFormat(state));
+  }
+
+  public ContainerFinishDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.containerId != null
+        && !((ContainerIdPBImpl) this.containerId).getProto().equals(
+          builder.getContainerId())) {
+      builder.setContainerId(convertToProtoFormat(this.containerId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ContainerFinishDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private ContainerIdProto convertToProtoFormat(ContainerId containerId) {
+    return ((ContainerIdPBImpl) containerId).getProto();
+  }
+
+  private ContainerIdPBImpl
+      convertFromProtoFormat(ContainerIdProto containerId) {
+    return new ContainerIdPBImpl(containerId);
+  }
+
+  private ContainerStateProto convertToProtoFormat(ContainerState state) {
+    return ProtoUtils.convertToProtoFormat(state);
+  }
+
+  private ContainerState convertFromProtoFormat(
+      ContainerStateProto containerState) {
+    return ProtoUtils.convertFromProtoFormat(containerState);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerStartDataPBImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerStartDataPBImpl.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerStartDataPBImpl.java
new file mode 100644
index 0000000..6d248b2
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerStartDataPBImpl.java
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProto;
+import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+
+import com.google.protobuf.TextFormat;
+
+public class ContainerStartDataPBImpl extends ContainerStartData {
+
+  ContainerStartDataProto proto = ContainerStartDataProto.getDefaultInstance();
+  ContainerStartDataProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private ContainerId containerId;
+  private Resource resource;
+  private NodeId nodeId;
+  private Priority priority;
+
+  public ContainerStartDataPBImpl() {
+    builder = ContainerStartDataProto.newBuilder();
+  }
+
+  public ContainerStartDataPBImpl(ContainerStartDataProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  @Override
+  public ContainerId getContainerId() {
+    if (this.containerId != null) {
+      return this.containerId;
+    }
+    ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasContainerId()) {
+      return null;
+    }
+    this.containerId = convertFromProtoFormat(p.getContainerId());
+    return this.containerId;
+  }
+
+  @Override
+  public void setContainerId(ContainerId containerId) {
+    maybeInitBuilder();
+    if (containerId == null) {
+      builder.clearContainerId();
+    }
+    this.containerId = containerId;
+  }
+
+  @Override
+  public Resource getAllocatedResource() {
+    if (this.resource != null) {
+      return this.resource;
+    }
+    ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasAllocatedResource()) {
+      return null;
+    }
+    this.resource = convertFromProtoFormat(p.getAllocatedResource());
+    return this.resource;
+  }
+
+  @Override
+  public void setAllocatedResource(Resource resource) {
+    maybeInitBuilder();
+    if (resource == null) {
+      builder.clearAllocatedResource();
+    }
+    this.resource = resource;
+  }
+
+  @Override
+  public NodeId getAssignedNode() {
+    if (this.nodeId != null) {
+      return this.nodeId;
+    }
+    ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasAssignedNodeId()) {
+      return null;
+    }
+    this.nodeId = convertFromProtoFormat(p.getAssignedNodeId());
+    return this.nodeId;
+  }
+
+  @Override
+  public void setAssignedNode(NodeId nodeId) {
+    maybeInitBuilder();
+    if (nodeId == null) {
+      builder.clearAssignedNodeId();
+    }
+    this.nodeId = nodeId;
+  }
+
+  @Override
+  public Priority getPriority() {
+    if (this.priority != null) {
+      return this.priority;
+    }
+    ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasPriority()) {
+      return null;
+    }
+    this.priority = convertFromProtoFormat(p.getPriority());
+    return this.priority;
+  }
+
+  @Override
+  public void setPriority(Priority priority) {
+    maybeInitBuilder();
+    if (priority == null) {
+      builder.clearPriority();
+    }
+    this.priority = priority;
+  }
+
+  @Override
+  public long getStartTime() {
+    ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getStartTime();
+  }
+
+  @Override
+  public void setStartTime(long startTime) {
+    maybeInitBuilder();
+    builder.setStartTime(startTime);
+  }
+
+  public ContainerStartDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.containerId != null
+        && !((ContainerIdPBImpl) this.containerId).getProto().equals(
+          builder.getContainerId())) {
+      builder.setContainerId(convertToProtoFormat(this.containerId));
+    }
+    if (this.resource != null
+        && !((ResourcePBImpl) this.resource).getProto().equals(
+          builder.getAllocatedResource())) {
+      builder.setAllocatedResource(convertToProtoFormat(this.resource));
+    }
+    if (this.nodeId != null
+        && !((NodeIdPBImpl) this.nodeId).getProto().equals(
+          builder.getAssignedNodeId())) {
+      builder.setAssignedNodeId(convertToProtoFormat(this.nodeId));
+    }
+    if (this.priority != null
+        && !((PriorityPBImpl) this.priority).getProto().equals(
+          builder.getPriority())) {
+      builder.setPriority(convertToProtoFormat(this.priority));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ContainerStartDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private ContainerIdProto convertToProtoFormat(ContainerId containerId) {
+    return ((ContainerIdPBImpl) containerId).getProto();
+  }
+
+  private ContainerIdPBImpl
+      convertFromProtoFormat(ContainerIdProto containerId) {
+    return new ContainerIdPBImpl(containerId);
+  }
+
+  private ResourceProto convertToProtoFormat(Resource resource) {
+    return ((ResourcePBImpl) resource).getProto();
+  }
+
+  private ResourcePBImpl convertFromProtoFormat(ResourceProto resource) {
+    return new ResourcePBImpl(resource);
+  }
+
+  private NodeIdProto convertToProtoFormat(NodeId nodeId) {
+    return ((NodeIdPBImpl) nodeId).getProto();
+  }
+
+  private NodeIdPBImpl convertFromProtoFormat(NodeIdProto nodeId) {
+    return new NodeIdPBImpl(nodeId);
+  }
+
+  private PriorityProto convertToProtoFormat(Priority priority) {
+    return ((PriorityPBImpl) priority).getProto();
+  }
+
+  private PriorityPBImpl convertFromProtoFormat(PriorityProto priority) {
+    return new PriorityPBImpl(priority);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/EntityIdentifier.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/EntityIdentifier.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/EntityIdentifier.java
new file mode 100644
index 0000000..4b202d8
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/EntityIdentifier.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * The unique identifier for an entity.
+ */
+@Private
+@Unstable
+public class EntityIdentifier implements Comparable<EntityIdentifier> {
+
+  private String id;
+  private String type;
+
+  public EntityIdentifier(String id, String type) {
+    this.id = id;
+    this.type = type;
+  }
+
+  /**
+   * Get the entity Id.
+   * @return The entity Id.
+   */
+  public String getId() {
+    return id;
+  }
+
+  /**
+   * Get the entity type.
+   * @return The entity type.
+   */
+  public String getType() {
+    return type;
+  }
+
+  @Override
+  public int compareTo(EntityIdentifier other) {
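+    // Order by entity type first, then by entity id.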
+    int c = type.compareTo(other.type);
+    if (c != 0) return c;
+    return id.compareTo(other.id);
+  }
+
+  @Override
+  public int hashCode() {
+    // generated by eclipse
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((id == null) ? 0 : id.hashCode());
+    result = prime * result + ((type == null) ? 0 : type.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    // generated by eclipse
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    EntityIdentifier other = (EntityIdentifier) obj;
+    if (id == null) {
+      if (other.id != null)
+        return false;
+    } else if (!id.equals(other.id))
+      return false;
+    if (type == null) {
+      if (other.type != null)
+        return false;
+    } else if (!type.equals(other.type))
+      return false;
+    return true;
+  }
+
+  @Override
+  public String toString() {
+    return "{ id: " + id + ", type: "+ type + " }";
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/GenericObjectMapper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/GenericObjectMapper.java b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/GenericObjectMapper.java
new file mode 100644
index 0000000..b1846a3
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-hadoop-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/GenericObjectMapper.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
+import org.codehaus.jackson.map.ObjectWriter;
+
+/**
+ * A utility class providing methods for serializing and deserializing
+ * objects. The {@link #write(Object)} and {@link #read(byte[])} methods are
+ * used by the {@link LeveldbTimelineStore} to store and retrieve arbitrary
+ * JSON, while the {@link #writeReverseOrderedLong} and {@link
+ * #readReverseOrderedLong} methods are used to sort entities in descending
+ * start time order.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class GenericObjectMapper {
+  private static final byte[] EMPTY_BYTES = new byte[0];
+
+  public static final ObjectReader OBJECT_READER;
+  public static final ObjectWriter OBJECT_WRITER;
+
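+  // Sharing one ObjectReader/ObjectWriter pair is deliberate: unlike
+  // reconfiguring an ObjectMapper per call, these Jackson objects are
+  // immutable and thread-safe once built, so they can be reused freely.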
+  static {
+    ObjectMapper mapper = new ObjectMapper();
+    OBJECT_READER = mapper.reader(Object.class);
+    OBJECT_WRITER = mapper.writer();
+  }
+
+  /**
+   * Serializes an Object into a byte array. Along with {@link #read(byte[])},
+   * can be used to serialize an Object and deserialize it into an Object of
+   * the same type without needing to specify the Object's type,
+   * as long as it is one of the JSON-compatible objects understood by
+   * ObjectMapper.
+   *
+   * @param o An Object
+   * @return A byte array representation of the Object
+   * @throws IOException if there is a write error
+   */
+  public static byte[] write(Object o) throws IOException {
+    if (o == null) {
+      return EMPTY_BYTES;
+    }
+    return OBJECT_WRITER.writeValueAsBytes(o);
+  }
+
+  /**
+   * Deserializes an Object from a byte array created with
+   * {@link #write(Object)}.
+   *
+   * @param b A byte array
+   * @return An Object
+   * @throws IOException if there is a read error
+   */
+  public static Object read(byte[] b) throws IOException {
+    return read(b, 0);
+  }
+
+  /**
+   * Deserializes an Object from a byte array at a specified offset, assuming
+   * the bytes were created with {@link #write(Object)}.
+   *
+   * @param b A byte array
+   * @param offset Offset into the array
+   * @return An Object
+   * @throws IOException if there is a read error
+   */
+  public static Object read(byte[] b, int offset) throws IOException {
+    if (b == null || b.length == 0) {
+      return null;
+    }
+    return OBJECT_READER.readValue(b, offset, b.length - offset);
+  }
+
+  /**
+   * Converts a long to an 8-byte array so that lexicographic ordering of
+   * the produced byte arrays sorts the longs in descending order.
+   *
+   * @param l A long
+   * @return A byte array
+   */
+  public static byte[] writeReverseOrderedLong(long l) {
+    byte[] b = new byte[8];
+    return writeReverseOrderedLong(l, b, 0);
+  }
+
+  public static byte[] writeReverseOrderedLong(long l, byte[] b, int offset) {
+    b[offset] = (byte)(0x7f ^ ((l >> 56) & 0xff));
+    for (int i = offset+1; i < offset+7; i++) {
+      b[i] = (byte)(0xff ^ ((l >> 8*(7-i)) & 0xff));
+    }
+    b[offset+7] = (byte)(0xff ^ (l & 0xff));
+    return b;
+  }
+
+  /**
+   * Reads 8 bytes from an array starting at the specified offset and
+   * converts them to a long.  The bytes are assumed to have been created
+   * with {@link #writeReverseOrderedLong}.
+   *
+   * @param b A byte array
+   * @param offset An offset into the byte array
+   * @return A long
+   */
+  public static long readReverseOrderedLong(byte[] b, int offset) {
+    long l = b[offset] & 0xff;
+    for (int i = 1; i < 8; i++) {
+      l = l << 8;
+      l = l | (b[offset+i]&0xff);
+    }
+    return l ^ 0x7fffffffffffffffL;
+  }
+
+}
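
For reference, a minimal Python sketch (illustrative only, not part of this
patch) of the same reverse-ordered encoding: the sign bit of the top byte is
flipped and every other byte is complemented, so that lexicographically
*smaller* byte arrays correspond to *larger* longs, which is what lets a
LevelDB scan return entities in descending start-time order.

    def write_reverse_ordered_long(l):
        # Mirrors GenericObjectMapper.writeReverseOrderedLong: flip the
        # sign bit of the most significant byte, complement the rest.
        b = bytearray(8)
        b[0] = 0x7f ^ ((l >> 56) & 0xff)
        for i in range(1, 8):
            b[i] = 0xff ^ ((l >> (8 * (7 - i))) & 0xff)
        return bytes(b)

    # Larger timestamps produce lexicographically smaller keys:
    assert write_reverse_ordered_long(2000) < write_reverse_ordered_long(1000)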


[09/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/top.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/top.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/top.py
new file mode 100644
index 0000000..479c797
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/top.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of top / htop.
+
+Author: Giampaolo Rodola' <g....@gmail.com>
+
+$ python examples/top.py
+ CPU0  [|                                       ]   4.9%
+ CPU1  [|||                                     ]   7.8%
+ CPU2  [                                        ]   2.0%
+ CPU3  [|||||                                   ]  13.9%
+ Mem   [|||||||||||||||||||                     ]  49.8%  4920M/9888M
+ Swap  [                                        ]   0.0%     0M/0M
+ Processes: 287 (running=1 sleeping=286)
+ Load average: 0.34 0.54 0.46  Uptime: 3 days, 10:16:37
+
+PID    USER       NI  VIRT   RES   CPU% MEM%     TIME+  NAME
+------------------------------------------------------------
+989    giampaol    0   66M   12M    7.4  0.1   0:00.61  python
+2083   root        0  506M  159M    6.5  1.6   0:29.26  Xorg
+4503   giampaol    0  599M   25M    6.5  0.3   3:32.60  gnome-terminal
+3868   giampaol    0  358M    8M    2.8  0.1  23:12.60  pulseaudio
+3936   giampaol    0    1G  111M    2.8  1.1  33:41.67  compiz
+4401   giampaol    0  536M  141M    2.8  1.4  35:42.73  skype
+4047   giampaol    0  743M   76M    1.8  0.8  42:03.33  unity-panel-service
+13155  giampaol    0    1G  280M    1.8  2.8  41:57.34  chrome
+10     root        0    0B    0B    0.9  0.0   4:01.81  rcu_sched
+339    giampaol    0    1G  113M    0.9  1.1   8:15.73  chrome
+...
+"""
+
+import os
+import sys
+if os.name != 'posix':
+    sys.exit('platform not supported')
+import atexit
+import curses
+import time
+from datetime import datetime, timedelta
+
+import psutil
+
+
+# --- curses stuff
+def tear_down():
+    win.keypad(0)
+    curses.nocbreak()
+    curses.echo()
+    curses.endwin()
+
+win = curses.initscr()
+atexit.register(tear_down)
+curses.endwin()
+lineno = 0
+
+
+def print_line(line, highlight=False):
+    """A thin wrapper around curses's addstr()."""
+    global lineno
+    try:
+        if highlight:
+            line += " " * (win.getmaxyx()[1] - len(line))
+            win.addstr(lineno, 0, line, curses.A_REVERSE)
+        else:
+            win.addstr(lineno, 0, line, 0)
+    except curses.error:
+        lineno = 0
+        win.refresh()
+        raise
+    else:
+        lineno += 1
+# --- /curses stuff
+
+
+def bytes2human(n):
+    """
+    >>> bytes2human(10000)
+    '9K'
+    >>> bytes2human(100001221)
+    '95M'
+    """
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = int(float(n) / prefix[s])
+            return '%s%s' % (value, s)
+    return "%sB" % n
+
+
+def poll(interval):
+    # sleep some time
+    time.sleep(interval)
+    procs = []
+    procs_status = {}
+    for p in psutil.process_iter():
+        try:
+            p.dict = p.as_dict(['username', 'nice', 'memory_info',
+                                'memory_percent', 'cpu_percent',
+                                'cpu_times', 'name', 'status'])
+            try:
+                procs_status[p.dict['status']] += 1
+            except KeyError:
+                procs_status[p.dict['status']] = 1
+        except psutil.NoSuchProcess:
+            pass
+        else:
+            procs.append(p)
+
+    # return processes sorted by CPU percent usage
+    processes = sorted(procs, key=lambda p: p.dict['cpu_percent'],
+                       reverse=True)
+    return (processes, procs_status)
+
+
+def print_header(procs_status, num_procs):
+    """Print system-related info, above the process list."""
+
+    def get_dashes(perc):
+        dashes = "|" * int((float(perc) / 10 * 4))
+        empty_dashes = " " * (40 - len(dashes))
+        return dashes, empty_dashes
+
+    # cpu usage
+    percs = psutil.cpu_percent(interval=0, percpu=True)
+    for cpu_num, perc in enumerate(percs):
+        dashes, empty_dashes = get_dashes(perc)
+        print_line(" CPU%-2s [%s%s] %5s%%" % (cpu_num, dashes, empty_dashes,
+                                              perc))
+    mem = psutil.virtual_memory()
+    dashes, empty_dashes = get_dashes(mem.percent)
+    used = mem.total - mem.available
+    line = " Mem   [%s%s] %5s%% %6s/%s" % (
+        dashes, empty_dashes,
+        mem.percent,
+        str(int(used / 1024 / 1024)) + "M",
+        str(int(mem.total / 1024 / 1024)) + "M"
+    )
+    print_line(line)
+
+    # swap usage
+    swap = psutil.swap_memory()
+    dashes, empty_dashes = get_dashes(swap.percent)
+    line = " Swap  [%s%s] %5s%% %6s/%s" % (
+        dashes, empty_dashes,
+        swap.percent,
+        str(int(swap.used / 1024 / 1024)) + "M",
+        str(int(swap.total / 1024 / 1024)) + "M"
+    )
+    print_line(line)
+
+    # processes number and status
+    st = []
+    for x, y in procs_status.items():
+        if y:
+            st.append("%s=%s" % (x, y))
+    st.sort(key=lambda x: x[:3] in ('run', 'sle'), reverse=True)
+    print_line(" Processes: %s (%s)" % (num_procs, ' '.join(st)))
+    # load average, uptime
+    uptime = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
+    av1, av2, av3 = os.getloadavg()
+    line = " Load average: %.2f %.2f %.2f  Uptime: %s" \
+        % (av1, av2, av3, str(uptime).split('.')[0])
+    print_line(line)
+
+
+def refresh_window(procs, procs_status):
+    """Print results on screen by using curses."""
+    curses.endwin()
+    templ = "%-6s %-8s %4s %5s %5s %6s %4s %9s  %2s"
+    win.erase()
+    header = templ % ("PID", "USER", "NI", "VIRT", "RES", "CPU%", "MEM%",
+                      "TIME+", "NAME")
+    print_header(procs_status, len(procs))
+    print_line("")
+    print_line(header, highlight=True)
+    for p in procs:
+        # TIME+ column shows process CPU cumulative time and it
+        # is expressed as: "mm:ss.ms"
+        if p.dict['cpu_times'] is not None:
+            ctime = timedelta(seconds=sum(p.dict['cpu_times']))
+            ctime = "%s:%s.%s" % (ctime.seconds // 60 % 60,
+                                  str((ctime.seconds % 60)).zfill(2),
+                                  str(ctime.microseconds)[:2])
+        else:
+            ctime = ''
+        if p.dict['memory_percent'] is not None:
+            p.dict['memory_percent'] = round(p.dict['memory_percent'], 1)
+        else:
+            p.dict['memory_percent'] = ''
+        if p.dict['cpu_percent'] is None:
+            p.dict['cpu_percent'] = ''
+        if p.dict['username']:
+            username = p.dict['username'][:8]
+        else:
+            username = ""
+        line = templ % (p.pid,
+                        username,
+                        p.dict['nice'],
+                        bytes2human(getattr(p.dict['memory_info'], 'vms', 0)),
+                        bytes2human(getattr(p.dict['memory_info'], 'rss', 0)),
+                        p.dict['cpu_percent'],
+                        p.dict['memory_percent'],
+                        ctime,
+                        p.dict['name'] or '',
+                        )
+        try:
+            print_line(line)
+        except curses.error:
+            break
+        win.refresh()
+
+
+def main():
+    try:
+        interval = 0
+        while 1:
+            args = poll(interval)
+            refresh_window(*args)
+            interval = 1
+    except (KeyboardInterrupt, SystemExit):
+        pass
+
+if __name__ == '__main__':
+    main()
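
In the main loop above, each non-blocking cpu_percent() reading (via
as_dict()) is measured against the previous poll() call, which serves as
its baseline. A standalone one-shot version of the same idea needs an
explicit priming pass; a sketch (illustrative only, not part of the
bundled examples):

    import time
    import psutil

    def busiest(n=5, interval=1.0):
        # First pass primes each process's cpu_percent() baseline; the
        # first non-blocking call always reports 0.0.
        procs = []
        for p in psutil.process_iter():
            try:
                p.cpu_percent(interval=None)
                procs.append(p)
            except psutil.NoSuchProcess:
                pass
        time.sleep(interval)
        # Second pass measures CPU used since the priming call.
        usage = []
        for p in procs:
            try:
                usage.append((p.cpu_percent(interval=None), p.pid, p.name()))
            except psutil.NoSuchProcess:
                pass
        return sorted(usage, reverse=True)[:n]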

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/who.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/who.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/who.py
new file mode 100644
index 0000000..8ffbc81
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/who.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A clone of 'who' command; print information about users who are
+currently logged in.
+
+$ python examples/who.py
+giampaolo       tty7            2014-02-23 17:25  (:0)
+giampaolo       pts/7           2014-02-24 18:25  (:192.168.1.56)
+giampaolo       pts/8           2014-02-24 18:25  (:0)
+giampaolo       pts/9           2014-02-27 01:32  (:0)
+"""
+
+from datetime import datetime
+
+import psutil
+from psutil._compat import print_
+
+
+def main():
+    users = psutil.users()
+    for user in users:
+        print_("%-15s %-15s %s  (%s)" % (
+            user.name,
+            user.terminal or '-',
+            datetime.fromtimestamp(user.started).strftime("%Y-%m-%d %H:%M"),
+            user.host))
+
+if __name__ == '__main__':
+    main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/make.bat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/make.bat b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/make.bat
new file mode 100644
index 0000000..0a10bcf
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/make.bat
@@ -0,0 +1,176 @@
+@echo off
+
+rem ==========================================================================
+rem Shortcuts for various tasks, emulating UNIX "make" on Windows.
+rem It is primarily intended as a shortcut for compiling / installing
+rem psutil ("make.bat build", "make.bat install") and running tests
+rem ("make.bat test").
+rem
+rem This script is modeled after my Windows installation which uses:
+rem - mingw32 for Python 2.4 and 2.5
+rem - Visual studio 2008 for Python 2.6, 2.7, 3.2
+rem - Visual studio 2010 for Python 3.3+
+rem
+rem By default C:\Python27\python.exe is used.
+rem To compile for a specific Python version run:
+rem
+rem     set PYTHON=C:\Python24\python.exe & make.bat build
+rem
+rem If you compile by using mingw on Python 2.4 and 2.5 you need to patch
+rem distutils first: http://stackoverflow.com/questions/13592192
+rem ==========================================================================
+
+if "%PYTHON%" == "" (
+    set PYTHON=C:\Python27\python.exe
+)
+if "%TSCRIPT%" == "" (
+    set TSCRIPT=test\test_psutil.py
+)
+
+rem Needed to compile using Mingw.
+set PATH=C:\MinGW\bin;%PATH%
+
+rem Needed to locate the .pypirc file and upload exes on PYPI.
+set HOME=%USERPROFILE%
+
+rem ==========================================================================
+
+if "%1" == "help" (
+    :help
+    echo Run `make ^<target^>` where ^<target^> is one of:
+    echo   build         compile without installing
+    echo   build-exes    create exe installers in dist directory
+    echo   clean         clean build files
+    echo   install       compile and install
+    echo   memtest       run memory leak tests
+    echo   test          run tests
+    echo   test-process  run process related tests
+    echo   test-system   run system APIs related tests
+    echo   uninstall     uninstall
+    echo   upload-exes   upload exe installers on pypi
+    goto :eof
+)
+
+if "%1" == "clean" (
+    :clean
+    for /r %%R in (__pycache__) do if exist %%R (rmdir /S /Q %%R)
+    for /r %%R in (*.pyc) do if exist %%R (del /s %%R)
+    for /r %%R in (*.pyd) do if exist %%R (del /s %%R)
+    for /r %%R in (*.orig) do if exist %%R (del /s %%R)
+    for /r %%R in (*.bak) do if exist %%R (del /s %%R)
+    for /r %%R in (*.rej) do if exist %%R (del /s %%R)
+    if exist psutil.egg-info (rmdir /S /Q psutil.egg-info)
+    if exist build (rmdir /S /Q build)
+    if exist dist (rmdir /S /Q dist)
+    goto :eof
+)
+
+if "%1" == "build" (
+    :build
+    if %PYTHON%==C:\Python24\python.exe (
+        %PYTHON% setup.py build -c mingw32
+    ) else if %PYTHON%==C:\Python25\python.exe (
+        %PYTHON% setup.py build -c mingw32
+    ) else (
+        %PYTHON% setup.py build
+    )
+    if %errorlevel% neq 0 goto :error
+    goto :eof
+)
+
+if "%1" == "install" (
+    :install
+    if %PYTHON%==C:\Python24\python.exe (
+        %PYTHON% setup.py build -c mingw32 install
+    ) else if %PYTHON%==C:\Python25\python.exe (
+        %PYTHON% setup.py build -c mingw32 install
+    ) else (
+        %PYTHON% setup.py build install
+    )
+    goto :eof
+)
+
+if "%1" == "uninstall" (
+    :uninstall
+    for %%A in ("%PYTHON%") do (
+        set folder=%%~dpA
+    )
+    for /F "delims=" %%i in ('dir /b %folder%\Lib\site-packages\*psutil*') do (
+        rmdir /S /Q %folder%\Lib\site-packages\%%i
+    )
+    goto :eof
+)
+
+if "%1" == "test" (
+    :test
+    call :install
+    %PYTHON% %TSCRIPT%
+    goto :eof
+)
+
+if "%1" == "test-process" (
+    :test-process
+    call :install
+    %PYTHON% -m unittest -v test.test_psutil.TestProcess
+    goto :eof
+)
+
+if "%1" == "test-system" (
+    :test-system
+    call :install
+    %PYTHON% -m unittest -v test.test_psutil.TestSystem
+    goto :eof
+)
+
+if "%1" == "memtest" (
+    :memtest
+    call :install
+    %PYTHON% test\test_memory_leaks.py
+    goto :eof
+)
+
+if "%1" == "build-exes" (
+    :build-exes
+    rem mingw 32 versions
+    C:\Python24\python.exe setup.py build -c mingw32 bdist_wininst || goto :error
+    C:\Python25\python.exe setup.py build -c mingw32 bdist_wininst || goto :error
+    rem "standard" 32 bit versions, using VS 2008 (2.6, 2.7) or VS 2010 (3.3+)
+    C:\Python26\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python27\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python33\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python34\python.exe setup.py build bdist_wininst || goto :error
+    rem 64 bit versions
+    rem Python 2.7 + VS 2008 requires vcvars64.bat to be run first:
+    rem http://stackoverflow.com/questions/11072521/
+    rem Windows SDK and .NET Framework 3.5 SP1 also need to be installed (sigh)
+    "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvars64.bat"
+    C:\Python27-64\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python33-64\python.exe setup.py build bdist_wininst || goto :error
+    C:\Python34-64\python.exe setup.py build bdist_wininst || goto :error
+    echo OK
+    goto :eof
+)
+
+if "%1" == "upload-exes" (
+    :upload-exes
+    rem mingw 32 versions
+    C:\Python25\python.exe setup.py build -c mingw32 bdist_wininst upload || goto :error
+    rem "standard" 32 bit versions, using VS 2008 (2.6, 2.7) or VS 2010 (3.3+)
+    C:\Python26\python.exe setup.py bdist_wininst upload || goto :error
+    C:\Python27\python.exe setup.py bdist_wininst upload || goto :error
+    C:\Python33\python.exe setup.py bdist_wininst upload || goto :error
+    C:\Python34\python.exe setup.py bdist_wininst upload || goto :error
+    rem 64 bit versions
+    C:\Python27-64\python.exe setup.py build bdist_wininst upload || goto :error
+    C:\Python33-64\python.exe setup.py build bdist_wininst upload || goto :error
+    C:\Python34-64\python.exe setup.py build bdist_wininst upload || goto :error
+    echo OK
+    goto :eof
+)
+
+goto :help
+
+:error
+    echo last command exited with error code %errorlevel%
+    exit /b %errorlevel%
+    goto :eof

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/__init__.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/__init__.py
new file mode 100644
index 0000000..3068b10
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/__init__.py
@@ -0,0 +1,1987 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""psutil is a cross-platform library for retrieving information on
+running processes and system utilization (CPU, memory, disks, network)
+in Python.
+"""
+
+from __future__ import division
+
+__author__ = "Giampaolo Rodola'"
+__version__ = "2.1.1"
+version_info = tuple([int(num) for num in __version__.split('.')])
+
+__all__ = [
+    # exceptions
+    "Error", "NoSuchProcess", "AccessDenied", "TimeoutExpired",
+    # constants
+    "version_info", "__version__",
+    "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
+    "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
+    "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
+    "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
+    "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
+    "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
+    # classes
+    "Process", "Popen",
+    # functions
+    "pid_exists", "pids", "process_iter", "wait_procs",             # proc
+    "virtual_memory", "swap_memory",                                # memory
+    "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count",   # cpu
+    "net_io_counters", "net_connections",                           # network
+    "disk_io_counters", "disk_partitions", "disk_usage",            # disk
+    "users", "boot_time",                                           # others
+]
+
+import sys
+import os
+import time
+import signal
+import warnings
+import errno
+import subprocess
+try:
+    import pwd
+except ImportError:
+    pwd = None
+
+from psutil._common import memoize
+from psutil._compat import property, callable, defaultdict
+from psutil._compat import (wraps as _wraps,
+                            PY3 as _PY3)
+from psutil._common import (deprecated_method as _deprecated_method,
+                            deprecated as _deprecated,
+                            sdiskio as _nt_sys_diskio,
+                            snetio as _nt_sys_netio)
+
+from psutil._common import (STATUS_RUNNING,
+                            STATUS_SLEEPING,
+                            STATUS_DISK_SLEEP,
+                            STATUS_STOPPED,
+                            STATUS_TRACING_STOP,
+                            STATUS_ZOMBIE,
+                            STATUS_DEAD,
+                            STATUS_WAKING,
+                            STATUS_LOCKED,
+                            STATUS_IDLE,  # bsd
+                            STATUS_WAITING)  # bsd
+
+from psutil._common import (CONN_ESTABLISHED,
+                            CONN_SYN_SENT,
+                            CONN_SYN_RECV,
+                            CONN_FIN_WAIT1,
+                            CONN_FIN_WAIT2,
+                            CONN_TIME_WAIT,
+                            CONN_CLOSE,
+                            CONN_CLOSE_WAIT,
+                            CONN_LAST_ACK,
+                            CONN_LISTEN,
+                            CONN_CLOSING,
+                            CONN_NONE)
+
+if sys.platform.startswith("linux"):
+    import psutil._pslinux as _psplatform
+    from psutil._pslinux import (phymem_buffers,
+                                 cached_phymem)
+
+    from psutil._pslinux import (IOPRIO_CLASS_NONE,
+                                 IOPRIO_CLASS_RT,
+                                 IOPRIO_CLASS_BE,
+                                 IOPRIO_CLASS_IDLE)
+    # Linux >= 2.6.36
+    if _psplatform.HAS_PRLIMIT:
+        from _psutil_linux import (RLIM_INFINITY,
+                                   RLIMIT_AS,
+                                   RLIMIT_CORE,
+                                   RLIMIT_CPU,
+                                   RLIMIT_DATA,
+                                   RLIMIT_FSIZE,
+                                   RLIMIT_LOCKS,
+                                   RLIMIT_MEMLOCK,
+                                   RLIMIT_NOFILE,
+                                   RLIMIT_NPROC,
+                                   RLIMIT_RSS,
+                                   RLIMIT_STACK)
+        # Kinda ugly but considerably faster than using hasattr() and
+        # setattr() against the module object (we are at import time:
+        # speed matters).
+        import _psutil_linux
+        try:
+            RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
+        except AttributeError:
+            pass
+        try:
+            RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
+        except AttributeError:
+            pass
+        del _psutil_linux
+
+elif sys.platform.startswith("win32"):
+    import psutil._pswindows as _psplatform
+    from _psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS,
+                                 BELOW_NORMAL_PRIORITY_CLASS,
+                                 HIGH_PRIORITY_CLASS,
+                                 IDLE_PRIORITY_CLASS,
+                                 NORMAL_PRIORITY_CLASS,
+                                 REALTIME_PRIORITY_CLASS)
+    from psutil._pswindows import CONN_DELETE_TCB
+
+elif sys.platform.startswith("darwin"):
+    import psutil._psosx as _psplatform
+
+elif sys.platform.startswith("freebsd"):
+    import psutil._psbsd as _psplatform
+
+elif sys.platform.startswith("sunos"):
+    import psutil._pssunos as _psplatform
+    from psutil._pssunos import (CONN_IDLE,
+                                 CONN_BOUND)
+
+else:
+    raise NotImplementedError('platform %s is not supported' % sys.platform)
+
+__all__.extend(_psplatform.__extra__all__)
+
+
+_TOTAL_PHYMEM = None
+_POSIX = os.name == 'posix'
+_WINDOWS = os.name == 'nt'
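+# Prefer a monotonic clock (Python 3.3+) so interval-based CPU percentages
+# are immune to system clock adjustments; fall back to time.time() elsewhere.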
+_timer = getattr(time, 'monotonic', time.time)
+
+
+# =====================================================================
+# --- exceptions
+# =====================================================================
+
+class Error(Exception):
+    """Base exception class. All other psutil exceptions inherit
+    from this one.
+    """
+
+
+class NoSuchProcess(Error):
+    """Exception raised when a process with a certain PID doesn't
+    or no longer exists (zombie).
+    """
+
+    def __init__(self, pid, name=None, msg=None):
+        Error.__init__(self)
+        self.pid = pid
+        self.name = name
+        self.msg = msg
+        if msg is None:
+            if name:
+                details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
+            else:
+                details = "(pid=%s)" % self.pid
+            self.msg = "process no longer exists " + details
+
+    def __str__(self):
+        return self.msg
+
+
+class AccessDenied(Error):
+    """Exception raised when permission to perform an action is denied."""
+
+    def __init__(self, pid=None, name=None, msg=None):
+        Error.__init__(self)
+        self.pid = pid
+        self.name = name
+        self.msg = msg
+        if msg is None:
+            if (pid is not None) and (name is not None):
+                self.msg = "(pid=%s, name=%s)" % (pid, repr(name))
+            elif (pid is not None):
+                self.msg = "(pid=%s)" % self.pid
+            else:
+                self.msg = ""
+
+    def __str__(self):
+        return self.msg
+
+
+class TimeoutExpired(Error):
+    """Raised on Process.wait(timeout) if timeout expires and process
+    is still alive.
+    """
+
+    def __init__(self, seconds, pid=None, name=None):
+        Error.__init__(self)
+        self.seconds = seconds
+        self.pid = pid
+        self.name = name
+        self.msg = "timeout after %s seconds" % seconds
+        if (pid is not None) and (name is not None):
+            self.msg += " (pid=%s, name=%s)" % (pid, repr(name))
+        elif (pid is not None):
+            self.msg += " (pid=%s)" % self.pid
+
+    def __str__(self):
+        return self.msg
+
+# push exception classes into platform specific module namespace
+_psplatform.NoSuchProcess = NoSuchProcess
+_psplatform.AccessDenied = AccessDenied
+_psplatform.TimeoutExpired = TimeoutExpired
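+
+# Illustrative usage sketch (not part of this module): the common
+# terminate-then-kill pattern built on the exceptions above:
+#
+#     p = psutil.Process(pid)
+#     try:
+#         p.terminate()
+#         p.wait(timeout=3)
+#     except psutil.NoSuchProcess:
+#         pass                      # already gone
+#     except psutil.TimeoutExpired:
+#         p.kill()                  # didn't exit in time; force it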
+
+
+# =====================================================================
+# --- Process class
+# =====================================================================
+
+def _assert_pid_not_reused(fun):
+    """Decorator which raises NoSuchProcess in case a process is no
+    longer running or its PID has been reused.
+    """
+    @_wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        if not self.is_running():
+            raise NoSuchProcess(self.pid, self._name)
+        return fun(self, *args, **kwargs)
+    return wrapper
+
+
+class Process(object):
+    """Represents an OS process with the given PID.
+    If PID is omitted current process PID (os.getpid()) is used.
+    Raise NoSuchProcess if PID does not exist.
+
+    Note that most of the methods of this class do not make sure
+    that the PID of the queried process has not been reused over time.
+    That means you might end up retrieving information referring
+    to another process if the original one this instance
+    refers to is gone in the meantime.
+
+    The only exceptions for which process identity is pre-emptively
+    checked and guaranteed are:
+
+     - parent()
+     - children()
+     - nice() (set)
+     - ionice() (set)
+     - rlimit() (set)
+     - cpu_affinity (set)
+     - suspend()
+     - resume()
+     - send_signal()
+     - terminate()
+     - kill()
+
+    To prevent this problem for all other methods you can:
+      - use is_running() before querying the process
+      - if you're continuously iterating over a set of Process
+        instances use process_iter() which pre-emptively checks
+        process identity for every yielded instance
+    """
+
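+    # Illustrative sketch (not part of psutil): since identity is the
+    # (PID, creation time) pair, a possibly stale handle can be
+    # re-checked cheaply before acting on it:
+    #
+    #     if p.is_running():    # PID alive *and* same creation time
+    #         p.suspend()
+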
+    def __init__(self, pid=None):
+        self._init(pid)
+
+    def _init(self, pid, _ignore_nsp=False):
+        if pid is None:
+            pid = os.getpid()
+        else:
+            if not _PY3 and not isinstance(pid, (int, long)):
+                raise TypeError('pid must be an integer (got %r)' % pid)
+            if pid < 0:
+                raise ValueError('pid must be a positive integer (got %s)'
+                                 % pid)
+        self._pid = pid
+        self._name = None
+        self._exe = None
+        self._create_time = None
+        self._gone = False
+        self._hash = None
+        # used for caching on Windows only (on POSIX ppid may change)
+        self._ppid = None
+        # platform-specific modules define an _psplatform.Process
+        # implementation class
+        self._proc = _psplatform.Process(pid)
+        self._last_sys_cpu_times = None
+        self._last_proc_cpu_times = None
+        # cache creation time for later use in is_running() method
+        try:
+            self.create_time()
+        except AccessDenied:
+            # we should never get here as AFAIK we're able to get
+            # process creation time on all platforms even as a
+            # limited user
+            pass
+        except NoSuchProcess:
+            if not _ignore_nsp:
+                msg = 'no process found with pid %s' % pid
+                raise NoSuchProcess(pid, None, msg)
+            else:
+                self._gone = True
+        # This pair is supposed to identify a Process instance
+        # uniquely over time (the PID alone is not enough as
+        # it might refer to a process whose PID has been reused).
+        # This will be used later in __eq__() and is_running().
+        self._ident = (self.pid, self._create_time)
+
+    def __str__(self):
+        try:
+            pid = self.pid
+            name = repr(self.name())
+        except NoSuchProcess:
+            details = "(pid=%s (terminated))" % self.pid
+        except AccessDenied:
+            details = "(pid=%s)" % (self.pid)
+        else:
+            details = "(pid=%s, name=%s)" % (pid, name)
+        return "%s.%s%s" % (self.__class__.__module__,
+                            self.__class__.__name__, details)
+
+    def __repr__(self):
+        return "<%s at %s>" % (self.__str__(), id(self))
+
+    def __eq__(self, other):
+        # Test for equality with another Process object based
+        # on PID and creation time.
+        if not isinstance(other, Process):
+            return NotImplemented
+        return self._ident == other._ident
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        if self._hash is None:
+            self._hash = hash(self._ident)
+        return self._hash
+
+    # --- utility methods
+
+    def as_dict(self, attrs=[], ad_value=None):
+        """Utility method returning process information as a
+        hashable dictionary.
+
+        If 'attrs' is specified it must be a list of strings
+        reflecting available Process class' attribute names
+        (e.g. ['cpu_times', 'name']) else all public (read
+        only) attributes are assumed.
+
+        'ad_value' is the value which gets assigned in case
+        AccessDenied  exception is raised when retrieving that
+        particular process information.
+        """
+        excluded_names = set(
+            ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
+             'is_running', 'as_dict', 'parent', 'children', 'rlimit'])
+        retdict = dict()
+        ls = set(attrs or [x for x in dir(self) if not x.startswith('get')])
+        for name in ls:
+            if name.startswith('_'):
+                continue
+            if name.startswith('set_'):
+                continue
+            if name.startswith('get_'):
+                msg = "%s() is deprecated; use %s() instead" % (name, name[4:])
+                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+                name = name[4:]
+                if name in ls:
+                    continue
+            if name == 'getcwd':
+                msg = "getcwd() is deprecated; use cwd() instead"
+                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+                name = 'cwd'
+                if name in ls:
+                    continue
+
+            if name in excluded_names:
+                continue
+            try:
+                attr = getattr(self, name)
+                if callable(attr):
+                    ret = attr()
+                else:
+                    ret = attr
+            except AccessDenied:
+                ret = ad_value
+            except NotImplementedError:
+                # in case of not implemented functionality (may happen
+                # on old or exotic systems) we want to crash only if
+                # the user explicitly asked for that particular attr
+                if attrs:
+                    raise
+                continue
+            retdict[name] = ret
+        return retdict
+
+    def parent(self):
+        """Return the parent process as a Process object pre-emptively
+        checking whether PID has been reused.
+        If no parent is known return None.
+        """
+        ppid = self.ppid()
+        if ppid is not None:
+            try:
+                parent = Process(ppid)
+                if parent.create_time() <= self.create_time():
+                    return parent
+                # ...else ppid has been reused by another process
+            except NoSuchProcess:
+                pass
+
+    def is_running(self):
+        """Return whether this process is running.
+        It also checks if PID has been reused by another process in
+        which case return False.
+        """
+        if self._gone:
+            return False
+        try:
+            # Checking if PID is alive is not enough as the PID might
+            # have been reused by another process: we also want to
+            # check process identity.
+            # Process identity / uniqueness over time is guaranteed by
+            # (PID + creation time) and that is verified in __eq__.
+            return self == Process(self.pid)
+        except NoSuchProcess:
+            self._gone = True
+            return False
+
+    # --- actual API
+
+    @property
+    def pid(self):
+        """The process PID."""
+        return self._pid
+
+    def ppid(self):
+        """The process parent PID.
+        On Windows the return value is cached after first call.
+        """
+        # On POSIX we don't want to cache the ppid as it may unexpectedly
+        # change to 1 (init) in case this process turns into a zombie:
+        # https://code.google.com/p/psutil/issues/detail?id=321
+        # http://stackoverflow.com/questions/356722/
+
+        # XXX should we check creation time here rather than in
+        # Process.parent()?
+        if _POSIX:
+            return self._proc.ppid()
+        else:
+            if self._ppid is None:
+                self._ppid = self._proc.ppid()
+            return self._ppid
+
+    def name(self):
+        """The process name. The return value is cached after first call."""
+        if self._name is None:
+            name = self._proc.name()
+            if _POSIX and len(name) >= 15:
+                # On UNIX the name gets truncated to the first 15 characters.
+                # If it matches the first part of the cmdline we return that
+                # one instead because it's usually more descriptive.
+                # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
+                try:
+                    cmdline = self.cmdline()
+                except AccessDenied:
+                    pass
+                else:
+                    if cmdline:
+                        extended_name = os.path.basename(cmdline[0])
+                        if extended_name.startswith(name):
+                            name = extended_name
+            self._proc._name = name
+            self._name = name
+        return self._name
+
+    def exe(self):
+        """The process executable as an absolute path.
+        May also be an empty string.
+        The return value is cached after first call.
+        """
+        def guess_it(fallback):
+            # try to guess exe from cmdline[0] in absence of a native
+            # exe representation
+            cmdline = self.cmdline()
+            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
+                exe = cmdline[0]  # the possible exe
+                # Attempt to guess only in case of an absolute path.
+                # It is not safe otherwise as the process might have
+                # changed cwd.
+                if (os.path.isabs(exe)
+                        and os.path.isfile(exe)
+                        and os.access(exe, os.X_OK)):
+                    return exe
+            if isinstance(fallback, AccessDenied):
+                raise fallback
+            return fallback
+
+        if self._exe is None:
+            try:
+                exe = self._proc.exe()
+            except AccessDenied:
+                err = sys.exc_info()[1]
+                return guess_it(fallback=err)
+            else:
+                if not exe:
+                    # underlying implementation can legitimately return an
+                    # empty string; if that's the case we don't want to
+                    # raise AD while guessing from the cmdline
+                    try:
+                        exe = guess_it(fallback=exe)
+                    except AccessDenied:
+                        pass
+                self._exe = exe
+        return self._exe
+
+    def cmdline(self):
+        """The command line this process has been called with."""
+        return self._proc.cmdline()
+
+    def status(self):
+        """The process current status as a STATUS_* constant."""
+        return self._proc.status()
+
+    def username(self):
+        """The name of the user that owns the process.
+        On UNIX this is calculated by using *real* process uid.
+        """
+        if _POSIX:
+            if pwd is None:
+                # might happen if python was installed from sources
+                raise ImportError(
+                    "requires pwd module shipped with standard python")
+            return pwd.getpwuid(self.uids().real).pw_name
+        else:
+            return self._proc.username()
+
+    def create_time(self):
+        """The process creation time as a floating point number
+        expressed in seconds since the epoch, in UTC.
+        The return value is cached after first call.
+        """
+        if self._create_time is None:
+            self._create_time = self._proc.create_time()
+        return self._create_time
+
+    def cwd(self):
+        """Process current working directory as an absolute path."""
+        return self._proc.cwd()
+
+    def nice(self, value=None):
+        """Get or set process niceness (priority)."""
+        if value is None:
+            return self._proc.nice_get()
+        else:
+            if not self.is_running():
+                raise NoSuchProcess(self.pid, self._name)
+            self._proc.nice_set(value)
+
+    if _POSIX:
+
+        def uids(self):
+            """Return process UIDs as a (real, effective, saved)
+            namedtuple.
+            """
+            return self._proc.uids()
+
+        def gids(self):
+            """Return process GIDs as a (real, effective, saved)
+            namedtuple.
+            """
+            return self._proc.gids()
+
+        def terminal(self):
+            """The terminal associated with this process, if any,
+            else None.
+            """
+            return self._proc.terminal()
+
+        def num_fds(self):
+            """Return the number of file descriptors opened by this
+            process (POSIX only).
+            """
+            return self._proc.num_fds()
+
+    # Linux, BSD and Windows only
+    if hasattr(_psplatform.Process, "io_counters"):
+
+        def io_counters(self):
+            """Return process I/O statistics as a
+            (read_count, write_count, read_bytes, write_bytes)
+            namedtuple.
+            Those are the number of read/write calls performed and the
+            amount of bytes read and written by the process.
+            """
+            return self._proc.io_counters()
+
+    # Linux and Windows >= Vista only
+    if hasattr(_psplatform.Process, "ionice_get"):
+
+        def ionice(self, ioclass=None, value=None):
+            """Get or set process I/O niceness (priority).
+
+            On Linux 'ioclass' is one of the IOPRIO_CLASS_* constants.
+            'value' is a number which goes from 0 to 7. The higher the
+            value, the lower the I/O priority of the process.
+
+            On Windows only 'ioclass' is used and it can be set to 2
+            (normal), 1 (low) or 0 (very low).
+
+            Available on Linux and Windows > Vista only.
+            """
+            if ioclass is None:
+                if value is not None:
+                    raise ValueError("'ioclass' must be specified")
+                return self._proc.ionice_get()
+            else:
+                return self._proc.ionice_set(ioclass, value)
+
+    # Linux only
+    if hasattr(_psplatform.Process, "rlimit"):
+
+        def rlimit(self, resource, limits=None):
+            """Get or set process resource limits as a (soft, hard)
+            tuple.
+
+            'resource' is one of the RLIMIT_* constants.
+            'limits' is supposed to be a (soft, hard)  tuple.
+
+            See "man prlimit" for further info.
+            Available on Linux only.
+            """
+            if limits is None:
+                return self._proc.rlimit(resource)
+            else:
+                return self._proc.rlimit(resource, limits)
+
+    # Windows and Linux only
+    if hasattr(_psplatform.Process, "cpu_affinity_get"):
+
+        def cpu_affinity(self, cpus=None):
+            """Get or set process CPU affinity.
+            If specified 'cpus' must be a list of CPUs for which you
+            want to set the affinity (e.g. [0, 1]).
+            """
+            if cpus is None:
+                return self._proc.cpu_affinity_get()
+            else:
+                self._proc.cpu_affinity_set(cpus)
+
+    if _WINDOWS:
+
+        def num_handles(self):
+            """Return the number of handles opened by this process
+            (Windows only).
+            """
+            return self._proc.num_handles()
+
+    def num_ctx_switches(self):
+        """Return the number of voluntary and involuntary context
+        switches performed by this process.
+        """
+        return self._proc.num_ctx_switches()
+
+    def num_threads(self):
+        """Return the number of threads used by this process."""
+        return self._proc.num_threads()
+
+    def threads(self):
+        """Return threads opened by process as a list of
+        (id, user_time, system_time) namedtuples representing
+        thread id and thread CPU times (user/system).
+        """
+        return self._proc.threads()
+
+    @_assert_pid_not_reused
+    def children(self, recursive=False):
+        """Return the children of this process as a list of Process
+        instances, pre-emptively checking whether PID has been reused.
+        If recursive is True return all of this process's descendants.
+
+        Example (A == this process):
+
+         A ─┐
+            │
+            ├─ B (child) ─┐
+            │             └─ X (grandchild) ─┐
+            │                                └─ Y (great grandchild)
+            ├─ C (child)
+            └─ D (child)
+
+        >>> import psutil
+        >>> p = psutil.Process()
+        >>> p.children()
+        B, C, D
+        >>> p.children(recursive=True)
+        B, X, Y, C, D
+
+        Note that in the example above if process X disappears
+        process Y won't be listed as the reference to process A
+        is lost.
+        """
+        if hasattr(_psplatform, 'ppid_map'):
+            # Windows only: obtain a {pid:ppid, ...} dict for all running
+            # processes in one shot (faster).
+            ppid_map = _psplatform.ppid_map()
+        else:
+            ppid_map = None
+
+        ret = []
+        if not recursive:
+            if ppid_map is None:
+                # 'slow' version, common to all platforms except Windows
+                for p in process_iter():
+                    try:
+                        if p.ppid() == self.pid:
+                            # if child happens to be older than its parent
+                            # (self) it means child's PID has been reused
+                            if self.create_time() <= p.create_time():
+                                ret.append(p)
+                    except NoSuchProcess:
+                        pass
+            else:
+                # Windows only (faster)
+                for pid, ppid in ppid_map.items():
+                    if ppid == self.pid:
+                        try:
+                            child = Process(pid)
+                            # if child happens to be older than its parent
+                            # (self) it means child's PID has been reused
+                            if self.create_time() <= child.create_time():
+                                ret.append(child)
+                        except NoSuchProcess:
+                            pass
+        else:
+            # construct a dict where 'values' are all the processes
+            # having 'key' as their parent
+            table = defaultdict(list)
+            if ppid_map is None:
+                for p in process_iter():
+                    try:
+                        table[p.ppid()].append(p)
+                    except NoSuchProcess:
+                        pass
+            else:
+                for pid, ppid in ppid_map.items():
+                    try:
+                        p = Process(pid)
+                        table[ppid].append(p)
+                    except NoSuchProcess:
+                        pass
+            # At this point we have a mapping table where table[self.pid]
+            # are the current process' children.
+            # Below, we look for all descendants recursively, similarly
+            # to a recursive function call.
+            checkpids = [self.pid]
+            for pid in checkpids:
+                for child in table[pid]:
+                    try:
+                        # if child happens to be older than its parent
+                        # (self) it means child's PID has been reused
+                        intime = self.create_time() <= child.create_time()
+                    except NoSuchProcess:
+                        pass
+                    else:
+                        if intime:
+                            ret.append(child)
+                            if child.pid not in checkpids:
+                                checkpids.append(child.pid)
+        return ret
+
+    def cpu_percent(self, interval=None):
+        """Return a float representing the current process CPU
+        utilization as a percentage.
+
+        When interval is 0.0 or None (default) compares process times
+        to system CPU times elapsed since last call, returning
+        immediately (non-blocking). That means that the first time
+        this is called it will return a meaningless 0.0 value which
+        you should ignore.
+
+        When interval is > 0.0 compares process times to system CPU
+        times elapsed before and after the interval (blocking).
+
+        In this case it is recommended for accuracy that this function
+        be called with at least 0.1 seconds between calls.
+
+        Examples:
+
+          >>> import psutil
+          >>> p = psutil.Process(os.getpid())
+          >>> # blocking
+          >>> p.cpu_percent(interval=1)
+          2.0
+          >>> # non-blocking (percentage since last call)
+          >>> p.cpu_percent(interval=None)
+          2.9
+          >>>
+        """
+        blocking = interval is not None and interval > 0.0
+        num_cpus = cpu_count()
+        if _POSIX:
+            timer = lambda: _timer() * num_cpus
+        else:
+            timer = lambda: sum(cpu_times())
+        if blocking:
+            st1 = timer()
+            pt1 = self._proc.cpu_times()
+            time.sleep(interval)
+            st2 = timer()
+            pt2 = self._proc.cpu_times()
+        else:
+            st1 = self._last_sys_cpu_times
+            pt1 = self._last_proc_cpu_times
+            st2 = timer()
+            pt2 = self._proc.cpu_times()
+            if st1 is None or pt1 is None:
+                self._last_sys_cpu_times = st2
+                self._last_proc_cpu_times = pt2
+                return 0.0
+
+        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
+        delta_time = st2 - st1
+        # reset values for next call in case of interval == None
+        self._last_sys_cpu_times = st2
+        self._last_proc_cpu_times = pt2
+
+        try:
+            # The utilization split between all CPUs.
+            # Note: a percentage > 100 is legitimate as it can result
+            # from a process with multiple threads running on different
+            # CPU cores, see:
+            # http://stackoverflow.com/questions/1032357
+            # https://code.google.com/p/psutil/issues/detail?id=474
+            overall_percent = ((delta_proc / delta_time) * 100) * num_cpus
+        except ZeroDivisionError:
+            # interval was too low
+            return 0.0
+        else:
+            return round(overall_percent, 1)
+
+    def cpu_times(self):
+        """Return a (user, system) namedtuple representing  the
+        accumulated process time, in seconds.
+        This is the same as os.times() but per-process.
+        """
+        return self._proc.cpu_times()
+
+    def memory_info(self):
+        """Return a tuple representing RSS (Resident Set Size) and VMS
+        (Virtual Memory Size) in bytes.
+
+        On UNIX RSS and VMS are the same values shown by 'ps'.
+
+        On Windows RSS and VMS refer to "Mem Usage" and "VM Size"
+        columns of taskmgr.exe.
+        """
+        return self._proc.memory_info()
+
+    def memory_info_ex(self):
+        """Return a namedtuple with variable fields depending on the
+        platform representing extended memory information about
+        this process. All numbers are expressed in bytes.
+        """
+        return self._proc.memory_info_ex()
+
+    def memory_percent(self):
+        """Compare physical system memory to process resident memory
+        (RSS) and calculate process memory utilization as a percentage.
+        """
+        rss = self._proc.memory_info()[0]
+        # use cached value if available
+        total_phymem = _TOTAL_PHYMEM or virtual_memory().total
+        try:
+            return (rss / float(total_phymem)) * 100
+        except ZeroDivisionError:
+            return 0.0
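+    # A minimal sketch of the calculation described above (output values
+    # are illustrative only):
+    #   >>> p = psutil.Process()
+    #   >>> p.memory_percent()
+    #   0.8
+    #   >>> p.memory_info()[0] * 100.0 / psutil.virtual_memory().total
+    #   0.8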
+
+    def memory_maps(self, grouped=True):
+        """Return process' mapped memory regions as a list of nameduples
+        whose fields are variable depending on the platform.
+
+        If 'grouped' is True the mapped regions with the same 'path'
+        are grouped together and the different memory fields are summed.
+
+        If 'grouped' is False every mapped region is shown as a single
+        entity and the namedtuple will also include the mapped region's
+        address space ('addr') and permission set ('perms').
+        """
+        it = self._proc.memory_maps()
+        if grouped:
+            d = {}
+            for tupl in it:
+                path = tupl[2]
+                nums = tupl[3:]
+                try:
+                    d[path] = map(lambda x, y: x + y, d[path], nums)
+                except KeyError:
+                    d[path] = nums
+            nt = _psplatform.pmmap_grouped
+            return [nt(path, *d[path]) for path in d]
+        else:
+            nt = _psplatform.pmmap_ext
+            return [nt(*x) for x in it]
+
+    def open_files(self):
+        """Return files opened by process as a list of
+        (path, fd) namedtuples including the absolute file name
+        and file descriptor number.
+        """
+        return self._proc.open_files()
+
+    def connections(self, kind='inet'):
+        """Return connections opened by process as a list of
+        (fd, family, type, laddr, raddr, status) namedtuples.
+        The 'kind' parameter filters for connections that match the
+        following criteria:
+
+        Kind Value      Connections using
+        inet            IPv4 and IPv6
+        inet4           IPv4
+        inet6           IPv6
+        tcp             TCP
+        tcp4            TCP over IPv4
+        tcp6            TCP over IPv6
+        udp             UDP
+        udp4            UDP over IPv4
+        udp6            UDP over IPv6
+        unix            UNIX socket (both UDP and TCP protocols)
+        all             the sum of all the possible families and protocols
+        """
+        return self._proc.connections(kind)
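+    # A minimal usage sketch: list this process' listening TCP sockets
+    # (connection status is reported as a string, e.g. 'LISTEN'):
+    #   >>> p = psutil.Process()
+    #   >>> [c.laddr for c in p.connections(kind='tcp')
+    #   ...  if c.status == 'LISTEN']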
+
+    if _POSIX:
+        def _send_signal(self, sig):
+            try:
+                os.kill(self.pid, sig)
+            except OSError:
+                err = sys.exc_info()[1]
+                if err.errno == errno.ESRCH:
+                    self._gone = True
+                    raise NoSuchProcess(self.pid, self._name)
+                if err.errno == errno.EPERM:
+                    raise AccessDenied(self.pid, self._name)
+                raise
+
+    @_assert_pid_not_reused
+    def send_signal(self, sig):
+        """Send a signal to process pre-emptively checking whether
+        PID has been reused (see signal module constants) .
+        On Windows only SIGTERM is valid and is treated as an alias
+        for kill().
+        """
+        if _POSIX:
+            self._send_signal(sig)
+        else:
+            if sig == signal.SIGTERM:
+                self._proc.kill()
+            else:
+                raise ValueError("only SIGTERM is supported on Windows")
+
+    @_assert_pid_not_reused
+    def suspend(self):
+        """Suspend process execution with SIGSTOP pre-emptively checking
+        whether PID has been reused.
+        On Windows this has the effect of suspending all process threads.
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGSTOP)
+        else:
+            self._proc.suspend()
+
+    @_assert_pid_not_reused
+    def resume(self):
+        """Resume process execution with SIGCONT pre-emptively checking
+        whether PID has been reused.
+        On Windows this has the effect of resuming all process threads.
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGCONT)
+        else:
+            self._proc.resume()
+
+    @_assert_pid_not_reused
+    def terminate(self):
+        """Terminate the process with SIGTERM pre-emptively checking
+        whether PID has been reused.
+        On Windows this is an alias for kill().
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGTERM)
+        else:
+            self._proc.kill()
+
+    @_assert_pid_not_reused
+    def kill(self):
+        """Kill the current process with SIGKILL pre-emptively checking
+        whether PID has been reused.
+        """
+        if _POSIX:
+            self._send_signal(signal.SIGKILL)
+        else:
+            self._proc.kill()
+
+    def wait(self, timeout=None):
+        """Wait for process to terminate and, if process is a children
+        of os.getpid(), also return its exit code, else None.
+
+        If the process is already terminated, immediately return None
+        instead of raising NoSuchProcess.
+
+        If timeout (in seconds) is specified and process is still alive
+        raise TimeoutExpired.
+
+        To wait for multiple Process(es) use psutil.wait_procs().
+        """
+        if timeout is not None and not timeout >= 0:
+            raise ValueError("timeout must be a positive integer")
+        return self._proc.wait(timeout)
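+    # A minimal usage sketch: give a process a grace period after
+    # terminate(), then force-kill it if the timeout elapses
+    # ('some_pid' is a hypothetical placeholder):
+    #   >>> p = psutil.Process(some_pid)
+    #   >>> p.terminate()
+    #   >>> try:
+    #   ...     p.wait(timeout=3)
+    #   ... except psutil.TimeoutExpired:
+    #   ...     p.kill()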
+
+    # --- deprecated APIs
+
+    _locals = set(locals())
+
+    @_deprecated_method(replacement='children')
+    def get_children(self):
+        pass
+
+    @_deprecated_method(replacement='connections')
+    def get_connections(self):
+        pass
+
+    if "cpu_affinity" in _locals:
+        @_deprecated_method(replacement='cpu_affinity')
+        def get_cpu_affinity(self):
+            pass
+
+        @_deprecated_method(replacement='cpu_affinity')
+        def set_cpu_affinity(self, cpus):
+            pass
+
+    @_deprecated_method(replacement='cpu_percent')
+    def get_cpu_percent(self):
+        pass
+
+    @_deprecated_method(replacement='cpu_times')
+    def get_cpu_times(self):
+        pass
+
+    @_deprecated_method(replacement='cwd')
+    def getcwd(self):
+        pass
+
+    @_deprecated_method(replacement='memory_info_ex')
+    def get_ext_memory_info(self):
+        pass
+
+    if "io_counters" in _locals:
+        @_deprecated_method(replacement='io_counters')
+        def get_io_counters(self):
+            pass
+
+    if "ionice" in _locals:
+        @_deprecated_method(replacement='ionice')
+        def get_ionice(self):
+            pass
+
+        @_deprecated_method(replacement='ionice')
+        def set_ionice(self, ioclass, value=None):
+            pass
+
+    @_deprecated_method(replacement='memory_info')
+    def get_memory_info(self):
+        pass
+
+    @_deprecated_method(replacement='memory_maps')
+    def get_memory_maps(self):
+        pass
+
+    @_deprecated_method(replacement='memory_percent')
+    def get_memory_percent(self):
+        pass
+
+    @_deprecated_method(replacement='nice')
+    def get_nice(self):
+        pass
+
+    @_deprecated_method(replacement='num_ctx_switches')
+    def get_num_ctx_switches(self):
+        pass
+
+    if 'num_fds' in _locals:
+        @_deprecated_method(replacement='num_fds')
+        def get_num_fds(self):
+            pass
+
+    if 'num_handles' in _locals:
+        @_deprecated_method(replacement='num_handles')
+        def get_num_handles(self):
+            pass
+
+    @_deprecated_method(replacement='num_threads')
+    def get_num_threads(self):
+        pass
+
+    @_deprecated_method(replacement='open_files')
+    def get_open_files(self):
+        pass
+
+    if "rlimit" in _locals:
+        @_deprecated_method(replacement='rlimit')
+        def get_rlimit(self):
+            pass
+
+        @_deprecated_method(replacement='rlimit')
+        def set_rlimit(self, resource, limits):
+            pass
+
+    @_deprecated_method(replacement='threads')
+    def get_threads(self):
+        pass
+
+    @_deprecated_method(replacement='nice')
+    def set_nice(self, value):
+        pass
+
+    del _locals
+
+
+# =====================================================================
+# --- Popen class
+# =====================================================================
+
+class Popen(Process):
+    """A more convenient interface to stdlib subprocess module.
+    It starts a sub process and deals with it exactly as when using
+    subprocess.Popen class but in addition also provides all the
+    properties and methods of psutil.Process class as a unified
+    interface:
+
+      >>> import psutil
+      >>> from subprocess import PIPE
+      >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
+      >>> p.name()
+      'python'
+      >>> p.uids()
+      user(real=1000, effective=1000, saved=1000)
+      >>> p.username()
+      'giampaolo'
+      >>> p.communicate()
+      ('hi\n', None)
+      >>> p.terminate()
+      >>> p.wait(timeout=2)
+      0
+      >>>
+
+    For method names common to both classes such as kill(), terminate()
+    and wait(), psutil.Process implementation takes precedence.
+
+    Unlike subprocess.Popen this class pre-emptively checks whether PID
+    has been reused on send_signal(), terminate() and kill() so that
+    you don't accidentally terminate another process, fixing
+    http://bugs.python.org/issue6973.
+
+    For a complete documentation refer to:
+    http://docs.python.org/library/subprocess.html
+    """
+
+    def __init__(self, *args, **kwargs):
+        # Explicitly avoid raising NoSuchProcess in case the process
+        # spawned by subprocess.Popen terminates too quickly, see:
+        # https://code.google.com/p/psutil/issues/detail?id=193
+        self.__subproc = subprocess.Popen(*args, **kwargs)
+        self._init(self.__subproc.pid, _ignore_nsp=True)
+
+    def __dir__(self):
+        return sorted(set(dir(Popen) + dir(subprocess.Popen)))
+
+    def __getattribute__(self, name):
+        try:
+            return object.__getattribute__(self, name)
+        except AttributeError:
+            try:
+                return object.__getattribute__(self.__subproc, name)
+            except AttributeError:
+                raise AttributeError("%s instance has no attribute '%s'"
+                                     % (self.__class__.__name__, name))
+
+    def wait(self, timeout=None):
+        if self.__subproc.returncode is not None:
+            return self.__subproc.returncode
+        ret = super(Popen, self).wait(timeout)
+        self.__subproc.returncode = ret
+        return ret
+
+
+# =====================================================================
+# --- system processes related functions
+# =====================================================================
+
+def pids():
+    """Return a list of current running PIDs."""
+    return _psplatform.pids()
+
+
+def pid_exists(pid):
+    """Return True if given PID exists in the current process list.
+    This is faster than doing "pid in psutil.pids()" and
+    should be preferred.
+    """
+    if pid < 0:
+        return False
+    elif pid == 0 and _POSIX:
+        # On POSIX we use os.kill() to determine PID existence.
+        # According to "man 2 kill" PID 0 has a special meaning
+        # though: it refers to <<every process in the process
+        # group of the calling process>> and that is not what we
+        # want to do here.
+        return pid in pids()
+    else:
+        return _psplatform.pid_exists(pid)
+
+
+_pmap = {}
+
+def process_iter():
+    """Return a generator yielding a Process instance for all
+    running processes.
+
+    Every new Process instance is only created once and then cached
+    into an internal table which is updated every time this is used.
+
+    Cached Process instances are checked for identity so that you're
+    safe in case a PID has been reused by another process, in which
+    case the cached instance is updated.
+
+    The sorting order in which processes are yielded is based on
+    their PIDs.
+    """
+    def add(pid):
+        proc = Process(pid)
+        _pmap[proc.pid] = proc
+        return proc
+
+    def remove(pid):
+        _pmap.pop(pid, None)
+
+    a = set(pids())
+    b = set(_pmap.keys())
+    new_pids = a - b
+    gone_pids = b - a
+
+    for pid in gone_pids:
+        remove(pid)
+    for pid, proc in sorted(list(_pmap.items()) +
+                            list(dict.fromkeys(new_pids).items())):
+        try:
+            if proc is None:  # new process
+                yield add(pid)
+            else:
+                # use is_running() to check whether PID has been reused by
+                # another process in which case yield a new Process instance
+                if proc.is_running():
+                    yield proc
+                else:
+                    yield add(pid)
+        except NoSuchProcess:
+            remove(pid)
+        except AccessDenied:
+            # Process creation time can't be determined hence there's
+            # no way to tell whether the pid of the cached process
+            # has been reused. Just return the cached version.
+            yield proc
+
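+# A minimal usage sketch: collect PIDs by process name (robust code
+# should also catch NoSuchProcess, as a process can die mid-iteration):
+#   >>> import psutil
+#   >>> [p.pid for p in psutil.process_iter() if p.name() == 'python']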
+
+def wait_procs(procs, timeout=None, callback=None):
+    """Convenience function which waits for a list of processes to
+    terminate.
+
+    Return a (gone, alive) tuple indicating which processes
+    are gone and which ones are still alive.
+
+    The gone ones will have a new 'returncode' attribute indicating
+    process exit status (may be None).
+
+    'callback' is a function which gets called every time a process
+    terminates (a Process instance is passed as callback argument).
+
+    Function will return as soon as all processes terminate or when
+    timeout occurs.
+
+    Typical use case is:
+
+     - send SIGTERM to a list of processes
+     - give them some time to terminate
+     - send SIGKILL to those ones which are still alive
+
+    Example:
+
+    >>> def on_terminate(proc):
+    ...     print("process {} terminated".format(proc))
+    ...
+    >>> for p in procs:
+    ...    p.terminate()
+    ...
+    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
+    >>> for p in alive:
+    ...     p.kill()
+    """
+    def check_gone(proc, timeout):
+        try:
+            returncode = proc.wait(timeout=timeout)
+        except TimeoutExpired:
+            pass
+        else:
+            if returncode is not None or not proc.is_running():
+                proc.returncode = returncode
+                gone.add(proc)
+                if callback is not None:
+                    callback(proc)
+
+    if timeout is not None and not timeout >= 0:
+        msg = "timeout must be a positive integer, got %s" % timeout
+        raise ValueError(msg)
+    gone = set()
+    alive = set(procs)
+    if callback is not None and not callable(callback):
+        raise TypeError("callback %r is not a callable" % callable)
+    if timeout is not None:
+        deadline = _timer() + timeout
+
+    while alive:
+        if timeout is not None and timeout <= 0:
+            break
+        for proc in alive:
+            # Make sure that every complete iteration (all processes)
+            # will last max 1 sec.
+            # We do this because we don't want to wait too long on a
+            # single process: in case it terminates too late other
+            # processes may disappear in the meantime and their PIDs
+            # may be reused.
+            max_timeout = 1.0 / len(alive)
+            if timeout is not None:
+                timeout = min((deadline - _timer()), max_timeout)
+                if timeout <= 0:
+                    break
+                check_gone(proc, timeout)
+            else:
+                check_gone(proc, max_timeout)
+        alive = alive - gone
+
+    if alive:
+        # Last attempt over processes survived so far.
+        # timeout == 0 won't make this function wait any further.
+        for proc in alive:
+            check_gone(proc, 0)
+        alive = alive - gone
+
+    return (list(gone), list(alive))
+
+
+# =====================================================================
+# --- CPU related functions
+# =====================================================================
+
+@memoize
+def cpu_count(logical=True):
+    """Return the number of logical CPUs in the system (same as
+    os.cpu_count() in Python 3.4).
+
+    If logical is False return the number of physical cores only
+    (hyper thread CPUs are excluded).
+
+    Return None if undetermined.
+
+    The return value is cached after first call.
+    If desired cache can be cleared like this:
+
+    >>> psutil.cpu_count.cache_clear()
+    """
+    if logical:
+        return _psplatform.cpu_count_logical()
+    else:
+        return _psplatform.cpu_count_physical()
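+# A minimal usage sketch:
+#   >>> psutil.cpu_count()               # logical CPUs, e.g. 8
+#   >>> psutil.cpu_count(logical=False)  # physical cores, e.g. 4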
+
+
+def cpu_times(percpu=False):
+    """Return system-wide CPU times as a namedtuple.
+    Every CPU time represents the seconds the CPU has spent in the given mode.
+    The namedtuple's fields availability varies depending on the platform:
+     - user
+     - system
+     - idle
+     - nice (UNIX)
+     - iowait (Linux)
+     - irq (Linux, FreeBSD)
+     - softirq (Linux)
+     - steal (Linux >= 2.6.11)
+     - guest (Linux >= 2.6.24)
+     - guest_nice (Linux >= 3.2.0)
+
+    When percpu is True return a list of namedtuples for each CPU.
+    First element of the list refers to first CPU, second element
+    to second CPU and so on.
+    The order of the list is consistent across calls.
+    """
+    if not percpu:
+        return _psplatform.cpu_times()
+    else:
+        return _psplatform.per_cpu_times()
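+# A minimal usage sketch (field availability varies by platform, as
+# noted above):
+#   >>> psutil.cpu_times().user           # seconds in user mode since boot
+#   >>> psutil.cpu_times(percpu=True)[0]  # times for the first CPU only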
+
+
+_last_cpu_times = cpu_times()
+_last_per_cpu_times = cpu_times(percpu=True)
+
+def cpu_percent(interval=None, percpu=False):
+    """Return a float representing the current system-wide CPU
+    utilization as a percentage.
+
+    When interval is > 0.0 compares system CPU times elapsed before
+    and after the interval (blocking).
+
+    When interval is 0.0 or None compares system CPU times elapsed
+    since last call or module import, returning immediately (non
+    blocking). That means the first time this is called it will
+    return a meaningless 0.0 value which you should ignore.
+    In this case it is recommended for accuracy that this function be
+    called with at least 0.1 seconds between calls.
+
+    When percpu is True returns a list of floats representing the
+    utilization as a percentage for each CPU.
+    First element of the list refers to first CPU, second element
+    to second CPU and so on.
+    The order of the list is consistent across calls.
+
+    Examples:
+
+      >>> # blocking, system-wide
+      >>> psutil.cpu_percent(interval=1)
+      2.0
+      >>>
+      >>> # blocking, per-cpu
+      >>> psutil.cpu_percent(interval=1, percpu=True)
+      [2.0, 1.0]
+      >>>
+      >>> # non-blocking (percentage since last call)
+      >>> psutil.cpu_percent(interval=None)
+      2.9
+      >>>
+    """
+    global _last_cpu_times
+    global _last_per_cpu_times
+    blocking = interval is not None and interval > 0.0
+
+    def calculate(t1, t2):
+        t1_all = sum(t1)
+        t1_busy = t1_all - t1.idle
+
+        t2_all = sum(t2)
+        t2_busy = t2_all - t2.idle
+
+        # this usually indicates a float precision issue
+        if t2_busy <= t1_busy:
+            return 0.0
+
+        busy_delta = t2_busy - t1_busy
+        all_delta = t2_all - t1_all
+        busy_perc = (busy_delta / all_delta) * 100
+        return round(busy_perc, 1)
+
+    # system-wide usage
+    if not percpu:
+        if blocking:
+            t1 = cpu_times()
+            time.sleep(interval)
+        else:
+            t1 = _last_cpu_times
+        _last_cpu_times = cpu_times()
+        return calculate(t1, _last_cpu_times)
+    # per-cpu usage
+    else:
+        ret = []
+        if blocking:
+            tot1 = cpu_times(percpu=True)
+            time.sleep(interval)
+        else:
+            tot1 = _last_per_cpu_times
+        _last_per_cpu_times = cpu_times(percpu=True)
+        for t1, t2 in zip(tot1, _last_per_cpu_times):
+            ret.append(calculate(t1, t2))
+        return ret
+
+
+# Use separate global vars for cpu_times_percent() so that it's
+# independent from cpu_percent() and they can both be used within
+# the same program.
+_last_cpu_times_2 = _last_cpu_times
+_last_per_cpu_times_2 = _last_per_cpu_times
+
+def cpu_times_percent(interval=None, percpu=False):
+    """Same as cpu_percent() but provides utilization percentages
+    for each specific CPU time as is returned by cpu_times().
+    For instance, on Linux we'll get:
+
+      >>> cpu_times_percent()
+      cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
+                 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+      >>>
+
+    interval and percpu arguments have the same meaning as in
+    cpu_percent().
+    """
+    global _last_cpu_times_2
+    global _last_per_cpu_times_2
+    blocking = interval is not None and interval > 0.0
+
+    def calculate(t1, t2):
+        nums = []
+        all_delta = sum(t2) - sum(t1)
+        for field in t1._fields:
+            field_delta = getattr(t2, field) - getattr(t1, field)
+            try:
+                field_perc = (100 * field_delta) / all_delta
+            except ZeroDivisionError:
+                field_perc = 0.0
+            field_perc = round(field_perc, 1)
+            if _WINDOWS:
+                # XXX
+                # Work around:
+                # https://code.google.com/p/psutil/issues/detail?id=392
+                # CPU times are always supposed to increase over time
+                # or at least remain the same and that's because time
+                # cannot go backwards.
+                # Surprisingly sometimes this might not be the case on
+                # Windows where 'system' CPU time can be smaller
+                # compared to the previous call, resulting in corrupted
+                # percentages (< 0 or > 100).
+                # I really don't know what to do about that except
+                # forcing the value to 0 or 100.
+                if field_perc > 100.0:
+                    field_perc = 100.0
+                elif field_perc < 0.0:
+                    field_perc = 0.0
+            nums.append(field_perc)
+        return _psplatform.scputimes(*nums)
+
+    # system-wide usage
+    if not percpu:
+        if blocking:
+            t1 = cpu_times()
+            time.sleep(interval)
+        else:
+            t1 = _last_cpu_times_2
+        _last_cpu_times_2 = cpu_times()
+        return calculate(t1, _last_cpu_times_2)
+    # per-cpu usage
+    else:
+        ret = []
+        if blocking:
+            tot1 = cpu_times(percpu=True)
+            time.sleep(interval)
+        else:
+            tot1 = _last_per_cpu_times_2
+        _last_per_cpu_times_2 = cpu_times(percpu=True)
+        for t1, t2 in zip(tot1, _last_per_cpu_times_2):
+            ret.append(calculate(t1, t2))
+        return ret
+
+
+# =====================================================================
+# --- system memory related functions
+# =====================================================================
+
+def virtual_memory():
+    """Return statistics about system memory usage as a namedtuple
+    including the following fields, expressed in bytes:
+
+     - total:
+       total physical memory available.
+
+     - available:
+       the actual amount of available memory that can be given
+       instantly to processes that request more memory; this
+       is calculated by summing different memory values depending on
+       the platform (e.g. free + buffers + cached on Linux) and it is
+       supposed to be used to monitor actual memory usage in a cross
+       platform fashion.
+
+     - percent:
+       the percentage usage calculated as (total - available) / total * 100
+
+     - used:
+       memory used, calculated differently depending on the platform and
+       designed for informational purposes only:
+        OSX: active + inactive + wired
+        BSD: active + wired + cached
+        LINUX: total - free
+
+     - free:
+       memory not being used at all (zeroed) that is readily available;
+       note that this doesn't reflect the actual memory available
+       (use 'available' instead)
+
+    Platform-specific fields:
+
+     - active (UNIX):
+       memory currently in use or very recently used, and so it is in RAM.
+
+     - inactive (UNIX):
+       memory that is marked as not used.
+
+     - buffers (BSD, Linux):
+       cache for things like file system metadata.
+
+     - cached (BSD, OSX):
+       cache for various things.
+
+     - wired (OSX, BSD):
+       memory that is marked to always stay in RAM. It is never moved to disk.
+
+     - shared (BSD):
+       memory that may be simultaneously accessed by multiple processes.
+
+    The sum of 'used' and 'available' does not necessarily equal total.
+    On Windows 'available' and 'free' are the same.
+    """
+    global _TOTAL_PHYMEM
+    ret = _psplatform.virtual_memory()
+    # cached for later use in Process.memory_percent()
+    _TOTAL_PHYMEM = ret.total
+    return ret
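+# A minimal usage sketch: warn when less than 10% of memory is available:
+#   >>> mem = psutil.virtual_memory()
+#   >>> if mem.available < 0.1 * mem.total:
+#   ...     print("low memory")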
+
+
+def swap_memory():
+    """Return system swap memory statistics as a namedtuple including
+    the following fields:
+
+     - total:   total swap memory in bytes
+     - used:    used swap memory in bytes
+     - free:    free swap memory in bytes
+     - percent: the percentage usage
+     - sin:     no. of bytes the system has swapped in from disk (cumulative)
+     - sout:    no. of bytes the system has swapped out from disk (cumulative)
+
+    'sin' and 'sout' on Windows are meaningless and always set to 0.
+    """
+    return _psplatform.swap_memory()
+
+
+# =====================================================================
+# --- disks/partitions related functions
+# =====================================================================
+
+def disk_usage(path):
+    """Return disk usage statistics about the given path as a namedtuple
+    including total, used and free space expressed in bytes plus the
+    percentage usage.
+    """
+    return _psplatform.disk_usage(path)
+
+
+def disk_partitions(all=False):
+    """Return mounted partitions as a list of
+    (device, mountpoint, fstype, opts) namedtuple.
+    The 'opts' field is a raw comma-separated string of mount options,
+    which may vary depending on the platform.
+
+    If "all" parameter is False return physical devices only and ignore
+    all others.
+    """
+    return _psplatform.disk_partitions(all)
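+# A minimal usage sketch: combine with disk_usage() to emulate 'df':
+#   >>> for part in psutil.disk_partitions():
+#   ...     print(part.mountpoint, psutil.disk_usage(part.mountpoint).percent)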
+
+
+def disk_io_counters(perdisk=False):
+    """Return system disk I/O statistics as a namedtuple including
+    the following fields:
+
+     - read_count:  number of reads
+     - write_count: number of writes
+     - read_bytes:  number of bytes read
+     - write_bytes: number of bytes written
+     - read_time:   time spent reading from disk (in milliseconds)
+     - write_time:  time spent writing to disk (in milliseconds)
+
+    If perdisk is True return the same information for every
+    physical disk installed on the system as a dictionary
+    with partition names as the keys and the namedtuple
+    described above as the values.
+
+    On recent Windows versions the 'diskperf -y' command may need to
+    be executed first, otherwise this function won't find any disk.
+    """
+    rawdict = _psplatform.disk_io_counters()
+    if not rawdict:
+        raise RuntimeError("couldn't find any physical disk")
+    if perdisk:
+        for disk, fields in rawdict.items():
+            rawdict[disk] = _nt_sys_diskio(*fields)
+        return rawdict
+    else:
+        return _nt_sys_diskio(*[sum(x) for x in zip(*rawdict.values())])
+
+
+# =====================================================================
+# --- network related functions
+# =====================================================================
+
+def net_io_counters(pernic=False):
+    """Return network I/O statistics as a namedtuple including
+    the following fields:
+
+     - bytes_sent:   number of bytes sent
+     - bytes_recv:   number of bytes received
+     - packets_sent: number of packets sent
+     - packets_recv: number of packets received
+     - errin:        total number of errors while receiving
+     - errout:       total number of errors while sending
+     - dropin:       total number of incoming packets which were dropped
+     - dropout:      total number of outgoing packets which were dropped
+                     (always 0 on OSX and BSD)
+
+    If pernic is True return the same information for every
+    network interface installed on the system as a dictionary
+    with network interface names as the keys and the namedtuple
+    described above as the values.
+    """
+    rawdict = _psplatform.net_io_counters()
+    if not rawdict:
+        raise RuntimeError("couldn't find any network interface")
+    if pernic:
+        for nic, fields in rawdict.items():
+            rawdict[nic] = _nt_sys_netio(*fields)
+        return rawdict
+    else:
+        return _nt_sys_netio(*[sum(x) for x in zip(*rawdict.values())])
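+# A minimal usage sketch (assumes a Linux-style loopback interface
+# named 'lo'):
+#   >>> psutil.net_io_counters(pernic=True)['lo'].bytes_sent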
+
+
+def net_connections(kind='inet'):
+    """Return system-wide connections as a list of
+    (fd, family, type, laddr, raddr, status, pid) namedtuples.
+    In case of limited privileges 'fd' and 'pid' may be set to -1
+    and None respectively.
+    The 'kind' parameter filters for connections that fit the
+    following criteria:
+
+    Kind Value      Connections using
+    inet            IPv4 and IPv6
+    inet4           IPv4
+    inet6           IPv6
+    tcp             TCP
+    tcp4            TCP over IPv4
+    tcp6            TCP over IPv6
+    udp             UDP
+    udp4            UDP over IPv4
+    udp6            UDP over IPv6
+    unix            UNIX socket (both UDP and TCP protocols)
+    all             the sum of all the possible families and protocols
+    """
+    return _psplatform.net_connections(kind)
+
+# =====================================================================
+# --- other system related functions
+# =====================================================================
+
+def boot_time():
+    """Return the system boot time expressed in seconds since the epoch.
+    This is also available as psutil.BOOT_TIME.
+    """
+    # Note: we are not caching this because it is subject to
+    # system clock updates.
+    return _psplatform.boot_time()
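+# A minimal usage sketch: render the boot time as a readable timestamp:
+#   >>> import datetime
+#   >>> datetime.datetime.fromtimestamp(psutil.boot_time()).strftime(
+#   ...     "%Y-%m-%d %H:%M:%S")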
+
+
+def users():
+    """Return users currently connected on the system as a list of
+    namedtuples including the following fields:
+
+     - user: the name of the user
+     - terminal: the tty or pseudo-tty associated with the user, if any.
+     - host: the host name associated with the entry, if any.
+     - started: the creation time as a floating point number expressed in
+       seconds since the epoch.
+    """
+    return _psplatform.users()
+
+
+# =====================================================================
+# --- deprecated functions
+# =====================================================================
+
+@_deprecated(replacement="psutil.pids()")
+def get_pid_list():
+    return pids()
+
+
+@_deprecated(replacement="list(process_iter())")
+def get_process_list():
+    return list(process_iter())
+
+
+@_deprecated(replacement="psutil.users()")
+def get_users():
+    return users()
+
+
+@_deprecated(replacement="psutil.virtual_memory()")
+def phymem_usage():
+    """Return the amount of total, used and free physical memory
+    on the system in bytes plus the percentage usage.
+    Deprecated; use psutil.virtual_memory() instead.
+    """
+    return virtual_memory()
+
+
+@_deprecated(replacement="psutil.swap_memory()")
+def virtmem_usage():
+    return swap_memory()
+
+
+@_deprecated(replacement="psutil.phymem_usage().free")
+def avail_phymem():
+    return phymem_usage().free
+
+
+@_deprecated(replacement="psutil.phymem_usage().used")
+def used_phymem():
+    return phymem_usage().used
+
+
+@_deprecated(replacement="psutil.virtmem_usage().total")
+def total_virtmem():
+    return virtmem_usage().total
+
+
+@_deprecated(replacement="psutil.virtmem_usage().used")
+def used_virtmem():
+    return virtmem_usage().used
+
+
+@_deprecated(replacement="psutil.virtmem_usage().free")
+def avail_virtmem():
+    return virtmem_usage().free
+
+
+@_deprecated(replacement="psutil.net_io_counters()")
+def network_io_counters(pernic=False):
+    return net_io_counters(pernic)
+
+
+def test():
+    """List info of all currently running processes emulating ps aux
+    output.
+    """
+    import datetime
+    from psutil._compat import print_
+
+    today_day = datetime.date.today()
+    templ = "%-10s %5s %4s %4s %7s %7s %-13s %5s %7s  %s"
+    attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',
+             'create_time', 'memory_info']
+    if _POSIX:
+        attrs.append('uids')
+        attrs.append('terminal')
+    print_(templ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "TTY",
+                    "START", "TIME", "COMMAND"))
+    for p in process_iter():
+        try:
+            pinfo = p.as_dict(attrs, ad_value='')
+        except NoSuchProcess:
+            pass
+        else:
+            if pinfo['create_time']:
+                ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])
+                if ctime.date() == today_day:
+                    ctime = ctime.strftime("%H:%M")
+                else:
+                    ctime = ctime.strftime("%b%d")
+            else:
+                ctime = ''
+            cputime = time.strftime("%M:%S",
+                                    time.localtime(sum(pinfo['cpu_times'])))
+            try:
+                user = p.username()
+            except KeyError:
+                if _POSIX:
+                    if pinfo['uids']:
+                        user = str(pinfo['uids'].real)
+                    else:
+                        user = ''
+                else:
+                    raise
+            except Error:
+                user = ''
+            if _WINDOWS and '\\' in user:
+                user = user.split('\\')[1]
+            vms = pinfo['memory_info'] and \
+                int(pinfo['memory_info'].vms / 1024) or '?'
+            rss = pinfo['memory_info'] and \
+                int(pinfo['memory_info'].rss / 1024) or '?'
+            memp = pinfo['memory_percent'] and \
+                round(pinfo['memory_percent'], 1) or '?'
+            print_(templ % (user[:10],
+                            pinfo['pid'],
+                            pinfo['cpu_percent'],
+                            memp,
+                            vms,
+                            rss,
+                            pinfo.get('terminal', '') or '?',
+                            ctime,
+                            cputime,
+                            pinfo['name'].strip() or '?'))
+
+
+def _replace_module():
+    """Dirty hack to replace the module object in order to access
+    deprecated module constants, see:
+    http://www.dr-josiah.com/2013/12/properties-on-python-modules.html
+    """
+    class ModuleWrapper(object):
+
+        def __repr__(self):
+            return repr(self._module)
+        __str__ = __repr__
+
+        @property
+        def NUM_CPUS(self):
+            msg = "NUM_CPUS constant is deprecated; use cpu_count() instead"
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return cpu_count()
+
+        @property
+        def BOOT_TIME(self):
+            msg = "BOOT_TIME constant is deprecated; use boot_time() instead"
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return boot_time()
+
+        @property
+        def TOTAL_PHYMEM(self):
+            msg = "TOTAL_PHYMEM constant is deprecated; " \
+                  "use virtual_memory().total instead"
+            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+            return virtual_memory().total
+
+    mod = ModuleWrapper()
+    mod.__dict__ = globals()
+    mod._module = sys.modules[__name__]
+    sys.modules[__name__] = mod
+
+
+_replace_module()
+del property, memoize, division, _replace_module
+if sys.version_info < (3, 0):
+    del num
+
+if __name__ == "__main__":
+    test()


[04/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.c
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.c b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.c
new file mode 100644
index 0000000..f02415c
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.c
@@ -0,0 +1,1290 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Functions specific to Sun OS Solaris platforms.
+ *
+ * Thanks to Justin Venus who originally wrote a consistent part of
+ * this in Cython which I later on translated in C.
+ */
+
+
+#include <Python.h>
+
+// fix for "Cannot use procfs in the large file compilation environment"
+// error, see:
+// http://sourceware.org/ml/gdb-patches/2010-11/msg00336.html
+#undef _FILE_OFFSET_BITS
+#define _STRUCTURED_PROC 1
+
+// fix compilation issue on SunOS 5.10, see:
+// https://code.google.com/p/psutil/issues/detail?id=421
+#define NEW_MIB_COMPLIANT
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/proc.h>
+#include <sys/swap.h>
+#include <sys/sysinfo.h>
+#include <sys/mntent.h>  // for MNTTAB
+#include <sys/mnttab.h>
+#include <sys/procfs.h>
+#include <fcntl.h>
+#include <utmpx.h>
+#include <kstat.h>
+#include <sys/ioctl.h>
+#include <sys/tihdr.h>
+#include <stropts.h>
+#include <inet/tcp.h>
+#include <arpa/inet.h>
+
+#include "_psutil_sunos.h"
+
+
+#define TV2DOUBLE(t)   (((t).tv_nsec * 0.000000001) + (t).tv_sec)
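+// e.g. a timestruc_t of {tv_sec = 2, tv_nsec = 500000000} becomes 2.5 (seconds)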
+
+/*
+ * Read a file's content and fill a C structure with it.
+ */
+int
+psutil_file_to_struct(char *path, void *fstruct, size_t size)
+{
+    int fd;
+    ssize_t nbytes;  // signed: read() returns -1 on error
+    fd = open(path, O_RDONLY);
+    if (fd == -1) {
+        PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
+        return 0;
+    }
+    nbytes = read(fd, fstruct, size);
+    if (nbytes <= 0) {
+        close(fd);
+        PyErr_SetFromErrno(PyExc_OSError);
+        return 0;
+    }
+    if (nbytes != size) {
+        close(fd);
+        PyErr_SetString(PyExc_RuntimeError, "structure size mismatch");
+        return 0;
+    }
+    close(fd);
+    return nbytes;
+}
+
+
+/*
+ * Return process ppid, rss, vms, ctime, nice, nthreads, status and tty
+ * as a Python tuple.
+ */
+static PyObject *
+psutil_proc_basic_info(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    psinfo_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/psinfo", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("ikkdiiik",
+                         info.pr_ppid,              // parent pid
+                         info.pr_rssize,            // rss
+                         info.pr_size,              // vms
+                         TV2DOUBLE(info.pr_start),  // create time
+                         info.pr_lwp.pr_nice,       // nice
+                         info.pr_nlwp,              // no. of threads
+                         info.pr_lwp.pr_state,      // status code
+                         info.pr_ttydev             // tty nr
+                        );
+}
+
+
+/*
+ * Return process name and args as a Python tuple.
+ */
+static PyObject *
+psutil_proc_name_and_args(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    psinfo_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/psinfo", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("ss", info.pr_fname, info.pr_psargs);
+}
+
+
+/*
+ * Return process user and system CPU times as a Python tuple.
+ */
+static PyObject *
+psutil_proc_cpu_times(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    pstatus_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/status", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    // results are more precise than os.times()
+    return Py_BuildValue("dd",
+                         TV2DOUBLE(info.pr_utime),
+                         TV2DOUBLE(info.pr_stime));
+}
+
+
+/*
+ * Return process uids/gids as a Python tuple.
+ */
+static PyObject *
+psutil_proc_cred(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    prcred_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/cred", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("iiiiii",
+                         info.pr_ruid, info.pr_euid, info.pr_suid,
+                         info.pr_rgid, info.pr_egid, info.pr_sgid);
+}
+
+
+/*
+ * Return process voluntary and involuntary context switches as a Python tuple.
+ */
+static PyObject *
+psutil_proc_num_ctx_switches(PyObject *self, PyObject *args)
+{
+    int pid;
+    char path[100];
+    prusage_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid))
+        return NULL;
+    sprintf(path, "/proc/%i/usage", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("kk", info.pr_vctx, info.pr_ictx);
+}
+
+
+/*
+ * Process IO counters.
+ *
+ * Commented out and left here as a reminder.  Apparently we cannot
+ * retrieve process IO stats because:
+ * - 'pr_ioch' is a sum of chars read and written, with no distinction
+ * - 'pr_inblk' and 'pr_oublk', which should be the number of bytes
+ *    read and written, hardly increase and according to:
+ *    http://www.brendangregg.com/Perf/paper_diskubyp1.pdf
+ *    ...they should be meaningless anyway.
+ *
+static PyObject*
+proc_io_counters(PyObject* self, PyObject* args)
+{
+    int pid;
+    char path[100];
+    prusage_t info;
+
+    if (! PyArg_ParseTuple(args, "i", &pid)) {
+        return NULL;
+    }
+    sprintf(path, "/proc/%i/usage", pid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info))) {
+        return NULL;
+    }
+
+    // On Solaris we only have 'pr_ioch' which accounts for bytes read
+    // *and* written.
+    // 'pr_inblk' and 'pr_oublk' should be expressed in blocks of
+    // 8KB according to:
+    // http://www.brendangregg.com/Perf/paper_diskubyp1.pdf  (pag. 8)
+    return Py_BuildValue("kkkk",
+                         info.pr_ioch,
+                         info.pr_ioch,
+                         info.pr_inblk,
+                         info.pr_oublk);
+}
+ */
+
+
+/*
+ * Return information about a given process thread.
+ */
+static PyObject *
+psutil_proc_query_thread(PyObject *self, PyObject *args)
+{
+    int pid, tid;
+    char path[100];
+    lwpstatus_t info;
+
+    if (! PyArg_ParseTuple(args, "ii", &pid, &tid))
+        return NULL;
+    sprintf(path, "/proc/%i/lwp/%i/lwpstatus", pid, tid);
+    if (! psutil_file_to_struct(path, (void *)&info, sizeof(info)))
+        return NULL;
+    return Py_BuildValue("dd",
+                         TV2DOUBLE(info.pr_utime),
+                         TV2DOUBLE(info.pr_stime));
+}
+
+
+/*
+ * Return information about system virtual memory.
+ */
+static PyObject *
+psutil_swap_mem(PyObject *self, PyObject *args)
+{
+// XXX (arghhh!)
+// total/free swap mem: commented out as for some reason I can't
+// manage to get the same results shown by "swap -l", despite the
+// code below is exactly the same as:
+// http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/
+//    cmd/swap/swap.c
+// We're going to parse "swap -l" output from Python (sigh!)
+
+/*
+    struct swaptable     *st;
+    struct swapent    *swapent;
+    int    i;
+    struct stat64 statbuf;
+    char *path;
+    char fullpath[MAXPATHLEN+1];
+    int    num;
+
+    if ((num = swapctl(SC_GETNSWP, NULL)) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+    if (num == 0) {
+        PyErr_SetString(PyExc_RuntimeError, "no swap devices configured");
+        return NULL;
+    }
+    if ((st = malloc(num * sizeof(swapent_t) + sizeof (int))) == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "malloc failed");
+        return NULL;
+    }
+    if ((path = malloc(num * MAXPATHLEN)) == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "malloc failed");
+        return NULL;
+    }
+    swapent = st->swt_ent;
+    for (i = 0; i < num; i++, swapent++) {
+        swapent->ste_path = path;
+        path += MAXPATHLEN;
+    }
+    st->swt_n = num;
+    if ((num = swapctl(SC_LIST, st)) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        return NULL;
+    }
+
+    swapent = st->swt_ent;
+    long t = 0, f = 0;
+    for (i = 0; i < num; i++, swapent++) {
+        int diskblks_per_page =(int)(sysconf(_SC_PAGESIZE) >> DEV_BSHIFT);
+        t += (long)swapent->ste_pages;
+        f += (long)swapent->ste_free;
+    }
+
+    free(st);
+    return Py_BuildValue("(kk)", t, f);
+*/
+
+    kstat_ctl_t *kc;
+    kstat_t     *k;
+    cpu_stat_t  *cpu;
+    int         cpu_count = 0;
+    int         flag = 0;
+    uint_t      sin = 0;
+    uint_t      sout = 0;
+
+    kc = kstat_open();
+    if (kc == NULL) {
+        return PyErr_SetFromErrno(PyExc_OSError);
+    }
+
+    k = kc->kc_chain;
+    while (k != NULL) {
+        if ((strncmp(k->ks_name, "cpu_stat", 8) == 0) && \
+                (kstat_read(kc, k, NULL) != -1) )
+        {
+            flag = 1;
+            cpu = (cpu_stat_t *) k->ks_data;
+            sin += cpu->cpu_vminfo.pgswapin;    // num pages swapped in
+            sout += cpu->cpu_vminfo.pgswapout;  // num pages swapped out
+        }
+        cpu_count += 1;
+        k = k->ks_next;
+    }
+    kstat_close(kc);
+    if (!flag) {
+        PyErr_SetString(PyExc_RuntimeError, "no swap device was found");
+        return NULL;
+    }
+    return Py_BuildValue("(II)", sin, sout);
+}
+
+
+/*
+ * Return users currently connected on the system.
+ */
+static PyObject *
+psutil_users(PyObject *self, PyObject *args)
+{
+    struct utmpx *ut;
+    PyObject *ret_list = PyList_New(0);
+    PyObject *tuple = NULL;
+    PyObject *user_proc = NULL;
+
+    if (ret_list == NULL)
+        return NULL;
+
+    while (NULL != (ut = getutxent())) {
+        if (ut->ut_type == USER_PROCESS)
+            user_proc = Py_True;
+        else
+            user_proc = Py_False;
+        tuple = Py_BuildValue(
+            "(sssfO)",
+            ut->ut_user,              // username
+            ut->ut_line,              // tty
+            ut->ut_host,              // hostname
+            (float)ut->ut_tv.tv_sec,  // tstamp
+            user_proc);               // (bool) user process
+        if (tuple == NULL)
+            goto error;
+        if (PyList_Append(ret_list, tuple))
+            goto error;
+        Py_DECREF(tuple);
+    }
+    endutent();
+
+    return ret_list;
+
+error:
+    Py_XDECREF(tuple);
+    Py_DECREF(ret_list);
+    if (ut != NULL)
+        endutent();
+    return NULL;
+}
+
+
+/*
+ * Return disk mounted partitions as a list of tuples including device,
+ * mount point and filesystem type.
+ */
+static PyObject *
+psutil_disk_partitions(PyObject *self, PyObject *args)
+{
+    FILE *file;
+    struct mnttab mt;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    file = fopen(MNTTAB, "rb");
+    if (file == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    while (getmntent(file, &mt) == 0) {
+        py_tuple = Py_BuildValue(
+            "(ssss)",
+            mt.mnt_special,   // device
+            mt.mnt_mountp,    // mount point
+            mt.mnt_fstype,    // fs type
+            mt.mnt_mntopts);  // options
+        if (py_tuple == NULL)
+            goto error;
+        if (PyList_Append(py_retlist, py_tuple))
+            goto error;
+        Py_DECREF(py_tuple);
+
+    }
+    fclose(file);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_DECREF(py_retlist);
+    if (file != NULL)
+        fclose(file);
+    return NULL;
+}
+
+
+/*
+ * Return system-wide CPU times.
+ */
+static PyObject *
+psutil_per_cpu_times(PyObject *self, PyObject *args)
+{
+    kstat_ctl_t *kc;
+    kstat_t *ksp;
+    cpu_stat_t cs;
+    int numcpus;
+    int i;
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_cputime = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+
+    kc = kstat_open();
+    if (kc == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    numcpus = sysconf(_SC_NPROCESSORS_ONLN) - 1;
+    for (i = 0; i <= numcpus; i++) {
+        ksp = kstat_lookup(kc, "cpu_stat", i, NULL);
+        if (ksp == NULL) {
+            PyErr_SetFromErrno(PyExc_OSError);
+            goto error;
+        }
+        if (kstat_read(kc, ksp, &cs) == -1) {
+            PyErr_SetFromErrno(PyExc_OSError);
+            goto error;
+        }
+
+        py_cputime = Py_BuildValue("ffff",
+                                   (float)cs.cpu_sysinfo.cpu[CPU_USER],
+                                   (float)cs.cpu_sysinfo.cpu[CPU_KERNEL],
+                                   (float)cs.cpu_sysinfo.cpu[CPU_IDLE],
+                                   (float)cs.cpu_sysinfo.cpu[CPU_WAIT]);
+        if (py_cputime == NULL)
+            goto error;
+        if (PyList_Append(py_retlist, py_cputime))
+            goto error;
+        Py_DECREF(py_cputime);
+
+    }
+
+    kstat_close(kc);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_cputime);
+    Py_DECREF(py_retlist);
+    if (kc != NULL)
+        kstat_close(kc);
+    return NULL;
+}
+
+
+/*
+ * Return disk IO statistics.
+ */
+static PyObject *
+psutil_disk_io_counters(PyObject *self, PyObject *args)
+{
+    kstat_ctl_t *kc;
+    kstat_t *ksp;
+    kstat_io_t kio;
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_disk_info = NULL;
+
+    if (py_retdict == NULL)
+        return NULL;
+    kc = kstat_open();
+    if (kc == NULL) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    ksp = kc->kc_chain;
+    while (ksp != NULL) {
+        if (ksp->ks_type == KSTAT_TYPE_IO) {
+            if (strcmp(ksp->ks_class, "disk") == 0) {
+                if (kstat_read(kc, ksp, &kio) == -1) {
+                    // go through the common error path so py_retdict
+                    // is released as well
+                    PyErr_SetFromErrno(PyExc_OSError);
+                    goto error;
+                }
+                py_disk_info = Py_BuildValue(
+                    "(IIKKLL)",
+                    kio.reads,
+                    kio.writes,
+                    kio.nread,
+                    kio.nwritten,
+                    kio.rtime / 1000 / 1000,  // nanoseconds -> milliseconds
+                    kio.wtime / 1000 / 1000   // nanoseconds -> milliseconds
+                );
+                if (!py_disk_info)
+                    goto error;
+                if (PyDict_SetItemString(py_retdict, ksp->ks_name,
+                                         py_disk_info))
+                    goto error;
+                Py_DECREF(py_disk_info);
+            }
+        }
+        ksp = ksp->ks_next;
+    }
+    kstat_close(kc);
+
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_disk_info);
+    Py_DECREF(py_retdict);
+    if (kc != NULL)
+        kstat_close(kc);
+    return NULL;
+}
+
+
+/*
+ * Return process memory mappings.
+ */
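+/*
+ * Implementation note: /proc/<pid>/xmap is read in one shot as an
+ * array of prxmap_t structs; pr_mflags is decoded into an ls-style
+ * permission string and, when pr_mapname is empty, the mapping name
+ * falls back to [shmid], [stack], [heap] or [anon], similar to the
+ * labels pmap(1) prints.
+ */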
+static PyObject *
+psutil_proc_memory_maps(PyObject *self, PyObject *args)
+{
+    int pid;
+    int fd = -1;
+    char path[100];
+    char perms[10];
+    char *name;
+    struct stat st;
+    pstatus_t status;
+
+    prxmap_t *xmap = NULL, *p;
+    off_t size;
+    size_t nread;
+    int nmap;
+    uintptr_t pr_addr_sz;
+    uintptr_t stk_base_sz, brk_base_sz;
+
+    PyObject *pytuple = NULL;
+    PyObject *py_retlist = PyList_New(0);
+
+    if (py_retlist == NULL) {
+        return NULL;
+    }
+    if (! PyArg_ParseTuple(args, "i", &pid)) {
+        goto error;
+    }
+
+    sprintf(path, "/proc/%i/status", pid);
+    if (! psutil_file_to_struct(path, (void *)&status, sizeof(status))) {
+        goto error;
+    }
+
+    sprintf(path, "/proc/%i/xmap", pid);
+    if (stat(path, &st) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    size = st.st_size;
+
+    fd = open(path, O_RDONLY);
+    if (fd == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    xmap = (prxmap_t *)malloc(size);
+    if (xmap == NULL) {
+        PyErr_NoMemory();
+        goto error;
+    }
+
+    nread = pread(fd, xmap, size, 0);
+    if ((ssize_t)nread == -1) {
+        // without this check a failed read would leave nread == (size_t)-1
+        // and nmap absurdly large
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    nmap = nread / sizeof(prxmap_t);
+    p = xmap;
+
+    while (nmap) {
+        nmap -= 1;
+        if (p == NULL) {
+            p += 1;
+            continue;
+        }
+
+        perms[0] = '\0';
+        pr_addr_sz = p->pr_vaddr + p->pr_size;
+
+        // perms
+        sprintf(perms, "%c%c%c%c%c%c", p->pr_mflags & MA_READ ? 'r' : '-',
+                p->pr_mflags & MA_WRITE ? 'w' : '-',
+                p->pr_mflags & MA_EXEC ? 'x' : '-',
+                p->pr_mflags & MA_SHARED ? 's' : '-',
+                p->pr_mflags & MA_NORESERVE ? 'R' : '-',
+                p->pr_mflags & MA_RESERVED1 ? '*' : ' ');
+
+        // name
+        if (strlen(p->pr_mapname) > 0) {
+            name = p->pr_mapname;
+        }
+        else {
+            if ((p->pr_mflags & MA_ISM) || (p->pr_mflags & MA_SHM)) {
+                name = "[shmid]";
+            }
+            else {
+                stk_base_sz = status.pr_stkbase + status.pr_stksize;
+                brk_base_sz = status.pr_brkbase + status.pr_brksize;
+
+                if ((pr_addr_sz > status.pr_stkbase) &&
+                        (p->pr_vaddr < stk_base_sz)) {
+                    name = "[stack]";
+                }
+                else if ((p->pr_mflags & MA_ANON) && \
+                         (pr_addr_sz > status.pr_brkbase) && \
+                         (p->pr_vaddr < brk_base_sz)) {
+                    name = "[heap]";
+                }
+                else {
+                    name = "[anon]";
+                }
+            }
+        }
+
+        pytuple = Py_BuildValue("iisslll",
+                                p->pr_vaddr,
+                                pr_addr_sz,
+                                perms,
+                                name,
+                                (long)p->pr_rss * p->pr_pagesize,
+                                (long)p->pr_anon * p->pr_pagesize,
+                                (long)p->pr_locked * p->pr_pagesize);
+        if (!pytuple)
+            goto error;
+        if (PyList_Append(py_retlist, pytuple))
+            goto error;
+        Py_DECREF(pytuple);
+
+        // increment pointer
+        p += 1;
+    }
+
+    close(fd);
+    free(xmap);
+    return py_retlist;
+
+error:
+    if (fd != -1)
+        close(fd);
+    Py_XDECREF(pytuple);
+    Py_DECREF(py_retlist);
+    if (xmap != NULL)
+        free(xmap);
+    return NULL;
+}
+
+
+/*
+ * Return a list of tuples for network I/O statistics.
+ */
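+/*
+ * Implementation note: per-NIC counters come from KSTAT_TYPE_NAMED
+ * kstats in class "net", module "link"; the individual counters
+ * (rbytes, obytes, ipackets, opackets, ierrors, oerrors) are fetched
+ * with kstat_data_lookup().  dropin/dropout are not available on this
+ * platform and are reported as 0.
+ */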
+static PyObject *
+psutil_net_io_counters(PyObject *self, PyObject *args)
+{
+    kstat_ctl_t    *kc = NULL;
+    kstat_t *ksp;
+    kstat_named_t *rbytes, *wbytes, *rpkts, *wpkts, *ierrs, *oerrs;
+
+    PyObject *py_retdict = PyDict_New();
+    PyObject *py_ifc_info = NULL;
+
+    if (py_retdict == NULL)
+        return NULL;
+    kc = kstat_open();
+    if (kc == NULL) {
+        // set an exception; a bare "goto error" would return NULL
+        // without any Python error set
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    ksp = kc->kc_chain;
+    while (ksp != NULL) {
+        if (ksp->ks_type != KSTAT_TYPE_NAMED)
+            goto next;
+        if (strcmp(ksp->ks_class, "net") != 0)
+            goto next;
+        /*
+        // XXX "lo" (localhost) interface makes kstat_data_lookup() fail
+        // (maybe because "ifconfig -a" says it's a virtual interface?).
+        if ((strcmp(ksp->ks_module, "link") != 0) &&
+            (strcmp(ksp->ks_module, "lo") != 0)) {
+            goto skip;
+        */
+        if ((strcmp(ksp->ks_module, "link") != 0)) {
+            goto next;
+        }
+
+        if (kstat_read(kc, ksp, NULL) == -1) {
+            errno = 0;
+            goto next;  // 'continue' would re-test the same ksp forever
+        }
+
+        rbytes = (kstat_named_t *)kstat_data_lookup(ksp, "rbytes");
+        wbytes = (kstat_named_t *)kstat_data_lookup(ksp, "obytes");
+        rpkts = (kstat_named_t *)kstat_data_lookup(ksp, "ipackets");
+        wpkts = (kstat_named_t *)kstat_data_lookup(ksp, "opackets");
+        ierrs = (kstat_named_t *)kstat_data_lookup(ksp, "ierrors");
+        oerrs = (kstat_named_t *)kstat_data_lookup(ksp, "oerrors");
+
+        if ((rbytes == NULL) || (wbytes == NULL) || (rpkts == NULL) ||
+                (wpkts == NULL) || (ierrs == NULL) || (oerrs == NULL))
+        {
+            PyErr_SetString(PyExc_RuntimeError, "kstat_data_lookup() failed");
+            goto error;
+        }
+
+#if defined(_INT64_TYPE)
+        py_ifc_info = Py_BuildValue("(KKKKkkii)",
+                                    rbytes->value.ui64,
+                                    wbytes->value.ui64,
+                                    rpkts->value.ui64,
+                                    wpkts->value.ui64,
+                                    ierrs->value.ui32,
+                                    oerrs->value.ui32,
+#else
+        py_ifc_info = Py_BuildValue("(kkkkkkii)",
+                                    rbytes->value.ui32,
+                                    wbytes->value.ui32,
+                                    rpkts->value.ui32,
+                                    wpkts->value.ui32,
+                                    ierrs->value.ui32,
+                                    oerrs->value.ui32,
+#endif
+                                    0,  // dropin not supported
+                                    0   // dropout not supported
+                                   );
+        if (!py_ifc_info)
+            goto error;
+        if (PyDict_SetItemString(py_retdict, ksp->ks_name, py_ifc_info))
+            goto error;
+        Py_DECREF(py_ifc_info);
+
+next:
+        ksp = ksp->ks_next;
+    }
+
+    kstat_close(kc);
+    return py_retdict;
+
+error:
+    Py_XDECREF(py_ifc_info);
+    Py_DECREF(py_retdict);
+    if (kc != NULL)
+        kstat_close(kc);
+    return NULL;
+}
+
+
+#ifndef EXPER_IP_AND_ALL_IRES
+#define EXPER_IP_AND_ALL_IRES   (1024+4)
+#endif
+
+// sentinel value for connections that have no meaningful status (e.g. UDP)
+static int PSUTIL_CONN_NONE = 128;
+
+/*
+ * Return TCP and UDP connections opened by process.
+ * UNIX sockets are excluded.
+ *
+ * Thanks to:
+ * https://github.com/DavidGriffith/finx/blob/master/
+ *     nxsensor-3.5.0-1/src/sysdeps/solaris.c
+ * ...and:
+ * https://hg.java.net/hg/solaris~on-src/file/tip/usr/src/cmd/
+ *     cmd-inet/usr.bin/netstat/netstat.c
+ */
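+/*
+ * Implementation note: connection tables are obtained through the
+ * SNMP/MIB2 streams interface: a T_SVR4_OPTMGMT_REQ asking for
+ * EXPER_IP_AND_ALL_IRES is putmsg()'d to /dev/arp, then getmsg() is
+ * called in a loop, each reply carrying one MIB table (TCP, TCP6,
+ * UDP, UDP6) that is walked entry by entry.
+ */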
+static PyObject *
+psutil_net_connections(PyObject *self, PyObject *args)
+{
+    long pid;
+    int sd = -1;
+    mib2_tcpConnEntry_t *tp = NULL;
+    mib2_udpEntry_t     *ude;
+#if defined(AF_INET6)
+    mib2_tcp6ConnEntry_t *tp6;
+    mib2_udp6Entry_t     *ude6;
+#endif
+    char buf[512];
+    int i, flags, getcode, num_ent, state;
+    char lip[200], rip[200];
+    int lport, rport;
+    int processed_pid;
+    struct strbuf ctlbuf, databuf;
+    struct T_optmgmt_req *tor = (struct T_optmgmt_req *)buf;
+    struct T_optmgmt_ack *toa = (struct T_optmgmt_ack *)buf;
+    struct T_error_ack   *tea = (struct T_error_ack *)buf;
+    struct opthdr        *mibhdr;
+
+    PyObject *py_retlist = PyList_New(0);
+    PyObject *py_tuple = NULL;
+    PyObject *py_laddr = NULL;
+    PyObject *py_raddr = NULL;
+    PyObject *af_filter = NULL;
+    PyObject *type_filter = NULL;
+
+    if (py_retlist == NULL)
+        return NULL;
+    databuf.buf = NULL;  // so the error path can safely free() it
+    if (! PyArg_ParseTuple(args, "lOO", &pid, &af_filter, &type_filter))
+        goto error;
+    if (!PySequence_Check(af_filter) || !PySequence_Check(type_filter)) {
+        PyErr_SetString(PyExc_TypeError, "arg 2 or 3 is not a sequence");
+        goto error;
+    }
+
+    sd = open("/dev/arp", O_RDWR);
+    if (sd == -1) {
+        PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/dev/arp");
+        goto error;
+    }
+
+    /*
+    XXX - these two I_PUSH ioctl()s are used in ifconfig.c but seem unnecessary
+    ret = ioctl(sd, I_PUSH, "tcp");
+    if (ret == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    ret = ioctl(sd, I_PUSH, "udp");
+    if (ret == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+    */
+
+    // OK, this mess is basically copied and pasted from nxsensor project
+    // which copied and pasted it from netstat source code, mibget()
+    // function.  Also see:
+    // http://stackoverflow.com/questions/8723598/
+    tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
+    tor->OPT_offset = sizeof (struct T_optmgmt_req);
+    tor->OPT_length = sizeof (struct opthdr);
+    tor->MGMT_flags = T_CURRENT;
+    mibhdr = (struct opthdr *)&tor[1];
+    mibhdr->level = EXPER_IP_AND_ALL_IRES;
+    mibhdr->name  = 0;
+    mibhdr->len   = 0;
+
+    ctlbuf.buf = buf;
+    ctlbuf.len = tor->OPT_offset + tor->OPT_length;
+    flags = 0;  // request to be sent in non-priority
+
+    if (putmsg(sd, &ctlbuf, (struct strbuf *)0, flags) == -1) {
+        PyErr_SetFromErrno(PyExc_OSError);
+        goto error;
+    }
+
+    mibhdr = (struct opthdr *)&toa[1];
+    ctlbuf.maxlen = sizeof (buf);
+
+    for (;;) {
+        flags = 0;
+        getcode = getmsg(sd, &ctlbuf, (struct strbuf *)0, &flags);
+
+        if (getcode != MOREDATA ||
+                ctlbuf.len < sizeof (struct T_optmgmt_ack) ||
+                toa->PRIM_type != T_OPTMGMT_ACK ||
+                toa->MGMT_flags != T_SUCCESS)
+        {
+            break;
+        }
+        if (ctlbuf.len >= sizeof (struct T_error_ack) &&
+                tea->PRIM_type == T_ERROR_ACK)
+        {
+            PyErr_SetString(PyExc_RuntimeError, "ERROR_ACK");
+            goto error;
+        }
+        if (getcode == 0 &&
+                ctlbuf.len >= sizeof (struct T_optmgmt_ack) &&
+                toa->PRIM_type == T_OPTMGMT_ACK &&
+                toa->MGMT_flags == T_SUCCESS)
+        {
+            PyErr_SetString(PyExc_RuntimeError, "ERROR_T_OPTMGMT_ACK");
+            goto error;
+        }
+
+        databuf.maxlen = mibhdr->len;
+        databuf.len = 0;
+        databuf.buf = (char *)malloc((int)mibhdr->len);
+        if (!databuf.buf) {
+            PyErr_NoMemory();
+            goto error;
+        }
+
+        flags = 0;
+        getcode = getmsg(sd, (struct strbuf *)0, &databuf, &flags);
+        if (getcode < 0) {
+            PyErr_SetFromErrno(PyExc_OSError);
+            goto error;
+        }
+
+        // TCPv4
+        if (mibhdr->level == MIB2_TCP && mibhdr->name == MIB2_TCP_13) {
+            tp = (mib2_tcpConnEntry_t *)databuf.buf;
+            num_ent = mibhdr->len / sizeof(mib2_tcpConnEntry_t);
+            for (i = 0; i < num_ent; i++, tp++) {
+                processed_pid = tp->tcpConnCreationProcess;
+                if (pid != -1 && processed_pid != pid)
+                    continue;
+                // construct local/remote addresses
+                inet_ntop(AF_INET, &tp->tcpConnLocalAddress, lip, sizeof(lip));
+                inet_ntop(AF_INET, &tp->tcpConnRemAddress, rip, sizeof(rip));
+                lport = tp->tcpConnLocalPort;
+                rport = tp->tcpConnRemPort;
+
+                // construct python tuple/list
+                py_laddr = Py_BuildValue("(si)", lip, lport);
+                if (!py_laddr)
+                    goto error;
+                if (rport != 0) {
+                    py_raddr = Py_BuildValue("(si)", rip, rport);
+                }
+                else {
+                    py_raddr = Py_BuildValue("()");
+                }
+                if (!py_raddr)
+                    goto error;
+                state = tp->tcpConnEntryInfo.ce_state;
+
+                // add item
+                py_tuple = Py_BuildValue("(iiiNNiI)", -1, AF_INET, SOCK_STREAM,
+                                         py_laddr, py_raddr, state,
+                                         processed_pid);
+                if (!py_tuple) {
+                    goto error;
+                }
+                if (PyList_Append(py_retlist, py_tuple))
+                    goto error;
+                Py_DECREF(py_tuple);
+            }
+        }
+#if defined(AF_INET6)
+        // TCPv6
+        else if (mibhdr->level == MIB2_TCP6 && mibhdr->name == MIB2_TCP6_CONN)
+        {
+            tp6 = (mib2_tcp6ConnEntry_t *)databuf.buf;
+            num_ent = mibhdr->len / sizeof(mib2_tcp6ConnEntry_t);
+
+            for (i = 0; i < num_ent; i++, tp6++) {
+                processed_pid = tp6->tcp6ConnCreationProcess;
+                if (pid != -1 && processed_pid != pid)
+                    continue;
+                // construct local/remote addresses
+                inet_ntop(AF_INET6, &tp6->tcp6ConnLocalAddress, lip, sizeof(lip));
+                inet_ntop(AF_INET6, &tp6->tcp6ConnRemAddress, rip, sizeof(rip));
+                lport = tp6->tcp6ConnLocalPort;
+                rport = tp6->tcp6ConnRemPort;
+
+                // construct python tuple/list
+                py_laddr = Py_BuildValue("(si)", lip, lport);
+                if (!py_laddr)
+                    goto error;
+                if (rport != 0) {
+                    py_raddr = Py_BuildValue("(si)", rip, rport);
+                }
+                else {
+                    py_raddr = Py_BuildValue("()");
+                }
+                if (!py_raddr)
+                    goto error;
+                state = tp6->tcp6ConnEntryInfo.ce_state;
+
+                // add item
+                py_tuple = Py_BuildValue("(iiiNNiI)", -1, AF_INET6, SOCK_STREAM,
+                                         py_laddr, py_raddr, state, processed_pid);
+                if (!py_tuple) {
+                    goto error;
+                }
+                if (PyList_Append(py_retlist, py_tuple))
+                    goto error;
+                Py_DECREF(py_tuple);
+            }
+        }
+#endif
+        // UDPv4
+        else if (mibhdr->level == MIB2_UDP || mibhdr->level == MIB2_UDP_ENTRY) {
+            ude = (mib2_udpEntry_t *)databuf.buf;
+            num_ent = mibhdr->len / sizeof(mib2_udpEntry_t);
+            for (i = 0; i < num_ent; i++, ude++) {
+                processed_pid = ude->udpCreationProcess;
+                if (pid != -1 && processed_pid != pid)
+                    continue;
+                // XXX Very ugly hack! It seems we get here only the first
+                // time we bump into a UDPv4 socket.  PID is a very high
+                // number (clearly impossible) and the address does not
+                // belong to any valid interface.  Not sure what else
+                // to do other than skipping.
+                if (processed_pid > 131072)
+                    continue;
+                inet_ntop(AF_INET, &ude->udpLocalAddress, lip, sizeof(lip));
+                lport = ude->udpLocalPort;
+                py_laddr = Py_BuildValue("(si)", lip, lport);
+                if (!py_laddr)
+                    goto error;
+                py_raddr = Py_BuildValue("()");
+                if (!py_raddr)
+                    goto error;
+                py_tuple = Py_BuildValue("(iiiNNiI)", -1, AF_INET, SOCK_DGRAM,
+                                         py_laddr, py_raddr, PSUTIL_CONN_NONE,
+                                         processed_pid);
+                if (!py_tuple) {
+                    goto error;
+                }
+                if (PyList_Append(py_retlist, py_tuple))
+                    goto error;
+                Py_DECREF(py_tuple);
+            }
+        }
+#if defined(AF_INET6)
+        // UDPv6
+        else if (mibhdr->level == MIB2_UDP6 || mibhdr->level == MIB2_UDP6_ENTRY) {
+            ude6 = (mib2_udp6Entry_t *)databuf.buf;
+            num_ent = mibhdr->len / sizeof(mib2_udp6Entry_t);
+            for (i = 0; i < num_ent; i++, ude6++) {
+                processed_pid = ude6->udp6CreationProcess;
+                if (pid != -1 && processed_pid != pid)
+                    continue;
+                inet_ntop(AF_INET6, &ude6->udp6LocalAddress, lip, sizeof(lip));
+                lport = ude6->udp6LocalPort;
+                py_laddr = Py_BuildValue("(si)", lip, lport);
+                if (!py_laddr)
+                    goto error;
+                py_raddr = Py_BuildValue("()");
+                if (!py_raddr)
+                    goto error;
+                py_tuple = Py_BuildValue("(iiiNNiI)", -1, AF_INET6, SOCK_DGRAM,
+                                         py_laddr, py_raddr, PSUTIL_CONN_NONE,
+                                         processed_pid);
+                if (!py_tuple) {
+                    goto error;
+                }
+                if (PyList_Append(py_retlist, py_tuple))
+                    goto error;
+                Py_DECREF(py_tuple);
+            }
+        }
+#endif
+        free(databuf.buf);
+        databuf.buf = NULL;
+    }
+
+    close(sd);
+    return py_retlist;
+
+error:
+    Py_XDECREF(py_tuple);
+    Py_XDECREF(py_laddr);
+    Py_XDECREF(py_raddr);
+    Py_DECREF(py_retlist);
+    if (databuf.buf != NULL)
+        free(databuf.buf);
+    if (sd != -1)
+        close(sd);
+    return NULL;
+}
+
+
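+/*
+ * Return system boot time expressed in seconds since the epoch
+ * (scans the utmpx database for the BOOT_TIME record).
+ */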
+static PyObject *
+psutil_boot_time(PyObject *self, PyObject *args)
+{
+    float boot_time = 0.0;
+    struct utmpx *ut;
+
+    while (NULL != (ut = getutxent())) {
+        if (ut->ut_type == BOOT_TIME) {
+            boot_time = (float)ut->ut_tv.tv_sec;
+            break;
+        }
+    }
+    endutent();
+    if (boot_time != 0.0) {
+        return Py_BuildValue("f", boot_time);
+    }
+    else {
+        PyErr_SetString(PyExc_RuntimeError, "can't determine boot time");
+        return NULL;
+    }
+}
+
+
+/*
+ * Return the number of physical CPU cores on the system.
+ */
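+/*
+ * Implementation note: this counts "cpu_info" kstat instances on the
+ * chain; on failure it returns None rather than raising, mimicking
+ * os.cpu_count().
+ */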
+static PyObject *
+psutil_cpu_count_phys(PyObject *self, PyObject *args)
+{
+    kstat_ctl_t *kc;
+    kstat_t *ksp;
+    int ncpus = 0;
+
+    kc = kstat_open();
+    if (kc == NULL)
+        goto error;
+    ksp = kstat_lookup(kc, "cpu_info", -1, NULL);
+    if (ksp == NULL)
+        goto error;
+
+    for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
+        if (strcmp(ksp->ks_module, "cpu_info") != 0)
+            continue;
+        if (kstat_read(kc, ksp, NULL) == NULL)
+            goto error;
+        ncpus += 1;
+    }
+
+    kstat_close(kc);
+    if (ncpus > 0)
+        return Py_BuildValue("i", ncpus);
+    else
+        goto error;
+
+error:
+    // mimic os.cpu_count()
+    if (kc != NULL)
+        kstat_close(kc);
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+/*
+ * define the psutil C module methods and initialize the module.
+ */
+static PyMethodDef
+PsutilMethods[] =
+{
+    // --- process-related functions
+    {"proc_basic_info", psutil_proc_basic_info, METH_VARARGS,
+     "Return process ppid, rss, vms, ctime, nice, nthreads, status and tty"},
+    {"proc_name_and_args", psutil_proc_name_and_args, METH_VARARGS,
+     "Return process name and args."},
+    {"proc_cpu_times", psutil_proc_cpu_times, METH_VARARGS,
+     "Return process user and system CPU times."},
+    {"proc_cred", psutil_proc_cred, METH_VARARGS,
+     "Return process uids/gids."},
+    {"query_process_thread", psutil_proc_query_thread, METH_VARARGS,
+     "Return info about a process thread"},
+    {"proc_memory_maps", psutil_proc_memory_maps, METH_VARARGS,
+     "Return process memory mappings"},
+    {"proc_num_ctx_switches", psutil_proc_num_ctx_switches, METH_VARARGS,
+     "Return the number of context switches performed by process"},
+
+    // --- system-related functions
+    {"swap_mem", psutil_swap_mem, METH_VARARGS,
+     "Return information about system swap memory."},
+    {"users", psutil_users, METH_VARARGS,
+     "Return currently connected users."},
+    {"disk_partitions", psutil_disk_partitions, METH_VARARGS,
+     "Return disk partitions."},
+    {"per_cpu_times", psutil_per_cpu_times, METH_VARARGS,
+     "Return system per-CPU times."},
+    {"disk_io_counters", psutil_disk_io_counters, METH_VARARGS,
+     "Return a Python dict of tuples for disk I/O statistics."},
+    {"net_io_counters", psutil_net_io_counters, METH_VARARGS,
+     "Return a Python dict of tuples for network I/O statistics."},
+    {"boot_time", psutil_boot_time, METH_VARARGS,
+     "Return system boot time in seconds since the EPOCH."},
+    {"cpu_count_phys", psutil_cpu_count_phys, METH_VARARGS,
+     "Return the number of physical CPUs on the system."},
+    {"net_connections", psutil_net_connections, METH_VARARGS,
+     "Return TCP and UDP syste-wide open connections."},
+
+    {NULL, NULL, 0, NULL}
+};
+
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+static int
+psutil_sunos_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int
+psutil_sunos_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "psutil_sunos",
+    NULL,
+    sizeof(struct module_state),
+    PsutilMethods,
+    NULL,
+    psutil_sunos_traverse,
+    psutil_sunos_clear,
+    NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__psutil_sunos(void)
+
+#else
+#define INITERROR return
+
+void init_psutil_sunos(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("_psutil_sunos", PsutilMethods);
+#endif
+    PyModule_AddIntConstant(module, "SSLEEP", SSLEEP);
+    PyModule_AddIntConstant(module, "SRUN", SRUN);
+    PyModule_AddIntConstant(module, "SZOMB", SZOMB);
+    PyModule_AddIntConstant(module, "SSTOP", SSTOP);
+    PyModule_AddIntConstant(module, "SIDL", SIDL);
+    PyModule_AddIntConstant(module, "SONPROC", SONPROC);
+    PyModule_AddIntConstant(module, "SWAIT", SWAIT);
+
+    PyModule_AddIntConstant(module, "PRNODEV", PRNODEV);  // for process tty
+
+    PyModule_AddIntConstant(module, "TCPS_CLOSED", TCPS_CLOSED);
+    PyModule_AddIntConstant(module, "TCPS_CLOSING", TCPS_CLOSING);
+    PyModule_AddIntConstant(module, "TCPS_CLOSE_WAIT", TCPS_CLOSE_WAIT);
+    PyModule_AddIntConstant(module, "TCPS_LISTEN", TCPS_LISTEN);
+    PyModule_AddIntConstant(module, "TCPS_ESTABLISHED", TCPS_ESTABLISHED);
+    PyModule_AddIntConstant(module, "TCPS_SYN_SENT", TCPS_SYN_SENT);
+    PyModule_AddIntConstant(module, "TCPS_SYN_RCVD", TCPS_SYN_RCVD);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_1", TCPS_FIN_WAIT_1);
+    PyModule_AddIntConstant(module, "TCPS_FIN_WAIT_2", TCPS_FIN_WAIT_2);
+    PyModule_AddIntConstant(module, "TCPS_LAST_ACK", TCPS_LAST_ACK);
+    PyModule_AddIntConstant(module, "TCPS_TIME_WAIT", TCPS_TIME_WAIT);
+    // sunos specific
+    PyModule_AddIntConstant(module, "TCPS_IDLE", TCPS_IDLE);
+    // sunos specific
+    PyModule_AddIntConstant(module, "TCPS_BOUND", TCPS_BOUND);
+    PyModule_AddIntConstant(module, "PSUTIL_CONN_NONE", PSUTIL_CONN_NONE);
+
+    if (module == NULL) {
+        INITERROR;
+    }
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}
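+
+/*
+ * Hypothetical interactive usage once the extension is compiled (the
+ * module is normally consumed via psutil's Python wrappers rather than
+ * called directly; values below are illustrative only):
+ *
+ *     >>> import _psutil_sunos
+ *     >>> _psutil_sunos.cpu_count_phys()
+ *     8
+ *     >>> _psutil_sunos.disk_partitions()
+ *     [('/dev/dsk/c0t0d0s0', '/', 'ufs', 'rw,intr,largefiles,logging')]
+ */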

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.h
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.h b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.h
new file mode 100644
index 0000000..414a7d8
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psutil_sunos.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <Python.h>
+
+// processes
+static PyObject* psutil_proc_basic_info(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_cred(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_memory_maps(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_name_and_args(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_num_ctx_switches(PyObject* self, PyObject* args);
+static PyObject* psutil_proc_query_thread(PyObject* self, PyObject* args);
+
+// system
+static PyObject* psutil_boot_time(PyObject* self, PyObject* args);
+static PyObject* psutil_cpu_count_phys(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_disk_partitions(PyObject* self, PyObject* args);
+static PyObject* psutil_net_io_counters(PyObject* self, PyObject* args);
+static PyObject* psutil_per_cpu_times(PyObject* self, PyObject* args);
+static PyObject* psutil_swap_mem(PyObject* self, PyObject* args);
+static PyObject* psutil_users(PyObject* self, PyObject* args);
+static PyObject* psutil_net_connections(PyObject* self, PyObject* args);