You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by sw...@apache.org on 2014/09/22 20:02:18 UTC

[08/22] AMBARI-5707. Metrics system prototype implementation. (swagle)

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py
new file mode 100644
index 0000000..3d2f27c
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_common.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common objects shared by all _ps* modules."""
+
+from __future__ import division
+import errno
+import os
+import socket
+import stat
+import sys
+import warnings
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading
+
+from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
+
+from psutil._compat import namedtuple, wraps
+
+# --- constants
+
+AF_INET6 = getattr(socket, 'AF_INET6', None)
+AF_UNIX = getattr(socket, 'AF_UNIX', None)
+
+STATUS_RUNNING = "running"
+STATUS_SLEEPING = "sleeping"
+STATUS_DISK_SLEEP = "disk-sleep"
+STATUS_STOPPED = "stopped"
+STATUS_TRACING_STOP = "tracing-stop"
+STATUS_ZOMBIE = "zombie"
+STATUS_DEAD = "dead"
+STATUS_WAKE_KILL = "wake-kill"
+STATUS_WAKING = "waking"
+STATUS_IDLE = "idle"  # BSD
+STATUS_LOCKED = "locked"  # BSD
+STATUS_WAITING = "waiting"  # BSD
+
+CONN_ESTABLISHED = "ESTABLISHED"
+CONN_SYN_SENT = "SYN_SENT"
+CONN_SYN_RECV = "SYN_RECV"
+CONN_FIN_WAIT1 = "FIN_WAIT1"
+CONN_FIN_WAIT2 = "FIN_WAIT2"
+CONN_TIME_WAIT = "TIME_WAIT"
+CONN_CLOSE = "CLOSE"
+CONN_CLOSE_WAIT = "CLOSE_WAIT"
+CONN_LAST_ACK = "LAST_ACK"
+CONN_LISTEN = "LISTEN"
+CONN_CLOSING = "CLOSING"
+CONN_NONE = "NONE"
+
+
+# --- functions
+
def usage_percent(used, total, _round=None):
    """Return 'used' as a percentage of 'total'.

    A zero 'total' yields 0 instead of raising ZeroDivisionError.
    When '_round' is given the result is rounded to that many
    decimal places.
    """
    try:
        pct = (used / total) * 100
    except ZeroDivisionError:
        # e.g. an empty filesystem / zero-sized resource
        pct = 0
    return pct if _round is None else round(pct, _round)
+
+
def memoize(fun):
    """A simple memoize decorator for functions supporting (hashable)
    positional arguments.
    It also provides a cache_clear() function for clearing the cache:

    >>> @memoize
    ... def foo():
    ...     return 1
    ...
    >>> foo()
    1
    >>> foo.cache_clear()
    >>>
    """
    # (fixed: the doctest example above was missing the colon after
    # "def foo()", making it invalid syntax)
    @wraps(fun)
    def wrapper(*args, **kwargs):
        # kwargs go through frozenset(sorted(...)) so that the key is
        # hashable and independent of keyword ordering
        key = (args, frozenset(sorted(kwargs.items())))
        lock.acquire()
        try:
            try:
                return cache[key]
            except KeyError:
                ret = cache[key] = fun(*args, **kwargs)
        finally:
            lock.release()
        return ret

    def cache_clear():
        """Clear cache."""
        lock.acquire()
        try:
            cache.clear()
        finally:
            lock.release()

    # one private lock/cache pair per decorated function
    lock = threading.RLock()
    cache = {}
    wrapper.cache_clear = cache_clear
    return wrapper
+
+
# http://code.activestate.com/recipes/577819-deprecated-decorator/
def deprecated(replacement=None):
    """Decorator marking a function as deprecated.

    Each call emits a DeprecationWarning; if 'replacement' is given it
    is named in the warning text.  Functions lacking a docstring get
    the warning text as their docstring.
    """
    def outer(fun):
        if replacement is None:
            msg = "psutil.%s is deprecated" % fun.__name__
        else:
            msg = "psutil.%s is deprecated; use %s instead" % (
                fun.__name__, replacement)
        if fun.__doc__ is None:
            fun.__doc__ = msg

        @wraps(fun)
        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
            return fun(*args, **kwargs)
        return wrapped
    return outer
+
+
def deprecated_method(replacement):
    """A decorator which can be used to mark a method as deprecated.

    'replacement' is the name of the method which will be called
    instead.  (fixed: docstring previously misspelled it "replcement")
    """
    def outer(fun):
        msg = "%s() is deprecated; use %s() instead" % (
            fun.__name__, replacement)
        if fun.__doc__ is None:
            fun.__doc__ = msg

        @wraps(fun)
        def inner(self, *args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
            # delegate to the replacement method on the same instance
            return getattr(self, replacement)(*args, **kwargs)
        return inner
    return outer
+
+
def isfile_strict(path):
    """Same as os.path.isfile() but does not swallow EACCES / EPERM
    exceptions, see:
    http://mail.python.org/pipermail/python-dev/2012-June/120787.html
    """
    try:
        mode = os.stat(path).st_mode
    except OSError:
        err = sys.exc_info()[1]
        # permission problems are re-raised rather than being reported
        # as "not a regular file"
        if err.errno in (errno.EPERM, errno.EACCES):
            raise
        return False
    return stat.S_ISREG(mode)
+
+
# --- Process.connections() 'kind' parameter mapping

# Maps each accepted 'kind' string onto the pair
# (address families, socket types) used to filter connections.
# NOTE(review): the base entries may contain None where AF_INET6 /
# AF_UNIX are missing on this platform; only the optional "tcp6",
# "udp6" and "unix" keys are added conditionally below.
conn_tmap = {
    "all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
    "tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]),
    "tcp4": ([AF_INET], [SOCK_STREAM]),
    "udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]),
    "udp4": ([AF_INET], [SOCK_DGRAM]),
    "inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
    "inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),
    "inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
}

# IPv6-only selectors are available only where the platform has AF_INET6
if AF_INET6 is not None:
    conn_tmap.update({
        "tcp6": ([AF_INET6], [SOCK_STREAM]),
        "udp6": ([AF_INET6], [SOCK_DGRAM]),
    })

# UNIX domain sockets are likewise platform-dependent
if AF_UNIX is not None:
    conn_tmap.update({
        "unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
    })

# these names were only needed to build the map; keep the module
# namespace clean
del AF_INET, AF_INET6, AF_UNIX, SOCK_STREAM, SOCK_DGRAM, socket
+
+
# --- namedtuples for psutil.* system-related functions

# Return types shared by all platform implementations; field order is
# part of the public API and must not change.

# psutil.swap_memory()
sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',
                             'sout'])
# psutil.disk_usage()
sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent'])
# psutil.disk_io_counters()
sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
                                 'read_bytes', 'write_bytes',
                                 'read_time', 'write_time'])
# psutil.disk_partitions()
sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts'])
# psutil.net_io_counters()
snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',
                               'packets_sent', 'packets_recv',
                               'errin', 'errout',
                               'dropin', 'dropout'])
# psutil.users()
suser = namedtuple('suser', ['name', 'terminal', 'host', 'started'])
# psutil.net_connections()
sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',
                             'status', 'pid'])


# --- namedtuples for psutil.Process methods

# psutil.Process.memory_info()
pmem = namedtuple('pmem', ['rss', 'vms'])
# psutil.Process.cpu_times()
pcputimes = namedtuple('pcputimes', ['user', 'system'])
# psutil.Process.open_files()
popenfile = namedtuple('popenfile', ['path', 'fd'])
# psutil.Process.threads()
pthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])
# psutil.Process.uids()
puids = namedtuple('puids', ['real', 'effective', 'saved'])
# psutil.Process.gids()
pgids = namedtuple('pgids', ['real', 'effective', 'saved'])
# psutil.Process.io_counters()
pio = namedtuple('pio', ['read_count', 'write_count',
                         'read_bytes', 'write_bytes'])
# psutil.Process.ionice()
pionice = namedtuple('pionice', ['ioclass', 'value'])
# psutil.Process.ctx_switches()
pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])
+
+
# --- misc

# backward compatibility layer for Process.connections() ntuple
class pconn(
    namedtuple('pconn',
               ['fd', 'family', 'type', 'laddr', 'raddr', 'status'])):
    """Per-process connection ntuple which keeps the deprecated
    'local_address' / 'remote_address' aliases working.
    """
    __slots__ = ()

    @property
    def local_address(self):
        # fixed: the adjacent string literals used to concatenate to
        # "'laddr'instead" (missing space)
        warnings.warn("'local_address' field is deprecated; use 'laddr' "
                      "instead", category=DeprecationWarning, stacklevel=2)
        return self.laddr

    @property
    def remote_address(self):
        warnings.warn("'remote_address' field is deprecated; use 'raddr' "
                      "instead", category=DeprecationWarning, stacklevel=2)
        return self.raddr

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py
new file mode 100644
index 0000000..b6ac933
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_compat.py
@@ -0,0 +1,433 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module which provides compatibility with older Python versions."""
+
# names re-exported by "from psutil._compat import *"; some of them
# (namedtuple, wraps, ...) are defined further down in this module
__all__ = ["PY3", "int", "long", "xrange", "exec_", "callable", "namedtuple",
           "property", "wraps", "defaultdict", "update_wrapper", "lru_cache"]
+
import sys
try:
    import __builtin__
except ImportError:
    import builtins as __builtin__  # py3

# True when running under Python 3.x
PY3 = sys.version_info[0] == 3

if PY3:
    int = int
    long = int
    xrange = range
    unicode = str
    # 'exec' and 'print' are keywords on py2, so they must be fetched
    # from the builtins module via getattr
    exec_ = getattr(__builtin__, "exec")
    print_ = getattr(__builtin__, "print")

    def u(s):
        """Return a text version of s (no-op on py3)."""
        return s

    def b(s):
        """Return a bytes version of s."""
        return s.encode("latin-1")
else:
    int = int
    long = long
    xrange = xrange
    unicode = unicode

    def u(s):
        return unicode(s, "unicode_escape")

    def b(s):
        return s

    def exec_(code, globs=None, locs=None):
        """Equivalent of the py3 exec() builtin."""
        if globs is None:
            # BUGFIX: this previously read "_sys._getframe(1)", but
            # '_sys' is never defined in this scope (it is only
            # imported inside the namedtuple fallback below), which
            # made this branch raise NameError on py2.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        # the statement form is hidden in a string so that py3 can
        # still compile this module
        exec("""exec code in globs, locs""")

    def print_(s):
        sys.stdout.write(s + '\n')
        sys.stdout.flush()


# removed in 3.0, reintroduced in 3.2
try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
# --- stdlib additions

# py 2.6 collections.namedtuple
# Taken from: http://code.activestate.com/recipes/500261/
# Credits: Raymond Hettinger
try:
    from collections import namedtuple
except ImportError:
    # Python < 2.6 fallback: build the namedtuple class by generating
    # its source code and exec'ing it, exactly like the stdlib recipe.
    from operator import itemgetter as _itemgetter
    from keyword import iskeyword as _iskeyword
    import sys as _sys

    def namedtuple(typename, field_names, verbose=False, rename=False):
        """A collections.namedtuple implementation, see:
        http://docs.python.org/library/collections.html#namedtuple
        """
        # 'basestring' only exists on py2 -- this whole branch is dead
        # code on py3, where the stdlib import above always succeeds
        if isinstance(field_names, basestring):
            field_names = field_names.replace(',', ' ').split()
        field_names = tuple(map(str, field_names))
        if rename:
            # replace every invalid/duplicate field name with '_<index>'
            names = list(field_names)
            seen = set()
            for i, name in enumerate(names):
                if ((not min(c.isalnum() or c == '_' for c in name)
                        or _iskeyword(name)
                        or not name or name[0].isdigit()
                        or name.startswith('_')
                        or name in seen)):
                    names[i] = '_%d' % i
                seen.add(name)
            field_names = tuple(names)
        # validate the type name and every field name
        for name in (typename,) + field_names:
            if not min(c.isalnum() or c == '_' for c in name):
                raise ValueError('Type names and field names can only contain '
                                 'alphanumeric characters and underscores: %r'
                                 % name)
            if _iskeyword(name):
                raise ValueError('Type names and field names cannot be a '
                                 'keyword: %r' % name)
            if name[0].isdigit():
                raise ValueError('Type names and field names cannot start '
                                 'with a number: %r' % name)
        seen_names = set()
        for name in field_names:
            if name.startswith('_') and not rename:
                raise ValueError(
                    'Field names cannot start with an underscore: %r' % name)
            if name in seen_names:
                raise ValueError('Encountered duplicate field name: %r' % name)
            seen_names.add(name)

        # build the class source from a template
        numfields = len(field_names)
        argtxt = repr(field_names).replace("'", "")[1:-1]
        reprtxt = ', '.join('%s=%%r' % name for name in field_names)
        template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return _tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError(
                    'Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(self):
            'Return a new dict which maps field names to their values'
            return dict(zip(self._fields, self)) \n
        def _replace(_self, **kwds):
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError(
                    'Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
        # one property per field, in declaration order
        for i, name in enumerate(field_names):
            template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
        if verbose:
            # NOTE(review): uses 'sys' here while '_sys' was imported
            # above; works only because the module also imports 'sys'
            # at top level
            sys.stdout.write(template + '\n')
            sys.stdout.flush()

        # execute the generated source in a minimal namespace
        namespace = dict(
            _itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
            _property=property, _tuple=tuple)
        try:
            exec_(template, namespace)
        except SyntaxError:
            e = sys.exc_info()[1]
            raise SyntaxError(e.message + ':\n' + template)
        result = namespace[typename]
        # make the class appear to come from the caller's module, so
        # that pickling works
        try:
            result.__module__ = _sys._getframe(
                1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass

        return result
+
+
# hack to support property getter/setter/deleter on python < 2.6
# http://docs.python.org/library/functions.html?highlight=property#property
if hasattr(property, 'setter'):
    property = property
else:
    # Pre-2.6 interpreters lack property.getter/.setter/.deleter;
    # emulate them by returning a brand-new property each time with
    # one accessor replaced.
    class property(__builtin__.property):
        __metaclass__ = type

        def __init__(self, fget, *args, **kwargs):
            super(property, self).__init__(fget, *args, **kwargs)
            # preserve the getter's docstring on the property itself
            self.__doc__ = fget.__doc__

        def getter(self, method):
            return property(method, self.fset, self.fdel)

        def setter(self, method):
            return property(self.fget, method, self.fdel)

        def deleter(self, method):
            return property(self.fget, self.fset, method)
+
+
# py 2.5 collections.defaultdict
# Taken from:
# http://code.activestate.com/recipes/523034-emulate-collectionsdefaultdict/
# Credits: Jason Kirtland
try:
    from collections import defaultdict
except ImportError:
    class defaultdict(dict):
        """Dict subclass that calls a factory function to supply
        missing values:
        http://docs.python.org/library/collections.html#collections.defaultdict
        """

        def __init__(self, default_factory=None, *a, **kw):
            if ((default_factory is not None and
                    not hasattr(default_factory, '__call__'))):
                raise TypeError('first argument must be callable')
            dict.__init__(self, *a, **kw)
            self.default_factory = default_factory

        def __getitem__(self, key):
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                # delegate to __missing__, mirroring the stdlib class
                return self.__missing__(key)

        def __missing__(self, key):
            if self.default_factory is None:
                raise KeyError(key)
            # store and return a freshly created default value
            self[key] = value = self.default_factory()
            return value

        def __reduce__(self):
            # pickle support
            if self.default_factory is None:
                args = tuple()
            else:
                args = self.default_factory,
            return type(self), args, None, None, self.items()

        def copy(self):
            return self.__copy__()

        def __copy__(self):
            # shallow copy: same factory, same items
            return type(self)(self.default_factory, self)

        def __deepcopy__(self, memo):
            import copy
            return type(self)(self.default_factory,
                              copy.deepcopy(self.items()))

        def __repr__(self):
            return 'defaultdict(%s, %s)' % (self.default_factory,
                                            dict.__repr__(self))
+
+
# py 2.5 functools.wraps
try:
    from functools import wraps
except ImportError:
    def wraps(original):
        """Minimal functools.wraps fallback: copy the metadata of
        'original' onto the decorated function."""
        def inner(fn):
            # plain attribute copies
            for attribute in ['__module__', '__name__', '__doc__']:
                setattr(fn, attribute, getattr(original, attribute))
            # dict-like attributes are merged rather than replaced
            for attribute in ['__dict__']:
                if hasattr(fn, attribute):
                    getattr(fn, attribute).update(getattr(original, attribute))
                else:
                    setattr(fn, attribute,
                            getattr(original, attribute).copy())
            return fn
        return inner
+
+
# py 2.5 functools.update_wrapper
try:
    from functools import update_wrapper
except ImportError:
    # attributes copied verbatim / merged, same defaults as the stdlib
    WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
    WRAPPER_UPDATES = ('__dict__',)

    def update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS,
                       updated=WRAPPER_UPDATES):
        """Update a wrapper function to look like the wrapped function, see:
        http://docs.python.org/library/functools.html#functools.update_wrapper
        """
        for attr in assigned:
            setattr(wrapper, attr, getattr(wrapped, attr))
        for attr in updated:
            getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
        # the wrapper is returned so this can be used as a decorator
        return wrapper
+
+
# py 3.2 functools.lru_cache
# Taken from: http://code.activestate.com/recipes/578078
# Credit: Raymond Hettinger
try:
    from functools import lru_cache
except ImportError:
    try:
        from threading import RLock
    except ImportError:
        # interpreters built without thread support
        from dummy_threading import RLock

    # return type of wrapper.cache_info()
    _CacheInfo = namedtuple("CacheInfo",
                            ["hits", "misses", "maxsize", "currsize"])

    class _HashedSeq(list):
        # a list whose hash is computed once and then cached
        __slots__ = 'hashvalue'

        def __init__(self, tup, hash=hash):
            self[:] = tup
            self.hashvalue = hash(tup)

        def __hash__(self):
            return self.hashvalue

    def _make_key(args, kwds, typed,
                  kwd_mark=(object(), ),
                  fasttypes=set((int, str, frozenset, type(None))),
                  sorted=sorted, tuple=tuple, type=type, len=len):
        # flatten args + kwargs (+ their types, when 'typed') into a
        # single hashable key; kwd_mark separates the two sections
        key = args
        if kwds:
            sorted_items = sorted(kwds.items())
            key += kwd_mark
            for item in sorted_items:
                key += item
        if typed:
            key += tuple(type(v) for v in args)
            if kwds:
                key += tuple(type(v) for k, v in sorted_items)
        elif len(key) == 1 and type(key[0]) in fasttypes:
            # single plain-type argument: use it directly as the key
            return key[0]
        return _HashedSeq(key)

    def lru_cache(maxsize=100, typed=False):
        """Least-recently-used cache decorator, see:
        http://docs.python.org/3/library/functools.html#functools.lru_cache
        """
        def decorating_function(user_function):
            cache = dict()
            stats = [0, 0]                # [hits, misses] counters
            HITS, MISSES = 0, 1
            # bind frequently-used globals to locals for speed
            make_key = _make_key
            cache_get = cache.get
            _len = len
            lock = RLock()
            # 'root' is the sentinel of a circular doubly-linked list
            # holding [PREV, NEXT, KEY, RESULT] entries in LRU order
            root = []
            root[:] = [root, root, None, None]
            # boxed in a list because py2 has no 'nonlocal' statement
            nonlocal_root = [root]
            PREV, NEXT, KEY, RESULT = 0, 1, 2, 3
            if maxsize == 0:
                # caching disabled: just count misses
                def wrapper(*args, **kwds):
                    result = user_function(*args, **kwds)
                    stats[MISSES] += 1
                    return result
            elif maxsize is None:
                # unbounded cache: plain dict, no LRU bookkeeping
                def wrapper(*args, **kwds):
                    key = make_key(args, kwds, typed)
                    result = cache_get(key, root)
                    if result is not root:
                        stats[HITS] += 1
                        return result
                    result = user_function(*args, **kwds)
                    cache[key] = result
                    stats[MISSES] += 1
                    return result
            else:
                # bounded cache with true LRU eviction
                def wrapper(*args, **kwds):
                    if kwds or typed:
                        key = make_key(args, kwds, typed)
                    else:
                        key = args
                    lock.acquire()
                    try:
                        link = cache_get(key)
                        if link is not None:
                            # cache hit: move the link to the MRU
                            # position (just before root)
                            root, = nonlocal_root
                            link_prev, link_next, key, result = link
                            link_prev[NEXT] = link_next
                            link_next[PREV] = link_prev
                            last = root[PREV]
                            last[NEXT] = root[PREV] = link
                            link[PREV] = last
                            link[NEXT] = root
                            stats[HITS] += 1
                            return result
                    finally:
                        lock.release()
                    # miss: call the user function outside the lock
                    result = user_function(*args, **kwds)
                    lock.acquire()
                    try:
                        root, = nonlocal_root
                        if key in cache:
                            # another thread filled this key meanwhile
                            pass
                        elif _len(cache) >= maxsize:
                            # reuse the old root as the new entry and
                            # make the oldest link the new root
                            oldroot = root
                            oldroot[KEY] = key
                            oldroot[RESULT] = result
                            root = nonlocal_root[0] = oldroot[NEXT]
                            oldkey = root[KEY]
                            root[KEY] = root[RESULT] = None
                            del cache[oldkey]
                            cache[key] = oldroot
                        else:
                            # room left: append at the MRU position
                            last = root[PREV]
                            link = [last, root, key, result]
                            last[NEXT] = root[PREV] = cache[key] = link
                        stats[MISSES] += 1
                    finally:
                        lock.release()
                    return result

            def cache_info():
                """Report cache statistics"""
                lock.acquire()
                try:
                    return _CacheInfo(stats[HITS], stats[MISSES], maxsize,
                                      len(cache))
                finally:
                    lock.release()

            def cache_clear():
                """Clear the cache and cache statistics"""
                lock.acquire()
                try:
                    cache.clear()
                    # reset the linked list to just the sentinel
                    root = nonlocal_root[0]
                    root[:] = [root, root, None, None]
                    stats[:] = [0, 0]
                finally:
                    lock.release()

            wrapper.__wrapped__ = user_function
            wrapper.cache_info = cache_info
            wrapper.cache_clear = cache_clear
            return update_wrapper(wrapper, user_function)

        return decorating_function

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py
new file mode 100644
index 0000000..5663736
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_psbsd.py
@@ -0,0 +1,389 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""FreeBSD platform implementation."""
+
+import errno
+import os
+import sys
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import conn_tmap, usage_percent
+from psutil._compat import namedtuple, wraps
+import _psutil_bsd as cext
+import _psutil_posix
+
+
# extra names exported on top of the common psutil API (none on BSD)
__extra__all__ = []

# --- constants

# kernel process state -> psutil status string
PROC_STATUSES = {
    cext.SSTOP: _common.STATUS_STOPPED,
    cext.SSLEEP: _common.STATUS_SLEEPING,
    cext.SRUN: _common.STATUS_RUNNING,
    cext.SIDL: _common.STATUS_IDLE,
    cext.SWAIT: _common.STATUS_WAITING,
    cext.SLOCK: _common.STATUS_LOCKED,
    cext.SZOMB: _common.STATUS_ZOMBIE,
}

# kernel TCP state -> psutil connection status string
TCP_STATUSES = {
    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
    cext.TCPS_CLOSED: _common.CONN_CLOSE,
    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
    cext.TCPS_LISTEN: _common.CONN_LISTEN,
    cext.TCPS_CLOSING: _common.CONN_CLOSING,
    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
}

# memory page size in bytes; used below to convert page counts
# reported by the C extension into bytes
PAGESIZE = os.sysconf("SC_PAGE_SIZE")

# extend base mem ntuple with BSD-specific memory metrics
svmem = namedtuple(
    'svmem', ['total', 'available', 'percent', 'used', 'free',
              'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])
scputimes = namedtuple(
    'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])
pextmem = namedtuple('pextmem', ['rss', 'vms', 'text', 'data', 'stack'])
pmmap_grouped = namedtuple(
    'pmmap_grouped', 'path rss, private, ref_count, shadow_count')
pmmap_ext = namedtuple(
    'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count')

# set later from __init__.py; these hold the psutil exception classes
# (they stay None when this module is imported privately)
NoSuchProcess = None
AccessDenied = None
TimeoutExpired = None
+
+
def virtual_memory():
    """System virtual memory as a namedtuple."""
    # (fixed docstring typo: "namedutple")
    mem = cext.virtual_mem()
    total, free, active, inactive, wired, cached, buffers, shared = mem
    # "available" = free + pages presumed reclaimable (inactive, cached)
    # -- TODO confirm against the BSD VM accounting docs
    avail = inactive + cached + free
    used = active + wired + cached
    percent = usage_percent((total - avail), total, _round=1)
    return svmem(total, avail, percent, used, free,
                 active, inactive, buffers, cached, shared, wired)
+
+
def swap_memory():
    """System swap memory as (total, used, free, sin, sout) namedtuple."""
    # the C extension reports these values in pages; convert to bytes
    pages = cext.swap_mem()
    total, used, free, sin, sout = (n * PAGESIZE for n in pages)
    percent = usage_percent(used, total, _round=1)
    return _common.sswap(total, used, free, percent, sin, sout)
+
+
def cpu_times():
    """Return system per-CPU times as a named tuple"""
    # the C extension returns the five scputimes fields in order
    values = cext.cpu_times()
    return scputimes(*values)
+
+
if hasattr(cext, "per_cpu_times"):
    def per_cpu_times():
        """Return system CPU times as a named tuple"""
        ret = []
        for cpu_t in cext.per_cpu_times():
            user, nice, system, idle, irq = cpu_t
            item = scputimes(user, nice, system, idle, irq)
            ret.append(item)
        return ret
else:
    # XXX
    # Ok, this is very dirty.
    # On FreeBSD < 8 we cannot gather per-cpu information, see:
    # http://code.google.com/p/psutil/issues/detail?id=226
    # If num cpus > 1, on first call we return single cpu times to avoid a
    # crash at psutil import time.
    # Next calls will fail with NotImplementedError
    def per_cpu_times():
        """Fallback for FreeBSD < 8: only the first call succeeds
        (unless there is a single logical CPU, in which case the
        aggregate times are always returned)."""
        if cpu_count_logical() == 1:
            return [cpu_times()]
        if per_cpu_times.__called__:
            raise NotImplementedError("supported only starting from FreeBSD 8")
        per_cpu_times.__called__ = True
        return [cpu_times()]

    # function attribute used as a "was already called once" flag
    per_cpu_times.__called__ = False
+
+
def cpu_count_logical():
    """Return the number of logical CPUs in the system."""
    # thin wrapper; the actual work happens in the C extension
    return cext.cpu_count_logical()
+
+
def cpu_count_physical():
    """Return the number of physical CPUs in the system."""
    # From the C module we'll get an XML string similar to this:
    # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
    # We may get None in case "sysctl kern.sched.topology_spec"
    # is not supported on this BSD version, in which case we'll mimic
    # os.cpu_count() and return None.
    s = cext.cpu_count_phys()
    if s is None:
        return None
    # get rid of padding chars appended at the end of the string
    index = s.rfind("</groups>")
    if index == -1:
        return None
    s = s[:index + len("</groups>")]
    if sys.version_info >= (2, 5):
        import xml.etree.ElementTree as ET
        root = ET.fromstring(s)
        return len(root.findall('group/children/group/cpu')) or None
    else:
        # ElementTree is unavailable before python 2.5; fall back to
        # naive substring counting
        s = s[s.find('<children>'):]
        return s.count("<cpu") or None
+
+
def boot_time():
    """The system boot time expressed in seconds since the epoch."""
    # value comes straight from the C extension
    return cext.boot_time()
+
+
def disk_partitions(all=False):
    """Return mounted disk partitions as a list of sdiskpart ntuples.

    When 'all' is False, entries whose device is not an existing
    absolute path are skipped.
    """
    retlist = []
    for device, mountpoint, fstype, opts in cext.disk_partitions():
        if device == 'none':
            device = ''
        if not all and (not os.path.isabs(device)
                        or not os.path.exists(device)):
            continue
        retlist.append(_common.sdiskpart(device, mountpoint, fstype, opts))
    return retlist
+
+
def users():
    """Return currently logged-in users as a list of suser ntuples."""
    retlist = []
    for user, tty, hostname, tstamp in cext.users():
        if tty == '~':
            # reboot or shutdown pseudo-entries
            continue
        retlist.append(_common.suser(user, tty or None, hostname, tstamp))
    return retlist
+
+
def net_connections(kind):
    """Return system-wide open connections filtered by 'kind'.

    'kind' must be one of the conn_tmap keys ("inet", "tcp", "udp",
    ...); ValueError is raised otherwise.
    """
    # consistency fix: use the directly-imported conn_tmap everywhere
    # (the check previously went through _common.conn_tmap while the
    # rest of the function used the imported name); also re-aligned
    # the continuation line of the raise statement
    if kind not in conn_tmap:
        raise ValueError("invalid %r kind argument; choose between %s"
                         % (kind, ', '.join([repr(x) for x in conn_tmap])))
    families, types = conn_tmap[kind]
    ret = []
    rawlist = cext.net_connections()
    for item in rawlist:
        fd, fam, type, laddr, raddr, status, pid = item
        # TODO: apply filter at C level
        if fam in families and type in types:
            status = TCP_STATUSES[status]
            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
            ret.append(nt)
    return ret
+
+
# Plain pass-throughs: these need no BSD-specific post-processing, so the
# C-extension / generic POSIX implementations are exposed directly.
pids = cext.pids
pid_exists = _psposix.pid_exists
disk_usage = _psposix.disk_usage
net_io_counters = cext.net_io_counters
disk_io_counters = cext.disk_io_counters
+
+
def wrap_exceptions(fun):
    """Decorator which translates bare OSError exceptions into
    NoSuchProcess and AccessDenied.
    """
    @wraps(fun)
    def wrapper(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except OSError:
            # support for private module import: the psutil exception
            # classes may not have been set on this module yet
            if NoSuchProcess is None or AccessDenied is None:
                raise
            # sys.exc_info() keeps this compatible with old interpreters
            exc = sys.exc_info()[1]
            if exc.errno == errno.ESRCH:
                raise NoSuchProcess(self.pid, self._name)
            elif exc.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._name)
            else:
                raise
    return wrapper
+
+
class Process(object):
    """Wrapper class around underlying C implementation."""

    __slots__ = ["pid", "_name"]

    def __init__(self, pid):
        self.pid = pid
        self._name = None  # used by wrap_exceptions() when raising

    @wrap_exceptions
    def name(self):
        """The process name."""
        return cext.proc_name(self.pid)

    @wrap_exceptions
    def exe(self):
        """Path to the process executable."""
        return cext.proc_exe(self.pid)

    @wrap_exceptions
    def cmdline(self):
        """The command line the process was invoked with."""
        return cext.proc_cmdline(self.pid)

    @wrap_exceptions
    def terminal(self):
        """The terminal attached to the process, or None."""
        tty_nr = cext.proc_tty_nr(self.pid)
        tmap = _psposix._get_terminal_map()
        try:
            return tmap[tty_nr]
        except KeyError:
            # tty number not backed by any terminal device
            return None

    @wrap_exceptions
    def ppid(self):
        """The parent process PID."""
        return cext.proc_ppid(self.pid)

    @wrap_exceptions
    def uids(self):
        """Real, effective and saved user IDs as a puids namedtuple."""
        real, effective, saved = cext.proc_uids(self.pid)
        return _common.puids(real, effective, saved)

    @wrap_exceptions
    def gids(self):
        """Real, effective and saved group IDs as a pgids namedtuple."""
        real, effective, saved = cext.proc_gids(self.pid)
        return _common.pgids(real, effective, saved)

    @wrap_exceptions
    def cpu_times(self):
        """User and system CPU times as a pcputimes namedtuple."""
        user, system = cext.proc_cpu_times(self.pid)
        return _common.pcputimes(user, system)

    @wrap_exceptions
    def memory_info(self):
        """RSS and VMS memory usage as a pmem namedtuple."""
        rss, vms = cext.proc_memory_info(self.pid)[:2]
        return _common.pmem(rss, vms)

    @wrap_exceptions
    def memory_info_ex(self):
        """Extended memory info as a pextmem namedtuple."""
        return pextmem(*cext.proc_memory_info(self.pid))

    @wrap_exceptions
    def create_time(self):
        """The process creation time."""
        return cext.proc_create_time(self.pid)

    @wrap_exceptions
    def num_threads(self):
        """Number of threads used by the process."""
        return cext.proc_num_threads(self.pid)

    @wrap_exceptions
    def num_ctx_switches(self):
        """Context switches performed by the process, as a pctxsw
        namedtuple."""
        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))

    @wrap_exceptions
    def threads(self):
        """Return process threads as a list of pthread namedtuples."""
        rawlist = cext.proc_threads(self.pid)
        retlist = []
        for thread_id, utime, stime in rawlist:
            ntuple = _common.pthread(thread_id, utime, stime)
            retlist.append(ntuple)
        return retlist

    @wrap_exceptions
    def connections(self, kind='inet'):
        """Return connections opened by the process as pconn namedtuples;
        *kind* must be one of the conn_tmap keys.
        """
        if kind not in conn_tmap:
            raise ValueError("invalid %r kind argument; choose between %s"
                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
        families, types = conn_tmap[kind]
        rawlist = cext.proc_connections(self.pid, families, types)
        ret = []
        for item in rawlist:
            fd, fam, type, laddr, raddr, status = item
            status = TCP_STATUSES[status]
            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
            ret.append(nt)
        return ret

    @wrap_exceptions
    def wait(self, timeout=None):
        """Wait for the process to terminate; see _psposix.wait_pid()."""
        try:
            return _psposix.wait_pid(self.pid, timeout)
        except _psposix.TimeoutExpired:
            # support for private module import
            if TimeoutExpired is None:
                raise
            raise TimeoutExpired(timeout, self.pid, self._name)

    @wrap_exceptions
    def nice_get(self):
        """Return the process niceness (priority) value."""
        return _psutil_posix.getpriority(self.pid)

    @wrap_exceptions
    def nice_set(self, value):
        """Set the process niceness (priority) value."""
        return _psutil_posix.setpriority(self.pid, value)

    @wrap_exceptions
    def status(self):
        """The process status as one of the STATUS_* strings."""
        code = cext.proc_status(self.pid)
        if code in PROC_STATUSES:
            return PROC_STATUSES[code]
        # XXX is this legit? will we even ever get here?
        return "?"

    @wrap_exceptions
    def io_counters(self):
        """Read/write counts and bytes as a pio namedtuple."""
        rc, wc, rb, wb = cext.proc_io_counters(self.pid)
        return _common.pio(rc, wc, rb, wb)

    nt_mmap_grouped = namedtuple(
        'mmap', 'path rss, private, ref_count, shadow_count')
    nt_mmap_ext = namedtuple(
        'mmap', 'addr, perms path rss, private, ref_count, shadow_count')

    # FreeBSD < 8 does not support functions based on kinfo_getfile()
    # and kinfo_getvmmap()
    if hasattr(cext, 'proc_open_files'):

        @wrap_exceptions
        def open_files(self):
            """Return files opened by process as a list of namedtuples."""
            rawlist = cext.proc_open_files(self.pid)
            return [_common.popenfile(path, fd) for path, fd in rawlist]

        @wrap_exceptions
        def cwd(self):
            """Return process current working directory."""
            # sometimes we get an empty string, in which case we turn
            # it into None
            return cext.proc_cwd(self.pid) or None

        @wrap_exceptions
        def memory_maps(self):
            """Return process memory maps as reported by the C extension."""
            return cext.proc_memory_maps(self.pid)

        @wrap_exceptions
        def num_fds(self):
            """Return the number of file descriptors opened by this process."""
            return cext.proc_num_fds(self.pid)

    else:
        def _not_implemented(self):
            raise NotImplementedError("supported only starting from FreeBSD 8")

        open_files = _not_implemented
        # bug fix: this fallback was previously bound as 'proc_cwd', so on
        # FreeBSD < 8 calling the public cwd() raised AttributeError
        # instead of NotImplementedError
        cwd = _not_implemented
        memory_maps = _not_implemented
        num_fds = _not_implemented

http://git-wip-us.apache.org/repos/asf/ambari/blob/865d187e/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py
new file mode 100644
index 0000000..d20b267
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/psutil/_pslinux.py
@@ -0,0 +1,1225 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Linux platform implementation."""
+
+from __future__ import division
+
+import base64
+import errno
+import os
+import re
+import socket
+import struct
+import sys
+import warnings
+
+from psutil import _common
+from psutil import _psposix
+from psutil._common import (isfile_strict, usage_percent, deprecated)
+from psutil._compat import PY3, xrange, namedtuple, wraps, b, defaultdict
+import _psutil_linux as cext
+import _psutil_posix
+
+
# Additional public names exposed by this Linux implementation module on
# top of the common psutil API.
__extra__all__ = [
    # io prio constants
    "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
    "IOPRIO_CLASS_IDLE",
    # connection status constants
    "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
    "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
    "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
    # other
    "phymem_buffers", "cached_phymem"]
+
+
+# --- constants
+
# True when the C extension was built with prlimit() support.
HAS_PRLIMIT = hasattr(cext, "linux_prlimit")

# RLIMIT_* constants, not guaranteed to be present on all kernels;
# re-export whichever ones the C extension defines
if HAS_PRLIMIT:
    for name in dir(cext):
        if name.startswith('RLIM'):
            __extra__all__.append(name)

# Number of clock ticks per second
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
BOOT_TIME = None  # set later by boot_time()
DEFAULT_ENCODING = sys.getdefaultencoding()

# ioprio_* constants http://linux.die.net/man/2/ioprio_get
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3

# maps /proc/[pid]/stat single-letter status codes to the portable
# STATUS_* strings; taken from /fs/proc/array.c
PROC_STATUSES = {
    "R": _common.STATUS_RUNNING,
    "S": _common.STATUS_SLEEPING,
    "D": _common.STATUS_DISK_SLEEP,
    "T": _common.STATUS_STOPPED,
    "t": _common.STATUS_TRACING_STOP,
    "Z": _common.STATUS_ZOMBIE,
    "X": _common.STATUS_DEAD,
    "x": _common.STATUS_DEAD,
    "K": _common.STATUS_WAKE_KILL,
    "W": _common.STATUS_WAKING
}

# maps the hex state codes found in /proc/net/tcp* to the portable
# CONN_* strings; see:
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
TCP_STATUSES = {
    "01": _common.CONN_ESTABLISHED,
    "02": _common.CONN_SYN_SENT,
    "03": _common.CONN_SYN_RECV,
    "04": _common.CONN_FIN_WAIT1,
    "05": _common.CONN_FIN_WAIT2,
    "06": _common.CONN_TIME_WAIT,
    "07": _common.CONN_CLOSE,
    "08": _common.CONN_CLOSE_WAIT,
    "09": _common.CONN_LAST_ACK,
    "0A": _common.CONN_LISTEN,
    "0B": _common.CONN_CLOSING
}

# psutil exception classes; set later from __init__.py (they stay None
# when this module is imported privately)
NoSuchProcess = None
AccessDenied = None
TimeoutExpired = None
+
+
+# --- named tuples
+
+def _get_cputimes_fields():
+    """Return a namedtuple of variable fields depending on the
+    CPU times available on this Linux kernel version which may be:
+    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
+     [guest_nice]]])
+    """
+    f = open('/proc/stat', 'rb')
+    try:
+        values = f.readline().split()[1:]
+    finally:
+        f.close()
+    fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
+    vlen = len(values)
+    if vlen >= 8:
+        # Linux >= 2.6.11
+        fields.append('steal')
+    if vlen >= 9:
+        # Linux >= 2.6.24
+        fields.append('guest')
+    if vlen >= 10:
+        # Linux >= 3.2.0
+        fields.append('guest_nice')
+    return fields
+
+
# system-wide CPU times; the exact fields depend on the running kernel
scputimes = namedtuple('scputimes', _get_cputimes_fields())

# system virtual memory stats returned by virtual_memory()
svmem = namedtuple(
    'svmem', ['total', 'available', 'percent', 'used', 'free',
              'active', 'inactive', 'buffers', 'cached'])

# extended per-process memory stats
pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')

# per-process memory maps, grouped by mapped path
pmmap_grouped = namedtuple(
    'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',
                      'shared_dirty', 'private_clean', 'private_dirty',
                      'referenced', 'anonymous', 'swap'])

# per-process memory maps, one entry per mapping (adds address and perms)
pmmap_ext = namedtuple(
    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+
+# --- system memory
+
def virtual_memory():
    """Return system virtual memory stats as a svmem namedtuple.

    total/free/buffers/shared come from sysinfo(2) via the C extension;
    cached/active/inactive are parsed out of /proc/meminfo.
    """
    total, free, buffers, shared, _, _ = cext.linux_sysinfo()
    cached = active = inactive = None
    f = open('/proc/meminfo', 'rb')
    # bytes literals, as the file is opened in binary mode
    CACHED, ACTIVE, INACTIVE = b("Cached:"), b("Active:"), b("Inactive:")
    try:
        for line in f:
            # the parsed values are multiplied by 1024 (i.e. they are
            # assumed to be expressed in kB)
            if line.startswith(CACHED):
                cached = int(line.split()[1]) * 1024
            elif line.startswith(ACTIVE):
                active = int(line.split()[1]) * 1024
            elif line.startswith(INACTIVE):
                inactive = int(line.split()[1]) * 1024
            # stop reading as soon as all three fields were collected
            if (cached is not None
                    and active is not None
                    and inactive is not None):
                break
        else:
            # for/else: runs only if the loop was NOT broken, i.e. some
            # field is missing from /proc/meminfo;
            # we might get here when dealing with exotic Linux flavors, see:
            # http://code.google.com/p/psutil/issues/detail?id=313
            msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
                  "be determined and were set to 0"
            warnings.warn(msg, RuntimeWarning)
            cached = active = inactive = 0
    finally:
        f.close()
    avail = free + buffers + cached
    used = total - free
    percent = usage_percent((total - avail), total, _round=1)
    return svmem(total, avail, percent, used, free,
                 active, inactive, buffers, cached)
+
+
def swap_memory():
    """Return swap memory stats as a sswap namedtuple.

    Totals come from sysinfo(2) via the C extension; swap-in/swap-out
    counters are parsed out of /proc/vmstat.
    """
    _, _, _, _, total, free = cext.linux_sysinfo()
    used = total - free
    percent = usage_percent(used, total, _round=1)
    # get pgin/pgouts
    f = open("/proc/vmstat", "rb")
    SIN, SOUT = b('pswpin'), b('pswpout')
    sin = sout = None
    try:
        for line in f:
            # values are expressed in 4 kilo bytes, we want bytes instead
            if line.startswith(SIN):
                sin = int(line.split(b(' '))[1]) * 4 * 1024
            elif line.startswith(SOUT):
                sout = int(line.split(b(' '))[1]) * 4 * 1024
            # stop reading once both counters were found
            if sin is not None and sout is not None:
                break
        else:
            # for/else: runs only if the loop was NOT broken;
            # we might get here when dealing with exotic Linux flavors, see:
            # http://code.google.com/p/psutil/issues/detail?id=313
            msg = "'sin' and 'sout' swap memory stats couldn't " \
                  "be determined and were set to 0"
            warnings.warn(msg, RuntimeWarning)
            sin = sout = 0
    finally:
        f.close()
    return _common.sswap(total, used, free, percent, sin, sout)
+
+
@deprecated(replacement='psutil.virtual_memory().cached')
def cached_phymem():
    """Deprecated: use psutil.virtual_memory().cached instead."""
    mem = virtual_memory()
    return mem.cached
+
+
@deprecated(replacement='psutil.virtual_memory().buffers')
def phymem_buffers():
    """Deprecated: use psutil.virtual_memory().buffers instead."""
    mem = virtual_memory()
    return mem.buffers
+
+
+# --- CPUs
+
def cpu_times():
    """Return a named tuple representing the following system-wide
    CPU times:
    (user, nice, system, idle, iowait, irq, softirq [steal, [guest,
     [guest_nice]]])
    Last 3 fields may not be available on all Linux kernel versions.
    """
    f = open('/proc/stat', 'rb')
    try:
        values = f.readline().split()
    finally:
        f.close()
    nfields = len(scputimes._fields)
    # first column is the "cpu" label; convert ticks to seconds
    times = [float(x) / CLOCK_TICKS for x in values[1:nfields + 1]]
    return scputimes(*times)
+
+
def per_cpu_times():
    """Return a list of namedtuple representing the CPU times
    for every CPU available on the system.
    """
    results = []
    f = open('/proc/stat', 'rb')
    try:
        # the first line aggregates all CPUs; skip it
        f.readline()
        prefix = b('cpu')
        nfields = len(scputimes._fields)
        for line in f:
            if not line.startswith(prefix):
                continue
            raw = line.split()[1:nfields + 1]
            times = [float(x) / CLOCK_TICKS for x in raw]
            results.append(scputimes(*times))
        return results
    finally:
        f.close()
+
+
def cpu_count_logical():
    """Return the number of logical CPUs in the system, or None if it
    cannot be determined (mimicking os.cpu_count()).
    """
    try:
        num = os.sysconf("SC_NPROCESSORS_ONLN")
    except ValueError:
        num = 0
    else:
        # bug fix: sysconf() may also signal "unknown" by returning a
        # non-positive value instead of raising ValueError; the original
        # code returned that bogus value verbatim
        if num > 0:
            return num
        num = 0

    # as a second fallback we try to parse /proc/cpuinfo
    f = open('/proc/cpuinfo', 'rb')
    try:
        lines = f.readlines()
    finally:
        f.close()
    PROCESSOR = b('processor')
    for line in lines:
        if line.lower().startswith(PROCESSOR):
            num += 1

    # unknown format (e.g. amrel/sparc architectures), see:
    # http://code.google.com/p/psutil/issues/detail?id=200
    # try to parse /proc/stat as a last resort
    if num == 0:
        f = open('/proc/stat', 'rt')
        try:
            lines = f.readlines()
        finally:
            f.close()
        # raw string: '\d' is a regex escape, not a string escape
        search = re.compile(r'cpu\d')
        for line in lines:
            line = line.split(' ')[0]
            if search.match(line):
                num += 1

    if num == 0:
        # mimic os.cpu_count()
        return None
    return num
+
+
def cpu_count_physical():
    """Return the number of physical CPUs in the system."""
    f = open('/proc/cpuinfo', 'rb')
    try:
        lines = f.readlines()
    finally:
        f.close()
    marker = b('physical id')
    # distinct "physical id" lines identify distinct physical packages
    ids = set()
    for line in lines:
        if line.lower().startswith(marker):
            ids.add(line.strip())
    # mimic os.cpu_count() by returning None when nothing was found
    return len(ids) or None
+
+
+# --- other system functions
+
def users():
    """Return currently connected users as a list of namedtuples."""
    result = []
    for user, tty, hostname, tstamp, user_process in cext.users():
        # the underlying C function also yields entries about system
        # boot, run level and others; keep genuine user processes only
        # (we might want to use the extra entries in the future)
        if not user_process:
            continue
        if hostname == ':0.0':
            hostname = 'localhost'
        result.append(_common.suser(user, tty or None, hostname, tstamp))
    return result
+
+
def boot_time():
    """Return the system boot time expressed in seconds since the epoch.

    Also caches the value in the module-level BOOT_TIME global.
    """
    global BOOT_TIME
    f = open('/proc/stat', 'rb')
    try:
        prefix = b('btime')
        for line in f:
            if line.startswith(prefix):
                BOOT_TIME = float(line.strip().split()[1])
                return BOOT_TIME
        raise RuntimeError("line 'btime' not found")
    finally:
        f.close()
+
+
+# --- processes
+
def pids():
    """Returns a list of PIDs currently running on the system."""
    # every numeric entry under /proc is a process
    return [int(name) for name in os.listdir(b('/proc')) if name.isdigit()]
+
+
def pid_exists(pid):
    """Return True if the given PID refers to an existing process."""
    return _psposix.pid_exists(pid)
+
+
+# --- network
+
class Connections:
    """A wrapper on top of /proc/net/* files, retrieving per-process
    and system-wide open connections (TCP, UDP, UNIX) similarly to
    "netstat -an".

    Note: in case of UNIX sockets we're only able to determine the
    local endpoint/path, not the one it's connected to.
    According to [1] it would be possible but not easily.

    [1] http://serverfault.com/a/417946
    """

    def __init__(self):
        # map each "kind" string accepted by retrieve() to the
        # (/proc/net file name, address family, socket type) triplets
        # it covers
        tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
        tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
        udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
        udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
        unix = ("unix", socket.AF_UNIX, None)
        self.tmap = {
            "all": (tcp4, tcp6, udp4, udp6, unix),
            "tcp": (tcp4, tcp6),
            "tcp4": (tcp4,),
            "tcp6": (tcp6,),
            "udp": (udp4, udp6),
            "udp4": (udp4,),
            "udp6": (udp6,),
            "unix": (unix,),
            "inet": (tcp4, tcp6, udp4, udp6),
            "inet4": (tcp4, udp4),
            "inet6": (tcp6, udp6),
        }

    def get_proc_inodes(self, pid):
        """Return a {inode: [(pid, fd), ...]} dict for the socket file
        descriptors opened by the given process.
        """
        inodes = defaultdict(list)
        for fd in os.listdir("/proc/%s/fd" % pid):
            try:
                inode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
            except OSError:
                # the fd was closed (or became unreadable) in the
                # meantime; just skip it
                continue
            else:
                if inode.startswith('socket:['):
                    # the process is using a socket;
                    # strip the 'socket:[' prefix and trailing ']'
                    inode = inode[8:][:-1]
                    inodes[inode].append((pid, int(fd)))
        return inodes

    def get_all_inodes(self):
        """Return the merged get_proc_inodes() dicts for every PID on
        the system."""
        inodes = {}
        for pid in pids():
            try:
                inodes.update(self.get_proc_inodes(pid))
            except OSError:
                # os.listdir() is gonna raise a lot of access denied
                # exceptions in case of unprivileged user; that's fine
                # as we'll just end up returning a connection with PID
                # and fd set to None anyway.
                # Both netstat -an and lsof does the same so it's
                # unlikely we can do any better.
                # ENOENT just means a PID disappeared on us.
                err = sys.exc_info()[1]
                if err.errno not in (errno.ENOENT, errno.EPERM, errno.EACCES):
                    raise
        return inodes

    def decode_address(self, addr, family):
        """Accept an "ip:port" address as displayed in /proc/net/*
        and convert it into a human readable form, like:

        "0500000A:0016" -> ("10.0.0.5", 22)
        "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)

        The IP address portion is a little or big endian four-byte
        hexadecimal number; that is, the least significant byte is listed
        first, so we need to reverse the order of the bytes to convert it
        to an IP address.
        The port is represented as a two-byte hexadecimal number.

        Reference:
        http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
        """
        ip, port = addr.split(':')
        port = int(port, 16)
        if PY3:
            # b16decode() wants bytes on Python 3
            ip = ip.encode('ascii')
        # this usually refers to a local socket in listen mode with
        # no end-points connected
        if not port:
            return ()
        if family == socket.AF_INET:
            # see: http://code.google.com/p/psutil/issues/detail?id=201
            if sys.byteorder == 'little':
                ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
            else:
                ip = socket.inet_ntop(family, base64.b16decode(ip))
        else:  # IPv6
            # old version - let's keep it, just in case...
            # ip = ip.decode('hex')
            # return socket.inet_ntop(socket.AF_INET6,
            #          ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
            ip = base64.b16decode(ip)
            # see: http://code.google.com/p/psutil/issues/detail?id=201
            if sys.byteorder == 'little':
                # swap byte order within each 32-bit group
                ip = socket.inet_ntop(
                    socket.AF_INET6,
                    struct.pack('>4I', *struct.unpack('<4I', ip)))
            else:
                ip = socket.inet_ntop(
                    socket.AF_INET6,
                    struct.pack('<4I', *struct.unpack('<4I', ip)))
        return (ip, port)

    def process_inet(self, file, family, type_, inodes, filter_pid=None):
        """Parse /proc/net/tcp* and /proc/net/udp* files."""
        if file.endswith('6') and not os.path.exists(file):
            # IPv6 not supported
            return
        f = open(file, 'rt')
        try:
            f.readline()  # skip the first line (column headers)
            for line in f:
                _, laddr, raddr, status, _, _, _, _, _, inode = \
                    line.split()[:10]
                if inode in inodes:
                    # We assume inet sockets are unique, so we error
                    # out if there are multiple references to the
                    # same inode. We won't do this for UNIX sockets.
                    # NOTE(review): 'type_' holds a socket type
                    # (SOCK_STREAM/SOCK_DGRAM) while AF_UNIX is an
                    # address family, so this guard probably doesn't do
                    # what the comment above says - confirm intent.
                    if len(inodes[inode]) > 1 and type_ != socket.AF_UNIX:
                        raise ValueError("ambiguos inode with multiple "
                                         "PIDs references")
                    pid, fd = inodes[inode][0]
                else:
                    pid, fd = None, -1
                if filter_pid is not None and filter_pid != pid:
                    continue
                else:
                    if type_ == socket.SOCK_STREAM:
                        status = TCP_STATUSES[status]
                    else:
                        # UDP sockets carry no TCP state
                        status = _common.CONN_NONE
                    laddr = self.decode_address(laddr, family)
                    raddr = self.decode_address(raddr, family)
                    yield (fd, family, type_, laddr, raddr, status, pid)
        finally:
            f.close()

    def process_unix(self, file, family, inodes, filter_pid=None):
        """Parse /proc/net/unix files."""
        f = open(file, 'rt')
        try:
            f.readline()  # skip the first line (column headers)
            for line in f:
                tokens = line.split()
                _, _, _, _, type_, _, inode = tokens[0:7]
                if inode in inodes:
                    # With UNIX sockets we can have a single inode
                    # referencing many file descriptors.
                    pairs = inodes[inode]
                else:
                    pairs = [(None, -1)]
                for pid, fd in pairs:
                    if filter_pid is not None and filter_pid != pid:
                        continue
                    else:
                        # the (optional) 8th column is the bound path
                        if len(tokens) == 8:
                            path = tokens[-1]
                        else:
                            path = ""
                        type_ = int(type_)
                        raddr = None
                        status = _common.CONN_NONE
                        yield (fd, family, type_, path, raddr, status, pid)
        finally:
            f.close()

    def retrieve(self, kind, pid=None):
        """Return connections as a list of namedtuples.

        *kind* must be one of the self.tmap keys.  When *pid* is given
        only connections of that process are returned (as pconn tuples,
        without a pid field), otherwise system-wide ones (as sconn
        tuples, including the owning pid).
        """
        if kind not in self.tmap:
            raise ValueError("invalid %r kind argument; choose between %s"
                             % (kind, ', '.join([repr(x) for x in self.tmap])))
        if pid is not None:
            inodes = self.get_proc_inodes(pid)
            if not inodes:
                # no connections for this process
                return []
        else:
            inodes = self.get_all_inodes()
        ret = []
        for f, family, type_ in self.tmap[kind]:
            if family in (socket.AF_INET, socket.AF_INET6):
                ls = self.process_inet(
                    "/proc/net/%s" % f, family, type_, inodes, filter_pid=pid)
            else:
                ls = self.process_unix(
                    "/proc/net/%s" % f, family, inodes, filter_pid=pid)
            for fd, family, type_, laddr, raddr, status, bound_pid in ls:
                if pid:
                    conn = _common.pconn(fd, family, type_, laddr, raddr,
                                         status)
                else:
                    conn = _common.sconn(fd, family, type_, laddr, raddr,
                                         status, bound_pid)
                ret.append(conn)
        return ret
+
+
# module-level singleton, reused by net_connections() across calls
_connections = Connections()
+
+
def net_connections(kind='inet'):
    """Return system-wide open connections, filtered by *kind*."""
    return _connections.retrieve(kind)
+
+
def net_io_counters():
    """Return network I/O statistics for every network interface
    installed on the system as a dict of raw tuples.
    """
    f = open("/proc/net/dev", "rt")
    try:
        lines = f.readlines()
    finally:
        f.close()

    retdict = {}
    # the first two lines of /proc/net/dev are column headers
    for line in lines[2:]:
        colon = line.rfind(':')
        assert colon > 0, repr(line)
        name = line[:colon].strip()
        fields = line[colon + 1:].strip().split()
        # receive counters come first, transmit counters start at col 8
        bytes_recv, packets_recv, errin, dropin = \
            [int(x) for x in fields[0:4]]
        bytes_sent, packets_sent, errout, dropout = \
            [int(x) for x in fields[8:12]]
        retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
                         errin, errout, dropin, dropout)
    return retdict
+
+
+# --- disks
+
def disk_io_counters():
    """Return disk I/O statistics for every disk installed on the
    system as a dict of raw tuples.
    """
    # man iostat states that sectors are equivalent with blocks and
    # have a size of 512 bytes since 2.4 kernels. This value is
    # needed to calculate the amount of disk I/O in bytes.
    SECTOR_SIZE = 512

    # determine partitions we want to look for
    partitions = []
    f = open("/proc/partitions", "rt")
    try:
        lines = f.readlines()[2:]
    finally:
        f.close()
    for line in reversed(lines):
        _, _, _, name = line.split()
        if name[-1].isdigit():
            # we're dealing with a partition (e.g. 'sda1'); 'sda' will
            # also be around but we want to omit it
            partitions.append(name)
        elif not partitions or not partitions[-1].startswith(name):
            # we're dealing with a disk entity for which no
            # partitions have been defined (e.g. 'sda' but
            # 'sda1' was not around), see:
            # http://code.google.com/p/psutil/issues/detail?id=338
            partitions.append(name)

    retdict = {}
    f = open("/proc/diskstats", "rt")
    try:
        lines = f.readlines()
    finally:
        f.close()
    for line in lines:
        # http://www.mjmwired.net/kernel/Documentation/iostats.txt
        fields = line.split()[:11]
        _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = fields
        if name in partitions:
            retdict[name] = (int(reads), int(writes),
                             int(rbytes) * SECTOR_SIZE,
                             int(wbytes) * SECTOR_SIZE,
                             int(rtime), int(wtime))
    return retdict
+
+
def disk_partitions(all=False):
    """Return mounted disk partitions as a list of nameduples"""
    # collect filesystem types backed by a physical device (in
    # /proc/filesystems virtual filesystems are flagged "nodev")
    physical_fstypes = []
    f = open("/proc/filesystems", "r")
    try:
        for line in f:
            if not line.startswith("nodev"):
                physical_fstypes.append(line.strip())
    finally:
        f.close()

    results = []
    for device, mountpoint, fstype, opts in cext.disk_partitions():
        if device == 'none':
            device = ''
        # unless *all* is requested keep only device-backed filesystems
        if not all and (device == '' or fstype not in physical_fstypes):
            continue
        results.append(_common.sdiskpart(device, mountpoint, fstype, opts))
    return results
+
+
# disk_usage() needs no Linux-specific handling; reuse the generic
# POSIX implementation as-is.
disk_usage = _psposix.disk_usage
+
+
+# --- decorators
+
def wrap_exceptions(fun):
    """Decorator which translates bare OSError and IOError exceptions
    into NoSuchProcess and AccessDenied.
    """
    @wraps(fun)
    def wrapper(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except EnvironmentError:
            # support for private module import: the psutil exception
            # classes may not have been set on this module yet
            if NoSuchProcess is None or AccessDenied is None:
                raise
            exc = sys.exc_info()[1]
            # ENOENT (no such file or directory) gets raised on open();
            # ESRCH (no such process) can get raised on read() if the
            # process disappeared in the meantime
            if exc.errno in (errno.ENOENT, errno.ESRCH):
                raise NoSuchProcess(self.pid, self._name)
            elif exc.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._name)
            else:
                raise
    return wrapper
+
+
+class Process(object):
+    """Linux process implementation."""
+
+    __slots__ = ["pid", "_name"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+
+    @wrap_exceptions
+    def name(self):
+        fname = "/proc/%s/stat" % self.pid
+        if PY3:
+            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
+        else:
+            f = open(fname, "rt")
+        try:
+            name = f.read().split(' ')[1].replace('(', '').replace(')', '')
+        finally:
+            f.close()
+        # XXX - gets changed later and probably needs refactoring
+        return name
+
+    def exe(self):
+        try:
+            exe = os.readlink("/proc/%s/exe" % self.pid)
+        except (OSError, IOError):
+            err = sys.exc_info()[1]
+            if err.errno == errno.ENOENT:
+                # no such file error; might be raised also if the
+                # path actually exists for system processes with
+                # low pids (about 0-20)
+                if os.path.lexists("/proc/%s" % self.pid):
+                    return ""
+                else:
+                    # ok, it is a process which has gone away
+                    raise NoSuchProcess(self.pid, self._name)
+            if err.errno in (errno.EPERM, errno.EACCES):
+                raise AccessDenied(self.pid, self._name)
+            raise
+
+        # readlink() might return paths containing null bytes ('\x00').
+        # Certain names have ' (deleted)' appended. Usually this is
+        # bogus as the file actually exists. Either way that's not
+        # important as we don't want to discriminate executables which
+        # have been deleted.
+        exe = exe.split('\x00')[0]
+        if exe.endswith(' (deleted)') and not os.path.exists(exe):
+            exe = exe[:-10]
+        return exe
+
+    @wrap_exceptions
+    def cmdline(self):
+        fname = "/proc/%s/cmdline" % self.pid
+        if PY3:
+            f = open(fname, "rt", encoding=DEFAULT_ENCODING)
+        else:
+            f = open(fname, "rt")
+        try:
+            # return the args as a list
+            return [x for x in f.read().split('\x00') if x]
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def terminal(self):
+        tmap = _psposix._get_terminal_map()
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            tty_nr = int(f.read().split(b(' '))[6])
+        finally:
+            f.close()
+        try:
+            return tmap[tty_nr]
+        except KeyError:
+            return None
+
    # /proc/<pid>/io is only present on kernels with per-task I/O
    # accounting enabled (hence the "kernel too old?" fallback below);
    # probe once at class-definition time using our own pid.
    if os.path.exists('/proc/%s/io' % os.getpid()):
        @wrap_exceptions
        def io_counters(self):
            """Return I/O statistics as a pio namedtuple:
            (read_count, write_count, read_bytes, write_bytes).

            Raises NotImplementedError if any of the four expected
            fields is missing from /proc/<pid>/io.
            """
            fname = "/proc/%s/io" % self.pid
            f = open(fname, 'rb')
            SYSCR, SYSCW = b("syscr"), b("syscw")
            READ_BYTES, WRITE_BYTES = b("read_bytes"), b("write_bytes")
            try:
                rcount = wcount = rbytes = wbytes = None
                # first matching line wins for each counter
                for line in f:
                    if rcount is None and line.startswith(SYSCR):
                        rcount = int(line.split()[1])
                    elif wcount is None and line.startswith(SYSCW):
                        wcount = int(line.split()[1])
                    elif rbytes is None and line.startswith(READ_BYTES):
                        rbytes = int(line.split()[1])
                    elif wbytes is None and line.startswith(WRITE_BYTES):
                        wbytes = int(line.split()[1])
                for x in (rcount, wcount, rbytes, wbytes):
                    if x is None:
                        raise NotImplementedError(
                            "couldn't read all necessary info from %r" % fname)
                return _common.pio(rcount, wcount, rbytes, wbytes)
            finally:
                f.close()
    else:
        def io_counters(self):
            """Fallback when /proc/<pid>/io does not exist; always
            raises NotImplementedError."""
            raise NotImplementedError("couldn't find /proc/%s/io (kernel "
                                      "too old?)" % self.pid)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            st = f.read().strip()
+        finally:
+            f.close()
+        # ignore the first two values ("pid (exe)")
+        st = st[st.find(b(')')) + 2:]
+        values = st.split(b(' '))
+        utime = float(values[11]) / CLOCK_TICKS
+        stime = float(values[12]) / CLOCK_TICKS
+        return _common.pcputimes(utime, stime)
+
+    @wrap_exceptions
+    def wait(self, timeout=None):
+        try:
+            return _psposix.wait_pid(self.pid, timeout)
+        except _psposix.TimeoutExpired:
+            # support for private module import
+            if TimeoutExpired is None:
+                raise
+            raise TimeoutExpired(timeout, self.pid, self._name)
+
+    @wrap_exceptions
+    def create_time(self):
+        f = open("/proc/%s/stat" % self.pid, 'rb')
+        try:
+            st = f.read().strip()
+        finally:
+            f.close()
+        # ignore the first two values ("pid (exe)")
+        st = st[st.rfind(b(')')) + 2:]
+        values = st.split(b(' '))
+        # According to documentation, starttime is in field 21 and the
+        # unit is jiffies (clock ticks).
+        # We first divide it for clock ticks and then add uptime returning
+        # seconds since the epoch, in UTC.
+        # Also use cached value if available.
+        bt = BOOT_TIME or boot_time()
+        return (float(values[19]) / CLOCK_TICKS) + bt
+
+    @wrap_exceptions
+    def memory_info(self):
+        f = open("/proc/%s/statm" % self.pid, 'rb')
+        try:
+            vms, rss = f.readline().split()[:2]
+            return _common.pmem(int(rss) * PAGESIZE,
+                                int(vms) * PAGESIZE)
+        finally:
+            f.close()
+
    @wrap_exceptions
    def memory_info_ex(self):
        """Return extended memory information, with every
        /proc/<pid>/statm field converted from pages to bytes.
        """
        #  ============================================================
        # | FIELD  | DESCRIPTION                         | AKA  | TOP  |
        #  ============================================================
        # | rss    | resident set size                   |      | RES  |
        # | vms    | total program size                  | size | VIRT |
        # | shared | shared pages (from shared mappings) |      | SHR  |
        # | text   | text ('code')                       | trs  | CODE |
        # | lib    | library (unused in Linux 2.6)       | lrs  |      |
        # | data   | data + stack                        | drs  | DATA |
        # | dirty  | dirty pages (unused in Linux 2.6)   | dt   |      |
        #  ============================================================
        f = open("/proc/%s/statm" % self.pid, "rb")
        try:
            vms, rss, shared, text, lib, data, dirty = \
                [int(x) * PAGESIZE for x in f.readline().split()[:7]]
        finally:
            f.close()
        # NOTE(review): 'pextmem' is referenced as a bare name while the
        # other methods build namedtuples via _common; presumably it is
        # defined at module level -- verify.
        return pextmem(rss, vms, shared, text, lib, data, dirty)
+
    # /proc/<pid>/smaps requires kernel >= 2.6.14 with CONFIG_MMU (see
    # the fallback's error message); probe once at class-definition time
    # using our own pid.
    if os.path.exists('/proc/%s/smaps' % os.getpid()):
        def memory_maps(self):
            """Yield the process's mapped memory regions as tuples of
            (addr, perms, path, rss, size, pss, shared_clean,
            shared_dirty, private_clean, private_dirty, referenced,
            anonymous, swap), parsed from /proc/<pid>/smaps.
            Fields are explained in 'man proc'; here is an updated (Apr 2012)
            version: http://goo.gl/fmebo
            """
            f = None
            try:
                f = open("/proc/%s/smaps" % self.pid, "rt")
                first_line = f.readline()
                # one-element stack holding the header line of the
                # mapping block currently being parsed
                current_block = [first_line]

                def get_blocks():
                    # NOTE(review): 'data' is a single dict reused for
                    # every block; per-section keys are overwritten, but
                    # stale keys from a previous section may survive --
                    # verify against upstream psutil.
                    data = {}
                    for line in f:
                        fields = line.split(None, 5)
                        if not fields[0].endswith(':'):
                            # new block section
                            yield (current_block.pop(), data)
                            current_block.append(line)
                        else:
                            try:
                                # numeric fields are expressed in kB
                                data[fields[0]] = int(fields[1]) * 1024
                            except ValueError:
                                if fields[0].startswith('VmFlags:'):
                                    # see issue #369
                                    continue
                                else:
                                    raise ValueError("don't know how to inte"
                                                     "rpret line %r" % line)
                    yield (current_block.pop(), data)

                if first_line:  # smaps file can be empty
                    for header, data in get_blocks():
                        hfields = header.split(None, 5)
                        try:
                            addr, perms, offset, dev, inode, path = hfields
                        except ValueError:
                            # header without a trailing path (anonymous
                            # mapping): pad with an empty string
                            addr, perms, offset, dev, inode, path = \
                                hfields + ['']
                        if not path:
                            path = '[anon]'
                        else:
                            path = path.strip()
                        yield (addr, perms, path,
                               data['Rss:'],
                               data.get('Size:', 0),
                               data.get('Pss:', 0),
                               data.get('Shared_Clean:', 0),
                               data.get('Shared_Dirty:', 0),
                               data.get('Private_Clean:', 0),
                               data.get('Private_Dirty:', 0),
                               data.get('Referenced:', 0),
                               data.get('Anonymous:', 0),
                               data.get('Swap:', 0))
                f.close()
            except EnvironmentError:
                # XXX - Can't use wrap_exceptions decorator as we're
                # returning a generator;  this probably needs some
                # refactoring in order to avoid this code duplication.
                if f is not None:
                    f.close()
                err = sys.exc_info()[1]
                if err.errno in (errno.ENOENT, errno.ESRCH):
                    raise NoSuchProcess(self.pid, self._name)
                if err.errno in (errno.EPERM, errno.EACCES):
                    raise AccessDenied(self.pid, self._name)
                raise
            except:
                if f is not None:
                    f.close()
                raise
            f.close()
+
+    else:
+        def memory_maps(self, ext):
+            msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or "  \
+                  "CONFIG_MMU kernel configuration option is not enabled" \
+                  % self.pid
+            raise NotImplementedError(msg)
+
+    @wrap_exceptions
+    def cwd(self):
+        # readlink() might return paths containing null bytes causing
+        # problems when used with other fs-related functions (os.*,
+        # open(), ...)
+        path = os.readlink("/proc/%s/cwd" % self.pid)
+        return path.replace('\x00', '')
+
+    @wrap_exceptions
+    def num_ctx_switches(self):
+        vol = unvol = None
+        f = open("/proc/%s/status" % self.pid, "rb")
+        VOLUNTARY = b("voluntary_ctxt_switches")
+        NON_VOLUNTARY = b("nonvoluntary_ctxt_switches")
+        try:
+            for line in f:
+                if line.startswith(VOLUNTARY):
+                    vol = int(line.split()[1])
+                elif line.startswith(NON_VOLUNTARY):
+                    unvol = int(line.split()[1])
+                if vol is not None and unvol is not None:
+                    return _common.pctxsw(vol, unvol)
+            raise NotImplementedError(
+                "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
+                "fields were not found in /proc/%s/status; the kernel is "
+                "probably older than 2.6.23" % self.pid)
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def num_threads(self):
+        f = open("/proc/%s/status" % self.pid, "rb")
+        try:
+            THREADS = b("Threads:")
+            for line in f:
+                if line.startswith(THREADS):
+                    return int(line.split()[1])
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def threads(self):
+        thread_ids = os.listdir("/proc/%s/task" % self.pid)
+        thread_ids.sort()
+        retlist = []
+        hit_enoent = False
+        for thread_id in thread_ids:
+            try:
+                f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id), 'rb')
+            except EnvironmentError:
+                err = sys.exc_info()[1]
+                if err.errno == errno.ENOENT:
+                    # no such file or directory; it means thread
+                    # disappeared on us
+                    hit_enoent = True
+                    continue
+                raise
+            try:
+                st = f.read().strip()
+            finally:
+                f.close()
+            # ignore the first two values ("pid (exe)")
+            st = st[st.find(b(')')) + 2:]
+            values = st.split(b(' '))
+            utime = float(values[11]) / CLOCK_TICKS
+            stime = float(values[12]) / CLOCK_TICKS
+            ntuple = _common.pthread(int(thread_id), utime, stime)
+            retlist.append(ntuple)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
+    @wrap_exceptions
+    def nice_get(self):
+        #f = open('/proc/%s/stat' % self.pid, 'r')
+        # try:
+        #   data = f.read()
+        #   return int(data.split()[18])
+        # finally:
+        #   f.close()
+
+        # Use C implementation
+        return _psutil_posix.getpriority(self.pid)
+
    @wrap_exceptions
    def nice_set(self, value):
        # Delegate to the C setpriority() wrapper; an OSError raised for
        # a vanished process is translated by wrap_exceptions.
        return _psutil_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def cpu_affinity_get(self):
+        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
+        bitmask = cext.proc_cpu_affinity_get(self.pid)
+        return from_bitmask(bitmask)
+
+    @wrap_exceptions
+    def cpu_affinity_set(self, cpus):
+        try:
+            cext.proc_cpu_affinity_set(self.pid, cpus)
+        except OSError:
+            err = sys.exc_info()[1]
+            if err.errno == errno.EINVAL:
+                allcpus = tuple(range(len(per_cpu_times())))
+                for cpu in cpus:
+                    if cpu not in allcpus:
+                        raise ValueError("invalid CPU #%i (choose between %s)"
+                                         % (cpu, allcpus))
+            raise
+
    # the ioprio_get()/ioprio_set() syscalls exist only starting from
    # kernel 2.6.13, so the C extension exposes them conditionally
    if hasattr(cext, "proc_ioprio_get"):

        @wrap_exceptions
        def ionice_get(self):
            """Return the I/O priority as a pionice namedtuple
            (ioclass, value)."""
            ioclass, value = cext.proc_ioprio_get(self.pid)
            return _common.pionice(ioclass, value)

        @wrap_exceptions
        def ionice_set(self, ioclass, value):
            """Set I/O priority class and value.

            Normalization, in order:
            - IOPRIO_CLASS_NONE (or None): value must be falsy, forced 0;
            - IOPRIO_CLASS_RT / IOPRIO_CLASS_BE: value of None becomes 4;
            - IOPRIO_CLASS_IDLE: value must be falsy, forced to 0;
            - any other ioclass: value forced to 0.
            The final value must fall within 0..8 inclusive.
            """
            if ioclass in (IOPRIO_CLASS_NONE, None):
                if value:
                    msg = "can't specify value with IOPRIO_CLASS_NONE"
                    raise ValueError(msg)
                ioclass = IOPRIO_CLASS_NONE
                value = 0
            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
                if value is None:
                    value = 4
            elif ioclass == IOPRIO_CLASS_IDLE:
                if value:
                    msg = "can't specify value with IOPRIO_CLASS_IDLE"
                    raise ValueError(msg)
                value = 0
            else:
                value = 0
            if not 0 <= value <= 8:
                raise ValueError(
                    "value argument range expected is between 0 and 8")
            return cext.proc_ioprio_set(self.pid, ioclass, value)
+
+    if HAS_PRLIMIT:
+        @wrap_exceptions
+        def rlimit(self, resource, limits=None):
+            # if pid is 0 prlimit() applies to the calling process and
+            # we don't want that
+            if self.pid == 0:
+                raise ValueError("can't use prlimit() against PID 0 process")
+            if limits is None:
+                # get
+                return cext.linux_prlimit(self.pid, resource)
+            else:
+                # set
+                if len(limits) != 2:
+                    raise ValueError(
+                        "second argument must be a (soft, hard) tuple")
+                soft, hard = limits
+                cext.linux_prlimit(self.pid, resource, soft, hard)
+
+    @wrap_exceptions
+    def status(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            STATE = b("State:")
+            for line in f:
+                if line.startswith(STATE):
+                    letter = line.split()[1]
+                    if PY3:
+                        letter = letter.decode()
+                    # XXX is '?' legit? (we're not supposed to return
+                    # it anyway)
+                    return PROC_STATUSES.get(letter, '?')
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def open_files(self):
+        retlist = []
+        files = os.listdir("/proc/%s/fd" % self.pid)
+        hit_enoent = False
+        for fd in files:
+            file = "/proc/%s/fd/%s" % (self.pid, fd)
+            if os.path.islink(file):
+                try:
+                    file = os.readlink(file)
+                except OSError:
+                    # ENOENT == file which is gone in the meantime
+                    err = sys.exc_info()[1]
+                    if err.errno == errno.ENOENT:
+                        hit_enoent = True
+                        continue
+                    raise
+                else:
+                    # If file is not an absolute path there's no way
+                    # to tell whether it's a regular file or not,
+                    # so we skip it. A regular file is always supposed
+                    # to be absolutized though.
+                    if file.startswith('/') and isfile_strict(file):
+                        ntuple = _common.popenfile(file, int(fd))
+                        retlist.append(ntuple)
+        if hit_enoent:
+            # raise NSP if the process disappeared on us
+            os.stat('/proc/%s' % self.pid)
+        return retlist
+
    @wrap_exceptions
    def connections(self, kind='inet'):
        """Return the socket connections opened by the process.

        *kind* is passed straight to _connections.retrieve as a filter
        (presumably the usual psutil kinds such as 'inet', 'tcp',
        'udp', 'unix', 'all' -- verify against that module).
        """
        ret = _connections.retrieve(kind, self.pid)
        # stat the /proc entry afterwards so a process which vanished
        # mid-read raises NoSuchProcess (via wrap_exceptions) instead of
        # returning possibly-incomplete results
        os.stat('/proc/%s' % self.pid)
        return ret
+
    @wrap_exceptions
    def num_fds(self):
        # number of open file descriptors == entries in /proc/<pid>/fd
        return len(os.listdir("/proc/%s/fd" % self.pid))
+
+    @wrap_exceptions
+    def ppid(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            PPID = b("PPid:")
+            for line in f:
+                if line.startswith(PPID):
+                    # PPid: nnnn
+                    return int(line.split()[1])
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def uids(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            UID = b('Uid:')
+            for line in f:
+                if line.startswith(UID):
+                    _, real, effective, saved, fs = line.split()
+                    return _common.puids(int(real), int(effective), int(saved))
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()
+
+    @wrap_exceptions
+    def gids(self):
+        f = open("/proc/%s/status" % self.pid, 'rb')
+        try:
+            GID = b('Gid:')
+            for line in f:
+                if line.startswith(GID):
+                    _, real, effective, saved, fs = line.split()
+                    return _common.pgids(int(real), int(effective), int(saved))
+            raise NotImplementedError("line not found")
+        finally:
+            f.close()