Posted to commits@ambari.apache.org by mp...@apache.org on 2014/11/19 01:38:18 UTC

[2/5] ambari git commit: AMBARI-7681. Add Metrics Service to common services stack. (mpapirkovskyy)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psposix.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psposix.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psposix.py
deleted file mode 100644
index 1188c9f..0000000
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_psposix.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Routines common to all posix systems."""
-
-import errno
-import glob
-import os
-import sys
-import time
-
-from psutil._common import sdiskusage, usage_percent, memoize
-from psutil._compat import PY3, unicode
-
-
-class TimeoutExpired(Exception):
-    pass
-
-
-def pid_exists(pid):
-    """Check whether pid exists in the current process table."""
-    if pid == 0:
-        # According to "man 2 kill" PID 0 has a special meaning:
-        # it refers to <<every process in the process group of the
-        # calling process>> so we don't want to go any further.
-        # If we get here it means this UNIX platform *does* have
-        # a process with id 0.
-        return True
-    try:
-        os.kill(pid, 0)
-    except OSError:
-        err = sys.exc_info()[1]
-        if err.errno == errno.ESRCH:
-            # ESRCH == No such process
-            return False
-        elif err.errno == errno.EPERM:
-            # EPERM clearly means there's a process to deny access to
-            return True
-        else:
-            # According to "man 2 kill" possible error values are
-            # (EINVAL, EPERM, ESRCH) therefore we should never get
-            # here. If we do let's be explicit in considering this
-            # an error.
-            raise err
-    else:
-        return True
-
-
-def wait_pid(pid, timeout=None):
-    """Wait for process with pid 'pid' to terminate and return its
-    exit status code as an integer.
-
-    If pid is not a children of os.getpid() (current process) just
-    waits until the process disappears and return None.
-
-    If pid does not exist at all return None immediately.
-
-    Raise TimeoutExpired on timeout expired.
-    """
-    def check_timeout(delay):
-        if timeout is not None:
-            if timer() >= stop_at:
-                raise TimeoutExpired()
-        time.sleep(delay)
-        return min(delay * 2, 0.04)
-
-    timer = getattr(time, 'monotonic', time.time)
-    if timeout is not None:
-        waitcall = lambda: os.waitpid(pid, os.WNOHANG)
-        stop_at = timer() + timeout
-    else:
-        waitcall = lambda: os.waitpid(pid, 0)
-
-    delay = 0.0001
-    while 1:
-        try:
-            retpid, status = waitcall()
-        except OSError:
-            err = sys.exc_info()[1]
-            if err.errno == errno.EINTR:
-                delay = check_timeout(delay)
-                continue
-            elif err.errno == errno.ECHILD:
-                # This has two meanings:
-                # - pid is not a child of os.getpid() in which case
-                #   we keep polling until it's gone
-                # - pid never existed in the first place
-                # In both cases we'll eventually return None as we
-                # can't determine its exit status code.
-                while 1:
-                    if pid_exists(pid):
-                        delay = check_timeout(delay)
-                    else:
-                        return
-            else:
-                raise
-        else:
-            if retpid == 0:
-                # WNOHANG was used, pid is still running
-                delay = check_timeout(delay)
-                continue
-            # process exited due to a signal; return the integer of
-            # that signal
-            if os.WIFSIGNALED(status):
-                return os.WTERMSIG(status)
-            # process exited using exit(2) system call; return the
-            # integer exit(2) system call has been called with
-            elif os.WIFEXITED(status):
-                return os.WEXITSTATUS(status)
-            else:
-                # should never happen
-                raise RuntimeError("unknown process exit status")
-
-
-def disk_usage(path):
-    """Return disk usage associated with path."""
-    try:
-        st = os.statvfs(path)
-    except UnicodeEncodeError:
-        if not PY3 and isinstance(path, unicode):
-            # this is a bug with os.statvfs() and unicode on
-            # Python 2, see:
-            # - https://code.google.com/p/psutil/issues/detail?id=416
-            # - http://bugs.python.org/issue18695
-            try:
-                path = path.encode(sys.getfilesystemencoding())
-            except UnicodeEncodeError:
-                pass
-            st = os.statvfs(path)
-        else:
-            raise
-    free = (st.f_bavail * st.f_frsize)
-    total = (st.f_blocks * st.f_frsize)
-    used = (st.f_blocks - st.f_bfree) * st.f_frsize
-    percent = usage_percent(used, total, _round=1)
-    # NB: the percentage is -5% than what shown by df due to
-    # reserved blocks that we are currently not considering:
-    # http://goo.gl/sWGbH
-    return sdiskusage(total, used, free, percent)
-
-
-@memoize
-def _get_terminal_map():
-    ret = {}
-    ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
-    for name in ls:
-        assert name not in ret
-        try:
-            ret[os.stat(name).st_rdev] = name
-        except OSError:
-            err = sys.exc_info()[1]
-            if err.errno != errno.ENOENT:
-                raise
-    return ret
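
For reference, the deleted _psposix.py above tests for a live PID by sending signal 0 with os.kill(). A minimal standalone sketch of that idiom, using only the standard library (this is not the psutil code itself):

import errno
import os


def pid_exists(pid):
    # Signal 0 performs the existence/permission checks without
    # actually delivering a signal to the target process.
    if pid <= 0:
        # PID 0 refers to the caller's process group; treat non-positive
        # PIDs as invalid for this simple check.
        return False
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:   # no such process
            return False
        if err.errno == errno.EPERM:   # process exists, signalling denied
            return True
        raise
    return True


if __name__ == '__main__':
    print(pid_exists(os.getpid()))  # expected: True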

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pssunos.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pssunos.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pssunos.py
deleted file mode 100644
index bc18427..0000000
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pssunos.py
+++ /dev/null
@@ -1,533 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Sun OS Solaris platform implementation."""
-
-import errno
-import os
-import socket
-import subprocess
-import sys
-
-from psutil import _common
-from psutil import _psposix
-from psutil._common import (conn_tmap, usage_percent, isfile_strict)
-from psutil._compat import namedtuple, PY3
-import _psutil_posix
-import _psutil_sunos as cext
-
-
-__extra__all__ = ["CONN_IDLE", "CONN_BOUND"]
-
-PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
-
-CONN_IDLE = "IDLE"
-CONN_BOUND = "BOUND"
-
-PROC_STATUSES = {
-    cext.SSLEEP: _common.STATUS_SLEEPING,
-    cext.SRUN: _common.STATUS_RUNNING,
-    cext.SZOMB: _common.STATUS_ZOMBIE,
-    cext.SSTOP: _common.STATUS_STOPPED,
-    cext.SIDL: _common.STATUS_IDLE,
-    cext.SONPROC: _common.STATUS_RUNNING,  # same as run
-    cext.SWAIT: _common.STATUS_WAITING,
-}
-
-TCP_STATUSES = {
-    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
-    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
-    cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
-    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
-    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
-    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
-    cext.TCPS_CLOSED: _common.CONN_CLOSE,
-    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
-    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
-    cext.TCPS_LISTEN: _common.CONN_LISTEN,
-    cext.TCPS_CLOSING: _common.CONN_CLOSING,
-    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
-    cext.TCPS_IDLE: CONN_IDLE,  # sunos specific
-    cext.TCPS_BOUND: CONN_BOUND,  # sunos specific
-}
-
-scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
-svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
-pextmem = namedtuple('pextmem', ['rss', 'vms'])
-pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked'])
-pmmap_ext = namedtuple(
-    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
-
-# set later from __init__.py
-NoSuchProcess = None
-AccessDenied = None
-TimeoutExpired = None
-
-# --- functions
-
-disk_io_counters = cext.disk_io_counters
-net_io_counters = cext.net_io_counters
-disk_usage = _psposix.disk_usage
-
-
-def virtual_memory():
-    # we could have done this with kstat, but imho this is good enough
-    total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
-    # note: there's no difference on Solaris
-    free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
-    used = total - free
-    percent = usage_percent(used, total, _round=1)
-    return svmem(total, avail, percent, used, free)
-
-
-def swap_memory():
-    sin, sout = cext.swap_mem()
-    # XXX
-    # we are supposed to get total/free by doing so:
-    # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
-    #     usr/src/cmd/swap/swap.c
-    # ...nevertheless I can't manage to obtain the same numbers as 'swap'
-    # cmdline utility, so let's parse its output (sigh!)
-    p = subprocess.Popen(['swap', '-l', '-k'], stdout=subprocess.PIPE)
-    stdout, stderr = p.communicate()
-    if PY3:
-        stdout = stdout.decode(sys.stdout.encoding)
-    if p.returncode != 0:
-        raise RuntimeError("'swap -l -k' failed (retcode=%s)" % p.returncode)
-
-    lines = stdout.strip().split('\n')[1:]
-    if not lines:
-        raise RuntimeError('no swap device(s) configured')
-    total = free = 0
-    for line in lines:
-        line = line.split()
-        t, f = line[-2:]
-        t = t.replace('K', '')
-        f = f.replace('K', '')
-        total += int(int(t) * 1024)
-        free += int(int(f) * 1024)
-    used = total - free
-    percent = usage_percent(used, total, _round=1)
-    return _common.sswap(total, used, free, percent,
-                       sin * PAGE_SIZE, sout * PAGE_SIZE)
-
-
-def pids():
-    """Returns a list of PIDs currently running on the system."""
-    return [int(x) for x in os.listdir('/proc') if x.isdigit()]
-
-
-def pid_exists(pid):
-    """Check for the existence of a unix pid."""
-    return _psposix.pid_exists(pid)
-
-
-def cpu_times():
-    """Return system-wide CPU times as a named tuple"""
-    ret = cext.per_cpu_times()
-    return scputimes(*[sum(x) for x in zip(*ret)])
-
-
-def per_cpu_times():
-    """Return system per-CPU times as a list of named tuples"""
-    ret = cext.per_cpu_times()
-    return [scputimes(*x) for x in ret]
-
-
-def cpu_count_logical():
-    """Return the number of logical CPUs in the system."""
-    try:
-        return os.sysconf("SC_NPROCESSORS_ONLN")
-    except ValueError:
-        # mimic os.cpu_count() behavior
-        return None
-
-
-def cpu_count_physical():
-    """Return the number of physical CPUs in the system."""
-    return cext.cpu_count_phys()
-
-
-def boot_time():
-    """The system boot time expressed in seconds since the epoch."""
-    return cext.boot_time()
-
-
-def users():
-    """Return currently connected users as a list of namedtuples."""
-    retlist = []
-    rawlist = cext.users()
-    localhost = (':0.0', ':0')
-    for item in rawlist:
-        user, tty, hostname, tstamp, user_process = item
-        # note: the underlying C function includes entries about
-        # system boot, run level and others.  We might want
-        # to use them in the future.
-        if not user_process:
-            continue
-        if hostname in localhost:
-            hostname = 'localhost'
-        nt = _common.suser(user, tty, hostname, tstamp)
-        retlist.append(nt)
-    return retlist
-
-
-def disk_partitions(all=False):
-    """Return system disk partitions."""
-    # TODO - the filtering logic should be better checked so that
-    # it tries to reflect 'df' as much as possible
-    retlist = []
-    partitions = cext.disk_partitions()
-    for partition in partitions:
-        device, mountpoint, fstype, opts = partition
-        if device == 'none':
-            device = ''
-        if not all:
-            # Differently from, say, Linux, we don't have a list of
-            # common fs types so the best we can do, AFAIK, is to
-            # filter by filesystem having a total size > 0.
-            if not disk_usage(mountpoint).total:
-                continue
-        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
-        retlist.append(ntuple)
-    return retlist
-
-
-def net_connections(kind, _pid=-1):
-    """Return socket connections.  If pid == -1 return system-wide
-    connections (as opposed to connections opened by one process only).
-    Only INET sockets are returned (UNIX are not).
-    """
-    cmap = _common.conn_tmap.copy()
-    if _pid == -1:
-        cmap.pop('unix', 0)
-    if kind not in cmap:
-        raise ValueError("invalid %r kind argument; choose between %s"
-                         % (kind, ', '.join([repr(x) for x in cmap])))
-    families, types = _common.conn_tmap[kind]
-    rawlist = cext.net_connections(_pid, families, types)
-    ret = []
-    for item in rawlist:
-        fd, fam, type_, laddr, raddr, status, pid = item
-        if fam not in families:
-            continue
-        if type_ not in types:
-            continue
-        status = TCP_STATUSES[status]
-        if _pid == -1:
-            nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
-        else:
-            nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
-        ret.append(nt)
-    return ret
-
-
-def wrap_exceptions(fun):
-    """Call callable into a try/except clause and translate ENOENT,
-    EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
-    """
-    def wrapper(self, *args, **kwargs):
-        try:
-            return fun(self, *args, **kwargs)
-        except EnvironmentError:
-            # support for private module import
-            if NoSuchProcess is None or AccessDenied is None:
-                raise
-            # ENOENT (no such file or directory) gets raised on open().
-            # ESRCH (no such process) can get raised on read() if
-            # process is gone in meantime.
-            err = sys.exc_info()[1]
-            if err.errno in (errno.ENOENT, errno.ESRCH):
-                raise NoSuchProcess(self.pid, self._name)
-            if err.errno in (errno.EPERM, errno.EACCES):
-                raise AccessDenied(self.pid, self._name)
-            raise
-    return wrapper
-
-
-class Process(object):
-    """Wrapper class around underlying C implementation."""
-
-    __slots__ = ["pid", "_name"]
-
-    def __init__(self, pid):
-        self.pid = pid
-        self._name = None
-
-    @wrap_exceptions
-    def name(self):
-        # note: max len == 15
-        return cext.proc_name_and_args(self.pid)[0]
-
-    @wrap_exceptions
-    def exe(self):
-        # Will be guess later from cmdline but we want to explicitly
-        # invoke cmdline here in order to get an AccessDenied
-        # exception if the user has not enough privileges.
-        self.cmdline()
-        return ""
-
-    @wrap_exceptions
-    def cmdline(self):
-        return cext.proc_name_and_args(self.pid)[1].split(' ')
-
-    @wrap_exceptions
-    def create_time(self):
-        return cext.proc_basic_info(self.pid)[3]
-
-    @wrap_exceptions
-    def num_threads(self):
-        return cext.proc_basic_info(self.pid)[5]
-
-    @wrap_exceptions
-    def nice_get(self):
-        # For some reason getpriority(3) return ESRCH (no such process)
-        # for certain low-pid processes, no matter what (even as root).
-        # The process actually exists though, as it has a name,
-        # creation time, etc.
-        # The best thing we can do here appears to be raising AD.
-        # Note: tested on Solaris 11; on Open Solaris 5 everything is
-        # fine.
-        try:
-            return _psutil_posix.getpriority(self.pid)
-        except EnvironmentError:
-            err = sys.exc_info()[1]
-            if err.errno in (errno.ENOENT, errno.ESRCH):
-                if pid_exists(self.pid):
-                    raise AccessDenied(self.pid, self._name)
-            raise
-
-    @wrap_exceptions
-    def nice_set(self, value):
-        if self.pid in (2, 3):
-            # Special case PIDs: internally setpriority(3) return ESRCH
-            # (no such process), no matter what.
-            # The process actually exists though, as it has a name,
-            # creation time, etc.
-            raise AccessDenied(self.pid, self._name)
-        return _psutil_posix.setpriority(self.pid, value)
-
-    @wrap_exceptions
-    def ppid(self):
-        return cext.proc_basic_info(self.pid)[0]
-
-    @wrap_exceptions
-    def uids(self):
-        real, effective, saved, _, _, _ = cext.proc_cred(self.pid)
-        return _common.puids(real, effective, saved)
-
-    @wrap_exceptions
-    def gids(self):
-        _, _, _, real, effective, saved = cext.proc_cred(self.pid)
-        return _common.puids(real, effective, saved)
-
-    @wrap_exceptions
-    def cpu_times(self):
-        user, system = cext.proc_cpu_times(self.pid)
-        return _common.pcputimes(user, system)
-
-    @wrap_exceptions
-    def terminal(self):
-        hit_enoent = False
-        tty = wrap_exceptions(
-            cext.proc_basic_info(self.pid)[0])
-        if tty != cext.PRNODEV:
-            for x in (0, 1, 2, 255):
-                try:
-                    return os.readlink('/proc/%d/path/%d' % (self.pid, x))
-                except OSError:
-                    err = sys.exc_info()[1]
-                    if err.errno == errno.ENOENT:
-                        hit_enoent = True
-                        continue
-                    raise
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-
-    @wrap_exceptions
-    def cwd(self):
-        # /proc/PID/path/cwd may not be resolved by readlink() even if
-        # it exists (ls shows it). If that's the case and the process
-        # is still alive return None (we can return None also on BSD).
-        # Reference: http://goo.gl/55XgO
-        try:
-            return os.readlink("/proc/%s/path/cwd" % self.pid)
-        except OSError:
-            err = sys.exc_info()[1]
-            if err.errno == errno.ENOENT:
-                os.stat("/proc/%s" % self.pid)
-                return None
-            raise
-
-    @wrap_exceptions
-    def memory_info(self):
-        ret = cext.proc_basic_info(self.pid)
-        rss, vms = ret[1] * 1024, ret[2] * 1024
-        return _common.pmem(rss, vms)
-
-    # it seems Solaris uses rss and vms only
-    memory_info_ex = memory_info
-
-    @wrap_exceptions
-    def status(self):
-        code = cext.proc_basic_info(self.pid)[6]
-        # XXX is '?' legit? (we're not supposed to return it anyway)
-        return PROC_STATUSES.get(code, '?')
-
-    @wrap_exceptions
-    def threads(self):
-        ret = []
-        tids = os.listdir('/proc/%d/lwp' % self.pid)
-        hit_enoent = False
-        for tid in tids:
-            tid = int(tid)
-            try:
-                utime, stime = cext.query_process_thread(
-                    self.pid, tid)
-            except EnvironmentError:
-                # ENOENT == thread gone in meantime
-                err = sys.exc_info()[1]
-                if err.errno == errno.ENOENT:
-                    hit_enoent = True
-                    continue
-                raise
-            else:
-                nt = _common.pthread(tid, utime, stime)
-                ret.append(nt)
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-        return ret
-
-    @wrap_exceptions
-    def open_files(self):
-        retlist = []
-        hit_enoent = False
-        pathdir = '/proc/%d/path' % self.pid
-        for fd in os.listdir('/proc/%d/fd' % self.pid):
-            path = os.path.join(pathdir, fd)
-            if os.path.islink(path):
-                try:
-                    file = os.readlink(path)
-                except OSError:
-                    # ENOENT == file which is gone in the meantime
-                    err = sys.exc_info()[1]
-                    if err.errno == errno.ENOENT:
-                        hit_enoent = True
-                        continue
-                    raise
-                else:
-                    if isfile_strict(file):
-                        retlist.append(_common.popenfile(file, int(fd)))
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-        return retlist
-
-    def _get_unix_sockets(self, pid):
-        """Get UNIX sockets used by process by parsing 'pfiles' output."""
-        # TODO: rewrite this in C (...but the damn netstat source code
-        # does not include this part! Argh!!)
-        cmd = "pfiles %s" % pid
-        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
-        stdout, stderr = p.communicate()
-        if PY3:
-            stdout, stderr = [x.decode(sys.stdout.encoding)
-                              for x in (stdout, stderr)]
-        if p.returncode != 0:
-            if 'permission denied' in stderr.lower():
-                raise AccessDenied(self.pid, self._name)
-            if 'no such process' in stderr.lower():
-                raise NoSuchProcess(self.pid, self._name)
-            raise RuntimeError("%r command error\n%s" % (cmd, stderr))
-
-        lines = stdout.split('\n')[2:]
-        for i, line in enumerate(lines):
-            line = line.lstrip()
-            if line.startswith('sockname: AF_UNIX'):
-                path = line.split(' ', 2)[2]
-                type = lines[i - 2].strip()
-                if type == 'SOCK_STREAM':
-                    type = socket.SOCK_STREAM
-                elif type == 'SOCK_DGRAM':
-                    type = socket.SOCK_DGRAM
-                else:
-                    type = -1
-                yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
-
-    @wrap_exceptions
-    def connections(self, kind='inet'):
-        ret = net_connections(kind, _pid=self.pid)
-        # The underlying C implementation retrieves all OS connections
-        # and filters them by PID.  At this point we can't tell whether
-        # an empty list means there were no connections for process or
-        # process is no longer active so we force NSP in case the PID
-        # is no longer there.
-        if not ret:
-            os.stat('/proc/%s' % self.pid)  # will raise NSP if process is gone
-
-        # UNIX sockets
-        if kind in ('all', 'unix'):
-            ret.extend([_common.pconn(*conn) for conn in
-                        self._get_unix_sockets(self.pid)])
-        return ret
-
-    nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
-    nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
-
-    @wrap_exceptions
-    def memory_maps(self):
-        def toaddr(start, end):
-            return '%s-%s' % (hex(start)[2:].strip('L'),
-                              hex(end)[2:].strip('L'))
-
-        retlist = []
-        rawlist = cext.proc_memory_maps(self.pid)
-        hit_enoent = False
-        for item in rawlist:
-            addr, addrsize, perm, name, rss, anon, locked = item
-            addr = toaddr(addr, addrsize)
-            if not name.startswith('['):
-                try:
-                    name = os.readlink('/proc/%s/path/%s' % (self.pid, name))
-                except OSError:
-                    err = sys.exc_info()[1]
-                    if err.errno == errno.ENOENT:
-                        # sometimes the link may not be resolved by
-                        # readlink() even if it exists (ls shows it).
-                        # If that's the case we just return the
-                        # unresolved link path.
-                        # This seems an incosistency with /proc similar
-                        # to: http://goo.gl/55XgO
-                        name = '/proc/%s/path/%s' % (self.pid, name)
-                        hit_enoent = True
-                    else:
-                        raise
-            retlist.append((addr, perm, name, rss, anon, locked))
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-        return retlist
-
-    @wrap_exceptions
-    def num_fds(self):
-        return len(os.listdir("/proc/%s/fd" % self.pid))
-
-    @wrap_exceptions
-    def num_ctx_switches(self):
-        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
-
-    @wrap_exceptions
-    def wait(self, timeout=None):
-        try:
-            return _psposix.wait_pid(self.pid, timeout)
-        except _psposix.TimeoutExpired:
-            # support for private module import
-            if TimeoutExpired is None:
-                raise
-            raise TimeoutExpired(timeout, self.pid, self._name)
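
The deleted Solaris module above falls back to parsing `swap -l -k` output to compute swap totals. A hedged sketch of just that parsing step, run against a canned sample (the sample line and sizes are made up for illustration; real output comes from the Solaris swap utility):

def parse_swap_l_k(output):
    # `swap -l -k` reports sizes in KiB; skip the header row and sum
    # the last two columns (blocks, free) into byte counts.
    total = free = 0
    for line in output.strip().split('\n')[1:]:
        blocks_kb, free_kb = line.split()[-2:]
        total += int(blocks_kb.rstrip('K')) * 1024
        free += int(free_kb.rstrip('K')) * 1024
    return total, total - free, free


sample = ("swapfile             dev  swaplo   blocks     free\n"
          "/dev/zvol/dsk/rpool/swap 256,1  16  2097136K  2097136K\n")
print(parse_swap_l_k(sample))  # (total, used, free) in bytes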

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pswindows.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pswindows.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pswindows.py
deleted file mode 100644
index 1a786f1..0000000
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/build/lib.macosx-10.8-intel-2.7/psutil/_pswindows.py
+++ /dev/null
@@ -1,485 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Windows platform implementation."""
-
-import errno
-import os
-import sys
-
-from psutil import _common
-from psutil._common import conn_tmap, usage_percent, isfile_strict
-from psutil._compat import PY3, xrange, wraps, lru_cache, namedtuple
-import _psutil_windows as cext
-
-# process priority constants, import from __init__.py:
-# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
-__extra__all__ = ["ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
-                  "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
-                  "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS",
-                  #
-                  "CONN_DELETE_TCB",
-                  ]
-
-# --- module level constants (gets pushed up to psutil module)
-
-CONN_DELETE_TCB = "DELETE_TCB"
-WAIT_TIMEOUT = 0x00000102  # 258 in decimal
-ACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,
-                               cext.ERROR_ACCESS_DENIED])
-
-TCP_STATUSES = {
-    cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
-    cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
-    cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
-    cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
-    cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
-    cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
-    cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
-    cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
-    cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
-    cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
-    cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
-    cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
-    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
-}
-
-
-scputimes = namedtuple('scputimes', ['user', 'system', 'idle'])
-svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
-pextmem = namedtuple(
-    'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
-                'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
-                'pagefile', 'peak_pagefile', 'private'])
-pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
-pmmap_ext = namedtuple(
-    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
-
-# set later from __init__.py
-NoSuchProcess = None
-AccessDenied = None
-TimeoutExpired = None
-
-
-@lru_cache(maxsize=512)
-def _win32_QueryDosDevice(s):
-    return cext.win32_QueryDosDevice(s)
-
-
-def _convert_raw_path(s):
-    # convert paths using native DOS format like:
-    # "\Device\HarddiskVolume1\Windows\systemew\file.txt"
-    # into: "C:\Windows\systemew\file.txt"
-    if PY3 and not isinstance(s, str):
-        s = s.decode('utf8')
-    rawdrive = '\\'.join(s.split('\\')[:3])
-    driveletter = _win32_QueryDosDevice(rawdrive)
-    return os.path.join(driveletter, s[len(rawdrive):])
-
-
-# --- public functions
-
-
-def virtual_memory():
-    """System virtual memory as a namedtuple."""
-    mem = cext.virtual_mem()
-    totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
-    #
-    total = totphys
-    avail = availphys
-    free = availphys
-    used = total - avail
-    percent = usage_percent((total - avail), total, _round=1)
-    return svmem(total, avail, percent, used, free)
-
-
-def swap_memory():
-    """Swap system memory as a (total, used, free, sin, sout) tuple."""
-    mem = cext.virtual_mem()
-    total = mem[2]
-    free = mem[3]
-    used = total - free
-    percent = usage_percent(used, total, _round=1)
-    return _common.sswap(total, used, free, percent, 0, 0)
-
-
-def disk_usage(path):
-    """Return disk usage associated with path."""
-    try:
-        total, free = cext.disk_usage(path)
-    except WindowsError:
-        if not os.path.exists(path):
-            msg = "No such file or directory: '%s'" % path
-            raise OSError(errno.ENOENT, msg)
-        raise
-    used = total - free
-    percent = usage_percent(used, total, _round=1)
-    return _common.sdiskusage(total, used, free, percent)
-
-
-def disk_partitions(all):
-    """Return disk partitions."""
-    rawlist = cext.disk_partitions(all)
-    return [_common.sdiskpart(*x) for x in rawlist]
-
-
-def cpu_times():
-    """Return system CPU times as a named tuple."""
-    user, system, idle = cext.cpu_times()
-    return scputimes(user, system, idle)
-
-
-def per_cpu_times():
-    """Return system per-CPU times as a list of named tuples."""
-    ret = []
-    for cpu_t in cext.per_cpu_times():
-        user, system, idle = cpu_t
-        item = scputimes(user, system, idle)
-        ret.append(item)
-    return ret
-
-
-def cpu_count_logical():
-    """Return the number of logical CPUs in the system."""
-    return cext.cpu_count_logical()
-
-
-def cpu_count_physical():
-    """Return the number of physical CPUs in the system."""
-    return cext.cpu_count_phys()
-
-
-def boot_time():
-    """The system boot time expressed in seconds since the epoch."""
-    return cext.boot_time()
-
-
-def net_connections(kind, _pid=-1):
-    """Return socket connections.  If pid == -1 return system-wide
-    connections (as opposed to connections opened by one process only).
-    """
-    if kind not in conn_tmap:
-        raise ValueError("invalid %r kind argument; choose between %s"
-                         % (kind, ', '.join([repr(x) for x in conn_tmap])))
-    families, types = conn_tmap[kind]
-    rawlist = cext.net_connections(_pid, families, types)
-    ret = []
-    for item in rawlist:
-        fd, fam, type, laddr, raddr, status, pid = item
-        status = TCP_STATUSES[status]
-        if _pid == -1:
-            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
-        else:
-            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
-        ret.append(nt)
-    return ret
-
-
-def users():
-    """Return currently connected users as a list of namedtuples."""
-    retlist = []
-    rawlist = cext.users()
-    for item in rawlist:
-        user, hostname, tstamp = item
-        nt = _common.suser(user, None, hostname, tstamp)
-        retlist.append(nt)
-    return retlist
-
-
-pids = cext.pids
-pid_exists = cext.pid_exists
-net_io_counters = cext.net_io_counters
-disk_io_counters = cext.disk_io_counters
-ppid_map = cext.ppid_map  # not meant to be public
-
-
-def wrap_exceptions(fun):
-    """Decorator which translates bare OSError and WindowsError
-    exceptions into NoSuchProcess and AccessDenied.
-    """
-    @wraps(fun)
-    def wrapper(self, *args, **kwargs):
-        try:
-            return fun(self, *args, **kwargs)
-        except OSError:
-            # support for private module import
-            if NoSuchProcess is None or AccessDenied is None:
-                raise
-            err = sys.exc_info()[1]
-            if err.errno in ACCESS_DENIED_SET:
-                raise AccessDenied(self.pid, self._name)
-            if err.errno == errno.ESRCH:
-                raise NoSuchProcess(self.pid, self._name)
-            raise
-    return wrapper
-
-
-class Process(object):
-    """Wrapper class around underlying C implementation."""
-
-    __slots__ = ["pid", "_name"]
-
-    def __init__(self, pid):
-        self.pid = pid
-        self._name = None
-
-    @wrap_exceptions
-    def name(self):
-        """Return process name, which on Windows is always the final
-        part of the executable.
-        """
-        # This is how PIDs 0 and 4 are always represented in taskmgr
-        # and process-hacker.
-        if self.pid == 0:
-            return "System Idle Process"
-        elif self.pid == 4:
-            return "System"
-        else:
-            return os.path.basename(self.exe())
-
-    @wrap_exceptions
-    def exe(self):
-        # Note: os.path.exists(path) may return False even if the file
-        # is there, see:
-        # http://stackoverflow.com/questions/3112546/os-path-exists-lies
-        return _convert_raw_path(cext.proc_exe(self.pid))
-
-    @wrap_exceptions
-    def cmdline(self):
-        return cext.proc_cmdline(self.pid)
-
-    def ppid(self):
-        try:
-            return ppid_map()[self.pid]
-        except KeyError:
-            raise NoSuchProcess(self.pid, self._name)
-
-    def _get_raw_meminfo(self):
-        try:
-            return cext.proc_memory_info(self.pid)
-        except OSError:
-            err = sys.exc_info()[1]
-            if err.errno in ACCESS_DENIED_SET:
-                return cext.proc_memory_info_2(self.pid)
-            raise
-
-    @wrap_exceptions
-    def memory_info(self):
-        # on Windows RSS == WorkingSetSize and VSM == PagefileUsage
-        # fields of PROCESS_MEMORY_COUNTERS struct:
-        # http://msdn.microsoft.com/en-us/library/windows/desktop/
-        #     ms684877(v=vs.85).aspx
-        t = self._get_raw_meminfo()
-        return _common.pmem(t[2], t[7])
-
-    @wrap_exceptions
-    def memory_info_ex(self):
-        return pextmem(*self._get_raw_meminfo())
-
-    def memory_maps(self):
-        try:
-            raw = cext.proc_memory_maps(self.pid)
-        except OSError:
-            # XXX - can't use wrap_exceptions decorator as we're
-            # returning a generator; probably needs refactoring.
-            err = sys.exc_info()[1]
-            if err.errno in ACCESS_DENIED_SET:
-                raise AccessDenied(self.pid, self._name)
-            if err.errno == errno.ESRCH:
-                raise NoSuchProcess(self.pid, self._name)
-            raise
-        else:
-            for addr, perm, path, rss in raw:
-                path = _convert_raw_path(path)
-                addr = hex(addr)
-                yield (addr, perm, path, rss)
-
-    @wrap_exceptions
-    def kill(self):
-        return cext.proc_kill(self.pid)
-
-    @wrap_exceptions
-    def wait(self, timeout=None):
-        if timeout is None:
-            timeout = cext.INFINITE
-        else:
-            # WaitForSingleObject() expects time in milliseconds
-            timeout = int(timeout * 1000)
-        ret = cext.proc_wait(self.pid, timeout)
-        if ret == WAIT_TIMEOUT:
-            # support for private module import
-            if TimeoutExpired is None:
-                raise RuntimeError("timeout expired")
-            raise TimeoutExpired(timeout, self.pid, self._name)
-        return ret
-
-    @wrap_exceptions
-    def username(self):
-        if self.pid in (0, 4):
-            return 'NT AUTHORITY\\SYSTEM'
-        return cext.proc_username(self.pid)
-
-    @wrap_exceptions
-    def create_time(self):
-        # special case for kernel process PIDs; return system boot time
-        if self.pid in (0, 4):
-            return boot_time()
-        try:
-            return cext.proc_create_time(self.pid)
-        except OSError:
-            err = sys.exc_info()[1]
-            if err.errno in ACCESS_DENIED_SET:
-                return cext.proc_create_time_2(self.pid)
-            raise
-
-    @wrap_exceptions
-    def num_threads(self):
-        return cext.proc_num_threads(self.pid)
-
-    @wrap_exceptions
-    def threads(self):
-        rawlist = cext.proc_threads(self.pid)
-        retlist = []
-        for thread_id, utime, stime in rawlist:
-            ntuple = _common.pthread(thread_id, utime, stime)
-            retlist.append(ntuple)
-        return retlist
-
-    @wrap_exceptions
-    def cpu_times(self):
-        try:
-            ret = cext.proc_cpu_times(self.pid)
-        except OSError:
-            err = sys.exc_info()[1]
-            if err.errno in ACCESS_DENIED_SET:
-                ret = cext.proc_cpu_times_2(self.pid)
-            else:
-                raise
-        return _common.pcputimes(*ret)
-
-    @wrap_exceptions
-    def suspend(self):
-        return cext.proc_suspend(self.pid)
-
-    @wrap_exceptions
-    def resume(self):
-        return cext.proc_resume(self.pid)
-
-    @wrap_exceptions
-    def cwd(self):
-        if self.pid in (0, 4):
-            raise AccessDenied(self.pid, self._name)
-        # return a normalized pathname since the native C function appends
-        # "\\" at the and of the path
-        path = cext.proc_cwd(self.pid)
-        return os.path.normpath(path)
-
-    @wrap_exceptions
-    def open_files(self):
-        if self.pid in (0, 4):
-            return []
-        retlist = []
-        # Filenames come in in native format like:
-        # "\Device\HarddiskVolume1\Windows\systemew\file.txt"
-        # Convert the first part in the corresponding drive letter
-        # (e.g. "C:\") by using Windows's QueryDosDevice()
-        raw_file_names = cext.proc_open_files(self.pid)
-        for file in raw_file_names:
-            file = _convert_raw_path(file)
-            if isfile_strict(file) and file not in retlist:
-                ntuple = _common.popenfile(file, -1)
-                retlist.append(ntuple)
-        return retlist
-
-    @wrap_exceptions
-    def connections(self, kind='inet'):
-        return net_connections(kind, _pid=self.pid)
-
-    @wrap_exceptions
-    def nice_get(self):
-        return cext.proc_priority_get(self.pid)
-
-    @wrap_exceptions
-    def nice_set(self, value):
-        return cext.proc_priority_set(self.pid, value)
-
-    # available on Windows >= Vista
-    if hasattr(cext, "proc_io_priority_get"):
-        @wrap_exceptions
-        def ionice_get(self):
-            return cext.proc_io_priority_get(self.pid)
-
-        @wrap_exceptions
-        def ionice_set(self, value, _):
-            if _:
-                raise TypeError("set_proc_ionice() on Windows takes only "
-                                "1 argument (2 given)")
-            if value not in (2, 1, 0):
-                raise ValueError("value must be 2 (normal), 1 (low) or 0 "
-                                 "(very low); got %r" % value)
-            return cext.proc_io_priority_set(self.pid, value)
-
-    @wrap_exceptions
-    def io_counters(self):
-        try:
-            ret = cext.proc_io_counters(self.pid)
-        except OSError:
-            err = sys.exc_info()[1]
-            if err.errno in ACCESS_DENIED_SET:
-                ret = cext.proc_io_counters_2(self.pid)
-            else:
-                raise
-        return _common.pio(*ret)
-
-    @wrap_exceptions
-    def status(self):
-        suspended = cext.proc_is_suspended(self.pid)
-        if suspended:
-            return _common.STATUS_STOPPED
-        else:
-            return _common.STATUS_RUNNING
-
-    @wrap_exceptions
-    def cpu_affinity_get(self):
-        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
-        bitmask = cext.proc_cpu_affinity_get(self.pid)
-        return from_bitmask(bitmask)
-
-    @wrap_exceptions
-    def cpu_affinity_set(self, value):
-        def to_bitmask(l):
-            if not l:
-                raise ValueError("invalid argument %r" % l)
-            out = 0
-            for b in l:
-                out |= 2 ** b
-            return out
-
-        # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER
-        # is returned for an invalid CPU but this seems not to be true,
-        # therefore we check CPUs validy beforehand.
-        allcpus = list(range(len(per_cpu_times())))
-        for cpu in value:
-            if cpu not in allcpus:
-                raise ValueError("invalid CPU %r" % cpu)
-
-        bitmask = to_bitmask(value)
-        cext.proc_cpu_affinity_set(self.pid, bitmask)
-
-    @wrap_exceptions
-    def num_handles(self):
-        try:
-            return cext.proc_num_handles(self.pid)
-        except OSError:
-            err = sys.exc_info()[1]
-            if err.errno in ACCESS_DENIED_SET:
-                return cext.proc_num_handles_2(self.pid)
-            raise
-
-    @wrap_exceptions
-    def num_ctx_switches(self):
-        tupl = cext.proc_num_ctx_switches(self.pid)
-        return _common.pctxsw(*tupl)
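
The deleted Windows module above converts CPU affinity masks to and from lists of CPU indices. The bit manipulation is independent of the Windows calls it wraps; a small self-contained sketch:

def from_bitmask(mask, max_cpus=64):
    # Expand an affinity bitmask into the CPU indices whose bits are set.
    return [i for i in range(max_cpus) if (1 << i) & mask]


def to_bitmask(cpus):
    # Collapse a list of CPU indices back into a bitmask.
    if not cpus:
        raise ValueError("invalid argument %r" % (cpus,))
    mask = 0
    for cpu in cpus:
        mask |= 1 << cpu
    return mask


assert from_bitmask(0b1011) == [0, 1, 3]
assert to_bitmask([0, 1, 3]) == 0b1011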

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-metrics/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/pom.xml b/ambari-metrics/pom.xml
index 4bc6c82..67f5892 100644
--- a/ambari-metrics/pom.xml
+++ b/ambari-metrics/pom.xml
@@ -1,21 +1,42 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
 <project xmlns="http://maven.apache.org/POM/4.0.0"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
                              http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <parent>
-    <groupId>org.apache.ambari</groupId>
-    <artifactId>ambari-project</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
-    <relativePath>../ambari-project</relativePath>
-  </parent>
+
+  <groupId>org.apache.ambari</groupId>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>ambari-metrics</artifactId>
+  <version>0.1.0-SNAPSHOT</version>
   <packaging>pom</packaging>
+  <modules>
+    <module>ambari-metrics-hadoop-sink</module>
+    <module>ambari-metrics-hadoop-timelineservice</module>
+    <module>ambari-metrics-host-monitoring</module>
+  </modules>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
+    <!--TODO change to HDP URL-->
+    <hbase.tar>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0/tars/hbase-0.98.4.2.2.0.0-1995-hadoop2.tar.gz</hbase.tar>
+    <hbase.folder>hbase-0.98.4.2.2.0.0-1995-hadoop2</hbase.folder>
   </properties>
   <repositories>
     <repository>
@@ -28,6 +49,11 @@
     <plugins>
       <plugin>
         <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptors>
+            <descriptor>../ambari-project/src/main/assemblies/empty.xml</descriptor>
+          </descriptors>
+        </configuration>
         <executions>
           <execution>
             <id>build-tarball</id>
@@ -84,26 +110,39 @@
           </filesets>
         </configuration>
       </plugin>
-      <!--
       <plugin>
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <descriptors>
-            <descriptor>src/main/assemblies/metrics.xml</descriptor>
-          </descriptors>
-          <tarLongFileMode>gnu</tarLongFileMode>
-        </configuration>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.8</version>
         <executions>
           <execution>
-            <id>build-tarball</id>
-            <phase>package</phase>
+            <id>parse-package-version</id>
             <goals>
-              <goal>single</goal>
+              <goal>regex-property</goal>
             </goals>
+            <configuration>
+              <name>package-version</name>
+              <value>${project.version}</value>
+              <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*</regex>
+              <replacement>$1.$2.$3</replacement>
+              <failIfNoMatch>true</failIfNoMatch>
+            </configuration>
+          </execution>
+          <execution>
+            <id>parse-package-release</id>
+            <goals>
+              <goal>regex-property</goal>
+            </goals>
+            <configuration>
+              <name>package-release</name>
+              <value>${project.version}</value>
+              <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-)(([0-9]+)|(SNAPSHOT)).*</regex>
+              <replacement>$5</replacement>
+              <failIfNoMatch>true</failIfNoMatch>
+            </configuration>
           </execution>
         </executions>
       </plugin>
-      -->
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>rpm-maven-plugin</artifactId>
@@ -113,7 +152,7 @@
             <!-- unbinds rpm creation from maven lifecycle -->
             <phase>none</phase>
             <goals>
-              <goal>rpm</goal>
+              <goal>attached-rpm</goal>
             </goals>
           </execution>
         </executions>
@@ -121,14 +160,30 @@
           <copyright>2012, Apache Software Foundation</copyright>
           <group>Development</group>
           <description>Maven Recipe: RPM Package.</description>
-          <autoRequires>false</autoRequires>
-          <requires>
-            <require>${python.ver}</require>
-            <require>gcc</require>
-            <require>python-devel</require>
-          </requires>
+          <version>${package-version}</version>
+          <release>${package-release}</release>
+          <mappings/>
         </configuration>
       </plugin>
+      <!--<plugin>-->
+        <!--<artifactId>maven-assembly-plugin</artifactId>-->
+        <!--<configuration>-->
+          <!--<descriptors>-->
+            <!--<descriptor>src/main/assemblies/metrics.xml</descriptor>-->
+          <!--</descriptors>-->
+          <!--<tarLongFileMode>gnu</tarLongFileMode>-->
+        <!--</configuration>-->
+        <!--<executions>-->
+          <!--<execution>-->
+            <!--<id>build-tarball</id>-->
+            <!--<phase>package</phase>-->
+            <!--<goals>-->
+              <!--<goal>single</goal>-->
+            <!--</goals>-->
+          <!--</execution>-->
+        <!--</executions>-->
+      <!--</plugin>-->
+
     </plugins>
   </build>
 
@@ -140,10 +195,5 @@
     </profile>
   </profiles>
 
-  <modules>
-    <module>ambari-metrics-hadoop-sink</module>
-    <module>ambari-metrics-hadoop-timelineservice</module>
-    <module>ambari-metrics-host-monitoring</module>
-  </modules>
 
 </project>
\ No newline at end of file
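
The build-helper-maven-plugin configuration added above derives the RPM version and release from ${project.version} via two regex-property executions. A quick way to sanity-check those expressions outside Maven (assuming the 0.1.0-SNAPSHOT version set in this POM; Maven itself applies them with java.util.regex):

import re

version = '0.1.0-SNAPSHOT'

package_version = re.sub(
    r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*', r'\1.\2.\3', version)
package_release = re.sub(
    r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-)(([0-9]+)|(SNAPSHOT)).*', r'\5', version)

print(package_version)  # 0.1.0
print(package_release)  # SNAPSHOT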

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index 2a13653..8cee0fc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -128,6 +128,7 @@ public class StageUtils {
     componentToClusterInfoKeyMap.put("HBASE_REGIONSERVER", "hbase_rs_hosts");
     componentToClusterInfoKeyMap.put("KERBEROS_SERVER", "kdc_host");
     componentToClusterInfoKeyMap.put("KERBEROS_ADMIN_CLIENT", "kerberos_adminclient_host");
+    componentToClusterInfoKeyMap.put("METRIC_COLLECTOR", "metric_collector_hosts");
   }
 
   static {

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index d4ff0d3..d42a8dd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -66,6 +66,7 @@ jtnode_host = default("/clusterHostInfo/jtnode_host", [])
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+ams_collector_hosts = default("/clusterHostInfo/metric_collector_hosts", [])
 
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
@@ -77,6 +78,7 @@ has_hive_server_host = not len(hive_server_host)  == 0
 has_hbase_masters = not len(hbase_master_hosts) == 0
 has_zk_host = not len(zk_hosts) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_metric_collector = not len(ams_collector_hosts) == 0
 
 is_namenode_master = hostname in namenode_host
 is_jtnode_master = hostname in jtnode_host
@@ -86,6 +88,8 @@ is_hbase_master = hostname in hbase_master_hosts
 is_slave = hostname in slave_hosts
 if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
+if has_metric_collector:
+  metric_collector_host = ams_collector_hosts[0]
 #hadoop params
 
 if has_namenode:

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
index c4759f4..d1b82b6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -63,3 +63,18 @@ supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
 resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 
 {% endif %}
+
+{% if has_metric_collector %}
+
+datanode.sink.timeline.collector={{metric_collector_host}}:8188
+namenode.sink.timeline.collector={{metric_collector_host}}:8188
+resourcemanager.sink.timeline.collector={{metric_collector_host}}:8188
+nodemanager.sink.timeline.collector={{metric_collector_host}}:8188
+historyserver.sink.timeline.collector={{metric_collector_host}}:8188
+journalnode.sink.timeline.collector={{metric_collector_host}}:8188
+nimbus.sink.timeline.collector={{metric_collector_host}}:8188
+supervisor.sink.timeline.collector={{metric_collector_host}}:8188
+maptask.sink.timeline.collector={{metric_collector_host}}:8188
+reducetask.sink.timeline.collector={{metric_collector_host}}:8188
+
+{% endif %}
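
The Jinja2 block added above points every daemon's metrics2 timeline sink at the collector on port 8188. A hedged sketch of how such a fragment renders, using the jinja2 package directly and a made-up host name (the real values are supplied by the params.py change earlier in this commit):

from jinja2 import Template

fragment = '''
{% if has_metric_collector %}
datanode.sink.timeline.collector={{metric_collector_host}}:8188
namenode.sink.timeline.collector={{metric_collector_host}}:8188
{% endif %}
'''

print(Template(fragment).render(
    has_metric_collector=True,
    metric_collector_host='collector.example.com',  # hypothetical collector host
))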

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
index b3935d7..41274c7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
@@ -184,6 +184,9 @@ if [ -d "/usr/lib/tez" ]; then
   export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
 fi
 
+#TODO temporary addition
+export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/hadoop/lib/*
+
 # Setting path to hdfs command line
 export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-env.xml
new file mode 100644
index 0000000..746db5c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-env.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<configuration>
+  <property>
+    <name>ams_user</name>
+    <value>root</value>
+    <property-type>USER</property-type>
+    <description>AMS User Name.</description>
+  </property>
+
+  <property>
+    <name>content</name>
+    <value>
+      # Set environment variables here.
+
+      # The java implementation to use. Java 1.6 required.
+      export JAVA_HOME={{java64_home}}
+
+      export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:{{hadoop_native_lib}}
+
+      #TODO
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-env.xml
index 9306cd1..e0015ea 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-env.xml
@@ -23,22 +23,22 @@
 <configuration>
   <property>
     <name>hbase_log_dir</name>
-    <value>/var/log/hbase-ams/</value>
+    <value>/var/log/ams-hbase/</value>
     <description>Log Directories for HBase.</description>
   </property>
   <property>
     <name>hbase_pid_dir</name>
-    <value>/var/run/hbase-ams/</value>
+    <value>/var/run/ams-hbase/</value>
     <description>Pid Directory for HBase.</description>
   </property>
   <property>
     <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
+    <value>1024m</value>
     <description>HBase RegionServer Heap Size.</description>
   </property>
   <property>
     <name>hbase_regionserver_xmn_max</name>
-    <value>512</value>
+    <value>512m</value>
     <description>HBase RegionServer maximum value for minimum heap size.</description>
   </property>
   <property>
@@ -48,7 +48,7 @@
   </property>
   <property>
     <name>hbase_master_heapsize</name>
-    <value>1024</value>
+    <value>1024m</value>
     <description>HBase Master Heap Size</description>
   </property>
   <property>
@@ -81,7 +81,7 @@
       # Below are what we set by default. May only work with SUN JVM.
       # For more on why as well as other possible settings,
       # see http://wiki.apache.org/hadoop/PerformanceTuning
-      export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+      export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log"
       export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
       # Uncomment below to enable java garbage collection logging.
       # export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
@@ -103,7 +103,7 @@
       # export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
 
       # Where log files are stored. $HBASE_HOME/logs by default.
-      export HBASE_LOG_DIR={{log_dir}}
+      export HBASE_LOG_DIR={{hbase_log_dir}}
 
       # A string representing this instance of hbase. $USER by default.
       # export HBASE_IDENT_STRING=$USER
@@ -112,7 +112,7 @@
       # export HBASE_NICENESS=10
 
       # The directory where pid files are stored. /tmp by default.
-      export HBASE_PID_DIR={{pid_dir}}
+      export HBASE_PID_DIR={{hbase_pid_dir}}
 
       # Seconds to sleep between slave commands. Unset by default. This
       # can be useful in large clusters, where, e.g., slave rsyncs can
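Note: the heap sizes now carry an explicit "m" suffix (which the calc_xmn_from_xms helper added below relies on), and the log/pid/error-file paths are switched to the AMS-specific hbase_log_dir/hbase_pid_dir values; SERVER_GC_OPTS on the neighbouring context line still points at {{log_dir}}. A sketch of how params.py presumably resolves these templated values from this config type (the key paths are assumptions; params.py is not in this hunk):

    # assumed shape of the command config, for illustration only
    config = {
        "configurations": {
            "ams-hbase-env": {
                "hbase_log_dir": "/var/log/ams-hbase/",
                "hbase_pid_dir": "/var/run/ams-hbase/",
                "hbase_master_heapsize": "1024m",
            }
        }
    }
    ams_hbase_env = config["configurations"]["ams-hbase-env"]
    hbase_log_dir = ams_hbase_env["hbase_log_dir"]
    hbase_pid_dir = ams_hbase_env["hbase_pid_dir"]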

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-site.xml
index 7c3b732..8446725 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration/ams-hbase-site.xml
@@ -23,7 +23,7 @@
 <configuration>
   <property>
     <name>hbase.rootdir</name>
-    <value>file:///var/lib/hbase</value>
+    <value>file:///var/lib/ambari-metrics-collector/hbase</value>
     <description>
       AMS service uses HBase as default storage backend. Set the rootdir for
       HBase to either local filesystem path if using AMS in embedded mode or
@@ -34,7 +34,7 @@
   </property>
   <property>
     <name>hbase.tmp.dir</name>
-    <value>/tmp</value>
+    <value>/var/lib/ambari-metrics-collector/hbase-tmp</value>
     <description>
       Temporary directory on the local filesystem.
       Change this setting to point to a location more permanent
@@ -50,7 +50,7 @@
   </property>
   <property>
     <name>hbase.cluster.distributed</name>
-    <value>true</value>
+    <value>false</value>
     <description>
       The mode the cluster will be in. Possible values are false for
       standalone mode and true for distributed mode. If false, startup will run
@@ -82,12 +82,12 @@
   </property>
   <property>
     <name>hbase.master.info.port</name>
-    <value>90010</value>
+    <value>61310</value>
     <description>The port for the HBase Master web UI.</description>
   </property>
   <property>
     <name>hbase.regionserver.info.port</name>
-    <value>90030</value>
+    <value>61330</value>
     <description>The port for the HBase RegionServer web UI.</description>
   </property>
   <property>
@@ -223,4 +223,8 @@
       hbase.server.thread.wakefrequency.
     </description>
   </property>
+  <property>
+    <name>phoenix.query.spoolThresholdBytes</name>
+    <value>12582912</value>
+  </property>
 </configuration>
\ No newline at end of file
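Note: two of the changes above are worth calling out. The web UI ports move from 90010/90030, which are outside the valid TCP port range (1-65535), to 61310/61330; and the rootdir and tmp dir move under /var/lib/ambari-metrics-collector to match the embedded, non-distributed mode. A trivial check of the port fix:

    def valid_port(port):
        return 0 < int(port) <= 65535

    assert not valid_port(90010) and not valid_port(90030)  # old values were illegal
    assert valid_port(61310) and valid_port(61330)          # new values are in range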

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/metainfo.xml
index 537f2e6..2839387 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/metainfo.xml
@@ -49,9 +49,12 @@
         </component>
         <component>
           <name>METRIC_MONITOR</name>
-          <displayName>Metric monitor</displayName>
+          <displayName>Metric Monitor</displayName>
           <category>SLAVE</category>
-          <cardinality>1+</cardinality>
+          <cardinality>ALL</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+          </auto-deploy>
           <commandScript>
             <script>scripts/metric_monitor.py</script>
             <scriptType>PYTHON</scriptType>
@@ -70,6 +73,9 @@
             <package>
               <name>ambari-metrics-monitor</name>
             </package>
+            <package>
+              <name>hadoop_2_2_*</name>
+            </package>
           </packages>
         </osSpecific>
       </osSpecifics>
@@ -86,10 +92,10 @@
 
       <configuration-dependencies>
         <config-type>ams-site</config-type>
-        <config-type>hbase-ams-policy</config-type>
-        <config-type>hbase-ams-site</config-type>
-        <config-type>hbase-ams-env</config-type>
-        <config-type>hbase-ams-log4j</config-type>
+        <config-type>ams-hbase-policy</config-type>
+        <config-type>ams-hbase-site</config-type>
+        <config-type>ams-hbase-env</config-type>
+        <config-type>ams-hbase-log4j</config-type>
       </configuration-dependencies>
 
     </service>
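Note: the renamed <config-type> entries track the renamed ams-hbase-* configuration files, so the two have to stay in sync. A small consistency check along those lines; the assumption that each config-type maps to an XML file of the same name under the service's configuration directory is mine, not stated in this patch:

    import os

    def missing_config_files(metainfo_types, conf_dir):
        # config-types declared in metainfo.xml that have no matching XML file
        present = {os.path.splitext(f)[0] for f in os.listdir(conf_dir)
                   if f.endswith(".xml")}
        return [t for t in metainfo_types if t not in present]

    # e.g. missing_config_files(
    #     ["ams-site", "ams-hbase-site", "ams-hbase-env"],
    #     "ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/configuration")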

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/ams.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/ams.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/ams.py
new file mode 100644
index 0000000..2e5ce35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/ams.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def ams(name=None):
+  import params
+
+  if name == 'collector':
+    Directory(params.ams_collector_conf_dir,
+              owner=params.ams_user,
+              group=params.user_group,
+              recursive=True
+    )
+
+    XmlConfig("ams-site.xml",
+              conf_dir=params.ams_collector_conf_dir,
+              configurations=params.config['configurations']['ams-site'],
+              configuration_attributes=params.config['configuration_attributes']['ams-site'],
+              owner=params.ams_user,
+              group=params.user_group
+    )
+
+    File(format("{ams_collector_conf_dir}/ams-env.sh"),
+         owner=params.ams_user,
+         content=InlineTemplate(params.ams_env_sh_template)
+    )
+
+    pass
+
+  elif name == 'monitor':
+    Directory(params.ams_monitor_conf_dir,
+              owner=params.ams_user,
+              group=params.user_group,
+              recursive=True
+    )
+
+    TemplateConfig(
+      format("{ams_monitor_conf_dir}/metric_monitor.ini"),
+      owner=params.ams_user,
+      group=params.user_group,
+      template_tag=None
+    )
+
+    TemplateConfig(
+      format("{ams_monitor_conf_dir}/metric_groups.conf"),
+      owner=params.ams_user,
+      group=params.user_group,
+      template_tag=None
+    )
+
+    # TODO
+    pass
+
+  pass
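Note: ams() lays down the conf directories, writes ams-site.xml through XmlConfig and ams-env.sh through InlineTemplate, and renders the monitor's ini/conf templates. As a rough picture of the kind of file XmlConfig produces (a plain Hadoop-style configuration document; the property name below is only an illustration):

    def to_hadoop_xml(props):
        # sketch of the <configuration>/<property> layout XmlConfig emits
        lines = ["<configuration>"]
        for name, value in sorted(props.items()):
            lines += ["  <property>",
                      "    <name>%s</name>" % name,
                      "    <value>%s</value>" % value,
                      "  </property>"]
        lines.append("</configuration>")
        return "\n".join(lines)

    print(to_hadoop_xml({"timeline.metrics.service.webapp.address": "0.0.0.0:8188"}))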

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/ams_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/ams_service.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/ams_service.py
new file mode 100644
index 0000000..5fdd0f7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/ams_service.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def ams_service(name='collector', action='start'):
+  import params
+
+  if name == 'collector':
+    cmd = format("{ams_collector_script} --config {ams_collector_conf_dir}")
+    pid_file = format("{ams_collector_pid_dir}/ambari-metrics-collector.pid")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+
+    if action == 'start':
+      daemon_cmd = format("{cmd} start")
+      Execute(daemon_cmd,
+              not_if=no_op_test,
+              user=params.ams_user
+      )
+
+      pass
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop")
+      Execute(daemon_cmd,
+              user=params.ams_user
+      )
+
+      pass
+    pass
+  elif name == 'monitor':
+    cmd = format("{ams_monitor_script} --config {ams_monitor_conf_dir}")
+    pid_file = format("{ams_monitor_pid_dir}/ambari-metrics-monitor.pid")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+
+    if action == 'start':
+      daemon_cmd = format("{cmd} start")
+      Execute(daemon_cmd,
+              user=params.ams_user
+      )
+
+      pass
+    elif action == 'stop':
+
+      daemon_cmd = format("{cmd} stop")
+      Execute(daemon_cmd,
+              user=params.ams_user
+      )
+
+      pass
+    pass
+
+    #TODO
+    pass
+  pass
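Note: for the collector, start is guarded by no_op_test so a second start is a no-op while the daemon is already up; the monitor branch computes the same test but does not pass it to Execute here. The shell test translated into Python, for readers less used to the idiom:

    import os, subprocess

    def is_running(pid_file):
        # mirrors: ls {pid_file} && ps `cat {pid_file}`
        if not os.path.isfile(pid_file):
            return False
        with open(pid_file) as f:
            pid = f.read().strip()
        if not pid:
            return False
        with open(os.devnull, "w") as devnull:
            return subprocess.call(["ps", "-p", pid], stdout=devnull) == 0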

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/functions.py
new file mode 100644
index 0000000..a5107d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/functions.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g. '1000m')
+  @param xmn_percent: float (e.g. 0.2)
+  @param xmn_max: integer (e.g. 512)
+  """
+  heapsize = int(re.search(r'\d+', str(heapsize_str)).group(0))
+  heapsize_unit = re.search(r'\D+', str(heapsize_str)).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
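Note: a couple of worked examples of the helper above (assuming functions.py is importable):

    from functions import calc_xmn_from_xms

    print(calc_xmn_from_xms('1024m', 0.2, 512))  # '200m': floor(1024*0.2)=204, rounded down to a multiple of 8
    print(calc_xmn_from_xms('4096m', 0.2, 512))  # '512m': 816 exceeds the xmn_max cap, so the cap wins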

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase.py
index 2829c7b..cf221ec 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase.py
@@ -32,7 +32,7 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
       recursive = True
   )
 
-  Directory (params.tmp_dir,
+  Directory (params.hbase_tmp_dir,
              owner = params.hbase_user,
              recursive = True
   )
@@ -46,33 +46,33 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
 
   XmlConfig( "hbase-site.xml",
             conf_dir = params.hbase_conf_dir,
-            configurations = params.config['configurations']['hbase-site'],
-            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+            configurations = params.config['configurations']['ams-hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
             owner = params.hbase_user,
             group = params.user_group
   )
 
-  XmlConfig( "hdfs-site.xml",
-            conf_dir = params.hbase_conf_dir,
-            configurations = params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  if 'hbase-policy' in params.config['configurations']:
+  # XmlConfig( "hdfs-site.xml",
+  #           conf_dir = params.hbase_conf_dir,
+  #           configurations = params.config['configurations']['hdfs-site'],
+  #           configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+  #           owner = params.hbase_user,
+  #           group = params.user_group
+  # )
+  #
+  # XmlConfig("hdfs-site.xml",
+  #           conf_dir=params.hadoop_conf_dir,
+  #           configurations=params.config['configurations']['hdfs-site'],
+  #           configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+  #           owner=params.hdfs_user,
+  #           group=params.user_group
+  # )
+
+  if 'ams-hbase-policy' in params.config['configurations']:
     XmlConfig( "hbase-policy.xml",
             conf_dir = params.hbase_conf_dir,
-            configurations = params.config['configurations']['hbase-policy'],
-            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
+            configurations = params.config['configurations']['ams-hbase-policy'],
+            configuration_attributes=params.config['configuration_attributes']['ams-hbase-policy'],
             owner = params.hbase_user,
             group = params.user_group
     )
@@ -88,9 +88,9 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
        content=InlineTemplate(params.hbase_env_sh_template)
   )     
        
-  hbase_TemplateConfig( params.metric_prop_file_name,
-    tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
-  )
+  # hbase_TemplateConfig( params.metric_prop_file_name,
+  #   tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
+  # )
 
   hbase_TemplateConfig( 'regionservers')
 
@@ -98,12 +98,12 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
     hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
   
   if name != "client":
-    Directory( params.pid_dir,
+    Directory( params.hbase_pid_dir,
       owner = params.hbase_user,
       recursive = True
     )
   
-    Directory (params.log_dir,
+    Directory (params.hbase_log_dir,
       owner = params.hbase_user,
       recursive = True
     )
@@ -121,17 +121,17 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
       group=params.user_group,
       owner=params.hbase_user
     )
-  if name in ["master","regionserver"]:
-    params.HdfsDirectory(params.hbase_hdfs_root_dir,
-                         action="create_delayed",
-                         owner=params.hbase_user
-    )
-    params.HdfsDirectory(params.hbase_staging_dir,
-                         action="create_delayed",
-                         owner=params.hbase_user,
-                         mode=0711
-    )
-    params.HdfsDirectory(None, action="create")
+  # if name in ["master","regionserver"]:
+  #   params.HdfsDirectory(params.hbase_hdfs_root_dir,
+  #                        action="create_delayed",
+  #                        owner=params.hbase_user
+  #   )
+  #   params.HdfsDirectory(params.hbase_staging_dir,
+  #                        action="create_delayed",
+  #                        owner=params.hbase_user,
+  #                        mode=0711
+  #   )
+  #   params.HdfsDirectory(None, action="create")
 
 def hbase_TemplateConfig(name, 
                          tag=None

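Note: this hunk switches the script to the ams-hbase-* config types and the AMS-specific tmp/log/pid directories, and comments out the hdfs-site generation and HdfsDirectory calls, presumably because the embedded HBase now runs against the local filesystem (hbase.cluster.distributed=false, file:// rootdir). A hedged sketch of the decision the commented-out blocks encode:

    def needs_hdfs(ams_hbase_site):
        # inference from this patch, not an Ambari API: HDFS-side setup is only
        # needed when HBase is distributed or rooted on an hdfs:// URI
        distributed = str(ams_hbase_site.get("hbase.cluster.distributed",
                                             "false")).lower() == "true"
        rootdir = ams_hbase_site.get("hbase.rootdir", "")
        return distributed or rootdir.startswith("hdfs://")

    # e.g. needs_hdfs({"hbase.cluster.distributed": "false",
    #                  "hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase"})
    # -> False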
http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase_service.py
index 723d4e2..f2d20d6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase_service.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/hbase_service.py
@@ -48,4 +48,4 @@ def hbase_service(
         on_timeout = format("{no_op_test} && kill -9 `cat {pid_file}`")
       )
       
-      Execute (format("rm -f {pid_file}"))
\ No newline at end of file
+      Execute (format("rm -f {pid_file}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/metric_collector.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/metric_collector.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/metric_collector.py
new file mode 100644
index 0000000..ae71aa1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/metric_collector.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ams import ams
+from ams_service import ams_service
+from hbase import hbase
+
+class AmsCollector(Script):
+    def install(self, env):
+        self.install_packages(env)
+
+
+    def configure(self, env):
+        import params
+        env.set_params(params)
+        hbase('master')
+        hbase('regionserver')
+        ams(name='collector')
+
+    def start(self, env):
+        import params
+        env.set_params(params)
+        self.configure(env) # for security
+
+        ams_service( 'collector',
+                       action = 'start'
+        )
+
+    def stop(self, env):
+        import params
+        env.set_params(params)
+
+        ams_service( 'collector',
+                       action = 'stop'
+        )
+
+    def status(self, env):
+        import status_params
+        env.set_params(status_params)
+        pid_file = format("{ams_collector_pid_dir}/ambari-metrics-collector.pid")
+        check_process_status(pid_file)
+
+
+if __name__ == "__main__":
+    AmsCollector().execute()
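Note: status() delegates to check_process_status against the collector pid file; the convention it relies on (as I understand it, not spelled out in this patch) is that the call raises when the process is gone, which is how Ambari marks the component stopped. A rough, Linux-only equivalent:

    import os

    class ComponentIsNotRunning(Exception):
        pass

    def check_process_status_sketch(pid_file):
        # sketch of the expected contract, not the resource_management code
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, OSError, ValueError):
            raise ComponentIsNotRunning("no usable pid file at %s" % pid_file)
        if not os.path.isdir("/proc/%d" % pid):   # Linux-only illustration
            raise ComponentIsNotRunning("pid %d is not running" % pid)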

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3ed7a3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/metric_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/metric_monitor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/metric_monitor.py
new file mode 100644
index 0000000..23bdf39
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/AMS/package/scripts/metric_monitor.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ams import ams
+from ams_service import ams_service
+from hbase import hbase
+
+class AmsMonitor(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    ams(name='monitor')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    ams_service( 'monitor',
+                 action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    ams_service( 'monitor',
+                 action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{ams_monitor_pid_dir}/ambari-metrics-monitor.pid")
+    check_process_status(pid_file)
+
+
+if __name__ == "__main__":
+  AmsMonitor().execute()
+