Posted to commits@quickstep.apache.org by zu...@apache.org on 2017/01/29 02:15:17 UTC

[07/53] [partial] incubator-quickstep git commit: Make the third party directory leaner.

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/linuxthreads.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/linuxthreads.cc b/third_party/gperftools/src/base/linuxthreads.cc
deleted file mode 100644
index 891e70c..0000000
--- a/third_party/gperftools/src/base/linuxthreads.cc
+++ /dev/null
@@ -1,707 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-/* Copyright (c) 2005-2007, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Markus Gutschke
- */
-
-#include "base/linuxthreads.h"
-
-#ifdef THREADS
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sched.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <string.h>
-#include <fcntl.h>
-#include <sys/socket.h>
-#include <sys/wait.h>
-#include <sys/prctl.h>
-#include <semaphore.h>
-
-#include "base/linux_syscall_support.h"
-#include "base/thread_lister.h"
-
-#ifndef CLONE_UNTRACED
-#define CLONE_UNTRACED 0x00800000
-#endif
-
-
-/* Synchronous signals that should not be blocked while in the lister thread.
- */
-static const int sync_signals[]  = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
-                                     SIGXCPU, SIGXFSZ };
-
-/* itoa() is not a standard function, and we cannot safely call printf()
- * after suspending threads. So, we just implement our own copy. A
- * recursive approach is the easiest here.
- */
-static char *local_itoa(char *buf, int i) {
-  if (i < 0) {
-    *buf++ = '-';
-    return local_itoa(buf, -i);
-  } else {
-    if (i >= 10)
-      buf = local_itoa(buf, i/10);
-    *buf++ = (i%10) + '0';
-    *buf   = '\000';
-    return buf;
-  }
-}
-
-
-/* Wrapper around clone() that runs "fn" on the same stack as the
- * caller! Unlike fork(), the cloned thread shares the same address space.
- * The caller must be careful to use only minimal amounts of stack until
- * the cloned thread has returned.
- * There is a good chance that the cloned thread and the caller will share
- * the same copy of errno!
- */
-#ifdef __GNUC__
-#if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3
-/* Try to force this function into a separate stack frame, and make sure
- * that arguments are passed on the stack.
- */
-static int local_clone (int (*fn)(void *), void *arg, ...)
-  __attribute__ ((noinline));
-#endif
-#endif
-
-/* To avoid the gap crossing page boundaries, increase by the large page
- * size that PowerPC systems typically use.  */
-#ifdef __PPC64__
-#define CLONE_STACK_SIZE 65536
-#else
-#define CLONE_STACK_SIZE 4096
-#endif
-
-static int local_clone (int (*fn)(void *), void *arg, ...) {
-  /* Leave a gap of CLONE_STACK_SIZE bytes between the caller's stack and the
-   * new clone. This should be more than sufficient for the caller to call
-   * waitpid() until the cloned thread terminates.
-   *
-   * It is important that we set the CLONE_UNTRACED flag, because newer
-   * versions of "gdb" otherwise attempt to attach to our thread, and will
-   * attempt to reap its status codes. This subsequently results in the
-   * caller hanging indefinitely in waitpid(), waiting for a change in
-   * status that will never happen. By setting the CLONE_UNTRACED flag, we
-   * prevent "gdb" from stealing events, but we still expect the thread
-   * lister to fail, because it cannot PTRACE_ATTACH to the process that
-   * is being debugged. This is OK and the error code will be reported
-   * correctly.
-   */
-  return sys_clone(fn, (char *)&arg - CLONE_STACK_SIZE,
-                   CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_UNTRACED, arg, 0, 0, 0);
-}
-
-
-/* Local substitute for the atoi() function, which is not necessarily safe
- * to call once threads are suspended (depending on whether libc looks up
- * locale information when executing atoi()).
- */
-static int local_atoi(const char *s) {
-  int n   = 0;
-  int neg = *s == '-';
-  if (neg)
-    s++;
-  while (*s >= '0' && *s <= '9')
-    n = 10*n + (*s++ - '0');
-  return neg ? -n : n;
-}
-
-
-/* Re-runs fn until it doesn't cause EINTR
- */
-#define NO_INTR(fn)   do {} while ((fn) < 0 && errno == EINTR)
-
-
-/* Wrap a class around system calls, in order to give us access to
- * a private copy of errno. This only works in C++, but it has the
- * advantage of not needing nested functions, which are a non-standard
- * language extension.
- */
-#ifdef __cplusplus
-namespace {
-  class SysCalls {
-   public:
-    #define SYS_CPLUSPLUS
-    #define SYS_ERRNO     my_errno
-    #define SYS_INLINE    inline
-    #define SYS_PREFIX    -1
-    #undef  SYS_LINUX_SYSCALL_SUPPORT_H
-    #include "linux_syscall_support.h"
-    SysCalls() : my_errno(0) { }
-    int my_errno;
-  };
-}
-#define ERRNO sys.my_errno
-#else
-#define ERRNO my_errno
-#endif
-
-
-/* Wrapper for open() which is guaranteed to never fail with EINTR.
- */
-static int c_open(const char *fname, int flags, int mode) {
-  ssize_t rc;
-  NO_INTR(rc = sys_open(fname, flags, mode));
-  return rc;
-}
-
-
-/* abort() is not safely reentrant, and changes its behavior each time
- * it is called. This means that if the main application ever called abort(),
- * we cannot safely call it again. This would happen if we were called
- * from a SIGABRT signal handler in the main application. So, document
- * that raising SIGABRT from the thread lister makes it not signal safe
- * (and vice-versa).
- * Also, since we share address space with the main application, we
- * cannot call abort() from the callback and expect the main application
- * to behave correctly afterwards. In fact, the only thing we can do, is
- * to terminate the main application with extreme prejudice (aka
- * PTRACE_KILL).
- * We set up our own SIGABRT handler to do this.
- * In order to find the main application from the signal handler, we
- * need to store information about it in global variables. This is
- * safe, because the main application should be suspended at this
- * time. If the callback ever called TCMalloc_ResumeAllProcessThreads(), then
- * we are running a higher risk, though. So, try to avoid calling
- * abort() after calling TCMalloc_ResumeAllProcessThreads.
- */
-static volatile int *sig_pids, sig_num_threads, sig_proc, sig_marker;
-
-
-/* Signal handler to help us recover from dying while we are attached to
- * other threads.
- */
-static void SignalHandler(int signum, siginfo_t *si, void *data) {
-  if (sig_pids != NULL) {
-    if (signum == SIGABRT) {
-      while (sig_num_threads-- > 0) {
-        /* Not sure if sched_yield is really necessary here, but it does not */
-        /* hurt, and it might be necessary for the same reasons that we have */
-        /* to do so in sys_ptrace_detach().                                  */
-        sys_sched_yield();
-        sys_ptrace(PTRACE_KILL, sig_pids[sig_num_threads], 0, 0);
-      }
-    } else if (sig_num_threads > 0) {
-      TCMalloc_ResumeAllProcessThreads(sig_num_threads, (int *)sig_pids);
-    }
-  }
-  sig_pids = NULL;
-  if (sig_marker >= 0)
-    NO_INTR(sys_close(sig_marker));
-  sig_marker = -1;
-  if (sig_proc >= 0)
-    NO_INTR(sys_close(sig_proc));
-  sig_proc = -1;
-
-  sys__exit(signum == SIGABRT ? 1 : 2);
-}
-
-
-/* Try to dirty the stack, and hope that the compiler is not smart enough
- * to optimize this function away. Or worse, the compiler could inline the
- * function and permanently allocate the data on the stack.
- */
-static void DirtyStack(size_t amount) {
-  char buf[amount];
-  memset(buf, 0, amount);
-  sys_read(-1, buf, amount);
-}
-
-
-/* Data structure for passing arguments to the lister thread.
- */
-#define ALT_STACKSIZE (MINSIGSTKSZ + 4096)
-
-struct ListerParams {
-  int         result, err;
-  char        *altstack_mem;
-  ListAllProcessThreadsCallBack callback;
-  void        *parameter;
-  va_list     ap;
-  sem_t       *lock;
-};
-
-
-static void ListerThread(struct ListerParams *args) {
-  int                found_parent = 0;
-  pid_t              clone_pid  = sys_gettid(), ppid = sys_getppid();
-  char               proc_self_task[80], marker_name[48], *marker_path;
-  const char         *proc_paths[3];
-  const char *const  *proc_path = proc_paths;
-  int                proc = -1, marker = -1, num_threads = 0;
-  int                max_threads = 0, sig;
-  struct kernel_stat marker_sb, proc_sb;
-  stack_t            altstack;
-
-  /* Wait for parent thread to set appropriate permissions
-   * to allow ptrace activity
-   */
-  if (sem_wait(args->lock) < 0) {
-    goto failure;
-  }
-
-  /* Create "marker" that we can use to detect threads sharing the same
-   * address space and the same file handles. By setting the FD_CLOEXEC flag
-   * we minimize the risk of misidentifying child processes as threads;
-   * and since there is still a race condition,  we will filter those out
-   * later, anyway.
-   */
-  if ((marker = sys_socket(PF_LOCAL, SOCK_DGRAM, 0)) < 0 ||
-      sys_fcntl(marker, F_SETFD, FD_CLOEXEC) < 0) {
-  failure:
-    args->result = -1;
-    args->err    = errno;
-    if (marker >= 0)
-      NO_INTR(sys_close(marker));
-    sig_marker = marker = -1;
-    if (proc >= 0)
-      NO_INTR(sys_close(proc));
-    sig_proc = proc = -1;
-    sys__exit(1);
-  }
-
-  /* Compute search paths for finding thread directories in /proc            */
-  local_itoa(strrchr(strcpy(proc_self_task, "/proc/"), '\000'), ppid);
-  strcpy(marker_name, proc_self_task);
-  marker_path = marker_name + strlen(marker_name);
-  strcat(proc_self_task, "/task/");
-  proc_paths[0] = proc_self_task; /* /proc/$$/task/                          */
-  proc_paths[1] = "/proc/";       /* /proc/                                  */
-  proc_paths[2] = NULL;
-
-  /* Compute path for marker socket in /proc                                 */
-  local_itoa(strcpy(marker_path, "/fd/") + 4, marker);
-  if (sys_stat(marker_name, &marker_sb) < 0) {
-    goto failure;
-  }
-
-  /* Catch signals on an alternate pre-allocated stack. This way, we can
-   * safely execute the signal handler even if we ran out of memory.
-   */
-  memset(&altstack, 0, sizeof(altstack));
-  altstack.ss_sp    = args->altstack_mem;
-  altstack.ss_flags = 0;
-  altstack.ss_size  = ALT_STACKSIZE;
-  sys_sigaltstack(&altstack, (const stack_t *)NULL);
-
-  /* Some kernels forget to wake up traced processes, when the
-   * tracer dies.  So, intercept synchronous signals and make sure
-   * that we wake up our tracees before dying. It is the caller's
-   * responsibility to ensure that asynchronous signals do not
-   * interfere with this function.
-   */
-  sig_marker = marker;
-  sig_proc   = -1;
-  for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
-    struct kernel_sigaction sa;
-    memset(&sa, 0, sizeof(sa));
-    sa.sa_sigaction_ = SignalHandler;
-    sys_sigfillset(&sa.sa_mask);
-    sa.sa_flags      = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND;
-    sys_sigaction(sync_signals[sig], &sa, (struct kernel_sigaction *)NULL);
-  }
-  
-  /* Read process directories in /proc/...                                   */
-  for (;;) {
-    /* Some kernels know about threads, and hide them in "/proc"
-     * (although they are still there, if you know the process
-     * id). Threads are moved into a separate "task" directory. We
-     * check there first, and then fall back on the older naming
-     * convention if necessary.
-     */
-    if ((sig_proc = proc = c_open(*proc_path, O_RDONLY|O_DIRECTORY, 0)) < 0) {
-      if (*++proc_path != NULL)
-        continue;
-      goto failure;
-    }
-    if (sys_fstat(proc, &proc_sb) < 0)
-      goto failure;
-    
-    /* Since we are suspending threads, we cannot call any libc
-     * functions that might acquire locks. Most notably, we cannot
-     * call malloc(). So, we have to allocate memory on the stack,
-     * instead. Since we do not know how much memory we need, we
-     * make a best guess. And if we guessed incorrectly we retry on
-     * a second iteration (by jumping to "detach_threads").
-     *
-     * Unless the number of threads is increasing very rapidly, we
- * should never need to do so, though, as our guesstimate is very
-     * conservative.
-     */
-    if (max_threads < proc_sb.st_nlink + 100)
-      max_threads = proc_sb.st_nlink + 100;
-    
-    /* scope */ {
-      pid_t pids[max_threads];
-      int   added_entries = 0;
-      sig_num_threads     = num_threads;
-      sig_pids            = pids;
-      for (;;) {
-        struct KERNEL_DIRENT *entry;
-        char buf[4096];
-        ssize_t nbytes = GETDENTS(proc, (struct KERNEL_DIRENT *)buf,
-                                         sizeof(buf));
-        if (nbytes < 0)
-          goto failure;
-        else if (nbytes == 0) {
-          if (added_entries) {
-            /* Need to keep iterating over "/proc" in multiple
-             * passes until we no longer find any more threads. This
-             * algorithm eventually completes, when all threads have
-             * been suspended.
-             */
-            added_entries = 0;
-            sys_lseek(proc, 0, SEEK_SET);
-            continue;
-          }
-          break;
-        }
-        for (entry = (struct KERNEL_DIRENT *)buf;
-             entry < (struct KERNEL_DIRENT *)&buf[nbytes];
-             entry = (struct KERNEL_DIRENT *)((char *)entry+entry->d_reclen)) {
-          if (entry->d_ino != 0) {
-            const char *ptr = entry->d_name;
-            pid_t pid;
-            
-            /* Some kernels hide threads by preceding the pid with a '.'     */
-            if (*ptr == '.')
-              ptr++;
-            
-            /* If the directory is not numeric, it cannot be a
-             * process/thread
-             */
-            if (*ptr < '0' || *ptr > '9')
-              continue;
-            pid = local_atoi(ptr);
-
-            /* Attach (and suspend) all threads                              */
-            if (pid && pid != clone_pid) {
-              struct kernel_stat tmp_sb;
-              char fname[entry->d_reclen + 48];
-              strcat(strcat(strcpy(fname, "/proc/"),
-                            entry->d_name), marker_path);
-              
-              /* Check if the marker is identical to the one we created      */
-              if (sys_stat(fname, &tmp_sb) >= 0 &&
-                  marker_sb.st_ino == tmp_sb.st_ino) {
-                long i, j;
-
-                /* Found one of our threads, make sure it is no duplicate    */
-                for (i = 0; i < num_threads; i++) {
-                  /* Linear search is slow, but should not matter much for
-                   * the typically small number of threads.
-                   */
-                  if (pids[i] == pid) {
-                    /* Found a duplicate; most likely on second pass         */
-                    goto next_entry;
-                  }
-                }
-                
-                /* Check whether data structure needs growing                */
-                if (num_threads >= max_threads) {
-                  /* Back to square one, this time with more memory          */
-                  NO_INTR(sys_close(proc));
-                  goto detach_threads;
-                }
-
-                /* Attaching to thread suspends it                           */
-                pids[num_threads++] = pid;
-                sig_num_threads     = num_threads;
-                if (sys_ptrace(PTRACE_ATTACH, pid, (void *)0,
-                               (void *)0) < 0) {
-                  /* If operation failed, ignore thread. Maybe it
-                   * just died?  There might also be a race
-                   * condition with a concurrent core dumper or
-                   * with a debugger. In that case, we will just
-                   * make a best effort, rather than failing
-                   * entirely.
-                   */
-                  num_threads--;
-                  sig_num_threads = num_threads;
-                  goto next_entry;
-                }
-                while (sys_waitpid(pid, (int *)0, __WALL) < 0) {
-                  if (errno != EINTR) {
-                    sys_ptrace_detach(pid);
-                    num_threads--;
-                    sig_num_threads = num_threads;
-                    goto next_entry;
-                  }
-                }
-
-                if (sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i++ != j ||
-                    sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i   != j) {
-                  /* Address spaces are distinct, even though both
-                   * processes show the "marker". This is probably
-                   * a forked child process rather than a thread.
-                   */
-                  sys_ptrace_detach(pid);
-                  num_threads--;
-                  sig_num_threads = num_threads;
-                } else {
-                  found_parent |= pid == ppid;
-                  added_entries++;
-                }
-              }
-            }
-          }
-        next_entry:;
-        }
-      }
-      NO_INTR(sys_close(proc));
-      sig_proc = proc = -1;
-
-      /* If we failed to find any threads, try looking somewhere else in
-       * /proc. Maybe, threads are reported differently on this system.
-       */
-      if (num_threads > 1 || !*++proc_path) {
-        NO_INTR(sys_close(marker));
-        sig_marker = marker = -1;
-
-        /* If we never found the parent process, something is very wrong.
- * Most likely, we are running under a debugger. Any attempt to operate
-         * on the threads would be very incomplete. Let's just report an
-         * error to the caller.
-         */
-        if (!found_parent) {
-          TCMalloc_ResumeAllProcessThreads(num_threads, pids);
-          sys__exit(3);
-        }
-
-        /* Now we are ready to call the callback,
-         * which takes care of resuming the threads for us.
-         */
-        args->result = args->callback(args->parameter, num_threads,
-                                      pids, args->ap);
-        args->err = errno;
-
-        /* Callback should have resumed threads, but better safe than sorry  */
-        if (TCMalloc_ResumeAllProcessThreads(num_threads, pids)) {
-          /* Callback forgot to resume at least one thread, report error     */
-          args->err    = EINVAL;
-          args->result = -1;
-        }
-
-        sys__exit(0);
-      }
-    detach_threads:
-      /* Resume all threads prior to retrying the operation                  */
-      TCMalloc_ResumeAllProcessThreads(num_threads, pids);
-      sig_pids = NULL;
-      num_threads = 0;
-      sig_num_threads = num_threads;
-      max_threads += 100;
-    }
-  }
-}
-
-
-/* This function gets the list of all Linux threads of the current process
- * and passes them to the 'callback' along with the 'parameter' pointer; at
- * the time the callback is invoked, all the threads are paused via
- * PTRACE_ATTACH.
- * The callback is executed from a separate thread which shares only the
- * address space, the filesystem, and the filehandles with the caller. Most
- * notably, it does not share the same pid and ppid; and if it terminates,
- * the rest of the application is still there. 'callback' is supposed to call
- * (or arrange a call to) TCMalloc_ResumeAllProcessThreads. This happens automatically if
- * the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous
- * signals are blocked. If the 'callback' decides to unblock them, it must
- * ensure that they cannot terminate the application, or that
- * TCMalloc_ResumeAllProcessThreads will get called.
- * It is an error for the 'callback' to make any library calls that could
- * acquire locks. Most notably, this means that most system calls have to
- * avoid going through libc. Also, this means that it is not legal to call
- * exit() or abort().
- * We return -1 on error and the return value of 'callback' on success.
- */
-int TCMalloc_ListAllProcessThreads(void *parameter,
-                                   ListAllProcessThreadsCallBack callback, ...) {
-  char                   altstack_mem[ALT_STACKSIZE];
-  struct ListerParams    args;
-  pid_t                  clone_pid;
-  int                    dumpable = 1, sig;
-  struct kernel_sigset_t sig_blocked, sig_old;
-  sem_t                  lock;
-
-  va_start(args.ap, callback);
-
-  /* If we are short on virtual memory, initializing the alternate stack
-   * might trigger a SIGSEGV. Let's do this early, before it could get us
-   * into more trouble (i.e. before signal handlers try to use the alternate
-   * stack, and before we attach to other threads).
-   */
-  memset(altstack_mem, 0, sizeof(altstack_mem));
-
-  /* Some of our cleanup functions could conceivably use more stack space.
-   * Try to touch the stack right now. This could be defeated by the compiler
-   * being too smart for its own good, so try really hard.
-   */
-  DirtyStack(32768);
-
-  /* Make this process "dumpable". This is necessary in order to ptrace()
-   * after having called setuid().
-   */
-  dumpable = sys_prctl(PR_GET_DUMPABLE, 0);
-  if (!dumpable)
-    sys_prctl(PR_SET_DUMPABLE, 1);
-
-  /* Fill in argument block for dumper thread                                */
-  args.result       = -1;
-  args.err          = 0;
-  args.altstack_mem = altstack_mem;
-  args.parameter    = parameter;
-  args.callback     = callback;
-  args.lock         = &lock;
-
-  /* Before cloning the thread lister, block all asynchronous signals, as we */
-  /* are not prepared to handle them.                                        */
-  sys_sigfillset(&sig_blocked);
-  for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
-    sys_sigdelset(&sig_blocked, sync_signals[sig]);
-  }
-  if (sys_sigprocmask(SIG_BLOCK, &sig_blocked, &sig_old)) {
-    args.err = errno;
-    args.result = -1;
-    goto failed;
-  }
-
-  /* scope */ {
-    /* After cloning, both the parent and the child share the same instance
-     * of errno. We must make sure that at least one of these processes
-     * (in our case, the parent) uses modified syscall macros that update
-     * a local copy of errno, instead.
-     */
-    #ifdef __cplusplus
-      #define sys0_sigprocmask sys.sigprocmask
-      #define sys0_waitpid     sys.waitpid
-      SysCalls sys;
-    #else
-      int my_errno;
-      #define SYS_ERRNO        my_errno
-      #define SYS_INLINE       inline
-      #define SYS_PREFIX       0
-      #undef  SYS_LINUX_SYSCALL_SUPPORT_H
-      #include "linux_syscall_support.h"
-    #endif
-
-    /* Lock before clone so that parent can set
-     * ptrace permissions (if necessary) prior
-     * to ListerThread actually executing
-     */
-    if (sem_init(&lock, 0, 0) == 0) {
-
-      int clone_errno;
-      clone_pid = local_clone((int (*)(void *))ListerThread, &args);
-      clone_errno = errno;
-
-      sys_sigprocmask(SIG_SETMASK, &sig_old, &sig_old);
-
-      if (clone_pid >= 0) {
-#ifdef PR_SET_PTRACER
-        /* On kernels with Yama ptrace restrictions, permission must
-         * explicitly be given to allow for ptrace.
-         */
-        prctl(PR_SET_PTRACER, clone_pid, 0, 0, 0);
-#endif
-        /* Releasing the lock here allows the
-         * ListerThread to execute and ptrace us.
-         */
-        sem_post(&lock);
-        int status, rc;
-        while ((rc = sys0_waitpid(clone_pid, &status, __WALL)) < 0 &&
-               ERRNO == EINTR) {
-                /* Keep waiting                                                 */
-        }
-        if (rc < 0) {
-          args.err = ERRNO;
-          args.result = -1;
-        } else if (WIFEXITED(status)) {
-          switch (WEXITSTATUS(status)) {
-            case 0: break;             /* Normal process termination           */
-            case 2: args.err = EFAULT; /* Some fault (e.g. SIGSEGV) detected   */
-                    args.result = -1;
-                    break;
-            case 3: args.err = EPERM;  /* Process is already being traced      */
-                    args.result = -1;
-                    break;
-            default:args.err = ECHILD; /* Child died unexpectedly              */
-                    args.result = -1;
-                    break;
-          }
-        } else if (!WIFEXITED(status)) {
-          args.err    = EFAULT;        /* Terminated due to an unhandled signal*/
-          args.result = -1;
-        }
-        sem_destroy(&lock);
-      } else {
-        args.result = -1;
-        args.err    = clone_errno;
-      }
-    } else {
-      args.result = -1;
-      args.err    = errno;
-    }
-  }
-
-  /* Restore the "dumpable" state of the process                             */
-failed:
-  if (!dumpable)
-    sys_prctl(PR_SET_DUMPABLE, dumpable);
-
-  va_end(args.ap);
-
-  errno = args.err;
-  return args.result;
-}
-
-/* This function resumes the list of all linux threads that
- * TCMalloc_ListAllProcessThreads pauses before giving to its callback.
- * The function returns non-zero if at least one thread was
- * suspended and has now been resumed.
- */
-int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
-  int detached_at_least_one = 0;
-  while (num_threads-- > 0) {
-    detached_at_least_one |= sys_ptrace_detach(thread_pids[num_threads]) >= 0;
-  }
-  return detached_at_least_one;
-}
-
-#ifdef __cplusplus
-}
-#endif
-#endif
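
The removed linuxthreads.cc leans on two async-signal-safety idioms that its
comments describe: retrying syscalls that fail with EINTR, and formatting
integers without printf()/malloc(), since either may take locks that a
suspended thread already holds. Below is a minimal, self-contained C++ sketch
of those idioms; retry_open() and format_int() are illustrative names, not
gperftools API.

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Re-run the expression until it stops failing with EINTR. */
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)

/* open() wrapper that transparently retries when a signal interrupts it. */
static int retry_open(const char *fname, int flags, int mode) {
  int rc;
  NO_INTR(rc = open(fname, flags, mode));
  return rc;
}

/* Locale-free, allocation-free itoa(): writes the digits of i at buf and
 * returns a pointer to the terminating NUL, mirroring local_itoa() above. */
static char *format_int(char *buf, int i) {
  if (i < 0) {
    *buf++ = '-';
    return format_int(buf, -i);   /* like the original, assumes i != INT_MIN */
  }
  if (i >= 10)
    buf = format_int(buf, i / 10);
  *buf++ = static_cast<char>('0' + i % 10);
  *buf = '\0';
  return buf;
}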

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/linuxthreads.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/linuxthreads.h b/third_party/gperftools/src/base/linuxthreads.h
deleted file mode 100644
index 16bc8c6..0000000
--- a/third_party/gperftools/src/base/linuxthreads.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2005-2007, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Markus Gutschke
- */
-
-#ifndef _LINUXTHREADS_H
-#define _LINUXTHREADS_H
-
-/* Include thread_lister.h to get the interface that we implement for linux.
- */
-
-/* We currently support x86-32, x86-64, ARM, MIPS, PPC, and AArch64 on Linux.
- * Porting to other related platforms should not be difficult.
- */
-#if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) || \
-     defined(__mips__) || defined(__PPC__) || defined(__aarch64__)) && defined(__linux)
-
-/* Define the THREADS symbol to make sure that there is exactly one core dumper
- * built into the library.
- */
-#define THREADS "Linux /proc"
-
-#endif
-
-#endif  /* _LINUXTHREADS_H */
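
For reference, the companion linuxthreads.cc wraps its entire body in
"#ifdef THREADS", so the gate in this header decides whether the /proc-based
thread lister gets compiled at all. A hedged, self-contained sketch of that
pattern (the variable name is illustrative, not part of gperftools):

#if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) || \
     defined(__mips__) || defined(__PPC__) || defined(__aarch64__)) && defined(__linux)
#define THREADS "Linux /proc"
#endif

#ifdef THREADS
static const char kThreadListerBackend[] = THREADS;  /* /proc-based lister built in */
#else
static const char kThreadListerBackend[] = "none";   /* unsupported platform: no lister */
#endif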

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/logging.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/logging.cc b/third_party/gperftools/src/base/logging.cc
deleted file mode 100644
index 761c2fd..0000000
--- a/third_party/gperftools/src/base/logging.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2007, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// This file just provides storage for FLAGS_verbose.
-
-#include <config.h>
-#include "base/logging.h"
-#include "base/commandlineflags.h"
-
-DEFINE_int32(verbose, EnvToInt("PERFTOOLS_VERBOSE", 0),
-             "Set to numbers >0 for more verbose output, or <0 for less.  "
-             "--verbose == -4 means we log fatal errors only.");
-
-
-#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
-
-// While windows does have a POSIX-compatible API
-// (_open/_write/_close), it acquires memory.  Using this lower-level
-// windows API is the closest we can get to being "raw".
-RawFD RawOpenForWriting(const char* filename) {
-  // CreateFile allocates memory if file_name isn't absolute, so if
-  // that ever becomes a problem then we ought to compute the absolute
-  // path on its behalf (perhaps the ntdll/kernel function isn't aware
-  // of the working directory?)
-  RawFD fd = CreateFileA(filename, GENERIC_WRITE, 0, NULL,
-                         CREATE_ALWAYS, 0, NULL);
-  if (fd != kIllegalRawFD && GetLastError() == ERROR_ALREADY_EXISTS)
-    SetEndOfFile(fd);    // truncate the existing file
-  return fd;
-}
-
-void RawWrite(RawFD handle, const char* buf, size_t len) {
-  while (len > 0) {
-    DWORD wrote;
-    BOOL ok = WriteFile(handle, buf, len, &wrote, NULL);
-    // We do not use an asynchronous file handle, so ok==false means an error
-    if (!ok) break;
-    buf += wrote;
-    len -= wrote;
-  }
-}
-
-void RawClose(RawFD handle) {
-  CloseHandle(handle);
-}
-
-#else  // _WIN32 || __CYGWIN__ || __CYGWIN32__
-
-#ifdef HAVE_SYS_TYPES_H
-#include <sys/types.h>
-#endif
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#ifdef HAVE_FCNTL_H
-#include <fcntl.h>
-#endif
-
-// Re-run fn until it doesn't cause EINTR.
-#define NO_INTR(fn)  do {} while ((fn) < 0 && errno == EINTR)
-
-RawFD RawOpenForWriting(const char* filename) {
-  return open(filename, O_WRONLY|O_CREAT|O_TRUNC, 0664);
-}
-
-void RawWrite(RawFD fd, const char* buf, size_t len) {
-  while (len > 0) {
-    ssize_t r;
-    NO_INTR(r = write(fd, buf, len));
-    if (r <= 0) break;
-    buf += r;
-    len -= r;
-  }
-}
-
-void RawClose(RawFD fd) {
-  NO_INTR(close(fd));
-}
-
-#endif  // _WIN32 || __CYGWIN__ || __CYGWIN32__
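
The removed logging.cc implements both branches of the same small contract:
open, write (looping over partial writes and EINTR), and close, without ever
allocating memory. A hedged usage sketch of that API, assuming the POSIX build
where RawFD is an int; the path and message below are made up for illustration:

#include <stddef.h>
#include <string.h>

typedef int RawFD;                                   /* POSIX branch of logging.h */
RawFD RawOpenForWriting(const char* filename);
void RawWrite(RawFD fd, const char* buf, size_t len);
void RawClose(RawFD fd);

static void WriteCrashNote() {
  RawFD fd = RawOpenForWriting("/tmp/perftools.note");  /* illustrative path */
  if (fd < 0) return;                  /* best effort: give up silently */
  const char* msg = "heap profile written\n";
  RawWrite(fd, msg, strlen(msg));      /* loops internally over short writes */
  RawClose(fd);                        /* retries close() on EINTR */
}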

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/logging.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/logging.h b/third_party/gperftools/src/base/logging.h
deleted file mode 100644
index a1afe4d..0000000
--- a/third_party/gperftools/src/base/logging.h
+++ /dev/null
@@ -1,259 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// This file contains #include information about logging-related stuff.
-// Pretty much everybody needs to #include this file so that they can
-// log various happenings.
-//
-#ifndef _LOGGING_H_
-#define _LOGGING_H_
-
-#include <config.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <stdio.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>    // for write()
-#endif
-#include <string.h>    // for strlen(), strcmp()
-#include <assert.h>
-#include <errno.h>     // for errno
-#include "base/commandlineflags.h"
-
-// On some systems (like freebsd), we can't call write() at all in a
-// global constructor, perhaps because errno hasn't been set up.
-// (In windows, we can't call it because it might call malloc.)
-// Calling the write syscall is safer (it doesn't set errno), so we
-// prefer that.  Note we don't care about errno for logging: we just
-// do logging on a best-effort basis.
-#if defined(_MSC_VER)
-#define WRITE_TO_STDERR(buf, len) WriteToStderr(buf, len);  // in port.cc
-#elif defined(HAVE_SYS_SYSCALL_H)
-#include <sys/syscall.h>
-#define WRITE_TO_STDERR(buf, len) syscall(SYS_write, STDERR_FILENO, buf, len)
-#else
-#define WRITE_TO_STDERR(buf, len) write(STDERR_FILENO, buf, len)
-#endif
-
-// MSVC and mingw define their own, safe version of vsnprintf (the
-// windows one is broken) in port.cc.  Everyone else can use the
-// version here.  We had to give it a unique name for windows.
-#ifndef _WIN32
-# define perftools_vsnprintf vsnprintf
-#endif
-
-
-// We log all messages at this log-level and below.
-// INFO == -1, WARNING == -2, ERROR == -3, FATAL == -4
-DECLARE_int32(verbose);
-
-// CHECK dies with a fatal error if condition is not true.  It is *not*
-// controlled by NDEBUG, so the check will be executed regardless of
-// compilation mode.  Therefore, it is safe to do things like:
-//    CHECK(fp->Write(x) == 4)
-// Note we use write instead of printf/puts to avoid the risk we'll
-// call malloc().
-#define CHECK(condition)                                                \
-  do {                                                                  \
-    if (!(condition)) {                                                 \
-      WRITE_TO_STDERR("Check failed: " #condition "\n",                 \
-                      sizeof("Check failed: " #condition "\n")-1);      \
-      abort();                                                          \
-    }                                                                   \
-  } while (0)
-
-// This takes a message to print.  The name is historical.
-#define RAW_CHECK(condition, message)                                          \
-  do {                                                                         \
-    if (!(condition)) {                                                        \
-      WRITE_TO_STDERR("Check failed: " #condition ": " message "\n",           \
-                      sizeof("Check failed: " #condition ": " message "\n")-1);\
-      abort();                                                                 \
-    }                                                                          \
-  } while (0)
-
-// This is like RAW_CHECK, but only in debug-mode
-#ifdef NDEBUG
-enum { DEBUG_MODE = 0 };
-#define RAW_DCHECK(condition, message)
-#else
-enum { DEBUG_MODE = 1 };
-#define RAW_DCHECK(condition, message)  RAW_CHECK(condition, message)
-#endif
-
-// This prints errno as well.  Note we use write instead of printf/puts to
-// avoid the risk we'll call malloc().
-#define PCHECK(condition)                                               \
-  do {                                                                  \
-    if (!(condition)) {                                                 \
-      const int err_no = errno;                                         \
-      WRITE_TO_STDERR("Check failed: " #condition ": ",                 \
-                      sizeof("Check failed: " #condition ": ")-1);      \
-      WRITE_TO_STDERR(strerror(err_no), strlen(strerror(err_no)));      \
-      WRITE_TO_STDERR("\n", sizeof("\n")-1);                            \
-      abort();                                                          \
-    }                                                                   \
-  } while (0)
-
-// Helper macro for binary operators; prints the two values on error
-// Don't use this macro directly in your code, use CHECK_EQ et al below
-
-// WARNING: These don't compile correctly if one of the arguments is a pointer
-// and the other is NULL. To work around this, simply static_cast NULL to the
-// type of the desired pointer.
-
-// TODO(jandrews): Also print the values in case of failure.  Requires some
-// sort of type-sensitive ToString() function.
-#define CHECK_OP(op, val1, val2)                                        \
-  do {                                                                  \
-    if (!((val1) op (val2))) {                                          \
-      fprintf(stderr, "Check failed: %s %s %s\n", #val1, #op, #val2);   \
-      abort();                                                          \
-    }                                                                   \
-  } while (0)
-
-#define CHECK_EQ(val1, val2) CHECK_OP(==, val1, val2)
-#define CHECK_NE(val1, val2) CHECK_OP(!=, val1, val2)
-#define CHECK_LE(val1, val2) CHECK_OP(<=, val1, val2)
-#define CHECK_LT(val1, val2) CHECK_OP(< , val1, val2)
-#define CHECK_GE(val1, val2) CHECK_OP(>=, val1, val2)
-#define CHECK_GT(val1, val2) CHECK_OP(> , val1, val2)
-
-// Synonyms for CHECK_* that are used in some unittests.
-#define EXPECT_EQ(val1, val2) CHECK_EQ(val1, val2)
-#define EXPECT_NE(val1, val2) CHECK_NE(val1, val2)
-#define EXPECT_LE(val1, val2) CHECK_LE(val1, val2)
-#define EXPECT_LT(val1, val2) CHECK_LT(val1, val2)
-#define EXPECT_GE(val1, val2) CHECK_GE(val1, val2)
-#define EXPECT_GT(val1, val2) CHECK_GT(val1, val2)
-#define ASSERT_EQ(val1, val2) EXPECT_EQ(val1, val2)
-#define ASSERT_NE(val1, val2) EXPECT_NE(val1, val2)
-#define ASSERT_LE(val1, val2) EXPECT_LE(val1, val2)
-#define ASSERT_LT(val1, val2) EXPECT_LT(val1, val2)
-#define ASSERT_GE(val1, val2) EXPECT_GE(val1, val2)
-#define ASSERT_GT(val1, val2) EXPECT_GT(val1, val2)
-// As are these variants.
-#define EXPECT_TRUE(cond)     CHECK(cond)
-#define EXPECT_FALSE(cond)    CHECK(!(cond))
-#define EXPECT_STREQ(a, b)    CHECK(strcmp(a, b) == 0)
-#define ASSERT_TRUE(cond)     EXPECT_TRUE(cond)
-#define ASSERT_FALSE(cond)    EXPECT_FALSE(cond)
-#define ASSERT_STREQ(a, b)    EXPECT_STREQ(a, b)
-
-// Used for (libc) functions that return -1 and set errno
-#define CHECK_ERR(invocation)  PCHECK((invocation) != -1)
-
-// A few more checks that only happen in debug mode
-#ifdef NDEBUG
-#define DCHECK_EQ(val1, val2)
-#define DCHECK_NE(val1, val2)
-#define DCHECK_LE(val1, val2)
-#define DCHECK_LT(val1, val2)
-#define DCHECK_GE(val1, val2)
-#define DCHECK_GT(val1, val2)
-#else
-#define DCHECK_EQ(val1, val2)  CHECK_EQ(val1, val2)
-#define DCHECK_NE(val1, val2)  CHECK_NE(val1, val2)
-#define DCHECK_LE(val1, val2)  CHECK_LE(val1, val2)
-#define DCHECK_LT(val1, val2)  CHECK_LT(val1, val2)
-#define DCHECK_GE(val1, val2)  CHECK_GE(val1, val2)
-#define DCHECK_GT(val1, val2)  CHECK_GT(val1, val2)
-#endif
-
-
-#ifdef ERROR
-#undef ERROR      // may conflict with ERROR macro on windows
-#endif
-enum LogSeverity {INFO = -1, WARNING = -2, ERROR = -3, FATAL = -4};
-
-// NOTE: we add a newline to the end of the output if it's not there already
-inline void LogPrintf(int severity, const char* pat, va_list ap) {
-  // We write directly to the stderr file descriptor and avoid FILE
-  // buffering because that may invoke malloc()
-  char buf[600];
-  perftools_vsnprintf(buf, sizeof(buf)-1, pat, ap);
-  if (buf[0] != '\0' && buf[strlen(buf)-1] != '\n') {
-    assert(strlen(buf)+1 < sizeof(buf));
-    strcat(buf, "\n");
-  }
-  WRITE_TO_STDERR(buf, strlen(buf));
-  if ((severity) == FATAL)
-    abort(); // LOG(FATAL) indicates a big problem, so don't run atexit() calls
-}
-
-// Note that since the order of global constructors is unspecified,
-// global code that calls RAW_LOG may execute before FLAGS_verbose is set.
-// Such code will run with verbosity == 0 no matter what.
-#define VLOG_IS_ON(severity) (FLAGS_verbose >= severity)
-
-// In a better world, we'd use __VA_ARGS__, but VC++ 7 doesn't support it.
-#define LOG_PRINTF(severity, pat) do {          \
-  if (VLOG_IS_ON(severity)) {                   \
-    va_list ap;                                 \
-    va_start(ap, pat);                          \
-    LogPrintf(severity, pat, ap);               \
-    va_end(ap);                                 \
-  }                                             \
-} while (0)
-
-// RAW_LOG is the main function; some synonyms are used in unittests.
-inline void RAW_LOG(int lvl, const char* pat, ...)  { LOG_PRINTF(lvl, pat); }
-inline void RAW_VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
-inline void LOG(int lvl, const char* pat, ...)      { LOG_PRINTF(lvl, pat); }
-inline void VLOG(int lvl, const char* pat, ...)     { LOG_PRINTF(lvl, pat); }
-inline void LOG_IF(int lvl, bool cond, const char* pat, ...) {
-  if (cond)  LOG_PRINTF(lvl, pat);
-}
-
-// This isn't technically logging, but it's also IO and also is an
-// attempt to be "raw" -- that is, to not use any higher-level libc
-// routines that might allocate memory or (ideally) try to acquire
-// locks.  We use an opaque file handle (not necessarily an int)
-// to allow even more low-level stuff in the future.
-// Like other "raw" routines, these functions are best effort, and
-// thus don't return error codes (except RawOpenForWriting()).
-#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
-#ifndef NOMINMAX
-#define NOMINMAX     // @#!$& windows
-#endif
-#include <windows.h>
-typedef HANDLE RawFD;
-const RawFD kIllegalRawFD = INVALID_HANDLE_VALUE;
-#else
-typedef int RawFD;
-const RawFD kIllegalRawFD = -1;   // what open returns if it fails
-#endif  // defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
-
-RawFD RawOpenForWriting(const char* filename);   // uses default permissions
-void RawWrite(RawFD fd, const char* buf, size_t len);
-void RawClose(RawFD fd);
-
-#endif // _LOGGING_H_
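
A hedged usage sketch of the checking and logging macros declared above; the
function, buffer, and message are made up for illustration. CHECK fires in all
build modes, RAW_CHECK adds a message, and RAW_VLOG output depends on
FLAGS_verbose (initialized from PERFTOOLS_VERBOSE in logging.cc):

#include <stdlib.h>
#include "base/logging.h"

static void* AllocateBuffer(size_t size) {
  CHECK(size > 0);                               // compiled in even with NDEBUG
  void* ptr = malloc(size);                      // stand-in for a real allocator
  RAW_CHECK(ptr != NULL, "out of memory");       // prints the message, then abort()s
  RAW_VLOG(1, "allocated %d bytes", (int)size);  // shown only if FLAGS_verbose >= 1
  return ptr;
}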

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/low_level_alloc.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/low_level_alloc.cc b/third_party/gperftools/src/base/low_level_alloc.cc
deleted file mode 100644
index 4d2ae8d..0000000
--- a/third_party/gperftools/src/base/low_level_alloc.cc
+++ /dev/null
@@ -1,523 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-/* Copyright (c) 2006, Google Inc.
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// A low-level allocator that can be used by other low-level
-// modules without introducing dependency cycles.
-// This allocator is slow and wasteful of memory;
-// it should not be used when performance is key.
-
-#include "base/low_level_alloc.h"
-#include "base/dynamic_annotations.h"
-#include "base/spinlock.h"
-#include "base/logging.h"
-#include "malloc_hook-inl.h"
-#include <gperftools/malloc_hook.h>
-#include <errno.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#ifdef HAVE_MMAP
-#include <sys/mman.h>
-#endif
-#include <new>                   // for placement-new
-
-// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
-// form of the name instead.
-#ifndef MAP_ANONYMOUS
-# define MAP_ANONYMOUS MAP_ANON
-#endif
-
-// A first-fit allocator with amortized logarithmic free() time.
-
-// ---------------------------------------------------------------------------
-static const int kMaxLevel = 30;
-
-// We put this class-only struct in a namespace to avoid polluting the
-// global namespace with this struct name (thus risking an ODR violation).
-namespace low_level_alloc_internal {
-  // This struct describes one allocated block, or one free block.
-  struct AllocList {
-    struct Header {
-      intptr_t size;  // size of entire region, including this field. Must be
-                      // first.  Valid in both allocated and unallocated blocks
-      intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
-      LowLevelAlloc::Arena *arena; // pointer to parent arena
-      void *dummy_for_alignment;   // aligns regions to 0 mod 2*sizeof(void*)
-    } header;
-
-    // Next two fields: in unallocated blocks: freelist skiplist data
-    //                  in allocated blocks: overlaps with client data
-    int levels;           // levels in skiplist used
-    AllocList *next[kMaxLevel];   // actually has levels elements.
-                                  // The AllocList node may not have room for
-                                  // all kMaxLevel entries.  See max_fit in
-                                  // LLA_SkiplistLevels()
-  };
-}
-using low_level_alloc_internal::AllocList;
-
-
-// ---------------------------------------------------------------------------
-// A trivial skiplist implementation.  This is used to keep the freelist
-// in address order while taking only logarithmic time per insert and delete.
-
-// An integer approximation of log2(size/base)
-// Requires size >= base.
-static int IntLog2(size_t size, size_t base) {
-  int result = 0;
-  for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
-    result++;
-  }
-  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
-  // =>     log2(size/(base+1)) <= result < 1+log2(size/base)
-  // => result ~= log2(size/base)
-  return result;
-}
-
-// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
-static int Random() {
-  static int32 r = 1;         // no locking---it's not critical
-  ANNOTATE_BENIGN_RACE(&r, "benign race, not critical.");
-  int result = 1;
-  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
-    result++;
-  }
-  return result;
-}
-
-// Return a number of skiplist levels for a node of size bytes, where
-// base is the minimum node size.  Compute level=log2(size / base)+n
-// where n is 1 if random is false and otherwise a random number generated with
-// the standard distribution for a skiplist:  See Random() above.
-// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
-// term, so first-fit searches touch fewer nodes.  "level" is clipped so
-// level<kMaxLevel and next[level-1] will fit in the node.
-// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
-static int LLA_SkiplistLevels(size_t size, size_t base, bool random) {
-  // max_fit is the maximum number of levels that will fit in a node for the
-  // given size.   We can't return more than max_fit, no matter what the
-  // random number generator says.
-  int max_fit = (size-OFFSETOF_MEMBER(AllocList, next)) / sizeof (AllocList *);
-  int level = IntLog2(size, base) + (random? Random() : 1);
-  if (level > max_fit)     level = max_fit;
-  if (level > kMaxLevel-1) level = kMaxLevel - 1;
-  RAW_CHECK(level >= 1, "block not big enough for even one level");
-  return level;
-}
-
-// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
-// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
-// points to the last element at level i in the AllocList less than *e, or is
-// head if no such element exists.
-static AllocList *LLA_SkiplistSearch(AllocList *head,
-                                     AllocList *e, AllocList **prev) {
-  AllocList *p = head;
-  for (int level = head->levels - 1; level >= 0; level--) {
-    for (AllocList *n; (n = p->next[level]) != 0 && n < e; p = n) {
-    }
-    prev[level] = p;
-  }
-  return (head->levels == 0) ?  0 : prev[0]->next[0];
-}
-
-// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
-// Requires that e->levels be previously set by the caller (using
-// LLA_SkiplistLevels())
-static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
-                               AllocList **prev) {
-  LLA_SkiplistSearch(head, e, prev);
-  for (; head->levels < e->levels; head->levels++) { // extend prev pointers
-    prev[head->levels] = head;                       // to all *e's levels
-  }
-  for (int i = 0; i != e->levels; i++) { // add element to list
-    e->next[i] = prev[i]->next[i];
-    prev[i]->next[i] = e;
-  }
-}
-
-// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
-// Requires that e->levels be previously set by the caller (using
-// LLA_SkiplistLevels())
-static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
-                               AllocList **prev) {
-  AllocList *found = LLA_SkiplistSearch(head, e, prev);
-  RAW_CHECK(e == found, "element not in freelist");
-  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
-    prev[i]->next[i] = e->next[i];
-  }
-  while (head->levels > 0 && head->next[head->levels - 1] == 0) {
-    head->levels--;   // reduce head->levels if level unused
-  }
-}
-
-// ---------------------------------------------------------------------------
-// Arena implementation
-
-struct LowLevelAlloc::Arena {
-  Arena() : mu(SpinLock::LINKER_INITIALIZED) {} // does nothing; for static init
-  explicit Arena(int) : pagesize(0) {}  // set pagesize to zero explicitly
-                                        // for non-static init
-
-  SpinLock mu;            // protects freelist, allocation_count,
-                          // pagesize, roundup, min_size
-  AllocList freelist;     // head of free list; sorted by addr (under mu)
-  int32 allocation_count; // count of allocated blocks (under mu)
-  int32 flags;            // flags passed to NewArena (ro after init)
-  size_t pagesize;        // ==getpagesize()  (init under mu, then ro)
-  size_t roundup;         // lowest power of 2 >= max(16,sizeof (AllocList))
-                          // (init under mu, then ro)
-  size_t min_size;        // smallest allocation block size
-                          // (init under mu, then ro)
-};
-
-// The default arena, which is used when 0 is passed instead of an Arena
-// pointer.
-static struct LowLevelAlloc::Arena default_arena;
-
-// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
-// do not want malloc hook reporting, so that for them there's no malloc hook
-// reporting even during arena creation.
-static struct LowLevelAlloc::Arena unhooked_arena;
-static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
-
-// magic numbers to identify allocated and unallocated blocks
-static const intptr_t kMagicAllocated = 0x4c833e95;
-static const intptr_t kMagicUnallocated = ~kMagicAllocated;
-
-namespace {
-  class SCOPED_LOCKABLE ArenaLock {
-   public:
-    explicit ArenaLock(LowLevelAlloc::Arena *arena)
-        EXCLUSIVE_LOCK_FUNCTION(arena->mu)
-        : left_(false), mask_valid_(false), arena_(arena) {
-      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-      // We've decided not to support async-signal-safe arena use until
-      // there is a demonstrated need.  Here's how one could do it, though
-      // (would need to be made more portable).
-#if 0
-        sigset_t all;
-        sigfillset(&all);
-        this->mask_valid_ =
-            (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
-#else
-        RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
-#endif
-      }
-      this->arena_->mu.Lock();
-    }
-    ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
-    void Leave() /*UNLOCK_FUNCTION()*/ {
-      this->arena_->mu.Unlock();
-#if 0
-      if (this->mask_valid_) {
-        pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
-      }
-#endif
-      this->left_ = true;
-    }
-   private:
-    bool left_;       // whether left region
-    bool mask_valid_;
-#if 0
-    sigset_t mask_;   // old mask of blocked signals
-#endif
-    LowLevelAlloc::Arena *arena_;
-    DISALLOW_COPY_AND_ASSIGN(ArenaLock);
-  };
-} // anonymous namespace
-
-// create an appropriate magic number for an object at "ptr"
-// "magic" should be kMagicAllocated or kMagicUnallocated
-inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
-  return magic ^ reinterpret_cast<intptr_t>(ptr);
-}
-
-// Initialize the fields of an Arena
-static void ArenaInit(LowLevelAlloc::Arena *arena) {
-  if (arena->pagesize == 0) {
-    arena->pagesize = getpagesize();
-    // Round up block sizes to a power of two close to the header size.
-    arena->roundup = 16;
-    while (arena->roundup < sizeof (arena->freelist.header)) {
-      arena->roundup += arena->roundup;
-    }
-    // Don't allocate blocks less than twice the roundup size to avoid tiny
-    // free blocks.
-    arena->min_size = 2 * arena->roundup;
-    arena->freelist.header.size = 0;
-    arena->freelist.header.magic =
-        Magic(kMagicUnallocated, &arena->freelist.header);
-    arena->freelist.header.arena = arena;
-    arena->freelist.levels = 0;
-    memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
-    arena->allocation_count = 0;
-    if (arena == &default_arena) {
-      // Default arena should be hooked, e.g. for heap-checker to trace
-      // pointer chains through objects in the default arena.
-      arena->flags = LowLevelAlloc::kCallMallocHook;
-    } else if (arena == &unhooked_async_sig_safe_arena) {
-      arena->flags = LowLevelAlloc::kAsyncSignalSafe;
-    } else {
-      arena->flags = 0;   // other arenas' flags may be overridden by client,
-                          // but unhooked_arena will have 0 in 'flags'.
-    }
-  }
-}
-
-// L < meta_data_arena->mu
-LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
-                                              Arena *meta_data_arena) {
-  RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
-  if (meta_data_arena == &default_arena) {
-    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-      meta_data_arena = &unhooked_async_sig_safe_arena;
-    } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
-      meta_data_arena = &unhooked_arena;
-    }
-  }
-  // Arena(0) uses the constructor for non-static contexts
-  Arena *result =
-    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
-  ArenaInit(result);
-  result->flags = flags;
-  return result;
-}
-
-// L < arena->mu, L < arena->arena->mu
-bool LowLevelAlloc::DeleteArena(Arena *arena) {
-  RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
-            "may not delete default arena");
-  ArenaLock section(arena);
-  bool empty = (arena->allocation_count == 0);
-  section.Leave();
-  if (empty) {
-    while (arena->freelist.next[0] != 0) {
-      AllocList *region = arena->freelist.next[0];
-      size_t size = region->header.size;
-      arena->freelist.next[0] = region->next[0];
-      RAW_CHECK(region->header.magic ==
-                Magic(kMagicUnallocated, &region->header),
-                "bad magic number in DeleteArena()");
-      RAW_CHECK(region->header.arena == arena,
-                "bad arena pointer in DeleteArena()");
-      RAW_CHECK(size % arena->pagesize == 0,
-                "empty arena has non-page-aligned block size");
-      RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
-                "empty arena has non-page-aligned block");
-      int munmap_result;
-      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
-        munmap_result = munmap(region, size);
-      } else {
-        munmap_result = MallocHook::UnhookedMUnmap(region, size);
-      }
-      RAW_CHECK(munmap_result == 0,
-                "LowLevelAlloc::DeleteArena:  munmap failed address");
-    }
-    Free(arena);
-  }
-  return empty;
-}
-
-// ---------------------------------------------------------------------------
-
-// Return value rounded up to next multiple of align.
-// align must be a power of two.
-static intptr_t RoundUp(intptr_t addr, intptr_t align) {
-  return (addr + align - 1) & ~(align - 1);
-}
-
-// Equivalent to "return prev->next[i]" but with sanity checking
-// that the freelist is in the correct order, that it
-// consists of regions marked "unallocated", and that no two regions
-// are adjacent in memory (they should have been coalesced).
-// L < arena->mu
-static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
-  RAW_CHECK(i < prev->levels, "too few levels in Next()");
-  AllocList *next = prev->next[i];
-  if (next != 0) {
-    RAW_CHECK(next->header.magic == Magic(kMagicUnallocated, &next->header),
-              "bad magic number in Next()");
-    RAW_CHECK(next->header.arena == arena,
-              "bad arena pointer in Next()");
-    if (prev != &arena->freelist) {
-      RAW_CHECK(prev < next, "unordered freelist");
-      RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
-                reinterpret_cast<char *>(next), "malformed freelist");
-    }
-  }
-  return next;
-}
-
-// Coalesce list item "a" with its successor if they are adjacent.
-static void Coalesce(AllocList *a) {
-  AllocList *n = a->next[0];
-  if (n != 0 && reinterpret_cast<char *>(a) + a->header.size ==
-                    reinterpret_cast<char *>(n)) {
-    LowLevelAlloc::Arena *arena = a->header.arena;
-    a->header.size += n->header.size;
-    n->header.magic = 0;
-    n->header.arena = 0;
-    AllocList *prev[kMaxLevel];
-    LLA_SkiplistDelete(&arena->freelist, n, prev);
-    LLA_SkiplistDelete(&arena->freelist, a, prev);
-    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, true);
-    LLA_SkiplistInsert(&arena->freelist, a, prev);
-  }
-}
-
-// Adds block at location "v" to the free list
-// L >= arena->mu
-static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
-  AllocList *f = reinterpret_cast<AllocList *>(
-                        reinterpret_cast<char *>(v) - sizeof (f->header));
-  RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
-            "bad magic number in AddToFreelist()");
-  RAW_CHECK(f->header.arena == arena,
-            "bad arena pointer in AddToFreelist()");
-  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true);
-  AllocList *prev[kMaxLevel];
-  LLA_SkiplistInsert(&arena->freelist, f, prev);
-  f->header.magic = Magic(kMagicUnallocated, &f->header);
-  Coalesce(f);                  // maybe coalesce with successor
-  Coalesce(prev[0]);            // maybe coalesce with predecessor
-}
-
-// Frees storage allocated by LowLevelAlloc::Alloc().
-// L < arena->mu
-void LowLevelAlloc::Free(void *v) {
-  if (v != 0) {
-    AllocList *f = reinterpret_cast<AllocList *>(
-                        reinterpret_cast<char *>(v) - sizeof (f->header));
-    RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
-              "bad magic number in Free()");
-    LowLevelAlloc::Arena *arena = f->header.arena;
-    if ((arena->flags & kCallMallocHook) != 0) {
-      MallocHook::InvokeDeleteHook(v);
-    }
-    ArenaLock section(arena);
-    AddToFreelist(v, arena);
-    RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
-    arena->allocation_count--;
-    section.Leave();
-  }
-}
-
-// allocates and returns a block of size bytes, to be freed with Free()
-// L < arena->mu
-static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
-  void *result = 0;
-  if (request != 0) {
-    AllocList *s;       // will point to region that satisfies request
-    ArenaLock section(arena);
-    ArenaInit(arena);
-    // round up with header
-    size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
-    for (;;) {      // loop until we find a suitable region
-      // find the minimum levels that a block of this size must have
-      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
-      if (i < arena->freelist.levels) {   // potential blocks exist
-        AllocList *before = &arena->freelist;  // predecessor of s
-        while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
-          before = s;
-        }
-        if (s != 0) {       // we found a region
-          break;
-        }
-      }
-      // we unlock before mmap() both because mmap() may call a callback hook,
-      // and because it may be slow.
-      arena->mu.Unlock();
-      // mmap generous chunks (16 pages, typically 64K) to decrease
-      // the chances/impact of fragmentation:
-      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
-      void *new_pages;
-      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-        new_pages = MallocHook::UnhookedMMap(0, new_pages_size,
-            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-      } else {
-        new_pages = mmap(0, new_pages_size,
-            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-      }
-      RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
-      arena->mu.Lock();
-      s = reinterpret_cast<AllocList *>(new_pages);
-      s->header.size = new_pages_size;
-      // Pretend the block is allocated; call AddToFreelist() to free it.
-      s->header.magic = Magic(kMagicAllocated, &s->header);
-      s->header.arena = arena;
-      AddToFreelist(&s->levels, arena);  // insert new region into free list
-    }
-    AllocList *prev[kMaxLevel];
-    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
-    // s points to the first free region that's big enough
-    if (req_rnd + arena->min_size <= s->header.size) {  // big enough to split
-      AllocList *n = reinterpret_cast<AllocList *>
-                        (req_rnd + reinterpret_cast<char *>(s));
-      n->header.size = s->header.size - req_rnd;
-      n->header.magic = Magic(kMagicAllocated, &n->header);
-      n->header.arena = arena;
-      s->header.size = req_rnd;
-      AddToFreelist(&n->levels, arena);
-    }
-    s->header.magic = Magic(kMagicAllocated, &s->header);
-    RAW_CHECK(s->header.arena == arena, "");
-    arena->allocation_count++;
-    section.Leave();
-    result = &s->levels;
-  }
-  ANNOTATE_NEW_MEMORY(result, request);
-  return result;
-}
-
-void *LowLevelAlloc::Alloc(size_t request) {
-  void *result = DoAllocWithArena(request, &default_arena);
-  if ((default_arena.flags & kCallMallocHook) != 0) {
-    // this call must be directly in the user-called allocator function
-    // for MallocHook::GetCallerStackTrace to work properly
-    MallocHook::InvokeNewHook(result, request);
-  }
-  return result;
-}
-
-void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
-  RAW_CHECK(arena != 0, "must pass a valid arena");
-  void *result = DoAllocWithArena(request, arena);
-  if ((arena->flags & kCallMallocHook) != 0) {
-    // this call must be directly in the user-called allocator function
-    // for MallocHook::GetCallerStackTrace to work properly
-    MallocHook::InvokeNewHook(result, request);
-  }
-  return result;
-}
-
-LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
-  return &default_arena;
-}
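
The free-list operations above use a textbook skip-list descent: start at the
highest level of the head node, walk forward while the successor is still below
the search key, then drop a level, recording in prev[] the last node visited at
each level.  The standalone sketch below (not part of gperftools; the Node
type, Search() helper, and main() driver are hypothetical stand-ins for
AllocList and LLA_SkiplistSearch()) mirrors that descent on plain integer keys
instead of block addresses.

  // Minimal skip-list search sketch, assuming a simplified Node type in place
  // of AllocList.  Compiles standalone with any C++11 compiler.
  #include <iostream>

  static const int kMaxLevel = 4;

  struct Node {
    int key;                // stands in for the block address ordering
    int levels;             // number of levels this node participates in
    Node *next[kMaxLevel];  // next[i] is the successor at level i
  };

  // Return the first node with key >= target; fill prev[] with the last node
  // visited at each level, as LLA_SkiplistSearch() does for the free list.
  static Node *Search(Node *head, int target, Node **prev) {
    Node *p = head;
    for (int level = head->levels - 1; level >= 0; level--) {
      for (Node *n; (n = p->next[level]) != nullptr && n->key < target; p = n) {
      }
      prev[level] = p;
    }
    return (head->levels == 0) ? nullptr : prev[0]->next[0];
  }

  int main() {
    // Tiny hand-built list: head -> 10 -> 20 -> 30 at level 0,
    // head -> 20 at level 1.
    Node n30  = {30, 1, {nullptr}};
    Node n20  = {20, 2, {&n30, nullptr}};
    Node n10  = {10, 1, {&n20}};
    Node head = {0,  2, {&n10, &n20}};

    Node *prev[kMaxLevel];
    Node *found = Search(&head, 20, prev);
    std::cout << "first key >= 20: " << (found ? found->key : -1) << "\n";
    return 0;
  }

The same prev[] array is then all that LLA_SkiplistInsert() and
LLA_SkiplistDelete() need to splice a node in or out at every level it
occupies.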

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/low_level_alloc.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/low_level_alloc.h b/third_party/gperftools/src/base/low_level_alloc.h
deleted file mode 100644
index 4081ff8..0000000
--- a/third_party/gperftools/src/base/low_level_alloc.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-/* Copyright (c) 2006, Google Inc.
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#if !defined(_BASE_LOW_LEVEL_ALLOC_H_)
-#define _BASE_LOW_LEVEL_ALLOC_H_
-
-// A simple thread-safe memory allocator that does not depend on
-// mutexes or thread-specific data.  It is intended to be used
-// sparingly, and only when malloc() would introduce an unwanted
-// dependency, such as inside the heap-checker.
-
-#include <config.h>
-#include <stddef.h>             // for size_t
-#include "base/basictypes.h"
-
-class LowLevelAlloc {
- public:
-  struct Arena;       // an arena from which memory may be allocated
-
-  // Returns a pointer to a block of at least "request" bytes
-  // that have been newly allocated from the specific arena.
-  // For Alloc() calls, the DefaultArena() is used.
-  // Returns 0 if passed request==0.
-  // Does not return 0 under other circumstances; it crashes if memory
-  // is not available.
-  static void *Alloc(size_t request)
-    ATTRIBUTE_SECTION(malloc_hook);
-  static void *AllocWithArena(size_t request, Arena *arena)
-    ATTRIBUTE_SECTION(malloc_hook);
-
-  // Deallocates a region of memory that was previously allocated with
-  // Alloc().   Does nothing if passed 0.   "s" must be either 0,
-  // or must have been returned from a call to Alloc() and not yet passed to
-  // Free() since that call to Alloc().  The space is returned to the arena
-  // from which it was allocated.
-  static void Free(void *s) ATTRIBUTE_SECTION(malloc_hook);
-
-    // ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
-    // are to put all callers of MallocHook::Invoke* in this module
-    // into a special section,
-    // so that MallocHook::GetCallerStackTrace can function accurately.
-
-  // Create a new arena.
-  // The root metadata for the new arena is allocated in the
-  // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
-  // These values may be ORed into flags:
-  enum {
-    // Report calls to Alloc() and Free() via the MallocHook interface.
-    // Set in the DefaultArena.
-    kCallMallocHook = 0x0001,
-
-    // Make calls to Alloc(), Free() be async-signal-safe.  Not set in
-    // DefaultArena().
-    kAsyncSignalSafe = 0x0002,
-
-    // When used with DefaultArena(), the NewArena() and DeleteArena() calls
-    // obey the flags given explicitly in the NewArena() call, even if those
-    // flags differ from the settings in DefaultArena().  So the call
-    // NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe,
-    // as well as generating an arena that provides async-signal-safe
-    // Alloc/Free.
-  };
-  static Arena *NewArena(int32 flags, Arena *meta_data_arena);
-
-  // Destroys an arena allocated by NewArena and returns true,
-  // provided no allocated blocks remain in the arena.
-  // If allocated blocks remain in the arena, does nothing and
-  // returns false.
-  // It is illegal to attempt to destroy the DefaultArena().
-  static bool DeleteArena(Arena *arena);
-
-  // The default arena that always exists.
-  static Arena *DefaultArena();
-
- private:
-  LowLevelAlloc();      // no instances
-};
-
-#endif
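
For reference, here is a minimal usage sketch of the interface declared above.
It is a sketch only: it builds just inside the gperftools source tree (it needs
config.h and base/basictypes.h), and Example() is a hypothetical driver rather
than code from the library.

  // Hypothetical caller of LowLevelAlloc, assuming gperftools' src/ directory
  // is on the compiler's include path.
  #include "base/low_level_alloc.h"

  void Example() {
    // Allocate from the default arena; malloc hooks fire because
    // kCallMallocHook is set there.
    void *p = LowLevelAlloc::Alloc(128);
    LowLevelAlloc::Free(p);

    // Create a private arena whose root metadata lives in the default arena,
    // allocate from it, then tear it down once it is empty.
    LowLevelAlloc::Arena *arena =
        LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
    void *q = LowLevelAlloc::AllocWithArena(64, arena);
    LowLevelAlloc::Free(q);                            // returned to 'arena'
    bool deleted = LowLevelAlloc::DeleteArena(arena);  // true: arena was empty
    (void)deleted;
  }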

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/simple_mutex.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/simple_mutex.h b/third_party/gperftools/src/base/simple_mutex.h
deleted file mode 100644
index a1886e4..0000000
--- a/third_party/gperftools/src/base/simple_mutex.h
+++ /dev/null
@@ -1,332 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2007, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// 
-// ---
-// Author: Craig Silverstein.
-//
-// A simple mutex wrapper, supporting locks and read-write locks.
-// You should assume the locks are *not* re-entrant.
-//
-// To use: you should define the following macros in your configure.ac:
-//   ACX_PTHREAD
-//   AC_RWLOCK
-// The latter is defined in ../autoconf.
-//
-// This class is meant to be internal-only and should be wrapped by an
-// internal namespace.  Before you use this module, please give the
-// name of your internal namespace for this module.  Or, if you want
-// to expose it, you'll want to move it to the Google namespace.  We
-// cannot put this class in global namespace because there can be some
-// problems when we have multiple versions of Mutex in each shared object.
-//
-// NOTE: TryLock() is broken for NO_THREADS mode, at least in NDEBUG
-//       mode.
-//
-// CYGWIN NOTE: Cygwin support for rwlock seems to be buggy:
-//    http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html
-// Because of that, we might as well use windows locks for
-// cygwin.  They seem to be more reliable than the cygwin pthreads layer.
-//
-// TRICKY IMPLEMENTATION NOTE:
-// This class is designed to be safe to use during
-// dynamic-initialization -- that is, by global constructors that are
-// run before main() starts.  The issue in this case is that
-// dynamic-initialization happens in an unpredictable order, and it
-// could be that someone else's dynamic initializer could call a
-// function that tries to acquire this mutex -- but that all happens
-// before this mutex's constructor has run.  (This can happen even if
-// the mutex and the function that uses the mutex are in the same .cc
-// file.)  Basically, because Mutex does non-trivial work in its
-// constructor, it's not, in the naive implementation, safe to use
-// before dynamic initialization has run on it.
-//
-// The solution used here is to pair the actual mutex primitive with a
-// bool that is set to true when the mutex is dynamically initialized.
-// (Before that it's false.)  Then we modify all mutex routines to
-// look at the bool, and not try to lock/unlock until the bool makes
-// it to true (which happens after the Mutex constructor has run.)
-//
-// This works because before main() starts -- particularly, during
-// dynamic initialization -- there are no threads, so a) it's ok that
-// the mutex operations are a no-op, since we don't need locking then
-// anyway; and b) we can be quite confident our bool won't change
-// state between a call to Lock() and a call to Unlock() (that would
-// require a global constructor in one translation unit to call Lock()
-// and another global constructor in another translation unit to call
-// Unlock() later, which is pretty perverse).
-//
-// That said, it's tricky, and can conceivably fail; it's safest to
-// avoid trying to acquire a mutex in a global constructor, if you
-// can.  One way it can fail is that a really smart compiler might
-// initialize the bool to true at static-initialization time (too
-// early) rather than at dynamic-initialization time.  To discourage
-// that, we set is_safe_ to true in code (not the constructor
-// colon-initializer) and set it to true via a function that always
-// evaluates to true, but that the compiler can't know always
-// evaluates to true.  This should be good enough.
-//
-// A related issue is code that could try to access the mutex
-// after it's been destroyed in the global destructors (because
-// the Mutex global destructor runs before some other global
-// destructor, that tries to acquire the mutex).  The way we
-// deal with this is by taking a constructor arg that global
-// mutexes should pass in, that causes the destructor to do no
-// work.  We still depend on the compiler not doing anything
-// weird to a Mutex's memory after it is destroyed, but for a
-// static global variable, that's pretty safe.
-
-#ifndef GOOGLE_MUTEX_H_
-#define GOOGLE_MUTEX_H_
-
-#include <config.h>
-
-#if defined(NO_THREADS)
-  typedef int MutexType;      // to keep a lock-count
-#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
-# ifndef WIN32_LEAN_AND_MEAN
-#   define WIN32_LEAN_AND_MEAN  // We only need minimal includes
-# endif
-  // We need Windows NT or later for TryEnterCriticalSection().  If you
-  // don't need that functionality, you can remove these _WIN32_WINNT
-  // lines, and change TryLock() to assert(0) or something.
-# ifndef _WIN32_WINNT
-#   define _WIN32_WINNT 0x0400
-# endif
-# include <windows.h>
-  typedef CRITICAL_SECTION MutexType;
-#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
-  // Needed for pthread_rwlock_*.  If it causes problems, you could take it
-  // out, but then you'd have to unset HAVE_RWLOCK (at least on linux -- it
-  // *does* cause problems for FreeBSD or MacOSX, but isn't needed
-  // for locking there.)
-# ifdef __linux__
-#   define _XOPEN_SOURCE 500  // may be needed to get the rwlock calls
-# endif
-# include <pthread.h>
-  typedef pthread_rwlock_t MutexType;
-#elif defined(HAVE_PTHREAD)
-# include <pthread.h>
-  typedef pthread_mutex_t MutexType;
-#else
-# error Need to implement mutex.h for your architecture, or #define NO_THREADS
-#endif
-
-#include <assert.h>
-#include <stdlib.h>      // for abort()
-
-#define MUTEX_NAMESPACE perftools_mutex_namespace
-
-namespace MUTEX_NAMESPACE {
-
-class Mutex {
- public:
-  // This is used for the single-arg constructor
-  enum LinkerInitialized { LINKER_INITIALIZED };
-
-  // Create a Mutex that is not held by anybody.  This constructor is
-  // typically used for Mutexes allocated on the heap or the stack.
-  inline Mutex();
-  // This constructor should be used for global, static Mutex objects.
-  // It inhibits work being done by the destructor, which makes it
-  // safer for code that tries to acquire this mutex in its global
-  // destructor.
-  inline Mutex(LinkerInitialized);
-
-  // Destructor
-  inline ~Mutex();
-
-  inline void Lock();    // Block if needed until free then acquire exclusively
-  inline void Unlock();  // Release a lock acquired via Lock()
-  inline bool TryLock(); // If free, Lock() and return true, else return false
-  // Note that on systems that don't support read-write locks, these may
-  // be implemented as synonyms to Lock() and Unlock().  So you can use
-  // these for efficiency, but don't use them anyplace where being able
-  // to do shared reads is necessary to avoid deadlock.
-  inline void ReaderLock();   // Block until free or shared then acquire a share
-  inline void ReaderUnlock(); // Release a read share of this Mutex
-  inline void WriterLock() { Lock(); }     // Acquire an exclusive lock
-  inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock()
-
- private:
-  MutexType mutex_;
-  // We want to make sure that the compiler sets is_safe_ to true only
-  // when we tell it to, and never assumes that is_safe_ is
-  // always true.  volatile is the most reliable way to do that.
-  volatile bool is_safe_;
-  // This indicates which constructor was called.
-  bool destroy_;
-
-  inline void SetIsSafe() { is_safe_ = true; }
-
-  // Catch the error of writing Mutex when intending MutexLock.
-  Mutex(Mutex* /*ignored*/) {}
-  // Disallow "evil" constructors
-  Mutex(const Mutex&);
-  void operator=(const Mutex&);
-};
-
-// Now the implementation of Mutex for various systems
-#if defined(NO_THREADS)
-
-// When we don't have threads, we can be either reading or writing,
-// but not both.  We can have lots of readers at once (in no-threads
-// mode, that's most likely to happen in recursive function calls),
-// but only one writer.  We represent this by having mutex_ be -1 when
-// writing and a number > 0 when reading (and 0 when no lock is held).
-//
-// In debug mode, we assert these invariants, while in non-debug mode
-// we do nothing, for efficiency.  That's why everything is in an
-// assert.
-
-Mutex::Mutex() : mutex_(0) { }
-Mutex::Mutex(Mutex::LinkerInitialized) : mutex_(0) { }
-Mutex::~Mutex()            { assert(mutex_ == 0); }
-void Mutex::Lock()         { assert(--mutex_ == -1); }
-void Mutex::Unlock()       { assert(mutex_++ == -1); }
-bool Mutex::TryLock()      { if (mutex_) return false; Lock(); return true; }
-void Mutex::ReaderLock()   { assert(++mutex_ > 0); }
-void Mutex::ReaderUnlock() { assert(mutex_-- > 0); }
-
-#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
-
-Mutex::Mutex() : destroy_(true) {
-  InitializeCriticalSection(&mutex_);
-  SetIsSafe();
-}
-Mutex::Mutex(LinkerInitialized) : destroy_(false) {
-  InitializeCriticalSection(&mutex_);
-  SetIsSafe();
-}
-Mutex::~Mutex()            { if (destroy_) DeleteCriticalSection(&mutex_); }
-void Mutex::Lock()         { if (is_safe_) EnterCriticalSection(&mutex_); }
-void Mutex::Unlock()       { if (is_safe_) LeaveCriticalSection(&mutex_); }
-bool Mutex::TryLock()      { return is_safe_ ?
-                                 TryEnterCriticalSection(&mutex_) != 0 : true; }
-void Mutex::ReaderLock()   { Lock(); }      // we don't have read-write locks
-void Mutex::ReaderUnlock() { Unlock(); }
-
-#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
-
-#define SAFE_PTHREAD(fncall)  do {   /* run fncall if is_safe_ is true */  \
-  if (is_safe_ && fncall(&mutex_) != 0) abort();                           \
-} while (0)
-
-Mutex::Mutex() : destroy_(true) {
-  SetIsSafe();
-  if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
-}
-Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
-  SetIsSafe();
-  if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
-}
-Mutex::~Mutex()       { if (destroy_) SAFE_PTHREAD(pthread_rwlock_destroy); }
-void Mutex::Lock()         { SAFE_PTHREAD(pthread_rwlock_wrlock); }
-void Mutex::Unlock()       { SAFE_PTHREAD(pthread_rwlock_unlock); }
-bool Mutex::TryLock()      { return is_safe_ ?
-                               pthread_rwlock_trywrlock(&mutex_) == 0 : true; }
-void Mutex::ReaderLock()   { SAFE_PTHREAD(pthread_rwlock_rdlock); }
-void Mutex::ReaderUnlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
-#undef SAFE_PTHREAD
-
-#elif defined(HAVE_PTHREAD)
-
-#define SAFE_PTHREAD(fncall)  do {   /* run fncall if is_safe_ is true */  \
-  if (is_safe_ && fncall(&mutex_) != 0) abort();                           \
-} while (0)
-
-Mutex::Mutex() : destroy_(true) {
-  SetIsSafe();
-  if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
-}
-Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
-  SetIsSafe();
-  if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
-}
-Mutex::~Mutex()       { if (destroy_) SAFE_PTHREAD(pthread_mutex_destroy); }
-void Mutex::Lock()         { SAFE_PTHREAD(pthread_mutex_lock); }
-void Mutex::Unlock()       { SAFE_PTHREAD(pthread_mutex_unlock); }
-bool Mutex::TryLock()      { return is_safe_ ?
-                                 pthread_mutex_trylock(&mutex_) == 0 : true; }
-void Mutex::ReaderLock()   { Lock(); }
-void Mutex::ReaderUnlock() { Unlock(); }
-#undef SAFE_PTHREAD
-
-#endif
-
-// --------------------------------------------------------------------------
-// Some helper classes
-
-// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
-class MutexLock {
- public:
-  explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
-  ~MutexLock() { mu_->Unlock(); }
- private:
-  Mutex * const mu_;
-  // Disallow "evil" constructors
-  MutexLock(const MutexLock&);
-  void operator=(const MutexLock&);
-};
-
-// ReaderMutexLock and WriterMutexLock do the same, for rwlocks
-class ReaderMutexLock {
- public:
-  explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
-  ~ReaderMutexLock() { mu_->ReaderUnlock(); }
- private:
-  Mutex * const mu_;
-  // Disallow "evil" constructors
-  ReaderMutexLock(const ReaderMutexLock&);
-  void operator=(const ReaderMutexLock&);
-};
-
-class WriterMutexLock {
- public:
-  explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
-  ~WriterMutexLock() { mu_->WriterUnlock(); }
- private:
-  Mutex * const mu_;
-  // Disallow "evil" constructors
-  WriterMutexLock(const WriterMutexLock&);
-  void operator=(const WriterMutexLock&);
-};
-
-// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
-#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_decl_missing_var_name)
-#define ReaderMutexLock(x) COMPILE_ASSERT(0, rmutex_lock_decl_missing_var_name)
-#define WriterMutexLock(x) COMPILE_ASSERT(0, wmutex_lock_decl_missing_var_name)
-
-}  // namespace MUTEX_NAMESPACE
-
-using namespace MUTEX_NAMESPACE;
-
-#undef MUTEX_NAMESPACE
-
-#endif  /* GOOGLE_MUTEX_H_ */
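
Finally, a minimal usage sketch of the Mutex wrapper above; as with the
previous example it builds only inside the gperftools tree (it needs config.h),
and the counter code itself is a hypothetical illustration.

  // Hypothetical use of Mutex and the scoped-lock helpers.
  #include "base/simple_mutex.h"

  // Global mutexes should use the LinkerInitialized constructor so that the
  // destructor does no work during static destruction.
  static Mutex counter_mu(Mutex::LINKER_INITIALIZED);
  static int counter = 0;

  void Increment() {
    MutexLock l(&counter_mu);        // acquired in ctor, released in dtor
    ++counter;
  }

  int Read() {
    ReaderMutexLock l(&counter_mu);  // shared where rwlocks are available,
    return counter;                  // otherwise equivalent to Lock()/Unlock()
  }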