Posted to commits@quickstep.apache.org by ji...@apache.org on 2017/01/29 21:23:03 UTC

[05/55] [partial] incubator-quickstep git commit: Make the third party directory leaner.

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/sysinfo.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/sysinfo.h b/third_party/gperftools/src/base/sysinfo.h
deleted file mode 100644
index cc5cb74..0000000
--- a/third_party/gperftools/src/base/sysinfo.h
+++ /dev/null
@@ -1,236 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2006, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// All functions here are thread-hostile due to file caching unless
-// commented otherwise.
-
-#ifndef _SYSINFO_H_
-#define _SYSINFO_H_
-
-#include <config.h>
-
-#include <time.h>
-#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
-#include <windows.h>   // for DWORD
-#include <tlhelp32.h>  // for CreateToolhelp32Snapshot
-#endif
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>    // for pid_t
-#endif
-#include <stddef.h>    // for size_t
-#include <limits.h>    // for PATH_MAX
-#include "base/basictypes.h"
-#include "base/logging.h"   // for RawFD
-
-// This getenv function is safe to call before the C runtime is initialized.
-// On Windows, it utilizes GetEnvironmentVariable() and on unix it uses
-// /proc/self/environ instead of calling getenv().  It's intended to be used in
-// routines that run before main(), when the state required for getenv() may
-// not be set up yet.  In particular, errno isn't set up until relatively late
-// (after the pthreads library has a chance to make it threadsafe), and
-// getenv() doesn't work until then. 
-// On some platforms, this call will utilize the same, static buffer for
-// repeated GetenvBeforeMain() calls. Callers should not expect pointers from
-// this routine to be long lived.
-// Note that on unix, /proc only has the environment at the time the
-// application was started, so this routine ignores setenv() calls/etc.  Also
-// note it only reads the first 16K of the environment.
-extern const char* GetenvBeforeMain(const char* name);
-
-// This takes as an argument an environment-variable name (like
-// CPUPROFILE) whose value is supposed to be a file-path, and sets
-// path to that path, and returns true.  Non-trivial for surprising
-// reasons, as documented in sysinfo.cc.  path must have space for PATH_MAX bytes.
-extern bool GetUniquePathFromEnv(const char* env_name, char* path);
-
-extern int NumCPUs();
-
-void SleepForMilliseconds(int milliseconds);
-
-// Returns the processor cycles per second of each processor.  Thread-safe.
-extern double CyclesPerSecond(void);
-
-
-//  Return true if we're running POSIX (e.g., NPTL on Linux) threads,
-//  as opposed to a non-POSIX thread library.  The thing that we care
-//  about is whether a thread's pid is the same as the thread that
-//  spawned it.  If so, this function returns true.
-//  Thread-safe.
-//  Note: We consider false negatives to be OK.
-bool HasPosixThreads();
-
-#ifndef SWIG  // SWIG doesn't like struct Buffer and variable arguments.
-
-// A ProcMapsIterator abstracts access to /proc/maps for a given
-// process. Needs to be stack-allocatable and avoid using stdio/malloc
-// so it can be used in the google stack dumper, heap-profiler, etc.
-//
-// On Windows and Mac OS X, this iterator iterates *only* over DLLs
-// mapped into this process space.  For Linux, FreeBSD, and Solaris,
-// it iterates over *all* mapped memory regions, including anonymous
-// mmaps.  For other O/Ss, it is unlikely to work at all, and Valid()
-// will always return false.  Also note: this routine only works on
-// FreeBSD if procfs is mounted: make sure this is in your /etc/fstab:
-//    proc            /proc   procfs  rw 0 0
-class ProcMapsIterator {
- public:
-  struct Buffer {
-#ifdef __FreeBSD__
-    // FreeBSD requires us to read all of the maps file at once, so
-    // we have to make a buffer that's "always" big enough
-    static const size_t kBufSize = 102400;
-#else   // a one-line buffer is good enough
-    static const size_t kBufSize = PATH_MAX + 1024;
-#endif
-    char buf_[kBufSize];
-  };
-
-
-  // Create a new iterator for the specified pid.  pid can be 0 for "self".
-  explicit ProcMapsIterator(pid_t pid);
-
-  // Create an iterator with specified storage (for use in signal
-  // handler). "buffer" should point to a ProcMapsIterator::Buffer
-  // buffer can be NULL in which case a bufer will be allocated.
-  ProcMapsIterator(pid_t pid, Buffer *buffer);
-
-  // Iterate through maps_backing instead of maps if use_maps_backing
-  // is true.  Otherwise the same as above.  buffer can be NULL and
-  // it will allocate a buffer itself.
-  ProcMapsIterator(pid_t pid, Buffer *buffer,
-                   bool use_maps_backing);
-
-  // Returns true if the iterator was successfully initialized.
-  bool Valid() const;
-
-  // Returns a pointer to the most recently parsed line. Only valid
-  // after Next() returns true, and until the iterator is destroyed or
-  // Next() is called again.  This may give strange results on non-Linux
-  // systems.  Prefer FormatLine() if that may be a concern.
-  const char *CurrentLine() const { return stext_; }
-
-  // Writes the "canonical" form of the /proc/xxx/maps info for a single
-  // line to the passed-in buffer. Returns the number of bytes written,
-  // or 0 if it was not able to write the complete line.  (To guarantee
-  // success, buffer should have size at least Buffer::kBufSize.)
-  // Takes as arguments values set via a call to Next().  The
-  // "canonical" form of the line (taken from linux's /proc/xxx/maps):
-  //    <start_addr(hex)>-<end_addr(hex)> <perms(rwxp)> <offset(hex)>
-  //    <major_dev(hex)>:<minor_dev(hex)> <inode> <filename>
-  // e.g.
-  //    08048000-0804c000 r-xp 00000000 03:01 3793678    /bin/cat
-  // If you don't have the dev_t (dev), feel free to pass in 0.
-  // (Next() doesn't return a dev_t, though NextExt does.)
-  //
-  // Note: if filename and flags were obtained via a call to Next(),
-  // then the output of this function is only valid if Next() returned
-  // true, and only until the iterator is destroyed or Next() is
-  // called again.  (Since filename, at least, points into CurrentLine.)
-  static int FormatLine(char* buffer, int bufsize,
-                        uint64 start, uint64 end, const char *flags,
-                        uint64 offset, int64 inode, const char *filename,
-                        dev_t dev);
-
-  // Find the next entry in /proc/maps; return true if found or false
-  // if at the end of the file.
-  //
-  // Any of the result pointers can be NULL if you're not interested
-  // in those values.
-  //
-  // If "flags" and "filename" are passed, they end up pointing to
-  // storage within the ProcMapsIterator that is valid only until the
-  // iterator is destroyed or Next() is called again. The caller may
-  // modify the contents of these strings (up as far as the first NUL,
-  // and only until the subsequent call to Next()) if desired.
-
-  // The offsets are all uint64 in order to handle the case of a
-  // 32-bit process running on a 64-bit kernel
-  //
-  // IMPORTANT NOTE: see top-of-class notes for details about what
-  // mapped regions Next() iterates over, depending on O/S.
-  // TODO(csilvers): make flags and filename const.
-  bool Next(uint64 *start, uint64 *end, char **flags,
-            uint64 *offset, int64 *inode, char **filename);
-
-  bool NextExt(uint64 *start, uint64 *end, char **flags,
-               uint64 *offset, int64 *inode, char **filename,
-               uint64 *file_mapping, uint64 *file_pages,
-               uint64 *anon_mapping, uint64 *anon_pages,
-               dev_t *dev);
-
-  ~ProcMapsIterator();
-
- private:
-  void Init(pid_t pid, Buffer *buffer, bool use_maps_backing);
-
-  char *ibuf_;        // input buffer
-  char *stext_;       // start of text
-  char *etext_;       // end of text
-  char *nextline_;    // start of next line
-  char *ebuf_;        // end of buffer (1 char for a nul)
-#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
-  HANDLE snapshot_;   // filehandle on dll info
-  // In a change from the usual W-A pattern, there is no A variant of
-  // MODULEENTRY32.  Tlhelp32.h #defines the W variant, but not the A.
-  // We want the original A variants, and this #undef is the only
-  // way I see to get them.  Redefining it when we're done prevents us
-  // from affecting other .cc files.
-# ifdef MODULEENTRY32  // Alias of W
-#   undef MODULEENTRY32
-  MODULEENTRY32 module_;   // info about current dll (and dll iterator)
-#   define MODULEENTRY32 MODULEENTRY32W
-# else  // It's the ascii, the one we want.
-  MODULEENTRY32 module_;   // info about current dll (and dll iterator)
-# endif
-#elif defined(__MACH__)
-  int current_image_; // dll's are called "images" in macos parlance
-  int current_load_cmd_;   // the segment of this dll we're examining
-#elif defined(__sun__)     // Solaris
-  int fd_;
-  char current_filename_[PATH_MAX];
-#else
-  int fd_;            // filehandle on /proc/*/maps
-#endif
-  pid_t pid_;
-  char flags_[10];
-  Buffer* dynamic_buffer_;  // dynamically-allocated Buffer
-  bool using_maps_backing_; // true if we are looking at maps_backing instead of maps.
-};
-
-#endif  /* #ifndef SWIG */
-
-// Helper routines
-
-namespace tcmalloc {
-int FillProcSelfMaps(char buf[], int size, bool* wrote_all);
-void DumpProcSelfMaps(RawFD fd);
-}
-
-#endif   /* #ifndef _SYSINFO_H_ */
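
For context, the ProcMapsIterator API documented in the header removed above is
typically driven as in the following minimal sketch (illustrative only;
DumpSelfMaps is a hypothetical helper, not part of gperftools or of this commit):

#include <stdio.h>
#include "base/sysinfo.h"

// Walks /proc/self/maps (pid 0 means "self") and prints each region in the
// canonical /proc/<pid>/maps format produced by FormatLine().
static void DumpSelfMaps() {
  ProcMapsIterator it(0);
  if (!it.Valid()) return;   // /proc unavailable or unsupported OS
  uint64 start, end, offset;
  int64 inode;
  char *flags, *filename;
  while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
    char line[ProcMapsIterator::Buffer::kBufSize];
    // Pass 0 for dev_t since Next() does not report it.
    if (ProcMapsIterator::FormatLine(line, sizeof(line), start, end, flags,
                                     offset, inode, filename, 0) > 0) {
      fputs(line, stdout);
    }
  }
}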

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/thread_annotations.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/thread_annotations.h b/third_party/gperftools/src/base/thread_annotations.h
deleted file mode 100644
index f57b299..0000000
--- a/third_party/gperftools/src/base/thread_annotations.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Le-Chun Wu
-//
-// This header file contains the macro definitions for thread safety
-// annotations that allow the developers to document the locking policies
-// of their multi-threaded code. The annotations can also help program
-// analysis tools to identify potential thread safety issues.
-//
-// The annotations are implemented using GCC's "attributes" extension.
-// Using the macros defined here instead of the raw GCC attributes allows
-// for portability and future compatibility.
-//
-// This functionality is not yet fully implemented in perftools,
-// but may be one day.
-
-#ifndef BASE_THREAD_ANNOTATIONS_H_
-#define BASE_THREAD_ANNOTATIONS_H_
-
-
-#if defined(__GNUC__) \
-  && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) \
-  && defined(__SUPPORT_TS_ANNOTATION__) && (!defined(SWIG))
-#define THREAD_ANNOTATION_ATTRIBUTE__(x)   __attribute__((x))
-#else
-#define THREAD_ANNOTATION_ATTRIBUTE__(x)   // no-op
-#endif
-
-
-// Document if a shared variable/field needs to be protected by a lock.
-// GUARDED_BY allows the user to specify a particular lock that should be
-// held when accessing the annotated variable, while GUARDED_VAR only
-// indicates a shared variable should be guarded (by any lock). GUARDED_VAR
-// is primarily used when the client cannot express the name of the lock.
-#define GUARDED_BY(x)          THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
-#define GUARDED_VAR            THREAD_ANNOTATION_ATTRIBUTE__(guarded)
-
-// Document if the memory location pointed to by a pointer should be guarded
-// by a lock when dereferencing the pointer. Similar to GUARDED_VAR,
-// PT_GUARDED_VAR is primarily used when the client cannot express the name
-// of the lock. Note that a pointer variable to a shared memory location
-// could itself be a shared variable. For example, if a shared global pointer
-// q, which is guarded by mu1, points to a shared memory location that is
-// guarded by mu2, q should be annotated as follows:
-//     int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
-#define PT_GUARDED_BY(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
-#define PT_GUARDED_VAR \
-  THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded)
-
-// Document the acquisition order between locks that can be held
-// simultaneously by a thread. For any two locks that need to be annotated
-// to establish an acquisition order, only one of them needs the annotation.
-// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
-// and ACQUIRED_BEFORE.)
-#define ACQUIRED_AFTER(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(x))
-#define ACQUIRED_BEFORE(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(x))
-
-// The following three annotations document the lock requirements for
-// functions/methods.
-
-// Document if a function expects certain locks to be held before it is called
-#define EXCLUSIVE_LOCKS_REQUIRED(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x))
-
-#define SHARED_LOCKS_REQUIRED(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(x))
-
-// Document the locks acquired in the body of the function. These locks
-// cannot be held when calling this function (as google3's Mutex locks are
-// non-reentrant).
-#define LOCKS_EXCLUDED(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(x))
-
-// Document the lock the annotated function returns without acquiring it.
-#define LOCK_RETURNED(x)       THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
-
-// Document if a class/type is a lockable type (such as the Mutex class).
-#define LOCKABLE               THREAD_ANNOTATION_ATTRIBUTE__(lockable)
-
-// Document if a class is a scoped lockable type (such as the MutexLock class).
-#define SCOPED_LOCKABLE        THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
-
-// The following annotations specify lock and unlock primitives.
-#define EXCLUSIVE_LOCK_FUNCTION(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(x))
-
-#define SHARED_LOCK_FUNCTION(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(x))
-
-#define EXCLUSIVE_TRYLOCK_FUNCTION(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(x))
-
-#define SHARED_TRYLOCK_FUNCTION(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(x))
-
-#define UNLOCK_FUNCTION(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(unlock(x))
-
-// An escape hatch for thread safety analysis to ignore the annotated function.
-#define NO_THREAD_SAFETY_ANALYSIS \
-  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
-
-#endif  // BASE_THREAD_ANNOTATIONS_H_
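
The annotation macros removed above attach lock-usage documentation directly to
declarations.  A minimal sketch of their intended use, assuming a hypothetical
Counter class built on gperftools' SpinLock:

#include "base/spinlock.h"            // SpinLock, SpinLockHolder
#include "base/thread_annotations.h"

class Counter {
 public:
  // Must be called without lock_ held (SpinLock is non-reentrant).
  void Increment() LOCKS_EXCLUDED(lock_) {
    SpinLockHolder h(&lock_);
    IncrementLocked();
  }

 private:
  // May only be called with lock_ already held.
  void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(lock_) { ++value_; }

  SpinLock lock_;
  int value_ GUARDED_BY(lock_);       // shared data protected by lock_
};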

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/thread_lister.c
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/thread_lister.c b/third_party/gperftools/src/base/thread_lister.c
deleted file mode 100644
index ca1b2de..0000000
--- a/third_party/gperftools/src/base/thread_lister.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/* Copyright (c) 2005-2007, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Markus Gutschke
- */
-
-#include "config.h"
-#include <stdio.h>         /* needed for NULL on some powerpc platforms (?!) */
-#ifdef HAVE_SYS_PRCTL
-# include <sys/prctl.h>
-#endif
-#include "base/thread_lister.h"
-#include "base/linuxthreads.h"
-/* Include other thread listers here that define THREADS macro
- * only when they can provide a good implementation.
- */
-
-#ifndef THREADS
-
-/* Default trivial thread lister for single-threaded applications,
- * or if the multi-threading code has not been ported, yet.
- */
-
-int TCMalloc_ListAllProcessThreads(void *parameter,
-				   ListAllProcessThreadsCallBack callback, ...) {
-  int rc;
-  va_list ap;
-  pid_t pid;
-
-#ifdef HAVE_SYS_PRCTL
-  int dumpable = prctl(PR_GET_DUMPABLE, 0);
-  if (!dumpable)
-    prctl(PR_SET_DUMPABLE, 1);
-#endif
-  va_start(ap, callback);
-  pid = getpid();
-  rc = callback(parameter, 1, &pid, ap);
-  va_end(ap);
-#ifdef HAVE_SYS_PRCTL
-  if (!dumpable)
-    prctl(PR_SET_DUMPABLE, 0);
-#endif
-  return rc;
-}
-
-int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
-  return 1;
-}
-
-#endif   /* ifndef THREADS */

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/thread_lister.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/thread_lister.h b/third_party/gperftools/src/base/thread_lister.h
deleted file mode 100644
index 6e70b89..0000000
--- a/third_party/gperftools/src/base/thread_lister.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* -*- Mode: c; c-basic-offset: 2; indent-tabs-mode: nil -*- */
-/* Copyright (c) 2005-2007, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Markus Gutschke
- */
-
-#ifndef _THREAD_LISTER_H
-#define _THREAD_LISTER_H
-
-#include <stdarg.h>
-#include <sys/types.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef int (*ListAllProcessThreadsCallBack)(void *parameter,
-                                             int num_threads,
-                                             pid_t *thread_pids,
-                                             va_list ap);
-
-/* This function gets the list of all linux threads of the current process
- * and passes them to the 'callback' along with the 'parameter' pointer; at
- * the time the callback is invoked, all the threads are paused via
- * PTRACE_ATTACH.
- * The callback is executed from a separate thread which shares only the
- * address space, the filesystem, and the filehandles with the caller. Most
- * notably, it does not share the same pid and ppid; and if it terminates,
- * the rest of the application is still there. 'callback' is supposed to call
- * (or arrange for a call to) TCMalloc_ResumeAllProcessThreads. This happens
- * automatically if the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous
- * signals are blocked. If the 'callback' decides to unblock them, it must
- * ensure that they cannot terminate the application, or that
- * TCMalloc_ResumeAllProcessThreads will get called.
- * It is an error for the 'callback' to make any library calls that could
- * acquire locks. Most notably, this means that most system calls have to
- * avoid going through libc. Also, this means that it is not legal to call
- * exit() or abort().
- * We return -1 on error and the return value of 'callback' on success.
- */
-int TCMalloc_ListAllProcessThreads(void *parameter,
-                                   ListAllProcessThreadsCallBack callback, ...);
-
-/* This function resumes the list of all linux threads that
- * TCMalloc_ListAllProcessThreads pauses before giving to its
- * callback.  The function returns non-zero if at least one thread was
- * suspended and has now been resumed.
- */
-int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  /* _THREAD_LISTER_H */
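
A minimal sketch of how the listing API above would be driven (illustrative
only; CountThreads and GetThreadCount are hypothetical names, and a real
callback must obey the no-locks rule described in the removed header):

#include "base/thread_lister.h"

// Counts the paused threads and resumes them before returning, as the
// header requires the callback to arrange.
static int CountThreads(void *parameter, int num_threads,
                        pid_t *thread_pids, va_list /*unused*/) {
  *static_cast<int *>(parameter) = num_threads;
  TCMalloc_ResumeAllProcessThreads(num_threads, thread_pids);
  return num_threads;
}

int GetThreadCount() {
  int count = 0;
  int rc = TCMalloc_ListAllProcessThreads(&count, CountThreads);
  return rc < 0 ? -1 : count;   // -1 is the error value per the header comment
}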

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/vdso_support.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/vdso_support.cc b/third_party/gperftools/src/base/vdso_support.cc
deleted file mode 100644
index 730df30..0000000
--- a/third_party/gperftools/src/base/vdso_support.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Paul Pluzhnikov
-//
-// Allow dynamic symbol lookup in the kernel VDSO page.
-//
-// VDSOSupport -- a class representing kernel VDSO (if present).
-//
-
-#include "base/vdso_support.h"
-
-#ifdef HAVE_VDSO_SUPPORT     // defined in vdso_support.h
-
-#include <fcntl.h>
-#include <stddef.h>   // for ptrdiff_t
-
-#include "base/atomicops.h"  // for MemoryBarrier
-#include "base/linux_syscall_support.h"
-#include "base/logging.h"
-#include "base/dynamic_annotations.h"
-#include "base/basictypes.h"  // for COMPILE_ASSERT
-
-using base::subtle::MemoryBarrier;
-
-#ifndef AT_SYSINFO_EHDR
-#define AT_SYSINFO_EHDR 33
-#endif
-
-namespace base {
-
-const void *VDSOSupport::vdso_base_ = ElfMemImage::kInvalidBase;
-VDSOSupport::VDSOSupport()
-    // If vdso_base_ is still set to kInvalidBase, we got here
-    // before VDSOSupport::Init has been called. Call it now.
-    : image_(vdso_base_ == ElfMemImage::kInvalidBase ? Init() : vdso_base_) {
-}
-
-// NOTE: we can't use GoogleOnceInit() below, because we can be
-// called by tcmalloc, and none of the *once* stuff may be functional yet.
-//
-// In addition, we hope that the VDSOSupportHelper constructor
-// causes this code to run before there are any threads, and before
-// InitGoogle() has executed any chroot or setuid calls.
-//
-// Finally, even if there is a race here, it is harmless, because
-// the operation should be idempotent.
-const void *VDSOSupport::Init() {
-  if (vdso_base_ == ElfMemImage::kInvalidBase) {
-    // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
-    // on stack, and so glibc works as if VDSO was not present.
-    // But going directly to kernel via /proc/self/auxv below bypasses
-    // Valgrind zapping. So we check for Valgrind separately.
-    if (RunningOnValgrind()) {
-      vdso_base_ = NULL;
-      return NULL;
-    }
-    int fd = open("/proc/self/auxv", O_RDONLY);
-    if (fd == -1) {
-      // Kernel too old to have a VDSO.
-      vdso_base_ = NULL;
-      return NULL;
-    }
-    ElfW(auxv_t) aux;
-    while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
-      if (aux.a_type == AT_SYSINFO_EHDR) {
-        COMPILE_ASSERT(sizeof(vdso_base_) == sizeof(aux.a_un.a_val),
-                       unexpected_sizeof_pointer_NE_sizeof_a_val);
-        vdso_base_ = reinterpret_cast<void *>(aux.a_un.a_val);
-        break;
-      }
-    }
-    close(fd);
-    if (vdso_base_ == ElfMemImage::kInvalidBase) {
-      // Didn't find AT_SYSINFO_EHDR in auxv[].
-      vdso_base_ = NULL;
-    }
-  }
-  return vdso_base_;
-}
-
-const void *VDSOSupport::SetBase(const void *base) {
-  CHECK(base != ElfMemImage::kInvalidBase);
-  const void *old_base = vdso_base_;
-  vdso_base_ = base;
-  image_.Init(base);
-  return old_base;
-}
-
-bool VDSOSupport::LookupSymbol(const char *name,
-                               const char *version,
-                               int type,
-                               SymbolInfo *info) const {
-  return image_.LookupSymbol(name, version, type, info);
-}
-
-bool VDSOSupport::LookupSymbolByAddress(const void *address,
-                                        SymbolInfo *info_out) const {
-  return image_.LookupSymbolByAddress(address, info_out);
-}
-
-// We need to make sure VDSOSupport::Init() is called before
-// main() runs, since main() might do something like setuid or
-// chroot.  If VDSOSupport
-// is used in any global constructor, this will happen, since
-// VDSOSupport's constructor calls Init.  But if not, we need to
-// ensure it here, with a global constructor of our own.  This
-// is an allowed exception to the normal rule against non-trivial
-// global constructors.
-static class VDSOInitHelper {
- public:
-  VDSOInitHelper() { VDSOSupport::Init(); }
-} vdso_init_helper;
-}
-
-#endif  // HAVE_VDSO_SUPPORT

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/base/vdso_support.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/base/vdso_support.h b/third_party/gperftools/src/base/vdso_support.h
deleted file mode 100644
index c1209a4..0000000
--- a/third_party/gperftools/src/base/vdso_support.h
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Paul Pluzhnikov
-//
-// Allow dynamic symbol lookup in the kernel VDSO page.
-//
-// VDSO stands for "Virtual Dynamic Shared Object" -- a page of
-// executable code, which looks like a shared library, but doesn't
-// necessarily exist anywhere on disk, and which gets mmap()ed into
-// every process by kernels which support VDSO, such as 2.6.x for 32-bit
-// executables, and 2.6.24 and above for 64-bit executables.
-//
-// More details could be found here:
-// http://www.trilithium.com/johan/2005/08/linux-gate/
-//
-// VDSOSupport -- a class representing kernel VDSO (if present).
-//
-// Example usage:
-//  VDSOSupport vdso;
-//  VDSOSupport::SymbolInfo info;
-//  typedef long (*FN)(unsigned *, void *, void *);
-//  FN fn = NULL;
-//  if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
-//     fn = reinterpret_cast<FN>(info.address);
-//  }
-
-#ifndef BASE_VDSO_SUPPORT_H_
-#define BASE_VDSO_SUPPORT_H_
-
-#include <config.h>
-#include "base/basictypes.h"
-#include "base/elf_mem_image.h"
-
-#ifdef HAVE_ELF_MEM_IMAGE
-
-#define HAVE_VDSO_SUPPORT 1
-
-#include <stdlib.h>     // for NULL
-
-namespace base {
-
-// NOTE: this class may be used from within tcmalloc, and can not
-// use any memory allocation routines.
-class VDSOSupport {
- public:
-  VDSOSupport();
-
-  typedef ElfMemImage::SymbolInfo SymbolInfo;
-  typedef ElfMemImage::SymbolIterator SymbolIterator;
-
-  // Answers whether we have a vdso at all.
-  bool IsPresent() const { return image_.IsPresent(); }
-
-  // Allows iteration over all VDSO symbols.
-  SymbolIterator begin() const { return image_.begin(); }
-  SymbolIterator end() const { return image_.end(); }
-
-  // Look up versioned dynamic symbol in the kernel VDSO.
-  // Returns false if VDSO is not present, or doesn't contain given
-  // symbol/version/type combination.
-  // If info_out != NULL, additional details are filled in.
-  bool LookupSymbol(const char *name, const char *version,
-                    int symbol_type, SymbolInfo *info_out) const;
-
-  // Find info about symbol (if any) which overlaps given address.
-  // Returns true if symbol was found; false if VDSO isn't present
-  // or doesn't have a symbol overlapping given address.
-  // If info_out != NULL, additional details are filled in.
-  bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
-
-  // Used only for testing. Replace real VDSO base with a mock.
-  // Returns previous value of vdso_base_. After you are done testing,
-  // you are expected to call SetBase() with previous value, in order to
-  // reset state to the way it was.
-  const void *SetBase(const void *s);
-
-  // Computes vdso_base_ and returns it. Should be called as early as
-  // possible; before any thread creation, chroot or setuid.
-  static const void *Init();
-
- private:
-  // image_ represents VDSO ELF image in memory.
-  // image_.ehdr_ == NULL implies there is no VDSO.
-  ElfMemImage image_;
-
-  // Cached value of auxv AT_SYSINFO_EHDR, computed once.
-  // This is a tri-state:
-  //   kInvalidBase   => value hasn't been determined yet.
-  //              0   => there is no VDSO.
-  //           else   => vma of VDSO Elf{32,64}_Ehdr.
-  //
-  // When testing with mock VDSO, low bit is set.
-  // The low bit is always available because vdso_base_ is
-  // page-aligned.
-  static const void *vdso_base_;
-
-  DISALLOW_COPY_AND_ASSIGN(VDSOSupport);
-};
-
-}  // namespace base
-
-#endif  // HAVE_ELF_MEM_IMAGE
-
-#endif  // BASE_VDSO_SUPPORT_H_
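
Expanding the usage example from the removed header into a compilable sketch
(illustrative only; the __vdso_getcpu symbol and "LINUX_2.6" version are
kernel- and architecture-specific, and GetCpuViaVdso is a hypothetical helper):

#include "base/vdso_support.h"

#ifdef HAVE_VDSO_SUPPORT
long GetCpuViaVdso(unsigned *cpu) {
  typedef long (*GetCpuFn)(unsigned *, void *, void *);
  base::VDSOSupport vdso;
  base::VDSOSupport::SymbolInfo info;
  if (!vdso.IsPresent() ||
      !vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
    return -1;  // no usable VDSO symbol; real code would fall back to a syscall
  }
  GetCpuFn fn = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
  return fn(cpu, NULL, NULL);
}
#endif  // HAVE_VDSO_SUPPORT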

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/central_freelist.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/central_freelist.cc b/third_party/gperftools/src/central_freelist.cc
deleted file mode 100644
index 11b190d..0000000
--- a/third_party/gperftools/src/central_freelist.cc
+++ /dev/null
@@ -1,387 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <op...@google.com>
-
-#include "config.h"
-#include <algorithm>
-#include "central_freelist.h"
-#include "internal_logging.h"  // for ASSERT, MESSAGE
-#include "linked_list.h"       // for SLL_Next, SLL_Push, etc
-#include "page_heap.h"         // for PageHeap
-#include "static_vars.h"       // for Static
-
-using std::min;
-using std::max;
-
-namespace tcmalloc {
-
-void CentralFreeList::Init(size_t cl) {
-  size_class_ = cl;
-  tcmalloc::DLL_Init(&empty_);
-  tcmalloc::DLL_Init(&nonempty_);
-  num_spans_ = 0;
-  counter_ = 0;
-
-  max_cache_size_ = kMaxNumTransferEntries;
-#ifdef TCMALLOC_SMALL_BUT_SLOW
-  // Disable the transfer cache for the small footprint case.
-  cache_size_ = 0;
-#else
-  cache_size_ = 16;
-#endif
-  if (cl > 0) {
-    // Limit the maximum size of the cache based on the size class.  If this
-    // is not done, large size class objects will consume a lot of memory if
-    // they just sit in the transfer cache.
-    int32_t bytes = Static::sizemap()->ByteSizeForClass(cl);
-    int32_t objs_to_move = Static::sizemap()->num_objects_to_move(cl);
-
-    ASSERT(objs_to_move > 0 && bytes > 0);
-    // Limit each size class cache to at most 1MB of objects or one entry,
-    // whichever is greater. Total transfer cache memory used across all
-    // size classes then can't be greater than approximately
-    // 1MB * kMaxNumTransferEntries.
-    // min and max are in parens to avoid macro-expansion on windows.
-    max_cache_size_ = (min)(max_cache_size_,
-                          (max)(1, (1024 * 1024) / (bytes * objs_to_move)));
-    cache_size_ = (min)(cache_size_, max_cache_size_);
-  }
-  used_slots_ = 0;
-  ASSERT(cache_size_ <= max_cache_size_);
-}
-
-void CentralFreeList::ReleaseListToSpans(void* start) {
-  while (start) {
-    void *next = SLL_Next(start);
-    ReleaseToSpans(start);
-    start = next;
-  }
-}
-
-// MapObjectToSpan should logically be part of ReleaseToSpans.  But
-// this triggers an optimization bug in gcc 4.5.0.  Moving to a
-// separate function, and making sure that function isn't inlined,
-// seems to fix the problem.  It also should be fixed for gcc 4.5.1.
-static
-#if __GNUC__ == 4 && __GNUC_MINOR__ == 5 && __GNUC_PATCHLEVEL__ == 0
-__attribute__ ((noinline))
-#endif
-Span* MapObjectToSpan(void* object) {
-  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
-  Span* span = Static::pageheap()->GetDescriptor(p);
-  return span;
-}
-
-void CentralFreeList::ReleaseToSpans(void* object) {
-  Span* span = MapObjectToSpan(object);
-  ASSERT(span != NULL);
-  ASSERT(span->refcount > 0);
-
-  // If span is empty, move it to non-empty list
-  if (span->objects == NULL) {
-    tcmalloc::DLL_Remove(span);
-    tcmalloc::DLL_Prepend(&nonempty_, span);
-    Event(span, 'N', 0);
-  }
-
-  // The following check is expensive, so it is disabled by default
-  if (false) {
-    // Check that object does not occur in list
-    int got = 0;
-    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
-      ASSERT(p != object);
-      got++;
-    }
-    ASSERT(got + span->refcount ==
-           (span->length<<kPageShift) /
-           Static::sizemap()->ByteSizeForClass(span->sizeclass));
-  }
-
-  counter_++;
-  span->refcount--;
-  if (span->refcount == 0) {
-    Event(span, '#', 0);
-    counter_ -= ((span->length<<kPageShift) /
-                 Static::sizemap()->ByteSizeForClass(span->sizeclass));
-    tcmalloc::DLL_Remove(span);
-    --num_spans_;
-
-    // Release central list lock while operating on pageheap
-    lock_.Unlock();
-    {
-      SpinLockHolder h(Static::pageheap_lock());
-      Static::pageheap()->Delete(span);
-    }
-    lock_.Lock();
-  } else {
-    *(reinterpret_cast<void**>(object)) = span->objects;
-    span->objects = object;
-  }
-}
-
-bool CentralFreeList::EvictRandomSizeClass(
-    int locked_size_class, bool force) {
-  static int race_counter = 0;
-  int t = race_counter++;  // Updated without a lock, but who cares.
-  if (t >= kNumClasses) {
-    while (t >= kNumClasses) {
-      t -= kNumClasses;
-    }
-    race_counter = t;
-  }
-  ASSERT(t >= 0);
-  ASSERT(t < kNumClasses);
-  if (t == locked_size_class) return false;
-  return Static::central_cache()[t].ShrinkCache(locked_size_class, force);
-}
-
-bool CentralFreeList::MakeCacheSpace() {
-  // Is there room in the cache?
-  if (used_slots_ < cache_size_) return true;
-  // Can we expand this cache?
-  if (cache_size_ == max_cache_size_) return false;
-  // Ok, we'll try to grab an entry from some other size class.
-  if (EvictRandomSizeClass(size_class_, false) ||
-      EvictRandomSizeClass(size_class_, true)) {
-    // Succeeded in evicting, we're going to make our cache larger.
-    // However, we may have dropped and re-acquired the lock in
-    // EvictRandomSizeClass (via ShrinkCache and the LockInverter), so the
-    // cache_size may have changed.  Therefore, check and verify that it is
-    // still OK to increase the cache_size.
-    if (cache_size_ < max_cache_size_) {
-      cache_size_++;
-      return true;
-    }
-  }
-  return false;
-}
-
-
-namespace {
-class LockInverter {
- private:
-  SpinLock *held_, *temp_;
- public:
-  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
-    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
-  inline ~LockInverter() { temp_->Unlock(); held_->Lock();  }
-};
-}
-
-// This function is marked as NO_THREAD_SAFETY_ANALYSIS because it uses
-// LockInverter to release one lock and acquire another in scoped-lock
-// style, which our current annotation/analysis does not support.
-bool CentralFreeList::ShrinkCache(int locked_size_class, bool force)
-    NO_THREAD_SAFETY_ANALYSIS {
-  // Start with a quick check without taking a lock.
-  if (cache_size_ == 0) return false;
-  // We don't evict from a full cache unless we are 'forcing'.
-  if (force == false && used_slots_ == cache_size_) return false;
-
-  // Grab lock, but first release the other lock held by this thread.  We use
-  // the lock inverter to ensure that we never hold two size class locks
-  // concurrently.  That can create a deadlock because there is no well
-  // defined nesting order.
-  LockInverter li(&Static::central_cache()[locked_size_class].lock_, &lock_);
-  ASSERT(used_slots_ <= cache_size_);
-  ASSERT(0 <= cache_size_);
-  if (cache_size_ == 0) return false;
-  if (used_slots_ == cache_size_) {
-    if (force == false) return false;
-    // ReleaseListToSpans releases the lock, so we have to make all the
-    // updates to the central list before calling it.
-    cache_size_--;
-    used_slots_--;
-    ReleaseListToSpans(tc_slots_[used_slots_].head);
-    return true;
-  }
-  cache_size_--;
-  return true;
-}
-
-void CentralFreeList::InsertRange(void *start, void *end, int N) {
-  SpinLockHolder h(&lock_);
-  if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
-    MakeCacheSpace()) {
-    int slot = used_slots_++;
-    ASSERT(slot >=0);
-    ASSERT(slot < max_cache_size_);
-    TCEntry *entry = &tc_slots_[slot];
-    entry->head = start;
-    entry->tail = end;
-    return;
-  }
-  ReleaseListToSpans(start);
-}
-
-int CentralFreeList::RemoveRange(void **start, void **end, int N) {
-  ASSERT(N > 0);
-  lock_.Lock();
-  if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
-      used_slots_ > 0) {
-    int slot = --used_slots_;
-    ASSERT(slot >= 0);
-    TCEntry *entry = &tc_slots_[slot];
-    *start = entry->head;
-    *end = entry->tail;
-    lock_.Unlock();
-    return N;
-  }
-
-  int result = 0;
-  *start = NULL;
-  *end = NULL;
-  // TODO: Prefetch multiple TCEntries?
-  result = FetchFromOneSpansSafe(N, start, end);
-  if (result != 0) {
-    while (result < N) {
-      int n;
-      void* head = NULL;
-      void* tail = NULL;
-      n = FetchFromOneSpans(N - result, &head, &tail);
-      if (!n) break;
-      result += n;
-      SLL_PushRange(start, head, tail);
-    }
-  }
-  lock_.Unlock();
-  return result;
-}
-
-
-int CentralFreeList::FetchFromOneSpansSafe(int N, void **start, void **end) {
-  int result = FetchFromOneSpans(N, start, end);
-  if (!result) {
-    Populate();
-    result = FetchFromOneSpans(N, start, end);
-  }
-  return result;
-}
-
-int CentralFreeList::FetchFromOneSpans(int N, void **start, void **end) {
-  if (tcmalloc::DLL_IsEmpty(&nonempty_)) return 0;
-  Span* span = nonempty_.next;
-
-  ASSERT(span->objects != NULL);
-
-  int result = 0;
-  void *prev, *curr;
-  curr = span->objects;
-  do {
-    prev = curr;
-    curr = *(reinterpret_cast<void**>(curr));
-  } while (++result < N && curr != NULL);
-
-  if (curr == NULL) {
-    // Move to empty list
-    tcmalloc::DLL_Remove(span);
-    tcmalloc::DLL_Prepend(&empty_, span);
-    Event(span, 'E', 0);
-  }
-
-  *start = span->objects;
-  *end = prev;
-  span->objects = curr;
-  SLL_SetNext(*end, NULL);
-  span->refcount += result;
-  counter_ -= result;
-  return result;
-}
-
-// Fetch memory from the system and add to the central cache freelist.
-void CentralFreeList::Populate() {
-  // Release central list lock while operating on pageheap
-  lock_.Unlock();
-  const size_t npages = Static::sizemap()->class_to_pages(size_class_);
-
-  Span* span;
-  {
-    SpinLockHolder h(Static::pageheap_lock());
-    span = Static::pageheap()->New(npages);
-    if (span) Static::pageheap()->RegisterSizeClass(span, size_class_);
-  }
-  if (span == NULL) {
-    Log(kLog, __FILE__, __LINE__,
-        "tcmalloc: allocation failed", npages << kPageShift);
-    lock_.Lock();
-    return;
-  }
-  ASSERT(span->length == npages);
-  // Cache sizeclass info eagerly.  Locking is not necessary.
-  // (Instead of being eager, we could just replace any stale info
-  // about this span, but that seems to be no better in practice.)
-  for (int i = 0; i < npages; i++) {
-    Static::pageheap()->CacheSizeClass(span->start + i, size_class_);
-  }
-
-  // Split the block into pieces and add to the free-list
-  // TODO: coloring of objects to avoid cache conflicts?
-  void** tail = &span->objects;
-  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
-  char* limit = ptr + (npages << kPageShift);
-  const size_t size = Static::sizemap()->ByteSizeForClass(size_class_);
-  int num = 0;
-  while (ptr + size <= limit) {
-    *tail = ptr;
-    tail = reinterpret_cast<void**>(ptr);
-    ptr += size;
-    num++;
-  }
-  ASSERT(ptr <= limit);
-  *tail = NULL;
-  span->refcount = 0; // No sub-object in use yet
-
-  // Add span to list of non-empty spans
-  lock_.Lock();
-  tcmalloc::DLL_Prepend(&nonempty_, span);
-  ++num_spans_;
-  counter_ += num;
-}
-
-int CentralFreeList::tc_length() {
-  SpinLockHolder h(&lock_);
-  return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_);
-}
-
-size_t CentralFreeList::OverheadBytes() {
-  SpinLockHolder h(&lock_);
-  if (size_class_ == 0) {  // 0 holds the 0-sized allocations
-    return 0;
-  }
-  const size_t pages_per_span = Static::sizemap()->class_to_pages(size_class_);
-  const size_t object_size = Static::sizemap()->class_to_size(size_class_);
-  ASSERT(object_size > 0);
-  const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size;
-  return num_spans_ * overhead_per_span;
-}
-
-}  // namespace tcmalloc
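
To make the transfer-cache sizing in Init() concrete with hypothetical numbers:
with kMaxNumTransferEntries = 64, a size class of 256-byte objects moved 32 at
a time costs bytes * objs_to_move = 8192 bytes per cache slot, so
(1024 * 1024) / 8192 = 128 and max_cache_size_ stays at min(64, 128) = 64 slots
(at most 512 KB cached); a 32768-byte class moved 2 at a time costs 65536 bytes
per slot, giving min(64, max(1, 16)) = 16 slots, which caps that class's
transfer cache at roughly 1 MB.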

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/central_freelist.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/central_freelist.h b/third_party/gperftools/src/central_freelist.h
deleted file mode 100644
index 4148680..0000000
--- a/third_party/gperftools/src/central_freelist.h
+++ /dev/null
@@ -1,211 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <op...@google.com>
-
-#ifndef TCMALLOC_CENTRAL_FREELIST_H_
-#define TCMALLOC_CENTRAL_FREELIST_H_
-
-#include "config.h"
-#include <stddef.h>                     // for size_t
-#ifdef HAVE_STDINT_H
-#include <stdint.h>                     // for int32_t
-#endif
-#include "base/spinlock.h"
-#include "base/thread_annotations.h"
-#include "common.h"
-#include "span.h"
-
-namespace tcmalloc {
-
-// Data kept per size-class in central cache.
-class CentralFreeList {
- public:
-  // A CentralFreeList may be used before its constructor runs.
-  // So we prevent lock_'s constructor from doing anything to the
-  // lock_ state.
-  CentralFreeList() : lock_(base::LINKER_INITIALIZED) { }
-
-  void Init(size_t cl);
-
-  // These methods all do internal locking.
-
-  // Insert the specified range into the central freelist.  N is the number of
-  // elements in the range.  RemoveRange() is the opposite operation.
-  void InsertRange(void *start, void *end, int N);
-
-  // Returns the actual number of fetched elements and sets *start and *end.
-  int RemoveRange(void **start, void **end, int N);
-
-  // Returns the number of free objects in cache.
-  int length() {
-    SpinLockHolder h(&lock_);
-    return counter_;
-  }
-
-  // Returns the number of free objects in the transfer cache.
-  int tc_length();
-
-  // Returns the memory overhead (internal fragmentation) attributable
-  // to the freelist.  This is memory lost when the size of elements
-  // in a freelist doesn't exactly divide the page-size (an 8192-byte
-  // page full of 5-byte objects would have 2 bytes memory overhead).
-  size_t OverheadBytes();
-
-  // Lock/Unlock the internal SpinLock. Used on the pthread_atfork call
-  // to set the lock in a consistent state before the fork.
-  void Lock() {
-    lock_.Lock();
-  }
-
-  void Unlock() {
-    lock_.Unlock();
-  }
-
- private:
-  // TransferCache is used to cache transfers of
-  // sizemap.num_objects_to_move(size_class) back and forth between
-  // thread caches and the central cache for a given size class.
-  struct TCEntry {
-    void *head;  // Head of chain of objects.
-    void *tail;  // Tail of chain of objects.
-  };
-
-  // A central cache freelist can have anywhere from 0 to kMaxNumTransferEntries
-  // slots to put linked-list chains into.
-#ifdef TCMALLOC_SMALL_BUT_SLOW
-  // For the small memory model, the transfer cache is not used.
-  static const int kMaxNumTransferEntries = 0;
-#else
-  // Starting point for the maximum number of entries in the transfer cache.
-  // This actual maximum for a given size class may be lower than this
-  // maximum value.
-  static const int kMaxNumTransferEntries = 64;
-#endif
-
-  // REQUIRES: lock_ is held
-  // Removes up to N objects from the cache into the chain *start..*end and
-  // returns the number actually fetched (0 if the cache has no free entries).
-  int FetchFromOneSpans(int N, void **start, void **end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
-  // REQUIRES: lock_ is held
-  // Same as FetchFromOneSpans(), but populates the cache from the
-  // page heap if it is empty.  Only returns
-  // 0 on allocation failure.
-  int FetchFromOneSpansSafe(int N, void **start, void **end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
-  // REQUIRES: lock_ is held
-  // Release a linked list of objects to spans.
-  // May temporarily release lock_.
-  void ReleaseListToSpans(void *start) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
-  // REQUIRES: lock_ is held
-  // Release an object to spans.
-  // May temporarily release lock_.
-  void ReleaseToSpans(void* object) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
-  // REQUIRES: lock_ is held
-  // Populate cache by fetching from the page heap.
-  // May temporarily release lock_.
-  void Populate() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
-  // REQUIRES: lock is held.
-  // Tries to make room for a TCEntry.  If the cache is full it will try to
-  // expand it at the cost of some other cache size.  Return false if there is
-  // no space.
-  bool MakeCacheSpace() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
-  // REQUIRES: lock_ for locked_size_class is held.
-  // Picks a "random" size class to steal TCEntry slot from.  In reality it
-  // just iterates over the sizeclasses but does so without taking a lock.
-  // Returns true on success.
-  // May temporarily lock a "random" size class.
-  static bool EvictRandomSizeClass(int locked_size_class, bool force);
-
-  // REQUIRES: lock_ is *not* held.
-  // Tries to shrink the cache.  If force is true it will release objects to
-  // spans if that allows it to shrink the cache.  Returns false if it failed
-  // to shrink the cache.  Decrements cache_size_ on success.
-  // May temporarily take lock_.  If it takes lock_, the locked_size_class
-  // lock is released to keep the thread from holding two size class locks
-  // concurrently which could lead to a deadlock.
-  bool ShrinkCache(int locked_size_class, bool force) LOCKS_EXCLUDED(lock_);
-
-  // This lock protects all the data members.  used_slots_ and cache_size_
-  // may be read without holding the lock.
-  SpinLock lock_;
-
-  // We keep linked lists of empty and non-empty spans.
-  size_t   size_class_;     // My size class
-  Span     empty_;          // Dummy header for list of empty spans
-  Span     nonempty_;       // Dummy header for list of non-empty spans
-  size_t   num_spans_;      // Number of spans in empty_ plus nonempty_
-  size_t   counter_;        // Number of free objects in cache entry
-
-  // Here we reserve space for TCEntry cache slots.  Space is preallocated
-  // for the largest possible number of entries that any one size class may
-  // accumulate.  Not all size classes are allowed to accumulate
-  // kMaxNumTransferEntries, so there is some wasted space for those size
-  // classes.
-  TCEntry tc_slots_[kMaxNumTransferEntries];
-
-  // Number of currently used cached entries in tc_slots_.  This variable is
-  // updated under a lock but can be read without one.
-  int32_t used_slots_;
-  // The current number of slots for this size class.  This is an
-  // adaptive value that is increased if there is lots of traffic
-  // on a given size class.
-  int32_t cache_size_;
-  // Maximum size of the cache for a given size class.
-  int32_t max_cache_size_;
-};
-
-// Pads each CentralFreeList object to a multiple of 64 bytes.  Since some
-// compilers (such as MSVC) don't like it when the padding is 0, template
-// specialization is used to remove the padding entirely when
-// sizeof(CentralFreeList) is already a multiple of 64.
-template<int kFreeListSizeMod64>
-class CentralFreeListPaddedTo : public CentralFreeList {
- private:
-  char pad_[64 - kFreeListSizeMod64];
-};
-
-template<>
-class CentralFreeListPaddedTo<0> : public CentralFreeList {
-};
-
-class CentralFreeListPadded : public CentralFreeListPaddedTo<
-  sizeof(CentralFreeList) % 64> {
-};
-
-}  // namespace tcmalloc
-
-#endif  // TCMALLOC_CENTRAL_FREELIST_H_
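
The padding trick at the end of the header above is worth a closer look: a zero-length pad array is rejected by some compilers, so the zero-remainder case is handled by an empty template specialization. Below is a minimal standalone sketch of the same pattern; Widget is a made-up stand-in class, not a tcmalloc type.

#include <cstddef>
#include <iostream>

// Widget stands in for CentralFreeList; its size is deliberately not a
// multiple of 64.
struct Widget {
  char data[100];
};

// Primary template: add enough padding to round the size up to a
// multiple of 64 bytes.
template <int kSizeMod64>
struct PaddedWidget : Widget {
  char pad_[64 - kSizeMod64];
};

// Specialization for the zero-remainder case, because a zero-length
// array member is rejected by some compilers (e.g. MSVC).
template <>
struct PaddedWidget<0> : Widget {};

typedef PaddedWidget<static_cast<int>(sizeof(Widget) % 64)> Widget64;

int main() {
  // With a 100-byte Widget this prints "100 -> 128"; when sizeof(Widget)
  // is already a multiple of 64, the specialization adds nothing.
  std::cout << sizeof(Widget) << " -> " << sizeof(Widget64) << "\n";
  return 0;
}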

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/common.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/common.cc b/third_party/gperftools/src/common.cc
deleted file mode 100644
index 3b66afe..0000000
--- a/third_party/gperftools/src/common.cc
+++ /dev/null
@@ -1,276 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <op...@google.com>
-
-#include <stdlib.h> // for getenv and strtol
-#include "config.h"
-#include "common.h"
-#include "system-alloc.h"
-#include "base/spinlock.h"
-#include "getenv_safe.h" // TCMallocGetenvSafe
-
-namespace tcmalloc {
-
-// Defines the maximum number of objects per size class to transfer between
-// thread and central caches.
-static int32 FLAGS_tcmalloc_transfer_num_objects;
-
-static const int32 kDefaultTransferNumObjecs = 32768;
-
-// This init function explicitly initializes the variable from the
-// environment variable, avoiding C++ global construction that might defer
-// its initialization until after a malloc/new call.
-static inline void InitTCMallocTransferNumObjects()
-{
-  if (UNLIKELY(FLAGS_tcmalloc_transfer_num_objects == 0)) {
-    const char *envval = TCMallocGetenvSafe("TCMALLOC_TRANSFER_NUM_OBJ");
-    FLAGS_tcmalloc_transfer_num_objects = !envval ? kDefaultTransferNumObjecs :
-      strtol(envval, NULL, 10);
-  }
-}
-
-// Note: the following only works for "n"s that fit in 32-bits, but
-// that is fine since we only use it for small sizes.
-static inline int LgFloor(size_t n) {
-  int log = 0;
-  for (int i = 4; i >= 0; --i) {
-    int shift = (1 << i);
-    size_t x = n >> shift;
-    if (x != 0) {
-      n = x;
-      log += shift;
-    }
-  }
-  ASSERT(n == 1);
-  return log;
-}
-
-int AlignmentForSize(size_t size) {
-  int alignment = kAlignment;
-  if (size > kMaxSize) {
-    // Cap alignment at kPageSize for large sizes.
-    alignment = kPageSize;
-  } else if (size >= 128) {
-    // Space wasted due to alignment is at most 1/8, i.e., 12.5%.
-    alignment = (1 << LgFloor(size)) / 8;
-  } else if (size >= kMinAlign) {
-    // We need an alignment of at least 16 bytes to satisfy
-    // requirements for some SSE types.
-    alignment = kMinAlign;
-  }
-  // Maximum alignment allowed is page size alignment.
-  if (alignment > kPageSize) {
-    alignment = kPageSize;
-  }
-  CHECK_CONDITION(size < kMinAlign || alignment >= kMinAlign);
-  CHECK_CONDITION((alignment & (alignment - 1)) == 0);
-  return alignment;
-}
-
-int SizeMap::NumMoveSize(size_t size) {
-  if (size == 0) return 0;
-  // Move approximately 64KB per transfer between thread and central caches.
-  int num = static_cast<int>(64.0 * 1024.0 / size);
-  if (num < 2) num = 2;
-
-  // Avoid bringing too many objects into small object free lists.
-  // If this value is too large:
-  // - We waste memory with extra objects sitting in the thread caches.
-  // - The central freelist holds its lock for too long while
-  //   building a linked list of objects, slowing down the allocations
-  //   of other threads.
-  // If this value is too small:
-  // - We go to the central freelist too often and we have to acquire
-  //   its lock each time.
-  // This value strikes a balance between the constraints above.
-  if (num > FLAGS_tcmalloc_transfer_num_objects)
-    num = FLAGS_tcmalloc_transfer_num_objects;
-
-  return num;
-}
-
-// Initialize the mapping arrays
-void SizeMap::Init() {
-  InitTCMallocTransferNumObjects();
-
-  // Do some sanity checking on the class_array_ mapping
-  if (ClassIndex(0) != 0) {
-    Log(kCrash, __FILE__, __LINE__,
-        "Invalid class index for size 0", ClassIndex(0));
-  }
-  if (ClassIndex(kMaxSize) >= sizeof(class_array_)) {
-    Log(kCrash, __FILE__, __LINE__,
-        "Invalid class index for kMaxSize", ClassIndex(kMaxSize));
-  }
-
-  // Compute the size classes we want to use
-  int sc = 1;   // Next size class to assign
-  int alignment = kAlignment;
-  CHECK_CONDITION(kAlignment <= kMinAlign);
-  for (size_t size = kAlignment; size <= kMaxSize; size += alignment) {
-    alignment = AlignmentForSize(size);
-    CHECK_CONDITION((size % alignment) == 0);
-
-    int blocks_to_move = NumMoveSize(size) / 4;
-    size_t psize = 0;
-    do {
-      psize += kPageSize;
-      // Allocate enough pages so leftover is less than 1/8 of total.
-      // This bounds wasted space to at most 12.5%.
-      while ((psize % size) > (psize >> 3)) {
-        psize += kPageSize;
-      }
-      // Continue to add pages until there are at least as many objects in
-      // the span as are needed when moving objects from the central
-      // freelists and spans to the thread caches.
-    } while ((psize / size) < (blocks_to_move));
-    const size_t my_pages = psize >> kPageShift;
-
-    if (sc > 1 && my_pages == class_to_pages_[sc-1]) {
-      // See if we can merge this into the previous class without
-      // increasing the fragmentation of the previous class.
-      const size_t my_objects = (my_pages << kPageShift) / size;
-      const size_t prev_objects = (class_to_pages_[sc-1] << kPageShift)
-                                  / class_to_size_[sc-1];
-      if (my_objects == prev_objects) {
-        // Adjust last class to include this size
-        class_to_size_[sc-1] = size;
-        continue;
-      }
-    }
-
-    // Add new class
-    class_to_pages_[sc] = my_pages;
-    class_to_size_[sc] = size;
-    sc++;
-  }
-  if (sc != kNumClasses) {
-    Log(kCrash, __FILE__, __LINE__,
-        "wrong number of size classes: (found vs. expected)", sc, kNumClasses);
-  }
-
-  // Initialize the mapping arrays
-  int next_size = 0;
-  for (int c = 1; c < kNumClasses; c++) {
-    const int max_size_in_class = class_to_size_[c];
-    for (int s = next_size; s <= max_size_in_class; s += kAlignment) {
-      class_array_[ClassIndex(s)] = c;
-    }
-    next_size = max_size_in_class + kAlignment;
-  }
-
-  // Double-check sizes just to be safe
-  for (size_t size = 0; size <= kMaxSize;) {
-    const int sc = SizeClass(size);
-    if (sc <= 0 || sc >= kNumClasses) {
-      Log(kCrash, __FILE__, __LINE__,
-          "Bad size class (class, size)", sc, size);
-    }
-    if (sc > 1 && size <= class_to_size_[sc-1]) {
-      Log(kCrash, __FILE__, __LINE__,
-          "Allocating unnecessarily large class (class, size)", sc, size);
-    }
-    const size_t s = class_to_size_[sc];
-    if (size > s || s == 0) {
-      Log(kCrash, __FILE__, __LINE__,
-          "Bad (class, size, requested)", sc, s, size);
-    }
-    if (size <= kMaxSmallSize) {
-      size += 8;
-    } else {
-      size += 128;
-    }
-  }
-
-  // Initialize the num_objects_to_move array.
-  for (size_t cl = 1; cl  < kNumClasses; ++cl) {
-    num_objects_to_move_[cl] = NumMoveSize(ByteSizeForClass(cl));
-  }
-}
-
-// Metadata allocator -- keeps stats about how many bytes allocated.
-static uint64_t metadata_system_bytes_ = 0;
-static const size_t kMetadataAllocChunkSize = 8*1024*1024;
-static const size_t kMetadataBigAllocThreshold = kMetadataAllocChunkSize / 8;
-// Usually malloc uses larger alignments, but because metadata cannot
-// contain any fancy SIMD types, aligning on pointer size seems fine.
-static const size_t kMetadataAllignment = sizeof(void *);
-
-static char *metadata_chunk_alloc_;
-static size_t metadata_chunk_avail_;
-
-static SpinLock metadata_alloc_lock(SpinLock::LINKER_INITIALIZED);
-
-void* MetaDataAlloc(size_t bytes) {
-  if (bytes >= kMetadataAllocChunkSize) {
-    void *rv = TCMalloc_SystemAlloc(bytes,
-                                    NULL, kMetadataAllignment);
-    if (rv != NULL) {
-      metadata_system_bytes_ += bytes;
-    }
-    return rv;
-  }
-
-  SpinLockHolder h(&metadata_alloc_lock);
-
-  // The following works by treating the address as an integer modulo
-  // kMetadataAllignment and negating it: the negated value plus the
-  // original value is 0 modulo kMetadataAllignment, which is exactly the
-  // padding we need. Note that we negate before masking the higher bits
-  // off; otherwise we'd have to mask them off after negation anyway.
-  intptr_t alignment = -reinterpret_cast<intptr_t>(metadata_chunk_alloc_) & (kMetadataAllignment-1);
-
-  if (metadata_chunk_avail_ < bytes + alignment) {
-    size_t real_size;
-    void *ptr = TCMalloc_SystemAlloc(kMetadataAllocChunkSize,
-                                     &real_size, kMetadataAllignment);
-    if (ptr == NULL) {
-      return NULL;
-    }
-
-    metadata_chunk_alloc_ = static_cast<char *>(ptr);
-    metadata_chunk_avail_ = real_size;
-
-    alignment = 0;
-  }
-
-  void *rv = static_cast<void *>(metadata_chunk_alloc_ + alignment);
-  bytes += alignment;
-  metadata_chunk_alloc_ += bytes;
-  metadata_chunk_avail_ -= bytes;
-  metadata_system_bytes_ += bytes;
-  return rv;
-}
-
-uint64_t metadata_system_bytes() { return metadata_system_bytes_; }
-
-}  // namespace tcmalloc
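
MetaDataAlloc above rounds its bump pointer up to kMetadataAllignment with the negate-then-mask arithmetic described in its comment. The following small self-contained sketch (kAlign is a stand-in constant, not the tcmalloc symbol) checks that arithmetic for a range of addresses.

#include <cassert>
#include <cstdint>
#include <iostream>

int main() {
  // kAlign stands in for kMetadataAllignment; any power of two works.
  const uintptr_t kAlign = sizeof(void*);
  for (uintptr_t addr = 1000; addr < 1000 + 2 * kAlign; ++addr) {
    // Negate first, then mask off the high bits, the same order as in
    // MetaDataAlloc.  Unsigned negation wraps modulo 2^N, so this is
    // well defined.
    const uintptr_t pad = (0 - addr) & (kAlign - 1);
    assert((addr + pad) % kAlign == 0);  // rounded up to a multiple of kAlign
    assert(pad < kAlign);                // and by the minimal amount
    std::cout << "addr=" << addr << " pad=" << pad << "\n";
  }
  return 0;
}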

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/9661f956/third_party/gperftools/src/common.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/common.h b/third_party/gperftools/src/common.h
deleted file mode 100644
index c3484d3..0000000
--- a/third_party/gperftools/src/common.h
+++ /dev/null
@@ -1,274 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <op...@google.com>
-//
-// Common definitions for tcmalloc code.
-
-#ifndef TCMALLOC_COMMON_H_
-#define TCMALLOC_COMMON_H_
-
-#include "config.h"
-#include <stddef.h>                     // for size_t
-#ifdef HAVE_STDINT_H
-#include <stdint.h>                     // for uintptr_t, uint64_t
-#endif
-#include "internal_logging.h"  // for ASSERT, etc
-#include "base/basictypes.h"   // for LIKELY, etc
-
-#ifdef HAVE_BUILTIN_EXPECT
-#define LIKELY(x) __builtin_expect(!!(x), 1)
-#define UNLIKELY(x) __builtin_expect(!!(x), 0)
-#else
-#define LIKELY(x) (x)
-#define UNLIKELY(x) (x)
-#endif
-
-// Type that can hold a page number
-typedef uintptr_t PageID;
-
-// Type that can hold the length of a run of pages
-typedef uintptr_t Length;
-
-//-------------------------------------------------------------------
-// Configuration
-//-------------------------------------------------------------------
-
-#if defined(TCMALLOC_ALIGN_8BYTES)
-// Unless we are forced to use 8-byte alignment, we use an alignment of
-// at least 16 bytes to satisfy requirements for some SSE types.
-// Keep in mind that with 16-byte alignment the space wasted due to
-// alignment can be up to 25% (e.g. a malloc of 24 bytes will get 32 bytes).
-static const size_t kMinAlign   = 8;
-// Number of size classes created for sizes up to 128 bytes.
-static const size_t kBaseClasses = 16;
-#else
-static const size_t kMinAlign   = 16;
-static const size_t kBaseClasses = 9;
-#endif
-
-// Using large pages speeds up the execution at a cost of larger memory use.
-// Deallocation may speed up by a factor as the page map gets 8x smaller, so
-// lookups in the page map result in fewer L2 cache misses, which translates to
-// speedup for application/platform combinations with high L2 cache pressure.
-// As the number of size classes increases with large pages, we increase
-// the thread cache allowance to avoid passing more free ranges to and from
-// central lists.  Also, larger pages are less likely to get freed.
-// These two factors cause a bounded increase in memory use.
-#if defined(TCMALLOC_32K_PAGES)
-static const size_t kPageShift  = 15;
-static const size_t kNumClasses = kBaseClasses + 69;
-#elif defined(TCMALLOC_64K_PAGES)
-static const size_t kPageShift  = 16;
-static const size_t kNumClasses = kBaseClasses + 73;
-#else
-static const size_t kPageShift  = 13;
-static const size_t kNumClasses = kBaseClasses + 79;
-#endif
-
-static const size_t kMaxThreadCacheSize = 4 << 20;
-
-static const size_t kPageSize   = 1 << kPageShift;
-static const size_t kMaxSize    = 256 * 1024;
-static const size_t kAlignment  = 8;
-static const size_t kLargeSizeClass = 0;
-// For all span-lengths < kMaxPages we keep an exact-size list.
-static const size_t kMaxPages = 1 << (20 - kPageShift);
-
-// Default bound on the total amount of thread caches.
-#ifdef TCMALLOC_SMALL_BUT_SLOW
-// Make the overall thread cache no bigger than that of a single thread
-// for the small memory footprint case.
-static const size_t kDefaultOverallThreadCacheSize = kMaxThreadCacheSize;
-#else
-static const size_t kDefaultOverallThreadCacheSize = 8u * kMaxThreadCacheSize;
-#endif
-
-// Lower bound on the per-thread cache sizes
-static const size_t kMinThreadCacheSize = kMaxSize * 2;
-
-// The number of bytes one ThreadCache will steal from another when
-// the first ThreadCache is forced to Scavenge(), delaying the
-// next call to Scavenge for this thread.
-static const size_t kStealAmount = 1 << 16;
-
-// The number of times that a deallocation can cause a freelist to
-// go over its max_length() before shrinking max_length().
-static const int kMaxOverages = 3;
-
-// Maximum length we allow a per-thread free-list to have before we
-// move objects from it into the corresponding central free-list.  We
-// want this big to avoid locking the central free-list too often.  It
-// should not hurt to make this list somewhat big because the
-// scavenging code will shrink it down when its contents are not in use.
-static const int kMaxDynamicFreeListLength = 8192;
-
-static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
-
-#if defined __x86_64__
-// All current and planned x86_64 processors only look at the lower 48 bits
-// in virtual to physical address translation.  The top 16 are thus unused.
-// TODO(rus): Under what operating systems can we increase it safely to 17?
-// This lets us use smaller page maps.  On first allocation, a 36-bit page map
-// uses only 96 KB instead of the 4.5 MB used by a 52-bit page map.
-static const int kAddressBits = (sizeof(void*) < 8 ? (8 * sizeof(void*)) : 48);
-#else
-static const int kAddressBits = 8 * sizeof(void*);
-#endif
-
-namespace tcmalloc {
-
-// Convert byte size into pages.  This won't overflow, but may return
-// an unreasonably large value if bytes is huge enough.
-inline Length pages(size_t bytes) {
-  return (bytes >> kPageShift) +
-      ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
-}
-
-// For larger allocation sizes, we use larger memory alignments to
-// reduce the number of size classes.
-int AlignmentForSize(size_t size);
-
-// Size-class information + mapping
-class SizeMap {
- private:
-  // Number of objects to move between a per-thread list and a central
-  // list in one shot.  We want this to be not too small so we can
-  // amortize the lock overhead for accessing the central list.  Making
-  // it too big may temporarily cause unnecessary memory wastage in the
-  // per-thread free list until the scavenger cleans up the list.
-  int num_objects_to_move_[kNumClasses];
-
-  //-------------------------------------------------------------------
-  // Mapping from size to size_class and vice versa
-  //-------------------------------------------------------------------
-
-  // Sizes <= 1024 have an alignment >= 8.  So for such sizes we have an
-  // array indexed by ceil(size/8).  Sizes > 1024 have an alignment >= 128.
-  // So for these larger sizes we have an array indexed by ceil(size/128).
-  //
-  // We flatten both logical arrays into one physical array and use
-  // arithmetic to compute an appropriate index.  The constants used by
-  // ClassIndex() were selected to make the flattening work.
-  //
-  // Examples:
-  //   Size       Expression                      Index
-  //   -------------------------------------------------------
-  //   0          (0 + 7) / 8                     0
-  //   1          (1 + 7) / 8                     1
-  //   ...
-  //   1024       (1024 + 7) / 8                  128
-  //   1025       (1025 + 127 + (120<<7)) / 128   129
-  //   ...
-  //   32768      (32768 + 127 + (120<<7)) / 128  376
-  static const int kMaxSmallSize = 1024;
-  static const size_t kClassArraySize =
-      ((kMaxSize + 127 + (120 << 7)) >> 7) + 1;
-  unsigned char class_array_[kClassArraySize];
-
-  // Compute index of the class_array[] entry for a given size
-  static inline size_t ClassIndex(int s) {
-    // Use unsigned arithmetic to avoid unnecessary sign extensions.
-    ASSERT(0 <= s);
-    ASSERT(s <= kMaxSize);
-    if (LIKELY(s <= kMaxSmallSize)) {
-      return (static_cast<uint32_t>(s) + 7) >> 3;
-    } else {
-      return (static_cast<uint32_t>(s) + 127 + (120 << 7)) >> 7;
-    }
-  }
-
-  int NumMoveSize(size_t size);
-
-  // Mapping from size class to max size storable in that class
-  size_t class_to_size_[kNumClasses];
-
-  // Mapping from size class to number of pages to allocate at a time
-  size_t class_to_pages_[kNumClasses];
-
- public:
-  // Constructor should do nothing since we rely on explicit Init()
-  // call, which may or may not be called before the constructor runs.
-  SizeMap() { }
-
-  // Initialize the mapping arrays
-  void Init();
-
-  inline int SizeClass(int size) {
-    return class_array_[ClassIndex(size)];
-  }
-
-  // Get the byte-size for a specified class
-  inline size_t ByteSizeForClass(size_t cl) {
-    return class_to_size_[cl];
-  }
-
-  // Mapping from size class to max size storable in that class
-  inline size_t class_to_size(size_t cl) {
-    return class_to_size_[cl];
-  }
-
-  // Mapping from size class to number of pages to allocate at a time
-  inline size_t class_to_pages(size_t cl) {
-    return class_to_pages_[cl];
-  }
-
-  // Number of objects to move between a per-thread list and a central
-  // list in one shot.  We want this to be not too small so we can
-  // amortize the lock overhead for accessing the central list.  Making
-  // it too big may temporarily cause unnecessary memory wastage in the
-  // per-thread free list until the scavenger cleans up the list.
-  inline int num_objects_to_move(size_t cl) {
-    return num_objects_to_move_[cl];
-  }
-};
-
-// Allocates "bytes" worth of memory and returns it.  Increments
-// metadata_system_bytes appropriately.  May return NULL if allocation
-// fails.  Requires pageheap_lock is held.
-void* MetaDataAlloc(size_t bytes);
-
-// Returns the total number of bytes allocated from the system.
-// Requires pageheap_lock is held.
-uint64_t metadata_system_bytes();
-
-// size/depth are made the same size as a pointer so that some generic
-// code below can conveniently cast them back and forth to void*.
-static const int kMaxStackDepth = 31;
-struct StackTrace {
-  uintptr_t size;          // Size of object
-  uintptr_t depth;         // Number of PC values stored in array below
-  void*     stack[kMaxStackDepth];
-};
-
-}  // namespace tcmalloc
-
-#endif  // TCMALLOC_COMMON_H_
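
The ClassIndex() comment above flattens two lookup granularities (8 bytes for sizes up to 1024, 128 bytes above that) into a single array index. The short sketch below (an illustration only, not the tcmalloc code path) reproduces the example table from that comment.

#include <cstddef>
#include <cstdint>
#include <iostream>

// Mirrors the index arithmetic documented for SizeMap::ClassIndex() above,
// with kMaxSmallSize = 1024.
static size_t ClassIndex(uint32_t s) {
  if (s <= 1024) {
    return (s + 7) >> 3;               // ceil(s / 8) for small sizes
  }
  return (s + 127 + (120 << 7)) >> 7;  // ceil(s / 128), offset past index 128
}

int main() {
  // Expected output, matching the table in the header comment:
  //   0 -> 0, 1 -> 1, 1024 -> 128, 1025 -> 129, 32768 -> 376
  const uint32_t sizes[] = {0, 1, 1024, 1025, 32768};
  for (uint32_t s : sizes) {
    std::cout << s << " -> " << ClassIndex(s) << "\n";
  }
  return 0;
}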