Posted to commits@quickstep.apache.org by hb...@apache.org on 2017/01/19 20:06:51 UTC

[14/51] [abbrv] [partial] incubator-quickstep git commit: Added shell script to download prerequisite third party libs

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/malloc_hook_mmap_linux.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/malloc_hook_mmap_linux.h b/third_party/gperftools/src/malloc_hook_mmap_linux.h
deleted file mode 100755
index 8e5a3b0..0000000
--- a/third_party/gperftools/src/malloc_hook_mmap_linux.h
+++ /dev/null
@@ -1,238 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <op...@google.com>
-
-// We define mmap() and mmap64(), which somewhat reimplement libc's mmap
-// syscall stubs.  Unfortunately libc only exports the stubs via weak symbols
-// (which we're overriding with our mmap64() and mmap() wrappers) so we can't
-// just call through to them.
-
-#ifndef __linux
-# error Should only be including malloc_hook_mmap_linux.h on linux systems.
-#endif
-
-#include <unistd.h>
-#include <syscall.h>
-#include <sys/mman.h>
-#include <errno.h>
-#include "base/linux_syscall_support.h"
-
-// The x86-32 case and the x86-64 case differ:
-// 32b has a mmap2() syscall, 64b does not.
-// 64b and 32b have different calling conventions for mmap().
-
-// I test for 64-bit first so I don't have to do things like
-// '#if (defined(__mips__) && !defined(__MIPS64__))' as a mips32 check.
-#if defined(__x86_64__) || defined(__PPC64__) || defined(__aarch64__) || (defined(_MIPS_SIM) && _MIPS_SIM == _ABI64)
-
-static inline void* do_mmap64(void *start, size_t length,
-                              int prot, int flags,
-                              int fd, __off64_t offset) __THROW {
-  return sys_mmap(start, length, prot, flags, fd, offset);
-}
-
-#define MALLOC_HOOK_HAVE_DO_MMAP64 1
-
-#elif defined(__i386__) || defined(__PPC__) || defined(__mips__) || \
-      defined(__arm__)
-
-static inline void* do_mmap64(void *start, size_t length,
-                              int prot, int flags,
-                              int fd, __off64_t offset) __THROW {
-  void *result;
-
-  // Try mmap2() unless it's not supported
-  static bool have_mmap2 = true;
-  if (have_mmap2) {
-    static int pagesize = 0;
-    if (!pagesize) pagesize = getpagesize();
-
-    // Check that the offset is page aligned
-    if (offset & (pagesize - 1)) {
-      result = MAP_FAILED;
-      errno = EINVAL;
-      goto out;
-    }
-
-    result = (void *)syscall(SYS_mmap2,
-                             start, length, prot, flags, fd,
-                             (off_t) (offset / pagesize));
-    if (result != MAP_FAILED || errno != ENOSYS)  goto out;
-
-    // We don't have mmap2() after all - don't bother trying it in future
-    have_mmap2 = false;
-  }
-
-  if (((off_t)offset) != offset) {
-    // If we're trying to map a 64-bit offset, fail now since we don't
-    // have 64-bit mmap() support.
-    result = MAP_FAILED;
-    errno = EINVAL;
-    goto out;
-  }
-
-#ifdef __NR_mmap
-  {
-    // Fall back to old 32-bit offset mmap() call
-    // Old syscall interface cannot handle six args, so pass in an array
-    int32 args[6] = { (int32) start, (int32) length, prot, flags, fd,
-                      (off_t) offset };
-    result = (void *)syscall(SYS_mmap, args);
-  }
-#else
-  // Some Linux ports, like ARM EABI Linux, have no mmap, just mmap2.
-  result = MAP_FAILED;
-#endif
-
- out:
-  return result;
-}
-
-#define MALLOC_HOOK_HAVE_DO_MMAP64 1
-
-#endif  // #if defined(__x86_64__)
-
-
-#ifdef MALLOC_HOOK_HAVE_DO_MMAP64
-
-// We use the do_mmap64 abstraction to put MallocHook::InvokeMmapHook
-// calls right into mmap and mmap64, so that the stack frames in the caller's
-// stack are at the same offsets for all the calls of memory allocating
-// functions.
-
-// Put all callers of MallocHook::Invoke* in this module into
-// malloc_hook section,
-// so that MallocHook::GetCallerStackTrace can function accurately:
-
-// Make sure mmap doesn't get #define'd away by <sys/mman.h>
-# undef mmap
-
-extern "C" {
-  void* mmap64(void *start, size_t length, int prot, int flags,
-               int fd, __off64_t offset  ) __THROW
-    ATTRIBUTE_SECTION(malloc_hook);
-  void* mmap(void *start, size_t length,int prot, int flags,
-             int fd, off_t offset) __THROW
-    ATTRIBUTE_SECTION(malloc_hook);
-  int munmap(void* start, size_t length) __THROW
-    ATTRIBUTE_SECTION(malloc_hook);
-  void* mremap(void* old_addr, size_t old_size, size_t new_size,
-               int flags, ...) __THROW
-    ATTRIBUTE_SECTION(malloc_hook);
-  void* sbrk(ptrdiff_t increment) __THROW
-    ATTRIBUTE_SECTION(malloc_hook);
-}
-
-extern "C" void* mmap64(void *start, size_t length, int prot, int flags,
-                        int fd, __off64_t offset) __THROW {
-  MallocHook::InvokePreMmapHook(start, length, prot, flags, fd, offset);
-  void *result;
-  if (!MallocHook::InvokeMmapReplacement(
-          start, length, prot, flags, fd, offset, &result)) {
-    result = do_mmap64(start, length, prot, flags, fd, offset);
-  }
-  MallocHook::InvokeMmapHook(result, start, length, prot, flags, fd, offset);
-  return result;
-}
-
-# if !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)
-
-extern "C" void* mmap(void *start, size_t length, int prot, int flags,
-                      int fd, off_t offset) __THROW {
-  MallocHook::InvokePreMmapHook(start, length, prot, flags, fd, offset);
-  void *result;
-  if (!MallocHook::InvokeMmapReplacement(
-          start, length, prot, flags, fd, offset, &result)) {
-    result = do_mmap64(start, length, prot, flags, fd,
-                       static_cast<size_t>(offset)); // avoid sign extension
-  }
-  MallocHook::InvokeMmapHook(result, start, length, prot, flags, fd, offset);
-  return result;
-}
-
-# endif  // !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)
-
-extern "C" int munmap(void* start, size_t length) __THROW {
-  MallocHook::InvokeMunmapHook(start, length);
-  int result;
-  if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
-    result = sys_munmap(start, length);
-  }
-  return result;
-}
-
-extern "C" void* mremap(void* old_addr, size_t old_size, size_t new_size,
-                        int flags, ...) __THROW {
-  va_list ap;
-  va_start(ap, flags);
-  void *new_address = va_arg(ap, void *);
-  va_end(ap);
-  void* result = sys_mremap(old_addr, old_size, new_size, flags, new_address);
-  MallocHook::InvokeMremapHook(result, old_addr, old_size, new_size, flags,
-                               new_address);
-  return result;
-}
-
-#ifndef __UCLIBC__
-// libc's version:
-extern "C" void* __sbrk(ptrdiff_t increment);
-
-extern "C" void* sbrk(ptrdiff_t increment) __THROW {
-  MallocHook::InvokePreSbrkHook(increment);
-  void *result = __sbrk(increment);
-  MallocHook::InvokeSbrkHook(result, increment);
-  return result;
-}
-
-#endif
-
-/*static*/void* MallocHook::UnhookedMMap(void *start, size_t length, int prot,
-                                         int flags, int fd, off_t offset) {
-  void* result;
-  if (!MallocHook::InvokeMmapReplacement(
-          start, length, prot, flags, fd, offset, &result)) {
-    result = do_mmap64(start, length, prot, flags, fd, offset);
-  }
-  return result;
-}
-
-/*static*/int MallocHook::UnhookedMUnmap(void *start, size_t length) {
-  int result;
-  if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
-    result = syscall(SYS_munmap, start, length);
-  }
-  return result;
-}
-
-#undef MALLOC_HOOK_HAVE_DO_MMAP64
-
-#endif  // #ifdef MALLOC_HOOK_HAVE_DO_MMAP64
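For context on the file removed above: the core pattern it implements (override libc's weak mmap symbol, run pre/post hooks, and issue the mapping through the raw syscall so the wrapper never calls back into libc) can be sketched in isolation as below. This is a minimal, hypothetical illustration for 64-bit Linux only; the hook names are invented, and it deliberately skips the 32-bit mmap2 path and the __REDIRECT/off64_t handling that the real file covers.

#include <sys/syscall.h>   // SYS_mmap
#include <sys/types.h>     // off_t
#include <unistd.h>        // syscall()
#include <cstddef>
#include <cstdio>

// <sys/mman.h> is intentionally not included, so this sketch does not have to
// match glibc's exact mmap declaration (exception specifier, off64_t redirect).

static void my_pre_mmap_hook(void* start, size_t length) {
  // Hooks must not allocate; stderr is unbuffered, so fprintf is safe enough here.
  std::fprintf(stderr, "mmap requested: %zu bytes (hint %p)\n", length, start);
}

static void my_post_mmap_hook(void* result, size_t length) {
  std::fprintf(stderr, "mmap returned %p for %zu bytes\n", result, length);
}

// Overrides the weak mmap symbol when this object is linked in or LD_PRELOADed.
extern "C" void* mmap(void* start, size_t length, int prot, int flags,
                      int fd, off_t offset) {
  my_pre_mmap_hook(start, length);
  void* result = reinterpret_cast<void*>(
      syscall(SYS_mmap, start, length, prot, flags, fd, offset));
  my_post_mmap_hook(result, length);
  return result;
}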

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/maybe_threads.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/maybe_threads.cc b/third_party/gperftools/src/maybe_threads.cc
deleted file mode 100644
index 6dd0d8d..0000000
--- a/third_party/gperftools/src/maybe_threads.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Paul Menage <op...@google.com>
-//
-// Some wrappers for pthread functions so that we can be LD_PRELOADed
-// against non-pthreads apps.
-//
-// This module will behave very strangely if some pthreads functions
-// exist and others don't.
-
-#include "config.h"
-#include <assert.h>
-#include <string.h>    // for memcmp
-#include <stdio.h>     // for __isthreaded on FreeBSD
-// We don't actually need strings. But including this header seems to
-// stop the compiler trying to short-circuit our pthreads existence
-// tests and claiming that the address of a function is always
-// non-zero. I have no idea why ...
-#include <string>
-#include "maybe_threads.h"
-#include "base/basictypes.h"
-
-// __THROW is defined in glibc systems.  It means, counter-intuitively,
-// "This function will never throw an exception."  It's an optional
-// optimization tool, but we may need to use it to match glibc prototypes.
-#ifndef __THROW    // I guess we're not on a glibc system
-# define __THROW   // __THROW is just an optimization, so ok to make it ""
-#endif
-
-// These are the methods we're going to conditionally include.
-extern "C" {
-  int pthread_key_create (pthread_key_t*, void (*)(void*))
-      __THROW ATTRIBUTE_WEAK;
-  int pthread_key_delete (pthread_key_t)
-      __THROW ATTRIBUTE_WEAK;
-  void *pthread_getspecific(pthread_key_t)
-      __THROW ATTRIBUTE_WEAK;
-  int pthread_setspecific(pthread_key_t, const void*)
-      __THROW ATTRIBUTE_WEAK;
-  int pthread_once(pthread_once_t *, void (*)(void))
-      ATTRIBUTE_WEAK;
-}
-
-#define MAX_PERTHREAD_VALS 16
-static void *perftools_pthread_specific_vals[MAX_PERTHREAD_VALS];
-static int next_key;
-
-// NOTE: it's similar to the bitcast defined in basic_types.h, except
-// that it ignores size mismatches
-template <typename T1, typename T2>
-static T2 memcpy_cast(const T1 &input) {
-  T2 output;
-  size_t s = sizeof(input);
-  if (sizeof(output) < s) {
-    s = sizeof(output);
-  }
-  memcpy(&output, &input, s);
-  return output;
-}
-
-int perftools_pthread_key_create(pthread_key_t *key,
-                                 void (*destr_function) (void *)) {
-  if (pthread_key_create) {
-    return pthread_key_create(key, destr_function);
-  } else {
-    assert(next_key < MAX_PERTHREAD_VALS);
-    *key = memcpy_cast<int, pthread_key_t>(next_key++);
-    return 0;
-  }
-}
-
-int perftools_pthread_key_delete(pthread_key_t key) {
-  if (pthread_key_delete) {
-    return pthread_key_delete(key);
-  } else {
-    return 0;
-  }
-}
-
-void *perftools_pthread_getspecific(pthread_key_t key) {
-  if (pthread_getspecific) {
-    return pthread_getspecific(key);
-  } else {
-    return perftools_pthread_specific_vals[memcpy_cast<pthread_key_t, int>(key)];
-  }
-}
-
-int perftools_pthread_setspecific(pthread_key_t key, void *val) {
-  if (pthread_setspecific) {
-    return pthread_setspecific(key, val);
-  } else {
-    perftools_pthread_specific_vals[memcpy_cast<pthread_key_t, int>(key)] = val;
-    return 0;
-  }
-}
-
-
-static pthread_once_t pthread_once_init = PTHREAD_ONCE_INIT;
-int perftools_pthread_once(pthread_once_t *ctl,
-                           void  (*init_routine) (void)) {
-#ifdef __FreeBSD__
-  // On __FreeBSD__, calling pthread_once on a system that is not
-  // linked with -pthread is silently a noop. :-( Luckily, we have a
-  // workaround: FreeBSD exposes __isthreaded in <stdio.h>, which is
-  // set to 1 when the first thread is spawned.  So on those systems,
-  // we can use our own separate pthreads-once mechanism, which is
-  // used until __isthreaded is 1 (which will never be true if the app
-  // is not linked with -pthread).
-  static bool pthread_once_ran_before_threads = false;
-  if (pthread_once_ran_before_threads) {
-    return 0;
-  }
-  if (!__isthreaded) {
-    init_routine();
-    pthread_once_ran_before_threads = true;
-    return 0;
-  }
-#endif
-  if (pthread_once) {
-    return pthread_once(ctl, init_routine);
-  } else {
-    if (memcmp(ctl, &pthread_once_init, sizeof(*ctl)) == 0) {
-      init_routine();
-      ++*(char*)(ctl);        // make it so it's no longer equal to init
-    }
-    return 0;
-  }
-}
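The trick the removed maybe_threads.cc relies on, declaring a pthreads entry point as a weak symbol and testing its address at run time, can be shown on its own roughly as follows. This is a simplified sketch rather than gperftools code; on systems where libpthread has been merged into libc the weak symbol is simply always non-null and the fallback branch never runs.

#include <pthread.h>
#include <cstdio>

// Weak re-declaration: the symbol's address is null if libpthread was never linked.
extern "C" int pthread_once(pthread_once_t*, void (*)(void)) __attribute__((weak));

static pthread_once_t g_once = PTHREAD_ONCE_INIT;
static void init_once() { std::puts("initialized"); }

void maybe_init() {
  if (pthread_once) {            // real pthreads are available
    pthread_once(&g_once, init_once);
  } else {                       // single-threaded fallback
    static bool done = false;
    if (!done) { done = true; init_once(); }
  }
}

int main() {
  maybe_init();
  maybe_init();                  // init_once() still runs only once
  return 0;
}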

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/maybe_threads.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/maybe_threads.h b/third_party/gperftools/src/maybe_threads.h
deleted file mode 100644
index b60f4ef..0000000
--- a/third_party/gperftools/src/maybe_threads.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Paul Menage <op...@google.com>
-
-//-------------------------------------------------------------------
-// Some wrappers for pthread functions so that we can be LD_PRELOADed
-// against non-pthreads apps.
-//-------------------------------------------------------------------
-
-#ifndef GOOGLE_MAYBE_THREADS_H_
-#define GOOGLE_MAYBE_THREADS_H_
-
-#ifdef HAVE_PTHREAD
-#include <pthread.h>
-#endif
-
-int perftools_pthread_key_create(pthread_key_t *key,
-                                 void (*destr_function) (void *));
-int perftools_pthread_key_delete(pthread_key_t key);
-void *perftools_pthread_getspecific(pthread_key_t key);
-int perftools_pthread_setspecific(pthread_key_t key, void *val);
-int perftools_pthread_once(pthread_once_t *ctl,
-                           void  (*init_routine) (void));
-
-#endif  /* GOOGLE_MAYBE_THREADS_H_ */
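A hypothetical caller of the wrappers declared in the removed header could look like the sketch below. It assumes maybe_threads.cc is compiled into the same binary, and it behaves the same whether or not the final link pulls in -pthread.

#include <pthread.h>
#include <cstdio>
#include "maybe_threads.h"   // the removed header, shown for illustration only

static pthread_key_t cache_key;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;

static void destroy_cache(void* p) { delete static_cast<int*>(p); }
static void make_key() { perftools_pthread_key_create(&cache_key, destroy_cache); }

void touch_thread_cache() {
  perftools_pthread_once(&key_once, make_key);
  if (perftools_pthread_getspecific(cache_key) == NULL) {
    perftools_pthread_setspecific(cache_key, new int(0));
    std::puts("created a per-thread cache entry");
  }
}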

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/memfs_malloc.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/memfs_malloc.cc b/third_party/gperftools/src/memfs_malloc.cc
deleted file mode 100644
index ce20891..0000000
--- a/third_party/gperftools/src/memfs_malloc.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2007, Google Inc.
-// All rights reserved.
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-// 
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Arun Sharma
-//
-// A tcmalloc system allocator that uses a memory-based filesystem such as
-// tmpfs or hugetlbfs
-//
-// Since these only exist on linux, we only register this allocator there.
-
-#ifdef __linux
-
-#include <config.h>
-#include <errno.h>                      // for errno, EINVAL
-#include <inttypes.h>                   // for PRId64
-#include <limits.h>                     // for PATH_MAX
-#include <stddef.h>                     // for size_t, NULL
-#ifdef HAVE_STDINT_H
-#include <stdint.h>                     // for int64_t, uintptr_t
-#endif
-#include <stdio.h>                      // for snprintf
-#include <stdlib.h>                     // for mkstemp
-#include <string.h>                     // for strerror
-#include <sys/mman.h>                   // for mmap, MAP_FAILED, etc
-#include <sys/statfs.h>                 // for fstatfs, statfs
-#include <unistd.h>                     // for ftruncate, off_t, unlink
-#include <new>                          // for operator new
-#include <string>
-
-#include <gperftools/malloc_extension.h>
-#include "base/basictypes.h"
-#include "base/googleinit.h"
-#include "base/sysinfo.h"
-#include "internal_logging.h"
-
-// TODO(sanjay): Move the code below into the tcmalloc namespace
-using tcmalloc::kLog;
-using tcmalloc::kCrash;
-using tcmalloc::Log;
-using std::string;
-
-DEFINE_string(memfs_malloc_path, EnvToString("TCMALLOC_MEMFS_MALLOC_PATH", ""),
-              "Path where hugetlbfs or tmpfs is mounted. The caller is "
-              "responsible for ensuring that the path is unique and does "
-              "not conflict with another process");
-DEFINE_int64(memfs_malloc_limit_mb,
-             EnvToInt("TCMALLOC_MEMFS_LIMIT_MB", 0),
-             "Limit total allocation size to the "
-             "specified number of MiB.  0 == no limit.");
-DEFINE_bool(memfs_malloc_abort_on_fail,
-            EnvToBool("TCMALLOC_MEMFS_ABORT_ON_FAIL", false),
-            "abort() whenever memfs_malloc fails to satisfy an allocation "
-            "for any reason.");
-DEFINE_bool(memfs_malloc_ignore_mmap_fail,
-            EnvToBool("TCMALLOC_MEMFS_IGNORE_MMAP_FAIL", false),
-            "Ignore failures from mmap");
-DEFINE_bool(memfs_malloc_map_private,
-            EnvToBool("TCMALLOC_MEMFS_MAP_PRIVATE", false),
-	    "Use MAP_PRIVATE with mmap");
-
-// Hugetlbfs based allocator for tcmalloc
-class HugetlbSysAllocator: public SysAllocator {
-public:
-  explicit HugetlbSysAllocator(SysAllocator* fallback)
-    : failed_(true),  // To disable allocator until Initialize() is called.
-      big_page_size_(0),
-      hugetlb_fd_(-1),
-      hugetlb_base_(0),
-      fallback_(fallback) {
-  }
-
-  void* Alloc(size_t size, size_t *actual_size, size_t alignment);
-  bool Initialize();
-
-  bool failed_;          // Whether failed to allocate memory.
-
-private:
-  void* AllocInternal(size_t size, size_t *actual_size, size_t alignment);
-
-  int64 big_page_size_;
-  int hugetlb_fd_;       // file descriptor for hugetlb
-  off_t hugetlb_base_;
-
-  SysAllocator* fallback_;  // Default system allocator to fall back to.
-};
-static char hugetlb_space[sizeof(HugetlbSysAllocator)];
-
-// No locking needed here since we assume that tcmalloc calls
-// us with an internal lock held (see tcmalloc/system-alloc.cc).
-void* HugetlbSysAllocator::Alloc(size_t size, size_t *actual_size,
-                                 size_t alignment) {
-  if (failed_) {
-    return fallback_->Alloc(size, actual_size, alignment);
-  }
-
-  // We don't respond to allocation requests smaller than big_page_size_ unless
-  // the caller is ok to take more than they asked for. Used by MetaDataAlloc.
-  if (actual_size == NULL && size < big_page_size_) {
-    return fallback_->Alloc(size, actual_size, alignment);
-  }
-
-  // Enforce huge page alignment.  Be careful to deal with overflow.
-  size_t new_alignment = alignment;
-  if (new_alignment < big_page_size_) new_alignment = big_page_size_;
-  size_t aligned_size = ((size + new_alignment - 1) /
-                         new_alignment) * new_alignment;
-  if (aligned_size < size) {
-    return fallback_->Alloc(size, actual_size, alignment);
-  }
-
-  void* result = AllocInternal(aligned_size, actual_size, new_alignment);
-  if (result != NULL) {
-    return result;
-  }
-  Log(kLog, __FILE__, __LINE__,
-      "HugetlbSysAllocator: (failed, allocated)", failed_, hugetlb_base_);
-  if (FLAGS_memfs_malloc_abort_on_fail) {
-    Log(kCrash, __FILE__, __LINE__,
-        "memfs_malloc_abort_on_fail is set");
-  }
-  return fallback_->Alloc(size, actual_size, alignment);
-}
-
-void* HugetlbSysAllocator::AllocInternal(size_t size, size_t* actual_size,
-                                         size_t alignment) {
-  // Ask for extra memory if alignment > pagesize
-  size_t extra = 0;
-  if (alignment > big_page_size_) {
-    extra = alignment - big_page_size_;
-  }
-
-  // Test if this allocation would put us over the limit.
-  off_t limit = FLAGS_memfs_malloc_limit_mb*1024*1024;
-  if (limit > 0 && hugetlb_base_ + size + extra > limit) {
-    // Disable the allocator when there's less than one page left.
-    if (limit - hugetlb_base_ < big_page_size_) {
-      Log(kLog, __FILE__, __LINE__, "reached memfs_malloc_limit_mb");
-      failed_ = true;
-    }
-    else {
-      Log(kLog, __FILE__, __LINE__,
-          "alloc too large (size, bytes left)", size, limit-hugetlb_base_);
-    }
-    return NULL;
-  }
-
-  // This is not needed for hugetlbfs, but needed for tmpfs.  Annoyingly
-  // hugetlbfs returns EINVAL for ftruncate.
-  int ret = ftruncate(hugetlb_fd_, hugetlb_base_ + size + extra);
-  if (ret != 0 && errno != EINVAL) {
-    Log(kLog, __FILE__, __LINE__,
-        "ftruncate failed", strerror(errno));
-    failed_ = true;
-    return NULL;
-  }
-
-  // Note: size + extra does not overflow since:
-  //            size + alignment < (1<<NBITS).
-  // and        extra <= alignment
-  // therefore  size + extra < (1<<NBITS)
-  void *result;
-  result = mmap(0, size + extra, PROT_WRITE|PROT_READ,
-                FLAGS_memfs_malloc_map_private ? MAP_PRIVATE : MAP_SHARED,
-                hugetlb_fd_, hugetlb_base_);
-  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
-    if (!FLAGS_memfs_malloc_ignore_mmap_fail) {
-      Log(kLog, __FILE__, __LINE__,
-          "mmap failed (size, error)", size + extra, strerror(errno));
-      failed_ = true;
-    }
-    return NULL;
-  }
-  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
-
-  // Adjust the return memory so it is aligned
-  size_t adjust = 0;
-  if ((ptr & (alignment - 1)) != 0) {
-    adjust = alignment - (ptr & (alignment - 1));
-  }
-  ptr += adjust;
-  hugetlb_base_ += (size + extra);
-
-  if (actual_size) {
-    *actual_size = size + extra - adjust;
-  }
-
-  return reinterpret_cast<void*>(ptr);
-}
-
-bool HugetlbSysAllocator::Initialize() {
-  char path[PATH_MAX];
-  const int pathlen = FLAGS_memfs_malloc_path.size();
-  if (pathlen + 8 > sizeof(path)) {
-    Log(kCrash, __FILE__, __LINE__, "XX fatal: memfs_malloc_path too long");
-    return false;
-  }
-  memcpy(path, FLAGS_memfs_malloc_path.data(), pathlen);
-  memcpy(path + pathlen, ".XXXXXX", 8);  // Also copies terminating \0
-
-  int hugetlb_fd = mkstemp(path);
-  if (hugetlb_fd == -1) {
-    Log(kLog, __FILE__, __LINE__,
-        "warning: unable to create memfs_malloc_path",
-        path, strerror(errno));
-    return false;
-  }
-
-  // Cleanup memory on process exit
-  if (unlink(path) == -1) {
-    Log(kCrash, __FILE__, __LINE__,
-        "fatal: error unlinking memfs_malloc_path", path, strerror(errno));
-    return false;
-  }
-
-  // Use fstatfs to figure out the default page size for memfs
-  struct statfs sfs;
-  if (fstatfs(hugetlb_fd, &sfs) == -1) {
-    Log(kCrash, __FILE__, __LINE__,
-        "fatal: error fstatfs of memfs_malloc_path", strerror(errno));
-    return false;
-  }
-  int64 page_size = sfs.f_bsize;
-
-  hugetlb_fd_ = hugetlb_fd;
-  big_page_size_ = page_size;
-  failed_ = false;
-  return true;
-}
-
-REGISTER_MODULE_INITIALIZER(memfs_malloc, {
-  if (FLAGS_memfs_malloc_path.length()) {
-    SysAllocator* alloc = MallocExtension::instance()->GetSystemAllocator();
-    HugetlbSysAllocator* hp = new (hugetlb_space) HugetlbSysAllocator(alloc);
-    if (hp->Initialize()) {
-      MallocExtension::instance()->SetSystemAllocator(hp);
-    }
-  }
-});
-
-#endif   /* ifdef __linux */
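The allocation scheme the removed memfs_malloc.cc implements (back memory with a file on a memory filesystem, grow the file with ftruncate, and hand out mappings at increasing offsets) can be sketched standalone as follows. This is an illustration rather than the allocator itself; /dev/shm (tmpfs) is assumed purely for the sketch, whereas the real code takes its mount point from TCMALLOC_MEMFS_MALLOC_PATH and typically targets hugetlbfs.

#include <sys/mman.h>
#include <cstdio>
#include <cstring>
#include <stdlib.h>    // mkstemp
#include <unistd.h>    // ftruncate, unlink

int main() {
  char path[] = "/dev/shm/memfs_sketch.XXXXXX";   // assumed tmpfs mount
  int fd = mkstemp(path);
  if (fd == -1) { std::perror("mkstemp"); return 1; }
  unlink(path);                        // backing memory is reclaimed on exit

  const size_t chunk = 1 << 20;        // 1 MiB per "allocation"
  off_t base = 0;                      // next unused offset in the file
  for (int i = 0; i < 4; ++i) {
    if (ftruncate(fd, base + chunk) != 0) { std::perror("ftruncate"); return 1; }
    void* p = mmap(NULL, chunk, PROT_READ | PROT_WRITE, MAP_SHARED, fd, base);
    if (p == MAP_FAILED) { std::perror("mmap"); return 1; }
    std::memset(p, 0, chunk);          // touch the pages
    std::printf("chunk %d mapped at %p (file offset %lld)\n",
                i, p, static_cast<long long>(base));
    base += chunk;
  }
  return 0;
}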

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/memory_region_map.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/memory_region_map.cc b/third_party/gperftools/src/memory_region_map.cc
deleted file mode 100755
index e885859..0000000
--- a/third_party/gperftools/src/memory_region_map.cc
+++ /dev/null
@@ -1,829 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-/* Copyright (c) 2006, Google Inc.
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Maxim Lifantsev
- */
-
-//
-// Background and key design points of MemoryRegionMap.
-//
-// MemoryRegionMap is a low-level module with quite atypical requirements that
-// result in some degree of non-triviality of the implementation and design.
-//
-// MemoryRegionMap collects info about *all* memory regions created with
-// mmap, munmap, mremap, sbrk.
-// The key word above is 'all': all that happen in a process
-// during its lifetime, frequently starting even before global object
-// constructor execution.
-//
-// This is needed by the primary client of MemoryRegionMap:
-// HeapLeakChecker uses the regions and the associated stack traces
-// to figure out what part of the memory is the heap:
-// if MemoryRegionMap were to miss some (early) regions, leak checking would
-// stop working correctly.
-//
-// To accomplish the goal of functioning before/during global object
-// constructor execution, MemoryRegionMap is implemented as a singleton service
-// that relies on its own on-demand initialized, constructor-less static data,
-// and only relies on other low-level modules that can also function properly
-// even before global object constructors run.
-//
-// Accomplishing the goal of collecting data about all mmap, munmap, mremap,
-// sbrk occurrences is more involved: conceptually, to do this one needs to
-// record some bits of data in particular about any mmap or sbrk call,
-// but to do that one needs to allocate memory for that data at some point,
-// but all memory allocations in the end themselves come from an mmap
-// or sbrk call (that's how the address space of the process grows).
-//
-// Also note that we need to do all the above recording from
-// within an mmap/sbrk hook which is sometimes/frequently made by a memory
-// allocator, including the allocator MemoryRegionMap itself must rely on.
-// In the case of heap-checker usage this includes even the very first
-// mmap/sbrk call happening in the program: heap-checker gets activated due to
-// a link-time installed mmap/sbrk hook and it initializes MemoryRegionMap
-// and asks it to record info about this very first call right from that
-// very first hook invocation.
-//
-// MemoryRegionMap is doing its memory allocations via LowLevelAlloc:
-// unlike more complex standard memory allocator, LowLevelAlloc cooperates with
-// MemoryRegionMap by not holding any of its own locks while it calls mmap
-// to get memory, thus we are able to call LowLevelAlloc from
-// our mmap/sbrk hooks without causing a deadlock in it.
-// For the same reason of deadlock prevention the locking in MemoryRegionMap
-// itself is write-recursive which is an exception to Google's mutex usage.
-//
-// We still need to break the infinite cycle of mmap calling our hook,
-// which asks LowLevelAlloc for memory to record this mmap,
-// which (sometimes) causes mmap, which calls our hook, and so on.
-// We do this as follows: on a recursive call of MemoryRegionMap's
-// mmap/sbrk/mremap hook we record the data about the allocation in a
-// static fixed-sized stack (saved_regions and saved_buckets), when the
-// recursion unwinds but before returning from the outer hook call we unwind
-// this stack and move the data from saved_regions and saved_buckets to its
-// permanent place in the RegionSet and "bucket_table" respectively,
-// which can cause more allocations and mmap-s and recursion and unwinding,
-// but the whole process ends eventually due to the fact that for the small
-// allocations we are doing LowLevelAlloc reuses one mmap call and parcels out
-// the memory it created to satisfy several of our allocation requests.
-//
-
-// ========================================================================= //
-
-#include <config.h>
-
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#ifdef HAVE_INTTYPES_H
-#include <inttypes.h>
-#endif
-#ifdef HAVE_MMAP
-#include <sys/mman.h>
-#elif !defined(MAP_FAILED)
-#define MAP_FAILED -1  // the only thing we need from mman.h
-#endif
-#ifdef HAVE_PTHREAD
-#include <pthread.h>   // for pthread_t, pthread_self()
-#endif
-#include <stddef.h>
-
-#include <algorithm>
-#include <set>
-
-#include "memory_region_map.h"
-
-#include "base/logging.h"
-#include "base/low_level_alloc.h"
-#include "malloc_hook-inl.h"
-
-#include <gperftools/stacktrace.h>
-#include <gperftools/malloc_hook.h>
-
-// MREMAP_FIXED is a linux extension.  The way it's used in this file,
-// setting it to 0 is equivalent to saying, "This feature isn't
-// supported", which is right.
-#ifndef MREMAP_FIXED
-# define MREMAP_FIXED  0
-#endif
-
-using std::max;
-
-// ========================================================================= //
-
-int MemoryRegionMap::client_count_ = 0;
-int MemoryRegionMap::max_stack_depth_ = 0;
-MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL;
-LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL;
-SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED);
-SpinLock MemoryRegionMap::owner_lock_(  // ACQUIRED_AFTER(lock_)
-    SpinLock::LINKER_INITIALIZED);
-int MemoryRegionMap::recursion_count_ = 0;  // GUARDED_BY(owner_lock_)
-pthread_t MemoryRegionMap::lock_owner_tid_;  // GUARDED_BY(owner_lock_)
-int64 MemoryRegionMap::map_size_ = 0;
-int64 MemoryRegionMap::unmap_size_ = 0;
-HeapProfileBucket** MemoryRegionMap::bucket_table_ = NULL;  // GUARDED_BY(lock_)
-int MemoryRegionMap::num_buckets_ = 0;  // GUARDED_BY(lock_)
-int MemoryRegionMap::saved_buckets_count_ = 0;  // GUARDED_BY(lock_)
-HeapProfileBucket MemoryRegionMap::saved_buckets_[20];  // GUARDED_BY(lock_)
-
-// GUARDED_BY(lock_)
-const void* MemoryRegionMap::saved_buckets_keys_[20][kMaxStackDepth];
-
-// ========================================================================= //
-
-// Simple hook into execution of global object constructors,
-// so that we do not call pthread_self() when it does not yet work.
-static bool libpthread_initialized = false;
-static bool initializer = (libpthread_initialized = true, true);
-
-static inline bool current_thread_is(pthread_t should_be) {
-  // Before main() runs, there's only one thread, so we're always that thread
-  if (!libpthread_initialized) return true;
-  // this starts working only sometime well into global constructor execution:
-  return pthread_equal(pthread_self(), should_be);
-}
-
-// ========================================================================= //
-
-// Constructor-less place-holder to store a RegionSet in.
-union MemoryRegionMap::RegionSetRep {
-  char rep[sizeof(RegionSet)];
-  void* align_it;  // do not need a better alignment for 'rep' than this
-  RegionSet* region_set() { return reinterpret_cast<RegionSet*>(rep); }
-};
-
-// The bytes where MemoryRegionMap::regions_ will point to.
-// We use RegionSetRep with noop c-tor so that global construction
-// does not interfere.
-static MemoryRegionMap::RegionSetRep regions_rep;
-
-// ========================================================================= //
-
-// Has InsertRegionLocked been called recursively
-// (or rather should we *not* use regions_ to record a hooked mmap).
-static bool recursive_insert = false;
-
-void MemoryRegionMap::Init(int max_stack_depth, bool use_buckets) {
-  RAW_VLOG(10, "MemoryRegionMap Init");
-  RAW_CHECK(max_stack_depth >= 0, "");
-  // Make sure we don't overflow the memory in region stacks:
-  RAW_CHECK(max_stack_depth <= kMaxStackDepth,
-            "need to increase kMaxStackDepth?");
-  Lock();
-  client_count_ += 1;
-  max_stack_depth_ = max(max_stack_depth_, max_stack_depth);
-  if (client_count_ > 1) {
-    // not first client: already did initialization-proper
-    Unlock();
-    RAW_VLOG(10, "MemoryRegionMap Init increment done");
-    return;
-  }
-  // Set our hooks and make sure they were installed:
-  RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
-  RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
-  RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
-  RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
-  // We need to set recursive_insert since the NewArena call itself
-  // will already do some allocations with mmap which our hooks will catch.
-  // recursive_insert allows us to buffer info about these mmap calls.
-  // Note that Init() can be (and is) sometimes called
-  // already from within an mmap/sbrk hook.
-  recursive_insert = true;
-  arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
-  recursive_insert = false;
-  HandleSavedRegionsLocked(&InsertRegionLocked);  // flush the buffered ones
-    // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before
-    // recursive_insert = false; as InsertRegionLocked will also construct
-    // regions_ on demand for us.
-  if (use_buckets) {
-    const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
-    recursive_insert = true;
-    bucket_table_ = static_cast<HeapProfileBucket**>(
-        MyAllocator::Allocate(table_bytes));
-    recursive_insert = false;
-    memset(bucket_table_, 0, table_bytes);
-    num_buckets_ = 0;
-  }
-  Unlock();
-  RAW_VLOG(10, "MemoryRegionMap Init done");
-}
-
-bool MemoryRegionMap::Shutdown() {
-  RAW_VLOG(10, "MemoryRegionMap Shutdown");
-  Lock();
-  RAW_CHECK(client_count_ > 0, "");
-  client_count_ -= 1;
-  if (client_count_ != 0) {  // not last client; need not really shutdown
-    Unlock();
-    RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done");
-    return true;
-  }
-  if (bucket_table_ != NULL) {
-    for (int i = 0; i < kHashTableSize; i++) {
-      for (HeapProfileBucket* curr = bucket_table_[i]; curr != 0; /**/) {
-        HeapProfileBucket* bucket = curr;
-        curr = curr->next;
-        MyAllocator::Free(bucket->stack, 0);
-        MyAllocator::Free(bucket, 0);
-      }
-    }
-    MyAllocator::Free(bucket_table_, 0);
-    num_buckets_ = 0;
-    bucket_table_ = NULL;
-  }
-  RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
-  RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
-  RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
-  RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
-  if (regions_) regions_->~RegionSet();
-  regions_ = NULL;
-  bool deleted_arena = LowLevelAlloc::DeleteArena(arena_);
-  if (deleted_arena) {
-    arena_ = 0;
-  } else {
-    RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used");
-  }
-  Unlock();
-  RAW_VLOG(10, "MemoryRegionMap Shutdown done");
-  return deleted_arena;
-}
-
-bool MemoryRegionMap::IsRecordingLocked() {
-  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
-  return client_count_ > 0;
-}
-
-// Invariants (once libpthread_initialized is true):
-//   * While lock_ is not held, recursion_count_ is 0 (and
-//     lock_owner_tid_ is the previous owner, but we don't rely on
-//     that).
-//   * recursion_count_ and lock_owner_tid_ are only written while
-//     both lock_ and owner_lock_ are held. They may be read under
-//     just owner_lock_.
-//   * At entry and exit of Lock() and Unlock(), the current thread
-//     owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self())
-//     && recursion_count_ > 0.
-void MemoryRegionMap::Lock() {
-  {
-    SpinLockHolder l(&owner_lock_);
-    if (recursion_count_ > 0 && current_thread_is(lock_owner_tid_)) {
-      RAW_CHECK(lock_.IsHeld(), "Invariants violated");
-      recursion_count_++;
-      RAW_CHECK(recursion_count_ <= 5,
-                "recursive lock nesting unexpectedly deep");
-      return;
-    }
-  }
-  lock_.Lock();
-  {
-    SpinLockHolder l(&owner_lock_);
-    RAW_CHECK(recursion_count_ == 0,
-              "Last Unlock didn't reset recursion_count_");
-    if (libpthread_initialized)
-      lock_owner_tid_ = pthread_self();
-    recursion_count_ = 1;
-  }
-}
-
-void MemoryRegionMap::Unlock() {
-  SpinLockHolder l(&owner_lock_);
-  RAW_CHECK(recursion_count_ >  0, "unlock when not held");
-  RAW_CHECK(lock_.IsHeld(),
-            "unlock when not held, and recursion_count_ is wrong");
-  RAW_CHECK(current_thread_is(lock_owner_tid_), "unlock by non-holder");
-  recursion_count_--;
-  if (recursion_count_ == 0) {
-    lock_.Unlock();
-  }
-}
-
-bool MemoryRegionMap::LockIsHeld() {
-  SpinLockHolder l(&owner_lock_);
-  return lock_.IsHeld()  &&  current_thread_is(lock_owner_tid_);
-}
-
-const MemoryRegionMap::Region*
-MemoryRegionMap::DoFindRegionLocked(uintptr_t addr) {
-  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
-  if (regions_ != NULL) {
-    Region sample;
-    sample.SetRegionSetKey(addr);
-    RegionSet::iterator region = regions_->lower_bound(sample);
-    if (region != regions_->end()) {
-      RAW_CHECK(addr <= region->end_addr, "");
-      if (region->start_addr <= addr  &&  addr < region->end_addr) {
-        return &(*region);
-      }
-    }
-  }
-  return NULL;
-}
-
-bool MemoryRegionMap::FindRegion(uintptr_t addr, Region* result) {
-  Lock();
-  const Region* region = DoFindRegionLocked(addr);
-  if (region != NULL) *result = *region;  // create it as an independent copy
-  Unlock();
-  return region != NULL;
-}
-
-bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
-                                             Region* result) {
-  Lock();
-  const Region* region = DoFindRegionLocked(stack_top);
-  if (region != NULL) {
-    RAW_VLOG(10, "Stack at %p is inside region %p..%p",
-                reinterpret_cast<void*>(stack_top),
-                reinterpret_cast<void*>(region->start_addr),
-                reinterpret_cast<void*>(region->end_addr));
-    const_cast<Region*>(region)->set_is_stack();  // now we know
-      // cast is safe (set_is_stack does not change the set ordering key)
-    *result = *region;  // create *result as an independent copy
-  }
-  Unlock();
-  return region != NULL;
-}
-
-HeapProfileBucket* MemoryRegionMap::GetBucket(int depth,
-                                              const void* const key[]) {
-  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
-  // Make hash-value
-  uintptr_t hash = 0;
-  for (int i = 0; i < depth; i++) {
-    hash += reinterpret_cast<uintptr_t>(key[i]);
-    hash += hash << 10;
-    hash ^= hash >> 6;
-  }
-  hash += hash << 3;
-  hash ^= hash >> 11;
-
-  // Lookup stack trace in table
-  unsigned int hash_index = (static_cast<unsigned int>(hash)) % kHashTableSize;
-  for (HeapProfileBucket* bucket = bucket_table_[hash_index];
-       bucket != 0;
-       bucket = bucket->next) {
-    if ((bucket->hash == hash) && (bucket->depth == depth) &&
-        std::equal(key, key + depth, bucket->stack)) {
-      return bucket;
-    }
-  }
-
-  // Create new bucket
-  const size_t key_size = sizeof(key[0]) * depth;
-  HeapProfileBucket* bucket;
-  if (recursive_insert) {  // recursion: save in saved_buckets_
-    const void** key_copy = saved_buckets_keys_[saved_buckets_count_];
-    std::copy(key, key + depth, key_copy);
-    bucket = &saved_buckets_[saved_buckets_count_];
-    memset(bucket, 0, sizeof(*bucket));
-    ++saved_buckets_count_;
-    bucket->stack = key_copy;
-    bucket->next  = NULL;
-  } else {
-    recursive_insert = true;
-    const void** key_copy = static_cast<const void**>(
-        MyAllocator::Allocate(key_size));
-    recursive_insert = false;
-    std::copy(key, key + depth, key_copy);
-    recursive_insert = true;
-    bucket = static_cast<HeapProfileBucket*>(
-        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
-    recursive_insert = false;
-    memset(bucket, 0, sizeof(*bucket));
-    bucket->stack = key_copy;
-    bucket->next  = bucket_table_[hash_index];
-  }
-  bucket->hash = hash;
-  bucket->depth = depth;
-  bucket_table_[hash_index] = bucket;
-  ++num_buckets_;
-  return bucket;
-}
-
-MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() {
-  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
-  RAW_CHECK(regions_ != NULL, "");
-  return regions_->begin();
-}
-
-MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() {
-  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
-  RAW_CHECK(regions_ != NULL, "");
-  return regions_->end();
-}
-
-inline void MemoryRegionMap::DoInsertRegionLocked(const Region& region) {
-  RAW_VLOG(12, "Inserting region %p..%p from %p",
-              reinterpret_cast<void*>(region.start_addr),
-              reinterpret_cast<void*>(region.end_addr),
-              reinterpret_cast<void*>(region.caller()));
-  RegionSet::const_iterator i = regions_->lower_bound(region);
-  if (i != regions_->end() && i->start_addr <= region.start_addr) {
-    RAW_DCHECK(region.end_addr <= i->end_addr, "");  // lower_bound ensures this
-    return;  // 'region' is a subset of an already recorded region; do nothing
-    // We can be stricter and allow this only when *i has been created via
-    // an mmap with MAP_NORESERVE flag set.
-  }
-  if (DEBUG_MODE) {
-    RAW_CHECK(i == regions_->end()  ||  !region.Overlaps(*i),
-              "Wow, overlapping memory regions");
-    Region sample;
-    sample.SetRegionSetKey(region.start_addr);
-    i = regions_->lower_bound(sample);
-    RAW_CHECK(i == regions_->end()  ||  !region.Overlaps(*i),
-              "Wow, overlapping memory regions");
-  }
-  region.AssertIsConsistent();  // just making sure
-  // This inserts and allocates permanent storage for region
-  // and its call stack data: it's safe to do it now:
-  regions_->insert(region);
-  RAW_VLOG(12, "Inserted region %p..%p :",
-              reinterpret_cast<void*>(region.start_addr),
-              reinterpret_cast<void*>(region.end_addr));
-  if (VLOG_IS_ON(12))  LogAllLocked();
-}
-
-// These variables are local to MemoryRegionMap::InsertRegionLocked()
-// and MemoryRegionMap::HandleSavedRegionsLocked()
-// and are file-level to ensure that they are initialized at load time.
-
-// Number of unprocessed region inserts.
-static int saved_regions_count = 0;
-
-// Unprocessed inserts (must be big enough to hold all allocations that can
-// be caused by an InsertRegionLocked call).
-// Region has no constructor, so that c-tor execution does not interfere
-// with the any-time use of the static memory behind saved_regions.
-static MemoryRegionMap::Region saved_regions[20];
-
-inline void MemoryRegionMap::HandleSavedRegionsLocked(
-              void (*insert_func)(const Region& region)) {
-  while (saved_regions_count > 0) {
-    // Making a local-var copy of the region argument to insert_func
-    // including its stack (w/o doing any memory allocations) is important:
-    // in many cases the memory in saved_regions
-    // will get written-to during the (*insert_func)(r) call below.
-    Region r = saved_regions[--saved_regions_count];
-    (*insert_func)(r);
-  }
-}
-
-void MemoryRegionMap::RestoreSavedBucketsLocked() {
-  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
-  while (saved_buckets_count_ > 0) {
-    HeapProfileBucket bucket = saved_buckets_[--saved_buckets_count_];
-    unsigned int hash_index =
-        static_cast<unsigned int>(bucket.hash) % kHashTableSize;
-    bool is_found = false;
-    for (HeapProfileBucket* curr = bucket_table_[hash_index];
-         curr != 0;
-         curr = curr->next) {
-      if ((curr->hash == bucket.hash) && (curr->depth == bucket.depth) &&
-          std::equal(bucket.stack, bucket.stack + bucket.depth, curr->stack)) {
-        curr->allocs += bucket.allocs;
-        curr->alloc_size += bucket.alloc_size;
-        curr->frees += bucket.frees;
-        curr->free_size += bucket.free_size;
-        is_found = true;
-        break;
-      }
-    }
-    if (is_found) continue;
-
-    const size_t key_size = sizeof(bucket.stack[0]) * bucket.depth;
-    const void** key_copy = static_cast<const void**>(
-        MyAllocator::Allocate(key_size));
-    std::copy(bucket.stack, bucket.stack + bucket.depth, key_copy);
-    HeapProfileBucket* new_bucket = static_cast<HeapProfileBucket*>(
-        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
-    memset(new_bucket, 0, sizeof(*new_bucket));
-    new_bucket->hash = bucket.hash;
-    new_bucket->depth = bucket.depth;
-    new_bucket->stack = key_copy;
-    new_bucket->next = bucket_table_[hash_index];
-    bucket_table_[hash_index] = new_bucket;
-    ++num_buckets_;
-  }
-}
-
-inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
-  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
-  // We can be called recursively, because RegionSet constructor
-  // and DoInsertRegionLocked() (called below) can call the allocator.
-  // recursive_insert tells us if that's the case. When this happens,
-  // region insertion information is recorded in saved_regions[],
-  // and taken into account when the recursion unwinds.
-  // Do the insert:
-  if (recursive_insert) {  // recursion: save in saved_regions
-    RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p",
-                reinterpret_cast<void*>(region.start_addr),
-                reinterpret_cast<void*>(region.end_addr),
-                reinterpret_cast<void*>(region.caller()));
-    RAW_CHECK(saved_regions_count < arraysize(saved_regions), "");
-    // Copy 'region' to saved_regions[saved_regions_count]
-    // together with the contents of its call_stack,
-    // then increment saved_regions_count.
-    saved_regions[saved_regions_count++] = region;
-  } else {  // not a recursive call
-    if (regions_ == NULL) {  // init regions_
-      RAW_VLOG(12, "Initializing region set");
-      regions_ = regions_rep.region_set();
-      recursive_insert = true;
-      new(regions_) RegionSet();
-      HandleSavedRegionsLocked(&DoInsertRegionLocked);
-      recursive_insert = false;
-    }
-    recursive_insert = true;
-    // Do the actual insertion work to put new regions into regions_:
-    DoInsertRegionLocked(region);
-    HandleSavedRegionsLocked(&DoInsertRegionLocked);
-    recursive_insert = false;
-  }
-}
-
-// We strip out a different number of stack frames in debug mode
-// because less inlining happens in that case
-#ifdef NDEBUG
-static const int kStripFrames = 1;
-#else
-static const int kStripFrames = 3;
-#endif
-
-void MemoryRegionMap::RecordRegionAddition(const void* start, size_t size) {
-  // Record start/end info about this memory acquisition call in a new region:
-  Region region;
-  region.Create(start, size);
-  // First get the call stack info into the local variable 'region':
-  int depth = 0;
-  // NOTE: libunwind also does mmap, and very likely while holding
-  // its own lock(s). So some threads may first take the libunwind lock,
-  // and then take region map lock (necessary to record mmap done from
-  // inside libunwind). On the other hand other thread(s) may do
-  // normal mmap. Which would call this method to record it. Which
-  // would then proceed with installing that record to region map
-  // while holding region map lock. That may cause mmap from our own
-  // internal allocators, so an attempt to unwind in this case may cause the
-  // reverse order of taking the libunwind and region map locks, which is
-  // an obvious deadlock.
-  //
-  // Thankfully, we can easily detect if we're holding region map lock
-  // and avoid recording backtrace in this (rare and largely
-  // irrelevant) case. By doing this we "declare" that a thread needing
-  // both locks must take the region map lock last. In other words, we do
-  // not allow taking the libunwind lock when we already have the region map
-  // lock. Note, this is generally impossible when somebody tries to
-  // mix cpu profiling and heap checking/profiling, because cpu
-  // profiler grabs backtraces at arbitrary places. But at least such
-  // combination is rarer and less relevant.
-  if (max_stack_depth_ > 0 && !LockIsHeld()) {
-    depth = MallocHook::GetCallerStackTrace(const_cast<void**>(region.call_stack),
-                                            max_stack_depth_, kStripFrames + 1);
-  }
-  region.set_call_stack_depth(depth);  // record stack info fully
-  RAW_VLOG(10, "New global region %p..%p from %p",
-              reinterpret_cast<void*>(region.start_addr),
-              reinterpret_cast<void*>(region.end_addr),
-              reinterpret_cast<void*>(region.caller()));
-  // Note: none of the above allocates memory.
-  Lock();  // recursively lock
-  map_size_ += size;
-  InsertRegionLocked(region);
-    // This will (eventually) allocate storage for and copy over the stack data
-    // from region.call_stack_data_ that is pointed by region.call_stack().
-  if (bucket_table_ != NULL) {
-    HeapProfileBucket* b = GetBucket(depth, region.call_stack);
-    ++b->allocs;
-    b->alloc_size += size;
-    if (!recursive_insert) {
-      recursive_insert = true;
-      RestoreSavedBucketsLocked();
-      recursive_insert = false;
-    }
-  }
-  Unlock();
-}
-
-void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
-  Lock();
-  if (recursive_insert) {
-    // First remove the removed region from saved_regions, if it's
-    // there, to prevent overrunning saved_regions in recursive
-    // map/unmap call sequences, and also from later inserting regions
-    // which have already been unmapped.
-    uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
-    uintptr_t end_addr = start_addr + size;
-    int put_pos = 0;
-    int old_count = saved_regions_count;
-    for (int i = 0; i < old_count; ++i, ++put_pos) {
-      Region& r = saved_regions[i];
-      if (r.start_addr == start_addr && r.end_addr == end_addr) {
-        // An exact match, so it's safe to remove.
-        RecordRegionRemovalInBucket(r.call_stack_depth, r.call_stack, size);
-        --saved_regions_count;
-        --put_pos;
-        RAW_VLOG(10, ("Insta-Removing saved region %p..%p; "
-                     "now have %d saved regions"),
-                 reinterpret_cast<void*>(start_addr),
-                 reinterpret_cast<void*>(end_addr),
-                 saved_regions_count);
-      } else {
-        if (put_pos < i) {
-          saved_regions[put_pos] = saved_regions[i];
-        }
-      }
-    }
-  }
-  if (regions_ == NULL) {  // We must have just unset the hooks,
-                           // but this thread was already inside the hook.
-    Unlock();
-    return;
-  }
-  // First handle adding any saved regions:
-  if (!recursive_insert) {
-    HandleSavedRegionsLocked(&InsertRegionLocked);
-  }
-  uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
-  uintptr_t end_addr = start_addr + size;
-  // subtract start_addr, end_addr from all the regions
-  RAW_VLOG(10, "Removing global region %p..%p; have %" PRIuS " regions",
-              reinterpret_cast<void*>(start_addr),
-              reinterpret_cast<void*>(end_addr),
-              regions_->size());
-  Region sample;
-  sample.SetRegionSetKey(start_addr);
-  // Only iterate over the regions that might overlap start_addr..end_addr:
-  for (RegionSet::iterator region = regions_->lower_bound(sample);
-       region != regions_->end()  &&  region->start_addr < end_addr;
-       /*noop*/) {
-    RAW_VLOG(13, "Looking at region %p..%p",
-                reinterpret_cast<void*>(region->start_addr),
-                reinterpret_cast<void*>(region->end_addr));
-    if (start_addr <= region->start_addr  &&
-        region->end_addr <= end_addr) {  // full deletion
-      RAW_VLOG(12, "Deleting region %p..%p",
-                  reinterpret_cast<void*>(region->start_addr),
-                  reinterpret_cast<void*>(region->end_addr));
-      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
-                                  region->end_addr - region->start_addr);
-      RegionSet::iterator d = region;
-      ++region;
-      regions_->erase(d);
-      continue;
-    } else if (region->start_addr < start_addr  &&
-               end_addr < region->end_addr) {  // cutting-out split
-      RAW_VLOG(12, "Splitting region %p..%p in two",
-                  reinterpret_cast<void*>(region->start_addr),
-                  reinterpret_cast<void*>(region->end_addr));
-      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
-                                  end_addr - start_addr);
-      // Make another region for the start portion:
-      // The new region has to be the start portion because we can't
-      // just modify region->end_addr as it's the sorting key.
-      Region r = *region;
-      r.set_end_addr(start_addr);
-      InsertRegionLocked(r);
-      // cut *region from start:
-      const_cast<Region&>(*region).set_start_addr(end_addr);
-    } else if (end_addr > region->start_addr  &&
-               start_addr <= region->start_addr) {  // cut from start
-      RAW_VLOG(12, "Start-chopping region %p..%p",
-                  reinterpret_cast<void*>(region->start_addr),
-                  reinterpret_cast<void*>(region->end_addr));
-      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
-                                  end_addr - region->start_addr);
-      const_cast<Region&>(*region).set_start_addr(end_addr);
-    } else if (start_addr > region->start_addr  &&
-               start_addr < region->end_addr) {  // cut from end
-      RAW_VLOG(12, "End-chopping region %p..%p",
-                  reinterpret_cast<void*>(region->start_addr),
-                  reinterpret_cast<void*>(region->end_addr));
-      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
-                                  region->end_addr - start_addr);
-      // Can't just modify region->end_addr (it's the sorting key):
-      Region r = *region;
-      r.set_end_addr(start_addr);
-      RegionSet::iterator d = region;
-      ++region;
-      // It's safe to erase before inserting since r is independent of *d:
-      // r contains an own copy of the call stack:
-      regions_->erase(d);
-      InsertRegionLocked(r);
-      continue;
-    }
-    ++region;
-  }
-  RAW_VLOG(12, "Removed region %p..%p; have %" PRIuS " regions",
-              reinterpret_cast<void*>(start_addr),
-              reinterpret_cast<void*>(end_addr),
-              regions_->size());
-  if (VLOG_IS_ON(12))  LogAllLocked();
-  unmap_size_ += size;
-  Unlock();
-}
-
-void MemoryRegionMap::RecordRegionRemovalInBucket(int depth,
-                                                  const void* const stack[],
-                                                  size_t size) {
-  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
-  if (bucket_table_ == NULL) return;
-  HeapProfileBucket* b = GetBucket(depth, stack);
-  ++b->frees;
-  b->free_size += size;
-}
-
-void MemoryRegionMap::MmapHook(const void* result,
-                               const void* start, size_t size,
-                               int prot, int flags,
-                               int fd, off_t offset) {
-  // TODO(maxim): replace all 0x%" PRIxS " by %p when RAW_VLOG uses a safe
-  // snprintf reimplementation that does not malloc to pretty-print NULL
-  RAW_VLOG(10, "MMap = 0x%" PRIxPTR " of %" PRIuS " at %" PRIu64 " "
-              "prot %d flags %d fd %d offs %" PRId64,
-              reinterpret_cast<uintptr_t>(result), size,
-              reinterpret_cast<uint64>(start), prot, flags, fd,
-              static_cast<int64>(offset));
-  if (result != reinterpret_cast<void*>(MAP_FAILED)  &&  size != 0) {
-    RecordRegionAddition(result, size);
-  }
-}
-
-void MemoryRegionMap::MunmapHook(const void* ptr, size_t size) {
-  RAW_VLOG(10, "MUnmap of %p %" PRIuS "", ptr, size);
-  if (size != 0) {
-    RecordRegionRemoval(ptr, size);
-  }
-}
-
-void MemoryRegionMap::MremapHook(const void* result,
-                                 const void* old_addr, size_t old_size,
-                                 size_t new_size, int flags,
-                                 const void* new_addr) {
-  RAW_VLOG(10, "MRemap = 0x%" PRIxPTR " of 0x%" PRIxPTR " %" PRIuS " "
-              "to %" PRIuS " flags %d new_addr=0x%" PRIxPTR,
-              (uintptr_t)result, (uintptr_t)old_addr,
-               old_size, new_size, flags,
-               flags & MREMAP_FIXED ? (uintptr_t)new_addr : 0);
-  if (result != reinterpret_cast<void*>(-1)) {
-    RecordRegionRemoval(old_addr, old_size);
-    RecordRegionAddition(result, new_size);
-  }
-}
-
-void MemoryRegionMap::SbrkHook(const void* result, ptrdiff_t increment) {
-  RAW_VLOG(10, "Sbrk = 0x%" PRIxPTR " of %" PRIdS "", (uintptr_t)result, increment);
-  if (result != reinterpret_cast<void*>(-1)) {
-    if (increment > 0) {
-      void* new_end = sbrk(0);
-      RecordRegionAddition(result, reinterpret_cast<uintptr_t>(new_end) -
-                                   reinterpret_cast<uintptr_t>(result));
-    } else if (increment < 0) {
-      void* new_end = sbrk(0);
-      RecordRegionRemoval(new_end, reinterpret_cast<uintptr_t>(result) -
-                                   reinterpret_cast<uintptr_t>(new_end));
-    }
-  }
-}
-
-void MemoryRegionMap::LogAllLocked() {
-  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
-  RAW_LOG(INFO, "List of regions:");
-  uintptr_t previous = 0;
-  for (RegionSet::const_iterator r = regions_->begin();
-       r != regions_->end(); ++r) {
-    RAW_LOG(INFO, "Memory region 0x%" PRIxPTR "..0x%" PRIxPTR " "
-                  "from 0x%" PRIxPTR " stack=%d",
-                  r->start_addr, r->end_addr, r->caller(), r->is_stack);
-    RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order");
-      // this must be caused by uncontrolled recursive operations on regions_
-    previous = r->end_addr;
-  }
-  RAW_LOG(INFO, "End of regions list");
-}

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/memory_region_map.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/memory_region_map.h b/third_party/gperftools/src/memory_region_map.h
deleted file mode 100644
index ec388e1..0000000
--- a/third_party/gperftools/src/memory_region_map.h
+++ /dev/null
@@ -1,413 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-/* Copyright (c) 2006, Google Inc.
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Maxim Lifantsev
- */
-
-#ifndef BASE_MEMORY_REGION_MAP_H_
-#define BASE_MEMORY_REGION_MAP_H_
-
-#include <config.h>
-
-#ifdef HAVE_PTHREAD
-#include <pthread.h>
-#endif
-#include <stddef.h>
-#include <set>
-#include "base/stl_allocator.h"
-#include "base/spinlock.h"
-#include "base/thread_annotations.h"
-#include "base/low_level_alloc.h"
-#include "heap-profile-stats.h"
-
-// TODO(maxim): add a unittest:
-//  execute a bunch of mmaps and compare the memory map with what strace logs
-//  execute a bunch of mmap/munmap calls and compare the memory map with our
-//  own accounting of what those mmaps generated
-
-// Thread-safe class to collect and query the map of all memory regions
-// in a process that have been created with mmap, munmap, mremap, sbrk.
-// For each memory region, we keep track of (and provide to users)
-// the stack trace that allocated that memory region.
-// The recorded stack trace depth is bounded by
-// a user-supplied max_stack_depth parameter of Init().
-// After initialization with Init()
-// (which can happen even before global object constructors execute)
-// we collect the map by installing and monitoring MallocHook-s
-// to mmap, munmap, mremap, sbrk.
-// At any time one can query this map via provided interface.
-// For more details on the design of MemoryRegionMap
-// see the comment at the top of our .cc file.
-class MemoryRegionMap {
- private:
-  // Max call stack recording depth supported by Init().  Set it to be
-  // high enough for all our clients.  Note: we do not define storage
-  // for this (doing that requires special handling in windows), so
-  // don't take the address of it!
-  static const int kMaxStackDepth = 32;
-
-  // Size of the hash table of buckets.  A structure of the bucket table is
-  // described in heap-profile-stats.h.
-  static const int kHashTableSize = 179999;
-
- public:
-  // interface ================================================================
-
-  // Every client of MemoryRegionMap must call Init() before first use,
-  // and Shutdown() after last use.  This allows us to reference count
-  // this (singleton) class properly.  MemoryRegionMap assumes it's the
-  // only client of MallocHooks, so a client can only register other
-  // MallocHooks after calling Init() and must unregister them before
-  // calling Shutdown().
-
-  // Initialize this module to record memory allocation stack traces.
-  // Stack traces that have more than "max_stack_depth" frames
-  // are automatically shrunk to "max_stack_depth" when they are recorded.
-  // Init() can be called more than once w/o harm; the largest
-  // max_stack_depth will be the effective one.
-  // When "use_buckets" is true, counts of mmap and munmap sizes will be
-  // recorded with each stack trace.  If Init() is called more than once,
-  // counting will be effective after any call that passed "use_buckets"
-  // as true.
-  // Init() installs the mmap, munmap, mremap, and sbrk hooks
-  // and initializes arena_, our hooks, and the locks, hence one can use
-  // MemoryRegionMap::Lock()/Unlock() to manage the locks.
-  // Uses Lock/Unlock inside.
-  static void Init(int max_stack_depth, bool use_buckets);
-
-  // Try to shutdown this module undoing what Init() did.
-  // Returns true iff could do full shutdown (or it was not attempted).
-  // Full shutdown is attempted when the number of Shutdown() calls equals
-  // the number of Init() calls.
-  static bool Shutdown();
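-
-  // A minimal usage sketch (hypothetical client code, not part of this
-  // header): a client that wants 10 stack frames per region plus
-  // per-stack-trace mmap/munmap counts might do:
-  //
-  //   MemoryRegionMap::Init(10, /* use_buckets */ true);
-  //   ... install its own MallocHooks, do its work, uninstall them ...
-  //   MemoryRegionMap::Shutdown();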
-
-  // Return true if MemoryRegionMap is initialized and recording, i.e. when
-  // the number of Init() calls is greater than the number of Shutdown() calls.
-  static bool IsRecordingLocked();
-
-  // Locks to protect our internal data structures.
-  // These also protect use of arena_ if our Init() has been done.
-  // The lock is recursive.
-  static void Lock() EXCLUSIVE_LOCK_FUNCTION(lock_);
-  static void Unlock() UNLOCK_FUNCTION(lock_);
-
-  // Returns true when the lock is held by this thread (for use in RAW_CHECK-s).
-  static bool LockIsHeld();
-
-  // Locker object that acquires the MemoryRegionMap::Lock
-  // for the duration of its lifetime (a C++ scope).
-  class LockHolder {
-   public:
-    LockHolder() { Lock(); }
-    ~LockHolder() { Unlock(); }
-   private:
-    DISALLOW_COPY_AND_ASSIGN(LockHolder);
-  };
-
-  // A memory region that we know about through malloc_hook-s.
-  // This is essentially an interface through which MemoryRegionMap
-  // exports the collected data to its clients.  Thread-compatible.
-  struct Region {
-    uintptr_t start_addr;  // region start address
-    uintptr_t end_addr;  // region end address
-    int call_stack_depth;  // number of caller stack frames that we saved
-    const void* call_stack[kMaxStackDepth];  // caller address stack array
-                                             // filled to call_stack_depth size
-    bool is_stack;  // does this region contain a thread's stack:
-                    // a user of MemoryRegionMap supplies this info
-
-    // Convenience accessor for call_stack[0],
-    // i.e. (the program counter of) the immediate caller
-    // of this region's allocation function,
-    // but it also returns NULL when call_stack_depth is 0,
-    // i.e. when we weren't able to get the call stack.
-    // This usually happens in recursive calls, when the stack-unwinder
-    // calls mmap() which in turn calls the stack-unwinder.
-    uintptr_t caller() const {
-      return reinterpret_cast<uintptr_t>(call_stack_depth >= 1
-                                         ? call_stack[0] : NULL);
-    }
-
-    // Return true iff this region overlaps region x.
-    bool Overlaps(const Region& x) const {
-      return start_addr < x.end_addr  &&  end_addr > x.start_addr;
-    }
-
-   private:  // helpers for MemoryRegionMap
-    friend class MemoryRegionMap;
-
-    // The ways we create Region-s:
-    void Create(const void* start, size_t size) {
-      start_addr = reinterpret_cast<uintptr_t>(start);
-      end_addr = start_addr + size;
-      is_stack = false;  // not a stack till marked such
-      call_stack_depth = 0;
-      AssertIsConsistent();
-    }
-    void set_call_stack_depth(int depth) {
-      RAW_DCHECK(call_stack_depth == 0, "");  // only one such set is allowed
-      call_stack_depth = depth;
-      AssertIsConsistent();
-    }
-
-    // The ways we modify Region-s:
-    void set_is_stack() { is_stack = true; }
-    void set_start_addr(uintptr_t addr) {
-      start_addr = addr;
-      AssertIsConsistent();
-    }
-    void set_end_addr(uintptr_t addr) {
-      end_addr = addr;
-      AssertIsConsistent();
-    }
-
-    // Verifies that *this contains consistent data, crashes if not the case.
-    void AssertIsConsistent() const {
-      RAW_DCHECK(start_addr < end_addr, "");
-      RAW_DCHECK(call_stack_depth >= 0  &&
-                 call_stack_depth <= kMaxStackDepth, "");
-    }
-
-    // Post-default construction helper to make a Region suitable
-    // for searching in RegionSet regions_.
-    void SetRegionSetKey(uintptr_t addr) {
-      // make sure *this has no usable data:
-      if (DEBUG_MODE) memset(this, 0xFF, sizeof(*this));
-      end_addr = addr;
-    }
-
-    // Note: call_stack[kMaxStackDepth] as a member lets us make Region
-    // a simple self-contained struct with correctly behaving bit-wise copying.
-    // This simplifies the code of this module but wastes some memory:
-    // in the most common use case of this module (leak checking)
-    // only one call_stack element out of kMaxStackDepth is actually needed.
-    // Making the storage for call_stack variable-sized would
-    // substantially complicate memory management for the Region-s,
-    // as they need to be created and manipulated for some time
-    // w/o any memory allocations, yet are also given out to the users.
-  };
-
-  // Find the region that covers addr and write its data into *result if found,
-  // in which case *result gets filled so that it stays fully functional
-  // even when the underlying region gets removed from MemoryRegionMap.
-  // Returns success. Uses Lock/Unlock inside.
-  static bool FindRegion(uintptr_t addr, Region* result);
-
-  // Find the region that contains stack_top, mark that region as
-  // a stack region, and write its data into *result if found,
-  // in which case *result gets filled so that it stays fully functional
-  // even when the underlying region gets removed from MemoryRegionMap.
-  // Returns success. Uses Lock/Unlock inside.
-  static bool FindAndMarkStackRegion(uintptr_t stack_top, Region* result);
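-
-  // For illustration only (hypothetical caller code, with a caller-supplied
-  // uintptr_t addr): looking up which mapping covers an address and
-  // reporting who created it:
-  //
-  //   MemoryRegionMap::Region r;
-  //   if (MemoryRegionMap::FindRegion(addr, &r)) {
-  //     // r is a self-contained copy; r.caller() is the allocation site.
-  //   }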
-
-  // Iterate over the buckets which store mmap and munmap counts per stack
-  // trace.  It calls "callback" for each bucket, and passes "arg" to it.
-  template<class Type>
-  static void IterateBuckets(void (*callback)(const HeapProfileBucket*, Type),
-                             Type arg);
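-
-  // A possible use (illustrative only, assuming buckets were enabled via
-  // Init(..., true)): summing net mmapped bytes over all recorded stack
-  // traces:
-  //
-  //   static void AddNet(const HeapProfileBucket* b, int64* total) {
-  //     *total += b->alloc_size - b->free_size;
-  //   }
-  //   ...
-  //   int64 total = 0;
-  //   MemoryRegionMap::LockHolder holder;  // bucket table is guarded by lock_
-  //   MemoryRegionMap::IterateBuckets(AddNet, &total);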
-
-  // Get the bucket whose caller stack trace is "key".  The stack trace is
-  // used to a depth of "depth" at most.  The requested bucket is created if
-  // needed.
-  // The bucket table is described in heap-profile-stats.h.
-  static HeapProfileBucket* GetBucket(int depth, const void* const key[]);
-
- private:  // our internal types ==============================================
-
-  // Region comparator for sorting with STL
-  struct RegionCmp {
-    bool operator()(const Region& x, const Region& y) const {
-      return x.end_addr < y.end_addr;
-    }
-  };
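-
-  // Ordering by end_addr is what lets lookups find the region covering an
-  // address with a single lower_bound() call.  A sketch of the idea (not
-  // verbatim from the .cc; assumes lock_ is held):
-  //
-  //   Region sample;
-  //   sample.SetRegionSetKey(addr);                  // sets end_addr = addr
-  //   RegionSet::iterator hit = regions_->lower_bound(sample);
-  //   // hit is the first region with end_addr >= addr; it covers addr iff
-  //   // hit != regions_->end() && hit->start_addr <= addr && addr < hit->end_addr.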
-
-  // We allocate STL objects in our own arena.
-  struct MyAllocator {
-    static void *Allocate(size_t n) {
-      return LowLevelAlloc::AllocWithArena(n, arena_);
-    }
-    static void Free(const void *p, size_t /* n */) {
-      LowLevelAlloc::Free(const_cast<void*>(p));
-    }
-  };
-
-  // Set of the memory regions
-  typedef std::set<Region, RegionCmp,
-              STL_Allocator<Region, MyAllocator> > RegionSet;
-
- public:  // more in-depth interface ==========================================
-
-  // STL iterator with values of Region
-  typedef RegionSet::const_iterator RegionIterator;
-
-  // Return the begin/end iterators to all the regions.
-  // These need Lock/Unlock protection around their whole usage (loop).
-  // Even when the same thread causes modifications during such a loop
-  // (which are permitted due to recursive locking)
-  // the loop iterator will still be valid as long as its region
-  // has not been deleted, but EndRegionLocked should be
-  // re-evaluated whenever the set of regions has changed.
-  static RegionIterator BeginRegionLocked();
-  static RegionIterator EndRegionLocked();
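-
-  // A typical scan might look like this (illustrative only):
-  //
-  //   MemoryRegionMap::LockHolder holder;
-  //   for (MemoryRegionMap::RegionIterator it =
-  //            MemoryRegionMap::BeginRegionLocked();
-  //        it != MemoryRegionMap::EndRegionLocked(); ++it) {
-  //     // use it->start_addr, it->end_addr, it->caller(), ...
-  //   }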
-
-  // Return the accumulated sizes of mapped and unmapped regions.
-  static int64 MapSize() { return map_size_; }
-  static int64 UnmapSize() { return unmap_size_; }
-
-  // Effectively private type from our .cc =================================
-  // public to let us declare global objects:
-  union RegionSetRep;
-
- private:
-  // representation ===========================================================
-
-  // Counter of clients of this module that have called Init().
-  static int client_count_;
-
-  // Maximal number of caller stack frames to save (>= 0).
-  static int max_stack_depth_;
-
-  // Arena used for our allocations in regions_.
-  static LowLevelAlloc::Arena* arena_;
-
-  // Set of the mmap/sbrk/mremap-ed memory regions
-  // To be accessed *only* when Lock() is held.
-  // Hence we protect the non-recursive lock used inside of arena_
-  // with our recursive Lock(). This lets a user prevent deadlocks
-  // when threads are stopped by TCMalloc_ListAllProcessThreads at random spots
-  // simply by acquiring our recursive Lock() before that.
-  static RegionSet* regions_;
-
-  // Lock to protect regions_ and buckets_ variables and the data behind.
-  static SpinLock lock_;
-  // Lock to protect the recursive lock itself.
-  static SpinLock owner_lock_;
-
-  // Recursion count for the recursive lock.
-  static int recursion_count_;
-  // The thread id of the thread that's inside the recursive lock.
-  static pthread_t lock_owner_tid_;
-
-  // Total size of all mapped pages so far
-  static int64 map_size_;
-  // Total size of all unmapped pages so far
-  static int64 unmap_size_;
-
-  // Bucket hash table which is described in heap-profile-stats.h.
-  static HeapProfileBucket** bucket_table_ GUARDED_BY(lock_);
-  static int num_buckets_ GUARDED_BY(lock_);
-
-  // The following members are local to MemoryRegionMap::GetBucket()
-  // and MemoryRegionMap::HandleSavedBucketsLocked()
-  // and are file-level to ensure that they are initialized at load time.
-  //
-  // These are used as temporary storage to break the infinite cycle of mmap
-  // calling our hook which (sometimes) causes mmap.  It must be a static
-  // fixed-size array.  The size 20 is just an expected value for safety.
-  // The details are described in memory_region_map.cc.
-
-  // Number of unprocessed bucket inserts.
-  static int saved_buckets_count_ GUARDED_BY(lock_);
-
-  // Unprocessed inserts (must be big enough to hold all mmaps that can be
-  // caused by a GetBucket call).
-  // Bucket has no constructor, so that constructor execution does not
-  // interfere with the any-time use of the static memory behind saved_buckets_.
-  static HeapProfileBucket saved_buckets_[20] GUARDED_BY(lock_);
-
-  static const void* saved_buckets_keys_[20][kMaxStackDepth] GUARDED_BY(lock_);
-
-  // helpers ==================================================================
-
-  // Helper for FindRegion and FindAndMarkStackRegion:
-  // returns the region covering 'addr' or NULL; assumes our lock_ is held.
-  static const Region* DoFindRegionLocked(uintptr_t addr);
-
-  // Verifying wrapper around regions_->insert(region)
-  // To be called to do InsertRegionLocked's work only!
-  inline static void DoInsertRegionLocked(const Region& region);
-  // Handle regions saved by InsertRegionLocked into a tmp static array
-  // by calling insert_func on them.
-  inline static void HandleSavedRegionsLocked(
-                       void (*insert_func)(const Region& region));
-
-  // Restore buckets saved in a tmp static array by GetBucket to the bucket
-  // table where all buckets eventually should be.
-  static void RestoreSavedBucketsLocked();
-
-  // Wrapper around DoInsertRegionLocked
-  // that handles the case of recursive allocator calls.
-  inline static void InsertRegionLocked(const Region& region);
-
-  // Record addition of a memory region at address "start" of size "size"
-  // (called from our mmap/mremap/sbrk hooks).
-  static void RecordRegionAddition(const void* start, size_t size);
-  // Record deletion of a memory region at address "start" of size "size"
-  // (called from our munmap/mremap/sbrk hooks).
-  static void RecordRegionRemoval(const void* start, size_t size);
-
-  // Record deletion of a memory region of size "size" in a bucket whose
-  // caller stack trace is "key".  The stack trace is used to a depth of
-  // "depth" at most.
-  static void RecordRegionRemovalInBucket(int depth,
-                                          const void* const key[],
-                                          size_t size);
-
-  // Hooks for MallocHook
-  static void MmapHook(const void* result,
-                       const void* start, size_t size,
-                       int prot, int flags,
-                       int fd, off_t offset);
-  static void MunmapHook(const void* ptr, size_t size);
-  static void MremapHook(const void* result, const void* old_addr,
-                         size_t old_size, size_t new_size, int flags,
-                         const void* new_addr);
-  static void SbrkHook(const void* result, ptrdiff_t increment);
-
-  // Log all memory regions; useful for debugging only.
-  // Assumes Lock() is held.
-  static void LogAllLocked();
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryRegionMap);
-};
-
-template <class Type>
-void MemoryRegionMap::IterateBuckets(
-    void (*callback)(const HeapProfileBucket*, Type), Type callback_arg) {
-  for (int index = 0; index < kHashTableSize; index++) {
-    for (HeapProfileBucket* bucket = bucket_table_[index];
-         bucket != NULL;
-         bucket = bucket->next) {
-      callback(bucket, callback_arg);
-    }
-  }
-}
-
-#endif  // BASE_MEMORY_REGION_MAP_H_