Posted to commits@quickstep.apache.org by hb...@apache.org on 2017/01/19 20:06:50 UTC
[13/51] [abbrv] [partial] incubator-quickstep git commit: Added shell script to download prerequisite third party libs
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/packed-cache-inl.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/packed-cache-inl.h b/third_party/gperftools/src/packed-cache-inl.h
deleted file mode 100644
index 0946260..0000000
--- a/third_party/gperftools/src/packed-cache-inl.h
+++ /dev/null
@@ -1,239 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Geoff Pike
-//
-// This file provides a minimal cache that can hold a <key, value> pair
-// with little if any wasted space. The types of the key and value
-// must be unsigned integral types or at least have unsigned semantics
-// for >>, casting, and similar operations.
-//
-// Synchronization is not provided. However, the cache is implemented
-// as an array of cache entries whose type is chosen at compile time.
-// If a[i] is atomic on your hardware for the chosen array type then
-// raciness will not necessarily lead to bugginess. The cache entries
-// must be large enough to hold a partial key and a value packed
-// together. The partial keys are bit strings of length
-// kKeybits - kHashbits, and the values are bit strings of length kValuebits.
-//
-// In an effort to use minimal space, every cache entry represents
-// some <key, value> pair; the class provides no way to mark a cache
-// entry as empty or uninitialized. In practice, you may want to have
-// reserved keys or values to get around this limitation. For example, in
-// tcmalloc's PageID-to-sizeclass cache, a value of 0 is used as
-// "unknown sizeclass."
-//
-// Usage Considerations
-// --------------------
-//
-// kHashbits controls the size of the cache. The best value for
-// kHashbits will of course depend on the application. Perhaps try
-// tuning the value of kHashbits by measuring different values on your
-// favorite benchmark. Also remember not to be a pig; other
-// programs that need resources may suffer if you are.
-//
-// The main uses for this class will be when performance is
-// critical and there's a convenient type to hold the cache's
-// entries. As described above, the number of bits required
-// for a cache entry is (kKeybits - kHashbits) + kValuebits. Suppose
-// kKeybits + kValuebits is 43. Then it probably makes sense to
-// choose kHashbits >= 11 so that cache entries fit in a uint32.
-//
-// On the other hand, suppose kKeybits = kValuebits = 64. Then
-// using this class may be less worthwhile. You'll probably
-// be using 128 bits for each entry anyway, so maybe just pick
-// a hash function, H, and use an array indexed by H(key):
-// void Put(K key, V value) { a_[H(key)] = pair<K, V>(key, value); }
-// V GetOrDefault(K key, V default_value) { const pair<K, V> &p = a_[H(key)]; ... }
-// etc.
-//
-// Further Details
-// ---------------
-//
-// For caches used only by one thread, the following is true:
-// 1. For a cache c,
-// (c.Put(key, value), c.GetOrDefault(key, 0)) == value
-// and
-// (c.Put(key, value), <...>, c.GetOrDefault(key, 0)) == value
-// if the elided code contains no c.Put calls.
-//
-// 2. Has(key) will return false if no <key, value> pair with that key
-// has ever been Put. However, a newly initialized cache will have
-// some <key, value> pairs already present. When you create a new
-// cache, you must specify an "initial value." The initialization
-// procedure is equivalent to Clear(initial_value), which is
-// equivalent to Put(k, initial_value) for all keys k from 0 to
-// 2^kHashbits - 1.
-//
-// 3. If key and key' differ then the only way Put(key, value) may
-// cause Has(key') to change is that Has(key') may change from true to
-// false. Furthermore, a Put() call that doesn't change Has(key')
-// doesn't change GetOrDefault(key', ...) either.
-//
-// Implementation details:
-//
-// This is a direct-mapped cache with 2^kHashbits entries; the hash
-// function simply takes the low bits of the key. We store whole keys
-// if a whole key plus a whole value fits in an entry. Otherwise, an
-// entry is the high bits of a key and a value, packed together.
-// E.g., a 20-bit key and a 7-bit value only require a uint16 for each
-// entry if kHashbits >= 11.
-//
-// Alternatives to this scheme will be added as needed.
-
-#ifndef TCMALLOC_PACKED_CACHE_INL_H_
-#define TCMALLOC_PACKED_CACHE_INL_H_
-
-#include "config.h"
-#include <stddef.h> // for size_t
-#ifdef HAVE_STDINT_H
-#include <stdint.h> // for uintptr_t
-#endif
-#include "base/basictypes.h"
-#include "internal_logging.h"
-
-// A safe way of doing "(1 << n) - 1" -- without worrying about overflow
-// Note this will all be resolved to a constant expression at compile-time
-#define N_ONES_(IntType, N) \
- ( (N) == 0 ? 0 : ((static_cast<IntType>(1) << ((N)-1))-1 + \
- (static_cast<IntType>(1) << ((N)-1))) )
-
-// The types K and V provide upper bounds on the number of valid keys
-// and values, but we explicitly require the keys to be less than
-// 2^kKeybits and the values to be less than 2^kValuebits. The size of
-// the table is controlled by kHashbits, and the type of each entry in
-// the cache is T. See also the big comment at the top of the file.
-template <int kKeybits, typename T>
-class PackedCache {
- public:
- typedef uintptr_t K;
- typedef size_t V;
-#ifdef TCMALLOC_SMALL_BUT_SLOW
- // Use a smaller sizeclass cache when running in small-memory mode.
- static const int kHashbits = 12;
-#else
- static const int kHashbits = 16;
-#endif
- static const int kValuebits = 7;
- static const bool kUseWholeKeys = kKeybits + kValuebits <= 8 * sizeof(T);
-
- explicit PackedCache(V initial_value) {
- COMPILE_ASSERT(kKeybits <= sizeof(K) * 8, key_size);
- COMPILE_ASSERT(kValuebits <= sizeof(V) * 8, value_size);
- COMPILE_ASSERT(kHashbits <= kKeybits, hash_function);
- COMPILE_ASSERT(kKeybits - kHashbits + kValuebits <= kTbits,
- entry_size_must_be_big_enough);
- Clear(initial_value);
- }
-
- void Put(K key, V value) {
- ASSERT(key == (key & kKeyMask));
- ASSERT(value == (value & kValueMask));
- array_[Hash(key)] = KeyToUpper(key) | value;
- }
-
- bool Has(K key) const {
- ASSERT(key == (key & kKeyMask));
- return KeyMatch(array_[Hash(key)], key);
- }
-
- V GetOrDefault(K key, V default_value) const {
- // As with other code in this class, we touch array_ as few times
- // as we can. Assuming entries are read atomically (e.g., their
- // type is uintptr_t on most hardware) then certain races are
- // harmless.
- ASSERT(key == (key & kKeyMask));
- T entry = array_[Hash(key)];
- return KeyMatch(entry, key) ? EntryToValue(entry) : default_value;
- }
-
- void Clear(V value) {
- ASSERT(value == (value & kValueMask));
- for (int i = 0; i < 1 << kHashbits; i++) {
- ASSERT(kUseWholeKeys || KeyToUpper(i) == 0);
- array_[i] = kUseWholeKeys ? (value | KeyToUpper(i)) : value;
- }
- }
-
- private:
- // We are going to pack a value and the upper part of a key (or a
- // whole key) into an entry of type T. The UPPER type is for the
- // upper part of a key, after the key has been masked and shifted
- // for inclusion in an entry.
- typedef T UPPER;
-
- static V EntryToValue(T t) { return t & kValueMask; }
-
- // If we have space for a whole key, we just shift it left.
- // Otherwise kHashbits determines where in a K to find the upper
- // part of the key, and kValuebits determines where in the entry to
- // put it.
- static UPPER KeyToUpper(K k) {
- if (kUseWholeKeys) {
- return static_cast<T>(k) << kValuebits;
- } else {
- const int shift = kHashbits - kValuebits;
- // Assume kHashbits >= kValuebits. It'd be easy to lift this assumption.
- return static_cast<T>(k >> shift) & kUpperMask;
- }
- }
-
- static size_t Hash(K key) {
- return static_cast<size_t>(key) & N_ONES_(size_t, kHashbits);
- }
-
- // Does the entry match the relevant part of the given key?
- static bool KeyMatch(T entry, K key) {
- return kUseWholeKeys ?
- (entry >> kValuebits == key) :
- ((KeyToUpper(key) ^ entry) & kUpperMask) == 0;
- }
-
- static const int kTbits = 8 * sizeof(T);
- static const int kUpperbits = kUseWholeKeys ? kKeybits : kKeybits - kHashbits;
-
- // For masking a K.
- static const K kKeyMask = N_ONES_(K, kKeybits);
-
- // For masking a T.
- static const T kUpperMask = N_ONES_(T, kUpperbits) << kValuebits;
-
- // For masking a V or a T.
- static const V kValueMask = N_ONES_(V, kValuebits);
-
- // array_ is the cache. Its elements are volatile because any
- // thread can write any array element at any time.
- volatile T array_[1 << kHashbits];
-};
-
-#undef N_ONES_
-
-#endif // TCMALLOC_PACKED_CACHE_INL_H_
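
A note for readers skimming this removed file: the packing scheme described in its header comment is small enough to demonstrate standalone. The sketch below is not gperftools code; it assumes a 20-bit key, a 7-bit value, and kHashbits = 12 (so each entry fits a uint16_t), and the name TinyPackedCache is purely illustrative.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Direct-mapped cache of 2^kHashbits entries; each entry packs the
    // upper (kKeybits - kHashbits) key bits together with a kValuebits-wide
    // value, exactly as the header comment above describes.
    class TinyPackedCache {
     public:
      static const int kKeybits = 20, kValuebits = 7, kHashbits = 12;
      typedef uint16_t Entry;  // (20 - 12) + 7 = 15 bits, fits in 16

      explicit TinyPackedCache(uint32_t initial_value) { Clear(initial_value); }

      void Put(uint32_t key, uint32_t value) {
        assert(key < (1u << kKeybits) && value < (1u << kValuebits));
        array_[Hash(key)] = static_cast<Entry>((Upper(key) << kValuebits) | value);
      }

      uint32_t GetOrDefault(uint32_t key, uint32_t default_value) const {
        const Entry e = array_[Hash(key)];
        return (static_cast<uint32_t>(e) >> kValuebits) == Upper(key)
                   ? (e & ((1u << kValuebits) - 1))
                   : default_value;
      }

      // Equivalent to Put(k, value) for every k in [0, 2^kHashbits): every
      // slot always holds some <key, value> pair; there is no "empty" state.
      void Clear(uint32_t value) {
        for (uint32_t k = 0; k < (1u << kHashbits); ++k) Put(k, value);
      }

     private:
      static uint32_t Hash(uint32_t key) { return key & ((1u << kHashbits) - 1); }
      static uint32_t Upper(uint32_t key) { return key >> kHashbits; }

      Entry array_[1 << kHashbits];
    };

    int main() {
      TinyPackedCache cache(0);  // 0 plays the "unknown sizeclass" role
      cache.Put(0x12345, 42);
      std::printf("%u\n", cache.GetOrDefault(0x12345, 0));  // 42 (hit)
      std::printf("%u\n", cache.GetOrDefault(0x54321, 0));  // 0  (miss -> default)
      return 0;
    }

Note how the miss works: 0x54321 hashes to a slot whose stored upper bits do not match 0x54321's upper bits, so the lookup falls back to the default, matching guarantee 3 in the header comment.
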
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/page_heap.cc
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/page_heap.cc b/third_party/gperftools/src/page_heap.cc
deleted file mode 100644
index a60df4a..0000000
--- a/third_party/gperftools/src/page_heap.cc
+++ /dev/null
@@ -1,675 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <op...@google.com>
-
-#include <config.h>
-#ifdef HAVE_INTTYPES_H
-#include <inttypes.h> // for PRIuPTR
-#endif
-#include <gperftools/malloc_extension.h> // for MallocRange, etc
-#include "base/basictypes.h"
-#include "base/commandlineflags.h"
-#include "internal_logging.h" // for ASSERT, TCMalloc_Printer, etc
-#include "page_heap_allocator.h" // for PageHeapAllocator
-#include "static_vars.h" // for Static
-#include "system-alloc.h" // for TCMalloc_SystemAlloc, etc
-
-DEFINE_double(tcmalloc_release_rate,
- EnvToDouble("TCMALLOC_RELEASE_RATE", 1.0),
- "Rate at which we release unused memory to the system. "
- "Zero means we never release memory back to the system. "
- "Increase this flag to return memory faster; decrease it "
- "to return memory slower. Reasonable rates are in the "
- "range [0,10]");
-
-DEFINE_int64(tcmalloc_heap_limit_mb,
- EnvToInt("TCMALLOC_HEAP_LIMIT_MB", 0),
- "Limit total size of the process heap to the "
- "specified number of MiB. "
- "When we approach the limit the memory is released "
- "to the system more aggressively (more minor page faults). "
- "Zero means to allocate as long as system allows.");
-
-namespace tcmalloc {
-
-PageHeap::PageHeap()
- : pagemap_(MetaDataAlloc),
- pagemap_cache_(0),
- scavenge_counter_(0),
- // Start scavenging at kMaxPages list
- release_index_(kMaxPages),
- aggressive_decommit_(false) {
- COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
- DLL_Init(&large_.normal);
- DLL_Init(&large_.returned);
- for (int i = 0; i < kMaxPages; i++) {
- DLL_Init(&free_[i].normal);
- DLL_Init(&free_[i].returned);
- }
-}
-
-Span* PageHeap::SearchFreeAndLargeLists(Length n) {
- ASSERT(Check());
- ASSERT(n > 0);
-
- // Find first size >= n that has a non-empty list
- for (Length s = n; s < kMaxPages; s++) {
- Span* ll = &free_[s].normal;
- // If we're lucky, ll is non-empty, meaning it has a suitable span.
- if (!DLL_IsEmpty(ll)) {
- ASSERT(ll->next->location == Span::ON_NORMAL_FREELIST);
- return Carve(ll->next, n);
- }
- // Alternatively, maybe there's a usable returned span.
- ll = &free_[s].returned;
- if (!DLL_IsEmpty(ll)) {
- // We did not call EnsureLimit before, to avoid releasing a span
- // that would immediately be taken back.
- // Calling EnsureLimit here is not very expensive, as it fails only if
- // there are no more normal spans (and it fails efficiently)
- // or SystemRelease does not work (there are probably no returned spans).
- if (EnsureLimit(n)) {
- // ll may have become empty due to coalescing
- if (!DLL_IsEmpty(ll)) {
- ASSERT(ll->next->location == Span::ON_RETURNED_FREELIST);
- return Carve(ll->next, n);
- }
- }
- }
- }
- // No luck in free lists, our last chance is in a larger class.
- return AllocLarge(n); // May be NULL
-}
-
-static const size_t kForcedCoalesceInterval = 128*1024*1024;
-
-Span* PageHeap::New(Length n) {
- ASSERT(Check());
- ASSERT(n > 0);
-
- Span* result = SearchFreeAndLargeLists(n);
- if (result != NULL)
- return result;
-
- if (stats_.free_bytes != 0 && stats_.unmapped_bytes != 0
- && stats_.free_bytes + stats_.unmapped_bytes >= stats_.system_bytes / 4
- && (stats_.system_bytes / kForcedCoalesceInterval
- != (stats_.system_bytes + (n << kPageShift)) / kForcedCoalesceInterval)) {
- // We're about to grow the heap, but there are lots of free pages.
- // tcmalloc's design decision to keep unmapped and free spans
- // separate and never coalesce them means that sometimes there
- // is a free run of pages of sufficient size, but it consists of
- // "segments" of different types, so the page-heap search cannot
- // find it. To avoid growing the heap and wasting memory in that
- // case, we unmap all free pages, so that all free spans become
- // maximally coalesced.
- //
- // We also limit the rate of taking this path to at most once per
- // 128 MiB of heap growth. Otherwise, programs that grow the heap
- // frequently (and therefore by small amounts) could be penalized
- // with a higher count of minor page faults.
- //
- // See also large_heap_fragmentation_unittest.cc and
- // https://code.google.com/p/gperftools/issues/detail?id=368
- ReleaseAtLeastNPages(static_cast<Length>(0x7fffffff));
-
- // Then try again. If we are forced to grow the heap because of
- // large-span fragmentation and not because of the problem described
- // above, then at the very least we've just unmapped the free but
- // insufficiently large spans back to the OS. So in the case of really
- // unlucky memory fragmentation we'll be consuming virtual address
- // space, but not real memory.
- result = SearchFreeAndLargeLists(n);
- if (result != NULL) return result;
- }
-
- // Grow the heap and try again.
- if (!GrowHeap(n)) {
- ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
- ASSERT(Check());
- return NULL;
- }
- return SearchFreeAndLargeLists(n);
-}
-
-Span* PageHeap::AllocLarge(Length n) {
- // Find the best span (closest to n in size).
- // The following loops implement address-ordered best-fit.
- Span *best = NULL;
-
- // Search through normal list
- for (Span* span = large_.normal.next;
- span != &large_.normal;
- span = span->next) {
- if (span->length >= n) {
- if ((best == NULL)
- || (span->length < best->length)
- || ((span->length == best->length) && (span->start < best->start))) {
- best = span;
- ASSERT(best->location == Span::ON_NORMAL_FREELIST);
- }
- }
- }
-
- Span *bestNormal = best;
-
- // Search through released list in case it has a better fit
- for (Span* span = large_.returned.next;
- span != &large_.returned;
- span = span->next) {
- if (span->length >= n) {
- if ((best == NULL)
- || (span->length < best->length)
- || ((span->length == best->length) && (span->start < best->start))) {
- best = span;
- ASSERT(best->location == Span::ON_RETURNED_FREELIST);
- }
- }
- }
-
- if (best == bestNormal) {
- return best == NULL ? NULL : Carve(best, n);
- }
-
- // best comes from returned list.
-
- if (EnsureLimit(n, false)) {
- return Carve(best, n);
- }
-
- if (EnsureLimit(n, true)) {
- // best could have been destroyed by coalescing.
- // bestNormal is not a best-fit, and it could be destroyed as well.
- // We retry, the limit is already ensured:
- return AllocLarge(n);
- }
-
- // If bestNormal existed, EnsureLimit would have succeeded:
- ASSERT(bestNormal == NULL);
- // We are not allowed to take best from returned list.
- return NULL;
-}
-
-Span* PageHeap::Split(Span* span, Length n) {
- ASSERT(0 < n);
- ASSERT(n < span->length);
- ASSERT(span->location == Span::IN_USE);
- ASSERT(span->sizeclass == 0);
- Event(span, 'T', n);
-
- const int extra = span->length - n;
- Span* leftover = NewSpan(span->start + n, extra);
- ASSERT(leftover->location == Span::IN_USE);
- Event(leftover, 'U', extra);
- RecordSpan(leftover);
- pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
- span->length = n;
-
- return leftover;
-}
-
-void PageHeap::CommitSpan(Span* span) {
- TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
- static_cast<size_t>(span->length << kPageShift));
- stats_.committed_bytes += span->length << kPageShift;
-}
-
-bool PageHeap::DecommitSpan(Span* span) {
- bool rv = TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
- static_cast<size_t>(span->length << kPageShift));
- if (rv) {
- stats_.committed_bytes -= span->length << kPageShift;
- }
-
- return rv;
-}
-
-Span* PageHeap::Carve(Span* span, Length n) {
- ASSERT(n > 0);
- ASSERT(span->location != Span::IN_USE);
- const int old_location = span->location;
- RemoveFromFreeList(span);
- span->location = Span::IN_USE;
- Event(span, 'A', n);
-
- const int extra = span->length - n;
- ASSERT(extra >= 0);
- if (extra > 0) {
- Span* leftover = NewSpan(span->start + n, extra);
- leftover->location = old_location;
- Event(leftover, 'S', extra);
- RecordSpan(leftover);
-
- // The previous span of |leftover| was just split -- no need to
- // coalesce them. The next span of |leftover| was not previously
- // coalesced with |span|, i.e. it is NULL or has a location other
- // than |old_location|.
-#ifndef NDEBUG
- const PageID p = leftover->start;
- const Length len = leftover->length;
- Span* next = GetDescriptor(p+len);
- ASSERT (next == NULL ||
- next->location == Span::IN_USE ||
- next->location != leftover->location);
-#endif
-
- PrependToFreeList(leftover); // Skip coalescing - no candidates possible
- span->length = n;
- pagemap_.set(span->start + n - 1, span);
- }
- ASSERT(Check());
- if (old_location == Span::ON_RETURNED_FREELIST) {
- // We need to recommit this address space.
- CommitSpan(span);
- }
- ASSERT(span->location == Span::IN_USE);
- ASSERT(span->length == n);
- ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
- return span;
-}
-
-void PageHeap::Delete(Span* span) {
- ASSERT(Check());
- ASSERT(span->location == Span::IN_USE);
- ASSERT(span->length > 0);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start + span->length - 1) == span);
- const Length n = span->length;
- span->sizeclass = 0;
- span->sample = 0;
- span->location = Span::ON_NORMAL_FREELIST;
- Event(span, 'D', span->length);
- MergeIntoFreeList(span); // Coalesces if possible
- IncrementalScavenge(n);
- ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
- ASSERT(Check());
-}
-
-bool PageHeap::MayMergeSpans(Span *span, Span *other) {
- if (aggressive_decommit_) {
- return other->location != Span::IN_USE;
- }
- return span->location == other->location;
-}
-
-void PageHeap::MergeIntoFreeList(Span* span) {
- ASSERT(span->location != Span::IN_USE);
-
- // Coalesce -- we guarantee that "p" != 0, so no bounds checking
- // necessary. We do not bother resetting the stale pagemap
- // entries for the pieces we are merging together because we only
- // care about the pagemap entries for the boundaries.
- //
- // Note: depending on aggressive_decommit_ mode we allow only
- // similar spans to be coalesced.
- //
- // The following applies if aggressive_decommit_ is enabled:
- //
- // Note that the adjacent spans we merge into "span" may come out of a
- // "normal" (committed) list, and cleanly merge with our IN_USE span, which
- // is implicitly committed. If the adjacent spans are on the "returned"
- // (decommitted) list, then we must get both spans into the same state before
- // or after we coalesce them. The current code always decommits. This is
- // achieved by blindly decommitting the entire coalesced region, which may
- // include any combination of committed and decommitted spans, at the end of
- // the method.
-
- // TODO(jar): "Always decommit" causes some extra calls to commit when we are
- // called in GrowHeap() during an allocation :-/. We need to eval the cost of
- // that oscillation, and possibly do something to reduce it.
-
- // TODO(jar): We need a better strategy for deciding to commit, or decommit,
- // based on memory usage and free heap sizes.
-
- uint64_t temp_committed = 0;
-
- const PageID p = span->start;
- const Length n = span->length;
- Span* prev = GetDescriptor(p-1);
- if (prev != NULL && MayMergeSpans(span, prev)) {
- // Merge preceding span into this span
- ASSERT(prev->start + prev->length == p);
- const Length len = prev->length;
- if (aggressive_decommit_ && prev->location == Span::ON_RETURNED_FREELIST) {
- // We're about to put the merged span into the returned freelist and call
- // DecommitSpan() on it, which will mark the entire span including this
- // one as released and decrease stats_.committed_bytes by the size of the
- // merged span. To make the math work out we temporarily increase the
- // stats_.committed_bytes amount.
- temp_committed = prev->length << kPageShift;
- }
- RemoveFromFreeList(prev);
- DeleteSpan(prev);
- span->start -= len;
- span->length += len;
- pagemap_.set(span->start, span);
- Event(span, 'L', len);
- }
- Span* next = GetDescriptor(p+n);
- if (next != NULL && MayMergeSpans(span, next)) {
- // Merge next span into this span
- ASSERT(next->start == p+n);
- const Length len = next->length;
- if (aggressive_decommit_ && next->location == Span::ON_RETURNED_FREELIST) {
- // See the comment above ('if (prev->location ...') for explanation.
- temp_committed += next->length << kPageShift;
- }
- RemoveFromFreeList(next);
- DeleteSpan(next);
- span->length += len;
- pagemap_.set(span->start + span->length - 1, span);
- Event(span, 'R', len);
- }
-
- if (aggressive_decommit_) {
- if (DecommitSpan(span)) {
- span->location = Span::ON_RETURNED_FREELIST;
- stats_.committed_bytes += temp_committed;
- } else {
- ASSERT(temp_committed == 0);
- }
- }
- PrependToFreeList(span);
-}
-
-void PageHeap::PrependToFreeList(Span* span) {
- ASSERT(span->location != Span::IN_USE);
- SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_;
- if (span->location == Span::ON_NORMAL_FREELIST) {
- stats_.free_bytes += (span->length << kPageShift);
- DLL_Prepend(&list->normal, span);
- } else {
- stats_.unmapped_bytes += (span->length << kPageShift);
- DLL_Prepend(&list->returned, span);
- }
-}
-
-void PageHeap::RemoveFromFreeList(Span* span) {
- ASSERT(span->location != Span::IN_USE);
- if (span->location == Span::ON_NORMAL_FREELIST) {
- stats_.free_bytes -= (span->length << kPageShift);
- } else {
- stats_.unmapped_bytes -= (span->length << kPageShift);
- }
- DLL_Remove(span);
-}
-
-void PageHeap::IncrementalScavenge(Length n) {
- // Fast path; not yet time to release memory
- scavenge_counter_ -= n;
- if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
-
- const double rate = FLAGS_tcmalloc_release_rate;
- if (rate <= 1e-6) {
- // Tiny release rate means that releasing is disabled.
- scavenge_counter_ = kDefaultReleaseDelay;
- return;
- }
-
- Length released_pages = ReleaseAtLeastNPages(1);
-
- if (released_pages == 0) {
- // Nothing to scavenge, delay for a while.
- scavenge_counter_ = kDefaultReleaseDelay;
- } else {
- // Compute how long to wait until we return memory.
- // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
- // after releasing one page.
- const double mult = 1000.0 / rate;
- double wait = mult * static_cast<double>(released_pages);
- if (wait > kMaxReleaseDelay) {
- // Avoid overflow and bound to reasonable range.
- wait = kMaxReleaseDelay;
- }
- scavenge_counter_ = static_cast<int64_t>(wait);
- }
-}
-
-Length PageHeap::ReleaseLastNormalSpan(SpanList* slist) {
- Span* s = slist->normal.prev;
- ASSERT(s->location == Span::ON_NORMAL_FREELIST);
-
- if (DecommitSpan(s)) {
- RemoveFromFreeList(s);
- const Length n = s->length;
- s->location = Span::ON_RETURNED_FREELIST;
- MergeIntoFreeList(s); // Coalesces if possible.
- return n;
- }
-
- return 0;
-}
-
-Length PageHeap::ReleaseAtLeastNPages(Length num_pages) {
- Length released_pages = 0;
-
- // Round robin through the lists of free spans, releasing the last
- // span in each list. Stop after releasing at least num_pages
- // or when there is nothing more to release.
- while (released_pages < num_pages && stats_.free_bytes > 0) {
- for (int i = 0; i < kMaxPages+1 && released_pages < num_pages;
- i++, release_index_++) {
- if (release_index_ > kMaxPages) release_index_ = 0;
- SpanList* slist = (release_index_ == kMaxPages) ?
- &large_ : &free_[release_index_];
- if (!DLL_IsEmpty(&slist->normal)) {
- Length released_len = ReleaseLastNormalSpan(slist);
- // Some systems do not support release
- if (released_len == 0) return released_pages;
- released_pages += released_len;
- }
- }
- }
- return released_pages;
-}
-
-bool PageHeap::EnsureLimit(Length n, bool withRelease)
-{
- Length limit = (FLAGS_tcmalloc_heap_limit_mb*1024*1024) >> kPageShift;
- if (limit == 0) return true; // there is no limit
-
- // We do not use stats_.system_bytes because it does not take
- // MetaDataAllocs into account.
- Length takenPages = TCMalloc_SystemTaken >> kPageShift;
- // XXX: takenPages may be slightly bigger than the limit for two reasons:
- // * MetaDataAllocs ignore the limit (it is not easy to handle
- //   out of memory there)
- // * sys_alloc may round the allocation up to the huge page size,
- //   although a smaller limit was ensured
-
- ASSERT(takenPages >= stats_.unmapped_bytes >> kPageShift);
- takenPages -= stats_.unmapped_bytes >> kPageShift;
-
- if (takenPages + n > limit && withRelease) {
- takenPages -= ReleaseAtLeastNPages(takenPages + n - limit);
- }
-
- return takenPages + n <= limit;
-}
-
-void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
- // Associate span object with all interior pages as well
- ASSERT(span->location == Span::IN_USE);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start+span->length-1) == span);
- Event(span, 'C', sc);
- span->sizeclass = sc;
- for (Length i = 1; i < span->length-1; i++) {
- pagemap_.set(span->start+i, span);
- }
-}
-
-void PageHeap::GetSmallSpanStats(SmallSpanStats* result) {
- for (int s = 0; s < kMaxPages; s++) {
- result->normal_length[s] = DLL_Length(&free_[s].normal);
- result->returned_length[s] = DLL_Length(&free_[s].returned);
- }
-}
-
-void PageHeap::GetLargeSpanStats(LargeSpanStats* result) {
- result->spans = 0;
- result->normal_pages = 0;
- result->returned_pages = 0;
- for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
- result->normal_pages += s->length;
- result->spans++;
- }
- for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
- result->returned_pages += s->length;
- result->spans++;
- }
-}
-
-bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
- Span* span = reinterpret_cast<Span*>(pagemap_.Next(start));
- if (span == NULL) {
- return false;
- }
- r->address = span->start << kPageShift;
- r->length = span->length << kPageShift;
- r->fraction = 0;
- switch (span->location) {
- case Span::IN_USE:
- r->type = base::MallocRange::INUSE;
- r->fraction = 1;
- if (span->sizeclass > 0) {
- // Only some of the objects in this span may be in use.
- const size_t osize = Static::sizemap()->class_to_size(span->sizeclass);
- r->fraction = (1.0 * osize * span->refcount) / r->length;
- }
- break;
- case Span::ON_NORMAL_FREELIST:
- r->type = base::MallocRange::FREE;
- break;
- case Span::ON_RETURNED_FREELIST:
- r->type = base::MallocRange::UNMAPPED;
- break;
- default:
- r->type = base::MallocRange::UNKNOWN;
- break;
- }
- return true;
-}
-
-static void RecordGrowth(size_t growth) {
- StackTrace* t = Static::stacktrace_allocator()->New();
- t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
- t->size = growth;
- t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks());
- Static::set_growth_stacks(t);
-}
-
-bool PageHeap::GrowHeap(Length n) {
- ASSERT(kMaxPages >= kMinSystemAlloc);
- if (n > kMaxValidPages) return false;
- Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
- size_t actual_size;
- void* ptr = NULL;
- if (EnsureLimit(ask)) {
- ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
- }
- if (ptr == NULL) {
- if (n < ask) {
- // Try growing just "n" pages
- ask = n;
- if (EnsureLimit(ask)) {
- ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
- }
- }
- if (ptr == NULL) return false;
- }
- ask = actual_size >> kPageShift;
- RecordGrowth(ask << kPageShift);
-
- uint64_t old_system_bytes = stats_.system_bytes;
- stats_.system_bytes += (ask << kPageShift);
- stats_.committed_bytes += (ask << kPageShift);
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- ASSERT(p > 0);
-
- // If we already have a lot of pages allocated, just preallocate a bunch of
- // memory for the page map. This prevents fragmentation by pagemap metadata
- // when a program keeps allocating and freeing large blocks.
-
- if (old_system_bytes < kPageMapBigAllocationThreshold
- && stats_.system_bytes >= kPageMapBigAllocationThreshold) {
- pagemap_.PreallocateMoreMemory();
- }
-
- // Make sure pagemap_ has entries for all of the new pages.
- // Plus ensure one before and one after so coalescing code
- // does not need bounds-checking.
- if (pagemap_.Ensure(p-1, ask+2)) {
- // Pretend the new area is allocated and then Delete() it to cause
- // any necessary coalescing to occur.
- Span* span = NewSpan(p, ask);
- RecordSpan(span);
- Delete(span);
- ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
- ASSERT(Check());
- return true;
- } else {
- // We could not allocate memory within "pagemap_"
- // TODO: Once we can return memory to the system, return the new span
- return false;
- }
-}
-
-bool PageHeap::Check() {
- ASSERT(free_[0].normal.next == &free_[0].normal);
- ASSERT(free_[0].returned.next == &free_[0].returned);
- return true;
-}
-
-bool PageHeap::CheckExpensive() {
- bool result = Check();
- CheckList(&large_.normal, kMaxPages, 1000000000, Span::ON_NORMAL_FREELIST);
- CheckList(&large_.returned, kMaxPages, 1000000000, Span::ON_RETURNED_FREELIST);
- for (Length s = 1; s < kMaxPages; s++) {
- CheckList(&free_[s].normal, s, s, Span::ON_NORMAL_FREELIST);
- CheckList(&free_[s].returned, s, s, Span::ON_RETURNED_FREELIST);
- }
- return result;
-}
-
-bool PageHeap::CheckList(Span* list, Length min_pages, Length max_pages,
- int freelist) {
- for (Span* s = list->next; s != list; s = s->next) {
- CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED
- CHECK_CONDITION(s->length >= min_pages);
- CHECK_CONDITION(s->length <= max_pages);
- CHECK_CONDITION(GetDescriptor(s->start) == s);
- CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
- }
- return true;
-}
-
-} // namespace tcmalloc
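
An aside on the arithmetic in IncrementalScavenge above: with the default release rate of 1.0, releasing one page schedules the next release roughly 1000 freed pages later. The snippet below recomputes that delay standalone; it is a sketch mirroring the code above, and ScavengeDelay is a hypothetical name, not a gperftools function.

    #include <cstdint>
    #include <cstdio>

    // Recomputes the delay set by IncrementalScavenge after a release.
    // The constants mirror kMaxReleaseDelay and kDefaultReleaseDelay
    // in page_heap.h.
    int64_t ScavengeDelay(double rate, int64_t released_pages) {
      const int64_t kMaxReleaseDelay = 1 << 20;
      const int64_t kDefaultReleaseDelay = 1 << 18;
      if (rate <= 1e-6) return kDefaultReleaseDelay;  // releasing disabled
      double wait = (1000.0 / rate) * static_cast<double>(released_pages);
      if (wait > kMaxReleaseDelay) wait = kMaxReleaseDelay;  // clamp
      return static_cast<int64_t>(wait);
    }

    int main() {
      // rate = 1.0 (the default): wait 1000 freed pages per released page.
      std::printf("%lld\n", (long long)ScavengeDelay(1.0, 1));   // prints 1000
      // rate = 10.0: ten times more eager, wait only 100 freed pages.
      std::printf("%lld\n", (long long)ScavengeDelay(10.0, 1));  // prints 100
      return 0;
    }
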
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/page_heap.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/page_heap.h b/third_party/gperftools/src/page_heap.h
deleted file mode 100644
index 18abed1..0000000
--- a/third_party/gperftools/src/page_heap.h
+++ /dev/null
@@ -1,316 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <op...@google.com>
-
-#ifndef TCMALLOC_PAGE_HEAP_H_
-#define TCMALLOC_PAGE_HEAP_H_
-
-#include <config.h>
-#include <stddef.h> // for size_t
-#ifdef HAVE_STDINT_H
-#include <stdint.h> // for uint64_t, int64_t, uint16_t
-#endif
-#include <gperftools/malloc_extension.h>
-#include "base/basictypes.h"
-#include "common.h"
-#include "packed-cache-inl.h"
-#include "pagemap.h"
-#include "span.h"
-
-// We need to dllexport PageHeap just for the unittest. MSVC complains
-// that we don't dllexport the PageHeap members, but we don't need to
-// test those, so I just suppress this warning.
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable:4251)
-#endif
-
-// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
-// you're porting to a system where you really can't get a stacktrace.
-// Because we control the definition of GetStackTrace, all clients of
-// GetStackTrace should #include us rather than stacktrace.h.
-#ifdef NO_TCMALLOC_SAMPLES
- // We use #define so code compiles even if you #include stacktrace.h somehow.
-# define GetStackTrace(stack, depth, skip) (0)
-#else
-# include <gperftools/stacktrace.h>
-#endif
-
-namespace base {
-struct MallocRange;
-}
-
-namespace tcmalloc {
-
-// -------------------------------------------------------------------------
-// Map from page-id to per-page data
-// -------------------------------------------------------------------------
-
-// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
-// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
-// because sometimes the sizeclass is all the information we need.
-
-// Selector class -- general selector uses 3-level map
-template <int BITS> class MapSelector {
- public:
- typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
- typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
-};
-
-// A two-level map for 32-bit machines
-template <> class MapSelector<32> {
- public:
- typedef TCMalloc_PageMap2<32-kPageShift> Type;
- typedef PackedCache<32-kPageShift, uint16_t> CacheType;
-};
-
-// -------------------------------------------------------------------------
-// Page-level allocator
-// * Eager coalescing
-//
-// Heap for page-level allocation. We allow allocating and freeing
-// contiguous runs of pages (each called a "span").
-// -------------------------------------------------------------------------
-
-class PERFTOOLS_DLL_DECL PageHeap {
- public:
- PageHeap();
-
- // Allocate a run of "n" pages. Returns zero if out of memory.
- // Caller should not pass "n == 0" -- instead, n should have
- // been rounded up already.
- Span* New(Length n);
-
- // Delete the span "[p, p+n-1]".
- // REQUIRES: span was returned by earlier call to New() and
- // has not yet been deleted.
- void Delete(Span* span);
-
- // Mark an allocated span as being used for small objects of the
- // specified size-class.
- // REQUIRES: span was returned by an earlier call to New()
- // and has not yet been deleted.
- void RegisterSizeClass(Span* span, size_t sc);
-
- // Split an allocated span into two spans: one of length "n" pages
- // followed by another span of length "span->length - n" pages.
- // Modifies "*span" to point to the first span of length "n" pages.
- // Returns a pointer to the second span.
- //
- // REQUIRES: "0 < n < span->length"
- // REQUIRES: span->location == IN_USE
- // REQUIRES: span->sizeclass == 0
- Span* Split(Span* span, Length n);
-
- // Return the descriptor for the specified page. Returns NULL if
- // this PageID was not allocated previously.
- inline Span* GetDescriptor(PageID p) const {
- return reinterpret_cast<Span*>(pagemap_.get(p));
- }
-
- // If this page heap is managing a range with starting page # >= start,
- // store info about the range in *r and return true. Else return false.
- bool GetNextRange(PageID start, base::MallocRange* r);
-
- // Page heap statistics
- struct Stats {
- Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0), committed_bytes(0) {}
- uint64_t system_bytes; // Total bytes allocated from system
- uint64_t free_bytes; // Total bytes on normal freelists
- uint64_t unmapped_bytes; // Total bytes on returned freelists
- uint64_t committed_bytes; // Bytes committed, always <= system_bytes.
- };
- inline Stats stats() const { return stats_; }
-
- struct SmallSpanStats {
- // For each free list of small spans, the length (in spans) of the
- // normal and returned free lists for that size.
- int64 normal_length[kMaxPages];
- int64 returned_length[kMaxPages];
- };
- void GetSmallSpanStats(SmallSpanStats* result);
-
- // Stats for free large spans (i.e., spans with more than kMaxPages pages).
- struct LargeSpanStats {
- int64 spans; // Number of such spans
- int64 normal_pages; // Combined page length of normal large spans
- int64 returned_pages; // Combined page length of unmapped spans
- };
- void GetLargeSpanStats(LargeSpanStats* result);
-
- bool Check();
- // Like Check() but does some more comprehensive checking.
- bool CheckExpensive();
- bool CheckList(Span* list, Length min_pages, Length max_pages,
- int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST
-
- // Try to release at least num_pages for reuse by the OS. Returns
- // the actual number of pages released, which may be less than
- // num_pages if there weren't enough pages to release. The result
- // may also be larger than num_pages since page_heap might decide to
- // release one large range instead of fragmenting it into two
- // smaller released and unreleased ranges.
- Length ReleaseAtLeastNPages(Length num_pages);
-
- // Return 0 if we have no information, or else the correct sizeclass for p.
- // Reads and writes to pagemap_cache_ do not require locking.
- // The entries are 64 bits on 64-bit hardware and 16 bits on
- // 32-bit hardware, and we don't mind raciness as long as each read of
- // an entry yields a valid entry, not a partially updated entry.
- size_t GetSizeClassIfCached(PageID p) const {
- return pagemap_cache_.GetOrDefault(p, 0);
- }
- void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
-
- bool GetAggressiveDecommit(void) {return aggressive_decommit_;}
- void SetAggressiveDecommit(bool aggressive_decommit) {
- aggressive_decommit_ = aggressive_decommit;
- }
-
- private:
- // Allocates a big block of memory for the pagemap once we reach more than
- // 128MB
- static const size_t kPageMapBigAllocationThreshold = 128 << 20;
-
- // Minimum number of pages to fetch from system at a time. Must be
- // significantly bigger than kBlockSize to amortize system-call
- // overhead, and also to reduce external fragmentation. Also, we
- // should keep this value big because various incarnations of Linux
- // have small limits on the number of mmap() regions per
- // address-space.
- // REQUIRED: kMinSystemAlloc <= kMaxPages;
- static const int kMinSystemAlloc = kMaxPages;
-
- // Never delay scavenging for more than the following number of
- // deallocated pages. With 4K pages, this comes to 4GB of
- // deallocation.
- static const int kMaxReleaseDelay = 1 << 20;
-
- // If there is nothing to release, wait for so many pages before
- // scavenging again. With 4K pages, this comes to 1GB of memory.
- static const int kDefaultReleaseDelay = 1 << 18;
-
- // Pick the appropriate map and cache types based on pointer size
- typedef MapSelector<kAddressBits>::Type PageMap;
- typedef MapSelector<kAddressBits>::CacheType PageMapCache;
- PageMap pagemap_;
- mutable PageMapCache pagemap_cache_;
-
- // We segregate spans of a given size into two circular linked
- // lists: one for normal spans, and one for spans whose memory
- // has been returned to the system.
- struct SpanList {
- Span normal;
- Span returned;
- };
-
- // List of free spans of length >= kMaxPages
- SpanList large_;
-
- // Array mapping from span length to a doubly linked list of free spans
- SpanList free_[kMaxPages];
-
- // Statistics on system, free, and unmapped bytes
- Stats stats_;
-
- Span* SearchFreeAndLargeLists(Length n);
-
- bool GrowHeap(Length n);
-
- // REQUIRES: span->length >= n
- // REQUIRES: span->location != IN_USE
- // Remove span from its free list, and move any leftover part of
- // span into appropriate free lists. Also update "span" to have
- // length exactly "n" and mark it as non-free so it can be returned
- // to the client. After all that, decrease free_pages_ by n and
- // return span.
- Span* Carve(Span* span, Length n);
-
- void RecordSpan(Span* span) {
- pagemap_.set(span->start, span);
- if (span->length > 1) {
- pagemap_.set(span->start + span->length - 1, span);
- }
- }
-
- // Allocate a large span of length == n. If successful, returns a
- // span of exactly the specified length. Else, returns NULL.
- Span* AllocLarge(Length n);
-
- // Coalesce span with neighboring spans if possible, prepend to
- // appropriate free list, and adjust stats.
- void MergeIntoFreeList(Span* span);
-
- // Commit the span.
- void CommitSpan(Span* span);
-
- // Decommit the span.
- bool DecommitSpan(Span* span);
-
- // Prepends span to appropriate free list, and adjusts stats.
- void PrependToFreeList(Span* span);
-
- // Removes span from its free list, and adjusts stats.
- void RemoveFromFreeList(Span* span);
-
- // Incrementally release some memory to the system.
- // IncrementalScavenge(n) is called whenever n pages are freed.
- void IncrementalScavenge(Length n);
-
- // Release the last span on the normal portion of this list.
- // Return the length of that span or zero if release failed.
- Length ReleaseLastNormalSpan(SpanList* slist);
-
- // Checks if we are allowed to take more memory from the system.
- // If limit is reached and allowRelease is true, tries to release
- // some unused spans.
- bool EnsureLimit(Length n, bool allowRelease = true);
-
- bool MayMergeSpans(Span *span, Span *other);
-
- // Number of pages to deallocate before doing more scavenging
- int64_t scavenge_counter_;
-
- // Index of last free list where we released memory to the OS.
- int release_index_;
-
- bool aggressive_decommit_;
-};
-
-} // namespace tcmalloc
-
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
-
-#endif // TCMALLOC_PAGE_HEAP_H_
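
To sanity-check the cache sizing that MapSelector picks above: assuming kAddressBits = 48 and kPageShift = 13 (8 KiB pages), as in typical 64-bit builds of this era -- both values are assumptions here, since common.h is not part of this diff -- the packed-cache entry budget works out as below.

    #include <cstdint>

    // Illustrative constants, assumed rather than read from this diff:
    // 48 usable virtual-address bits and 8 KiB pages (kPageShift = 13).
    const int kAddressBits = 48;
    const int kPageShift = 13;

    // MapSelector<48>::CacheType is PackedCache<48 - 13, uint64_t>, so keys
    // (page numbers) are 35 bits wide. With kHashbits = 16 and kValuebits = 7
    // from packed-cache-inl.h, each entry needs (35 - 16) + 7 = 26 bits,
    // which comfortably fits the uint64_t entry type.
    const int kKeybits = kAddressBits - kPageShift;
    const int kHashbits = 16;
    const int kValuebits = 7;
    static_assert(kKeybits - kHashbits + kValuebits <= 64,
                  "packed entry must fit the 64-bit entry type");

    // The 32-bit variant, PackedCache<32 - 12, uint16_t> with 4 KiB pages,
    // needs (20 - 16) + 7 = 11 bits per entry, hence uint16_t suffices.
    static_assert((32 - 12) - 16 + 7 <= 16,
                  "32-bit packed entry must fit uint16_t");

    int main() { return 0; }
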
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/page_heap_allocator.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/page_heap_allocator.h b/third_party/gperftools/src/page_heap_allocator.h
deleted file mode 100644
index 892d1c1..0000000
--- a/third_party/gperftools/src/page_heap_allocator.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <op...@google.com>
-
-#ifndef TCMALLOC_PAGE_HEAP_ALLOCATOR_H_
-#define TCMALLOC_PAGE_HEAP_ALLOCATOR_H_
-
-#include <stddef.h> // for NULL, size_t
-
-#include "common.h" // for MetaDataAlloc
-#include "internal_logging.h" // for ASSERT
-
-namespace tcmalloc {
-
-// Simple allocator for objects of a specified type. External locking
-// is required before accessing one of these objects.
-template <class T>
-class PageHeapAllocator {
- public:
- // We use an explicit Init function because these variables are statically
- // allocated and their constructors might not have run by the time some
- // other static variable tries to allocate memory.
- void Init() {
- ASSERT(sizeof(T) <= kAllocIncrement);
- inuse_ = 0;
- free_area_ = NULL;
- free_avail_ = 0;
- free_list_ = NULL;
- // Reserve some space at the beginning to avoid fragmentation.
- Delete(New());
- }
-
- T* New() {
- // Consult free list
- void* result;
- if (free_list_ != NULL) {
- result = free_list_;
- free_list_ = *(reinterpret_cast<void**>(result));
- } else {
- if (free_avail_ < sizeof(T)) {
- // Need more room. We assume that MetaDataAlloc returns
- // suitably aligned memory.
- free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
- if (free_area_ == NULL) {
- Log(kCrash, __FILE__, __LINE__,
- "FATAL ERROR: Out of memory trying to allocate internal "
- "tcmalloc data (bytes, object-size)",
- kAllocIncrement, sizeof(T));
- }
- free_avail_ = kAllocIncrement;
- }
- result = free_area_;
- free_area_ += sizeof(T);
- free_avail_ -= sizeof(T);
- }
- inuse_++;
- return reinterpret_cast<T*>(result);
- }
-
- void Delete(T* p) {
- *(reinterpret_cast<void**>(p)) = free_list_;
- free_list_ = p;
- inuse_--;
- }
-
- int inuse() const { return inuse_; }
-
- private:
- // How much to allocate from system at a time
- static const int kAllocIncrement = 128 << 10;
-
- // Free area from which to carve new objects
- char* free_area_;
- size_t free_avail_;
-
- // Free list of already carved objects
- void* free_list_;
-
- // Number of allocated but unfreed objects
- int inuse_;
-};
-
-} // namespace tcmalloc
-
-#endif // TCMALLOC_PAGE_HEAP_ALLOCATOR_H_
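
The free-list trick in PageHeapAllocator above -- threading the free list through the first word of each freed object and carving fresh objects from a bump-pointer arena -- is self-contained enough to show in isolation. In the sketch below, std::malloc stands in for MetaDataAlloc and error handling is elided; this illustrates the pattern and is not the gperftools class.

    #include <cstdio>
    #include <cstdlib>

    // Minimal free-list allocator in the style of PageHeapAllocator: freed
    // objects are chained through their own first word, and fresh objects
    // are carved from a bump-pointer arena. Requires
    // sizeof(T) >= sizeof(void*), so the free-list link fits in an object.
    template <class T>
    class FreeListAllocator {
     public:
      void Init() {
        free_area_ = 0;
        free_avail_ = 0;
        free_list_ = 0;
      }

      T* New() {
        void* result;
        if (free_list_ != 0) {            // pop a previously freed object
          result = free_list_;
          free_list_ = *reinterpret_cast<void**>(result);
        } else {
          if (free_avail_ < sizeof(T)) {  // refill the bump-pointer arena
            free_area_ = static_cast<char*>(std::malloc(kAllocIncrement));
            free_avail_ = kAllocIncrement;
          }
          result = free_area_;
          free_area_ += sizeof(T);
          free_avail_ -= sizeof(T);
        }
        return reinterpret_cast<T*>(result);
      }

      void Delete(T* p) {                 // push onto the free list
        *reinterpret_cast<void**>(p) = free_list_;
        free_list_ = p;
      }

     private:
      static const int kAllocIncrement = 128 << 10;  // same as the original
      char* free_area_;
      size_t free_avail_;
      void* free_list_;
    };

    int main() {
      FreeListAllocator<double> alloc;
      alloc.Init();
      double* a = alloc.New();
      alloc.Delete(a);
      double* b = alloc.New();  // reuses a's storage via the free list
      std::printf("%s\n", a == b ? "reused" : "fresh");
      return 0;
    }
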
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b249eb11/third_party/gperftools/src/pagemap.h
----------------------------------------------------------------------
diff --git a/third_party/gperftools/src/pagemap.h b/third_party/gperftools/src/pagemap.h
deleted file mode 100644
index dd94423..0000000
--- a/third_party/gperftools/src/pagemap.h
+++ /dev/null
@@ -1,324 +0,0 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <op...@google.com>
-//
-// A data structure used by the caching malloc. It maps from page# to
-// a pointer that contains info about that page. We use two
-// representations: one for 32-bit addresses, and another for 64-bit
-// addresses. Both representations provide the same interface. The
-// first representation is implemented as a flat array, the second as
-// a three-level radix tree that strips away approximately 1/3rd of
-// the bits every time.
-//
-// The BITS parameter should be the number of bits required to hold
-// a page number. E.g., with 32-bit pointers and 4K pages (i.e.,
-// page offset fits in lower 12 bits), BITS == 20.
-
-#ifndef TCMALLOC_PAGEMAP_H_
-#define TCMALLOC_PAGEMAP_H_
-
-#include "config.h"
-
-#include <stddef.h> // for NULL, size_t
-#include <string.h> // for memset
-#if defined HAVE_STDINT_H
-#include <stdint.h>
-#elif defined HAVE_INTTYPES_H
-#include <inttypes.h>
-#else
-#include <sys/types.h>
-#endif
-#include "internal_logging.h" // for ASSERT
-
-// Single-level array
-template <int BITS>
-class TCMalloc_PageMap1 {
- private:
- static const int LENGTH = 1 << BITS;
-
- void** array_;
-
- public:
- typedef uintptr_t Number;
-
- explicit TCMalloc_PageMap1(void* (*allocator)(size_t)) {
- array_ = reinterpret_cast<void**>((*allocator)(sizeof(void*) << BITS));
- memset(array_, 0, sizeof(void*) << BITS);
- }
-
- // Ensure that the map contains initialized entries "x .. x+n-1".
- // Returns true if successful, false if we could not allocate memory.
- bool Ensure(Number x, size_t n) {
- // Nothing to do since flat array was allocated at start. All
- // that's left is to check for overflow (that is, we don't want to
- // ensure a number y where array_[y] would be an out-of-bounds
- // access).
- return n <= LENGTH - x; // an overflow-free way to do "x + n <= LENGTH"
- }
-
- void PreallocateMoreMemory() {}
-
- // Return the current value for KEY. Returns NULL if not yet set,
- // or if k is out of range.
- void* get(Number k) const {
- if ((k >> BITS) > 0) {
- return NULL;
- }
- return array_[k];
- }
-
- // REQUIRES "k" is in range "[0,2^BITS-1]".
- // REQUIRES "k" has been ensured before.
- //
- // Sets the value 'v' for key 'k'.
- void set(Number k, void* v) {
- array_[k] = v;
- }
-
- // Return the first non-NULL pointer found in this map for
- // a page number >= k. Returns NULL if no such number is found.
- void* Next(Number k) const {
- while (k < (1 << BITS)) {
- if (array_[k] != NULL) return array_[k];
- k++;
- }
- return NULL;
- }
-};
-
-// Two-level radix tree
-template <int BITS>
-class TCMalloc_PageMap2 {
- private:
- // Put 32 entries in the root and (2^BITS)/32 entries in each leaf.
- static const int ROOT_BITS = 5;
- static const int ROOT_LENGTH = 1 << ROOT_BITS;
-
- static const int LEAF_BITS = BITS - ROOT_BITS;
- static const int LEAF_LENGTH = 1 << LEAF_BITS;
-
- // Leaf node
- struct Leaf {
- void* values[LEAF_LENGTH];
- };
-
- Leaf* root_[ROOT_LENGTH]; // Pointers to 32 child nodes
- void* (*allocator_)(size_t); // Memory allocator
-
- public:
- typedef uintptr_t Number;
-
- explicit TCMalloc_PageMap2(void* (*allocator)(size_t)) {
- allocator_ = allocator;
- memset(root_, 0, sizeof(root_));
- }
-
- void* get(Number k) const {
- const Number i1 = k >> LEAF_BITS;
- const Number i2 = k & (LEAF_LENGTH-1);
- if ((k >> BITS) > 0 || root_[i1] == NULL) {
- return NULL;
- }
- return root_[i1]->values[i2];
- }
-
- void set(Number k, void* v) {
- const Number i1 = k >> LEAF_BITS;
- const Number i2 = k & (LEAF_LENGTH-1);
- ASSERT(i1 < ROOT_LENGTH);
- root_[i1]->values[i2] = v;
- }
-
- bool Ensure(Number start, size_t n) {
- for (Number key = start; key <= start + n - 1; ) {
- const Number i1 = key >> LEAF_BITS;
-
- // Check for overflow
- if (i1 >= ROOT_LENGTH)
- return false;
-
- // Make 2nd level node if necessary
- if (root_[i1] == NULL) {
- Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
- if (leaf == NULL) return false;
- memset(leaf, 0, sizeof(*leaf));
- root_[i1] = leaf;
- }
-
- // Advance key past whatever is covered by this leaf node
- key = ((key >> LEAF_BITS) + 1) << LEAF_BITS;
- }
- return true;
- }
-
- void PreallocateMoreMemory() {
- // Allocate enough to keep track of all possible pages
- Ensure(0, 1 << BITS);
- }
-
- void* Next(Number k) const {
- while (k < (1 << BITS)) {
- const Number i1 = k >> LEAF_BITS;
- Leaf* leaf = root_[i1];
- if (leaf != NULL) {
- // Scan forward in leaf
- for (Number i2 = k & (LEAF_LENGTH - 1); i2 < LEAF_LENGTH; i2++) {
- if (leaf->values[i2] != NULL) {
- return leaf->values[i2];
- }
- }
- }
- // Skip to next top-level entry
- k = (i1 + 1) << LEAF_BITS;
- }
- return NULL;
- }
-};
-
-// Three-level radix tree
-template <int BITS>
-class TCMalloc_PageMap3 {
- private:
- // How many bits should we consume at each interior level
- static const int INTERIOR_BITS = (BITS + 2) / 3; // Round-up
- static const int INTERIOR_LENGTH = 1 << INTERIOR_BITS;
-
- // How many bits should we consume at leaf level
- static const int LEAF_BITS = BITS - 2*INTERIOR_BITS;
- static const int LEAF_LENGTH = 1 << LEAF_BITS;
-
- // Interior node
- struct Node {
- Node* ptrs[INTERIOR_LENGTH];
- };
-
- // Leaf node
- struct Leaf {
- void* values[LEAF_LENGTH];
- };
-
- Node* root_; // Root of radix tree
- void* (*allocator_)(size_t); // Memory allocator
-
- Node* NewNode() {
- Node* result = reinterpret_cast<Node*>((*allocator_)(sizeof(Node)));
- if (result != NULL) {
- memset(result, 0, sizeof(*result));
- }
- return result;
- }
-
- public:
- typedef uintptr_t Number;
-
- explicit TCMalloc_PageMap3(void* (*allocator)(size_t)) {
- allocator_ = allocator;
- root_ = NewNode();
- }
-
- void* get(Number k) const {
- const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
- const Number i3 = k & (LEAF_LENGTH-1);
- if ((k >> BITS) > 0 ||
- root_->ptrs[i1] == NULL || root_->ptrs[i1]->ptrs[i2] == NULL) {
- return NULL;
- }
- return reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3];
- }
-
- void set(Number k, void* v) {
- ASSERT(k >> BITS == 0);
- const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
- const Number i3 = k & (LEAF_LENGTH-1);
- reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3] = v;
- }
-
- bool Ensure(Number start, size_t n) {
- for (Number key = start; key <= start + n - 1; ) {
- const Number i1 = key >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (key >> LEAF_BITS) & (INTERIOR_LENGTH-1);
-
- // Check for overflow
- if (i1 >= INTERIOR_LENGTH || i2 >= INTERIOR_LENGTH)
- return false;
-
- // Make 2nd level node if necessary
- if (root_->ptrs[i1] == NULL) {
- Node* n = NewNode();
- if (n == NULL) return false;
- root_->ptrs[i1] = n;
- }
-
- // Make leaf node if necessary
- if (root_->ptrs[i1]->ptrs[i2] == NULL) {
- Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
- if (leaf == NULL) return false;
- memset(leaf, 0, sizeof(*leaf));
- root_->ptrs[i1]->ptrs[i2] = reinterpret_cast<Node*>(leaf);
- }
-
- // Advance key past whatever is covered by this leaf node
- key = ((key >> LEAF_BITS) + 1) << LEAF_BITS;
- }
- return true;
- }
-
- void PreallocateMoreMemory() {
- }
-
- void* Next(Number k) const {
- while (k < (Number(1) << BITS)) {
- const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
- if (root_->ptrs[i1] == NULL) {
- // Advance to next top-level entry
- k = (i1 + 1) << (LEAF_BITS + INTERIOR_BITS);
- } else {
- Leaf* leaf = reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2]);
- if (leaf != NULL) {
- for (Number i3 = (k & (LEAF_LENGTH-1)); i3 < LEAF_LENGTH; i3++) {
- if (leaf->values[i3] != NULL) {
- return leaf->values[i3];
- }
- }
- }
- // Advance to next interior entry
- k = ((k >> LEAF_BITS) + 1) << LEAF_BITS;
- }
- }
- return NULL;
- }
-};
-
-#endif // TCMALLOC_PAGEMAP_H_
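
Finally, to make the level split in TCMalloc_PageMap3 concrete: with 48-bit addresses and 4 KiB pages (illustrative values), BITS = 48 - 12 = 36, so INTERIOR_BITS = (36 + 2) / 3 = 12 and LEAF_BITS = 36 - 24 = 12. The snippet below recomputes the three indices that get() and set() derive from a page number.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Illustrative: 48-bit addresses, 4 KiB pages -> BITS = 48 - 12 = 36.
      const int BITS = 36;
      const int INTERIOR_BITS = (BITS + 2) / 3;        // 12 (rounded up)
      const int LEAF_BITS = BITS - 2 * INTERIOR_BITS;  // 12

      const uint64_t page = 0x7f1234567ull;  // some 36-bit page number
      const uint64_t i1 = page >> (LEAF_BITS + INTERIOR_BITS);
      const uint64_t i2 = (page >> LEAF_BITS) & ((1u << INTERIOR_BITS) - 1);
      const uint64_t i3 = page & ((1u << LEAF_BITS) - 1);

      // root_->ptrs[i1]->ptrs[i2]->values[i3] is where PageMap3 would
      // store the pointer for this page.
      std::printf("i1=%llu i2=%llu i3=%llu\n",
                  (unsigned long long)i1, (unsigned long long)i2,
                  (unsigned long long)i3);
      return 0;
    }
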