Posted to commits@marmotta.apache.org by ss...@apache.org on 2018/04/29 19:36:14 UTC

[45/51] [partial] marmotta git commit: * Replace gtest with upstream version, including LICENSE header. * Include absl library for faster and safer string operations. * Update license headers where needed. * Removed custom code replaced by absl.

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc.cc
new file mode 100644
index 0000000..0a6f307
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc.cc
@@ -0,0 +1,604 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A low-level allocator that can be used by other low-level
+// modules without introducing dependency cycles.
+// This allocator is slow and wasteful of memory;
+// it should not be used when performance is key.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <type_traits>
+
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/direct_mmap.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+#include <string.h>
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <new>                   // for placement-new
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// MAP_ANONYMOUS
+#if defined(__APPLE__)
+// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
+// deprecated. In Darwin, MAP_ANON is all there is.
+#if !defined MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif  // !MAP_ANONYMOUS
+#endif  // __APPLE__
+
+namespace absl {
+namespace base_internal {
+
+// A first-fit allocator with amortized logarithmic free() time.
+
+// ---------------------------------------------------------------------------
+static const int kMaxLevel = 30;
+
+namespace {
+// This struct describes one allocated block, or one free block.
+struct AllocList {
+  struct Header {
+    // Size of entire region, including this field. Must be
+    // first. Valid in both allocated and unallocated blocks.
+    uintptr_t size;
+
+    // kMagicAllocated or kMagicUnallocated xor this.
+    uintptr_t magic;
+
+    // Pointer to parent arena.
+    LowLevelAlloc::Arena *arena;
+
+    // Aligns regions to 0 mod 2*sizeof(void*).
+    void *dummy_for_alignment;
+  } header;
+
+  // Next two fields: in unallocated blocks: freelist skiplist data
+  //                  in allocated blocks: overlaps with client data
+
+  // Levels in skiplist used.
+  int levels;
+
+  // Actually has levels elements. The AllocList node may not have room
+  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
+  AllocList *next[kMaxLevel];
+};
+}  // namespace
+
+// ---------------------------------------------------------------------------
+// A trivial skiplist implementation.  This is used to keep the freelist
+// in address order while taking only logarithmic time per insert and delete.
+
+// An integer approximation of log2(size/base)
+// Requires size >= base.
+static int IntLog2(size_t size, size_t base) {
+  int result = 0;
+  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
+    result++;
+  }
+  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
+  // =>     log2(size/(base+1)) <= result < 1+log2(size/base)
+  // => result ~= log2(size/base)
+  return result;
+}
+
+// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
+static int Random(uint32_t *state) {
+  uint32_t r = *state;
+  int result = 1;
+  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+    result++;
+  }
+  *state = r;
+  return result;
+}
+
+// Return a number of skiplist levels for a node of size bytes, where
+// base is the minimum node size.  Compute level=log2(size / base)+n
+// where n is 1 if random is false and otherwise a random number generated with
+// the standard distribution for a skiplist:  See Random() above.
+// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
+// term, so first-fit searches touch fewer nodes.  "level" is clipped so
+// level<kMaxLevel and next[level-1] will fit in the node.
+// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
+static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
+  // max_fit is the maximum number of levels that will fit in a node for the
+  // given size.   We can't return more than max_fit, no matter what the
+  // random number generator says.
+  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
+  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
+  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
+  if (level > kMaxLevel-1) level = kMaxLevel - 1;
+  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
+  return level;
+}
+
+// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
+// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
+// points to the last element at level i in the AllocList less than *e, or is
+// head if no such element exists.
+static AllocList *LLA_SkiplistSearch(AllocList *head,
+                                     AllocList *e, AllocList **prev) {
+  AllocList *p = head;
+  for (int level = head->levels - 1; level >= 0; level--) {
+    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
+    }
+    prev[level] = p;
+  }
+  return (head->levels == 0) ? nullptr : prev[0]->next[0];
+}
+
+// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  LLA_SkiplistSearch(head, e, prev);
+  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
+    prev[head->levels] = head;                        // to all *e's levels
+  }
+  for (int i = 0; i != e->levels; i++) {  // add element to list
+    e->next[i] = prev[i]->next[i];
+    prev[i]->next[i] = e;
+  }
+}
+
+// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
+// Requires that e->levels be previous set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  AllocList *found = LLA_SkiplistSearch(head, e, prev);
+  ABSL_RAW_CHECK(e == found, "element not in freelist");
+  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
+    prev[i]->next[i] = e->next[i];
+  }
+  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
+    head->levels--;   // reduce head->levels if level unused
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Arena implementation
+
+// Metadata for an LowLevelAlloc arena instance.
+struct LowLevelAlloc::Arena {
+  // Constructs an arena with the given LowLevelAlloc flags.
+  explicit Arena(uint32_t flags_value);
+
+  base_internal::SpinLock mu;
+  // Head of free list, sorted by address
+  AllocList freelist GUARDED_BY(mu);
+  // Count of allocated blocks
+  int32_t allocation_count GUARDED_BY(mu);
+  // flags passed to NewArena
+  const uint32_t flags;
+  // Result of getpagesize()
+  const size_t pagesize;
+  // Lowest power of two >= max(16, sizeof(AllocList))
+  const size_t roundup;
+  // Smallest allocation block size
+  const size_t min_size;
+  // PRNG state
+  uint32_t random GUARDED_BY(mu);
+};
+
+namespace {
+using ArenaStorage = std::aligned_storage<sizeof(LowLevelAlloc::Arena),
+                                          alignof(LowLevelAlloc::Arena)>::type;
+
+// Static storage space for the lazily-constructed, default global arena
+// instances.  We require this space because the whole point of LowLevelAlloc
+// is to avoid relying on malloc/new.
+ArenaStorage default_arena_storage;
+ArenaStorage unhooked_arena_storage;
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ArenaStorage unhooked_async_sig_safe_arena_storage;
+#endif
+
+// We must use LowLevelCallOnce here to construct the global arenas, rather than
+// using function-level statics, to avoid recursively invoking the scheduler.
+absl::once_flag create_globals_once;
+
+void CreateGlobalArenas() {
+  new (&default_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
+  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  new (&unhooked_async_sig_safe_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
+#endif
+}
+
+// Returns a global arena that does not call into hooks.  Used by NewArena()
+// when kCallMallocHook is not set.
+LowLevelAlloc::Arena* UnhookedArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+}
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+// Returns a global arena that is async-signal safe.  Used by NewArena() when
+// kAsyncSignalSafe is set.
+LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(
+      &unhooked_async_sig_safe_arena_storage);
+}
+#endif
+
+}  // namespace
+
+// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+}
+
+// magic numbers to identify allocated and unallocated blocks
+static const uintptr_t kMagicAllocated = 0x4c833e95U;
+static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
+
+namespace {
+class SCOPED_LOCKABLE ArenaLock {
+ public:
+  explicit ArenaLock(LowLevelAlloc::Arena *arena)
+      EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+      : arena_(arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      sigset_t all;
+      sigfillset(&all);
+      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
+    }
+#endif
+    arena_->mu.Lock();
+  }
+  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
+  void Leave() UNLOCK_FUNCTION() {
+    arena_->mu.Unlock();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (mask_valid_) {
+      pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+    }
+#endif
+    left_ = true;
+  }
+
+ private:
+  bool left_ = false;  // whether left region
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  bool mask_valid_ = false;
+  sigset_t mask_;  // old mask of blocked signals
+#endif
+  LowLevelAlloc::Arena *arena_;
+  ArenaLock(const ArenaLock &) = delete;
+  ArenaLock &operator=(const ArenaLock &) = delete;
+};
+}  // namespace
+
+// create an appropriate magic number for an object at "ptr"
+// "magic" should be kMagicAllocated or kMagicUnallocated
+inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
+  return magic ^ reinterpret_cast<uintptr_t>(ptr);
+}
+
+namespace {
+size_t GetPageSize() {
+#ifdef _WIN32
+  SYSTEM_INFO system_info;
+  GetSystemInfo(&system_info);
+  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
+#else
+  return getpagesize();
+#endif
+}
+
+size_t RoundedUpBlockSize() {
+  // Round up block sizes to a power of two close to the header size.
+  size_t roundup = 16;
+  while (roundup < sizeof(AllocList::Header)) {
+    roundup += roundup;
+  }
+  return roundup;
+}
+
+}  // namespace
+
+LowLevelAlloc::Arena::Arena(uint32_t flags_value)
+    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
+      allocation_count(0),
+      flags(flags_value),
+      pagesize(GetPageSize()),
+      roundup(RoundedUpBlockSize()),
+      min_size(2 * roundup),
+      random(0) {
+  freelist.header.size = 0;
+  freelist.header.magic =
+      Magic(kMagicUnallocated, &freelist.header);
+  freelist.header.arena = this;
+  freelist.levels = 0;
+  memset(freelist.next, 0, sizeof(freelist.next));
+}
+
+// L < meta_data_arena->mu
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
+  Arena *meta_data_arena = DefaultArena();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+    meta_data_arena = UnhookedAsyncSigSafeArena();
+  } else  // NOLINT(readability/braces)
+#endif
+      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+    meta_data_arena = UnhookedArena();
+  }
+  Arena *result =
+    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
+  return result;
+}
+
+// L < arena->mu, L < arena->arena->mu
+bool LowLevelAlloc::DeleteArena(Arena *arena) {
+  ABSL_RAW_CHECK(
+      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
+      "may not delete default arena");
+  ArenaLock section(arena);
+  if (arena->allocation_count != 0) {
+    section.Leave();
+    return false;
+  }
+  while (arena->freelist.next[0] != nullptr) {
+    AllocList *region = arena->freelist.next[0];
+    size_t size = region->header.size;
+    arena->freelist.next[0] = region->next[0];
+    ABSL_RAW_CHECK(
+        region->header.magic == Magic(kMagicUnallocated, &region->header),
+        "bad magic number in DeleteArena()");
+    ABSL_RAW_CHECK(region->header.arena == arena,
+                   "bad arena pointer in DeleteArena()");
+    ABSL_RAW_CHECK(size % arena->pagesize == 0,
+                   "empty arena has non-page-aligned block size");
+    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
+                   "empty arena has non-page-aligned block");
+    int munmap_result;
+#ifdef _WIN32
+    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
+    ABSL_RAW_CHECK(munmap_result != 0,
+                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
+#else
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
+      munmap_result = munmap(region, size);
+    } else {
+      munmap_result = base_internal::DirectMunmap(region, size);
+    }
+    if (munmap_result != 0) {
+      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
+                   errno);
+    }
+#endif
+  }
+  section.Leave();
+  arena->~Arena();
+  Free(arena);
+  return true;
+}
+
+// ---------------------------------------------------------------------------
+
+// Addition, checking for overflow.  The intent is to die if an external client
+// manages to push through a request that would cause arithmetic to fail.
+static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
+  uintptr_t sum = a + b;
+  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
+  return sum;
+}
+
+// Return value rounded up to next multiple of align.
+// align must be a power of two.
+static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
+  return CheckedAdd(addr, align - 1) & ~(align - 1);
+}
+
+// Equivalent to "return prev->next[i]" but with sanity checking
+// that the freelist is in the correct order, that it
+// consists of regions marked "unallocated", and that no two regions
+// are adjacent in memory (they should have been coalesced).
+// L < arena->mu
+static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
+  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
+  AllocList *next = prev->next[i];
+  if (next != nullptr) {
+    ABSL_RAW_CHECK(
+        next->header.magic == Magic(kMagicUnallocated, &next->header),
+        "bad magic number in Next()");
+    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
+    if (prev != &arena->freelist) {
+      ABSL_RAW_CHECK(prev < next, "unordered freelist");
+      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
+                         reinterpret_cast<char *>(next),
+                     "malformed freelist");
+    }
+  }
+  return next;
+}
+
+// Coalesce list item "a" with its successor if they are adjacent.
+static void Coalesce(AllocList *a) {
+  AllocList *n = a->next[0];
+  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
+                          reinterpret_cast<char *>(n)) {
+    LowLevelAlloc::Arena *arena = a->header.arena;
+    a->header.size += n->header.size;
+    n->header.magic = 0;
+    n->header.arena = nullptr;
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, n, prev);
+    LLA_SkiplistDelete(&arena->freelist, a, prev);
+    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
+                                   &arena->random);
+    LLA_SkiplistInsert(&arena->freelist, a, prev);
+  }
+}
+
+// Adds block at location "v" to the free list
+// L >= arena->mu
+static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
+  AllocList *f = reinterpret_cast<AllocList *>(
+                        reinterpret_cast<char *>(v) - sizeof (f->header));
+  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+                 "bad magic number in AddToFreelist()");
+  ABSL_RAW_CHECK(f->header.arena == arena,
+                 "bad arena pointer in AddToFreelist()");
+  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
+                                 &arena->random);
+  AllocList *prev[kMaxLevel];
+  LLA_SkiplistInsert(&arena->freelist, f, prev);
+  f->header.magic = Magic(kMagicUnallocated, &f->header);
+  Coalesce(f);                  // maybe coalesce with successor
+  Coalesce(prev[0]);            // maybe coalesce with predecessor
+}
+
+// Frees storage allocated by LowLevelAlloc::Alloc().
+// L < arena->mu
+void LowLevelAlloc::Free(void *v) {
+  if (v != nullptr) {
+    AllocList *f = reinterpret_cast<AllocList *>(
+                        reinterpret_cast<char *>(v) - sizeof (f->header));
+    ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+                   "bad magic number in Free()");
+    LowLevelAlloc::Arena *arena = f->header.arena;
+    ArenaLock section(arena);
+    AddToFreelist(v, arena);
+    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
+    arena->allocation_count--;
+    section.Leave();
+  }
+}
+
+// allocates and returns a block of size bytes, to be freed with Free()
+// L < arena->mu
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+  void *result = nullptr;
+  if (request != 0) {
+    AllocList *s;       // will point to region that satisfies request
+    ArenaLock section(arena);
+    // round up with header
+    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
+                             arena->roundup);
+    for (;;) {      // loop until we find a suitable region
+      // find the minimum levels that a block of this size must have
+      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
+      if (i < arena->freelist.levels) {   // potential blocks exist
+        AllocList *before = &arena->freelist;  // predecessor of s
+        while ((s = Next(i, before, arena)) != nullptr &&
+               s->header.size < req_rnd) {
+          before = s;
+        }
+        if (s != nullptr) {       // we found a region
+          break;
+        }
+      }
+      // we unlock before mmap() both because mmap() may call a callback hook,
+      // and because it may be slow.
+      arena->mu.Unlock();
+      // mmap generous 64K chunks to decrease
+      // the chances/impact of fragmentation:
+      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
+      void *new_pages;
+#ifdef _WIN32
+      new_pages = VirtualAlloc(0, new_pages_size,
+                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
+#else
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      } else {
+        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+      }
+      if (new_pages == MAP_FAILED) {
+        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
+      }
+#endif
+      arena->mu.Lock();
+      s = reinterpret_cast<AllocList *>(new_pages);
+      s->header.size = new_pages_size;
+      // Pretend the block is allocated; call AddToFreelist() to free it.
+      s->header.magic = Magic(kMagicAllocated, &s->header);
+      s->header.arena = arena;
+      AddToFreelist(&s->levels, arena);  // insert new region into free list
+    }
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
+    // s points to the first free region that's big enough
+    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
+      // big enough to split
+      AllocList *n = reinterpret_cast<AllocList *>
+                        (req_rnd + reinterpret_cast<char *>(s));
+      n->header.size = s->header.size - req_rnd;
+      n->header.magic = Magic(kMagicAllocated, &n->header);
+      n->header.arena = arena;
+      s->header.size = req_rnd;
+      AddToFreelist(&n->levels, arena);
+    }
+    s->header.magic = Magic(kMagicAllocated, &s->header);
+    ABSL_RAW_CHECK(s->header.arena == arena, "");
+    arena->allocation_count++;
+    section.Leave();
+    result = &s->levels;
+  }
+  ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+  return result;
+}
+
+void *LowLevelAlloc::Alloc(size_t request) {
+  void *result = DoAllocWithArena(request, DefaultArena());
+  return result;
+}
+
+void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
+  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
+  void *result = DoAllocWithArena(request, arena);
+  return result;
+}
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
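
As a side note on the allocation path in DoAllocWithArena() above, the
following standalone sketch mirrors its size arithmetic (CheckedAdd() plus
RoundUp()) outside the allocator.  The concrete sizes are assumptions for a
typical LP64 build, not values guaranteed by the code.

    // Sketch only: mirrors CheckedAdd()/RoundUp() from low_level_alloc.cc.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
      uintptr_t sum = a + b;
      assert(sum >= a && "arithmetic overflow");
      return sum;
    }

    // "align" must be a power of two, as in the original.
    static uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
      return CheckedAdd(addr, align - 1) & ~(align - 1);
    }

    int main() {
      const uintptr_t kAssumedHeaderSize = 32;  // sizeof(AllocList::Header) on LP64 (assumed)
      const uintptr_t kAssumedRoundup = 32;     // lowest power of two >= max(16, header size)
      uintptr_t request = 100;
      // DoAllocWithArena() reserves room for the header, then rounds up.
      uintptr_t req_rnd =
          RoundUp(CheckedAdd(request, kAssumedHeaderSize), kAssumedRoundup);
      std::printf("request=100 -> rounded block size=%llu\n",
                  static_cast<unsigned long long>(req_rnd));  // prints 160
      return 0;
    }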

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc.h
new file mode 100644
index 0000000..3c15605
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc.h
@@ -0,0 +1,119 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data.  It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker, or the Mutex
+// implementation.
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <sys/types.h>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <cstddef>
+
+#include "absl/base/port.h"
+
+namespace absl {
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+  struct Arena;       // an arena from which memory may be allocated
+
+  // Returns a pointer to a block of at least "request" bytes
+  // that have been newly allocated from the specific arena.
+  // For an Alloc() call, the DefaultArena() is used.
+  // Returns 0 if passed request==0.
+  // Does not return 0 under other circumstances; it crashes if memory
+  // is not available.
+  static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+  static void *AllocWithArena(size_t request, Arena *arena)
+      ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // Deallocates a region of memory that was previously allocated with
+  // Alloc().   Does nothing if passed 0.   "s" must be either 0,
+  // or must have been returned from a call to Alloc() and not yet passed to
+  // Free() since that call to Alloc().  The space is returned to the arena
+  // from which it was allocated.
+  static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // The ABSL_ATTRIBUTE_SECTION(malloc_hook) annotations on Alloc* and Free
+  // place all callers of MallocHook::Invoke* in this module into a special
+  // section, so that MallocHook::GetCallerStackTrace can function accurately.
+
+  // Create a new arena.
+  // The root metadata for the new arena is allocated in a metadata arena
+  // chosen according to the flags (see NewArena() in low_level_alloc.cc).
+  // These values may be ORed into flags:
+  enum {
+    // Report calls to Alloc() and Free() via the MallocHook interface.
+    // Set in the DefaultArena.
+    kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    // Make calls to Alloc() and Free() async-signal-safe. Not set in
+    // DefaultArena(). Not supported on all platforms.
+    kAsyncSignalSafe = 0x0002,
+#endif
+  };
+  // Construct a new arena.  The allocation of the underlying metadata honors
+  // the provided flags.  For example, the call NewArena(kAsyncSignalSafe)
+  // is itself async-signal-safe, as well as generating an arena that provides
+  // async-signal-safe Alloc/Free.
+  static Arena *NewArena(int32_t flags);
+
+  // Destroys an arena allocated by NewArena and returns true,
+  // provided no allocated blocks remain in the arena.
+  // If allocated blocks remain in the arena, does nothing and
+  // returns false.
+  // It is illegal to attempt to destroy the DefaultArena().
+  static bool DeleteArena(Arena *arena);
+
+  // The default arena that always exists.
+  static Arena *DefaultArena();
+
+ private:
+  LowLevelAlloc();      // no instances
+};
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
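
For orientation, a minimal usage sketch of the API declared above; it is an
illustration rather than code from this commit, and only uses functions
declared in this header.

    #include <cstring>
    #include "absl/base/internal/low_level_alloc.h"

    void Demo() {
      using absl::base_internal::LowLevelAlloc;

      // Allocate from the default arena and release the block again.
      void* p = LowLevelAlloc::Alloc(64);
      std::memset(p, 0, 64);
      LowLevelAlloc::Free(p);

      // Or use a dedicated arena, which can be torn down once it is empty.
      LowLevelAlloc::Arena* arena = LowLevelAlloc::NewArena(0);
      void* q = LowLevelAlloc::AllocWithArena(128, arena);
      LowLevelAlloc::Free(q);   // blocks are returned to the arena they came from
      bool deleted = LowLevelAlloc::DeleteArena(arena);  // true: no blocks remain
      (void)deleted;
    }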

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc_test.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc_test.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc_test.cc
new file mode 100644
index 0000000..cf2b363
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_alloc_test.cc
@@ -0,0 +1,157 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_map>
+#include <utility>
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+// This test doesn't use gtest since it needs to test that everything
+// works before main().
+#define TEST_ASSERT(x)                                           \
+  if (!(x)) {                                                    \
+    printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \
+    abort();                                                     \
+  }
+
+// a block of memory obtained from the allocator
+struct BlockDesc {
+  char *ptr;      // pointer to memory
+  int len;        // number of bytes
+  int fill;       // filled with data starting with this
+};
+
+// Check that the pattern placed in the block d
+// by RandomizeBlockDesc is still there.
+static void CheckBlockDesc(const BlockDesc &d) {
+  for (int i = 0; i != d.len; i++) {
+    TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff));
+  }
+}
+
+// Fill the block "*d" with a pattern
+// starting with a random byte.
+static void RandomizeBlockDesc(BlockDesc *d) {
+  d->fill = rand() & 0xff;
+  for (int i = 0; i != d->len; i++) {
+    d->ptr[i] = (d->fill + i) & 0xff;
+  }
+}
+
+// Used to indicate to the malloc hooks that
+// this call is from LowLevelAlloc.
+static bool using_low_level_alloc = false;
+
+// n times, toss a coin, and based on the outcome
+// either allocate a new block or deallocate an old block.
+// New blocks are placed in a std::unordered_map with a random key
+// and initialized with RandomizeBlockDesc().
+// If keys conflict, the older block is freed.
+// Old blocks are always checked with CheckBlockDesc()
+// before being freed.  At the end of the run,
+// all remaining allocated blocks are freed.
+// If use_new_arena is true, use a fresh arena, and then delete it.
+// If call_malloc_hook is true and use_new_arena is true,
+// allocations and deallocations are reported via the MallocHook
+// interface.
+static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
+  typedef std::unordered_map<int, BlockDesc> AllocMap;
+  AllocMap allocated;
+  AllocMap::iterator it;
+  BlockDesc block_desc;
+  int rnd;
+  LowLevelAlloc::Arena *arena = 0;
+  if (use_new_arena) {
+    int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
+    arena = LowLevelAlloc::NewArena(flags);
+  }
+  for (int i = 0; i != n; i++) {
+    if (i != 0 && i % 10000 == 0) {
+      printf(".");
+      fflush(stdout);
+    }
+
+    switch (rand() & 1) {      // toss a coin
+    case 0:     // coin came up heads: add a block
+      using_low_level_alloc = true;
+      block_desc.len = rand() & 0x3fff;
+      block_desc.ptr =
+        reinterpret_cast<char *>(
+                        arena == 0
+                        ? LowLevelAlloc::Alloc(block_desc.len)
+                        : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
+      using_low_level_alloc = false;
+      RandomizeBlockDesc(&block_desc);
+      rnd = rand();
+      it = allocated.find(rnd);
+      if (it != allocated.end()) {
+        CheckBlockDesc(it->second);
+        using_low_level_alloc = true;
+        LowLevelAlloc::Free(it->second.ptr);
+        using_low_level_alloc = false;
+        it->second = block_desc;
+      } else {
+        allocated[rnd] = block_desc;
+      }
+      break;
+    case 1:     // coin came up tails: remove a block
+      it = allocated.begin();
+      if (it != allocated.end()) {
+        CheckBlockDesc(it->second);
+        using_low_level_alloc = true;
+        LowLevelAlloc::Free(it->second.ptr);
+        using_low_level_alloc = false;
+        allocated.erase(it);
+      }
+      break;
+    }
+  }
+  // remove all remaining blocks
+  while ((it = allocated.begin()) != allocated.end()) {
+    CheckBlockDesc(it->second);
+    using_low_level_alloc = true;
+    LowLevelAlloc::Free(it->second.ptr);
+    using_low_level_alloc = false;
+    allocated.erase(it);
+  }
+  if (use_new_arena) {
+    TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
+  }
+}
+// LowLevelAlloc is designed to be safe to call before main().
+static struct BeforeMain {
+  BeforeMain() {
+    Test(false, false, 50000);
+    Test(true, false, 50000);
+    Test(true, true, 50000);
+  }
+} before_main;
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
+
+int main(int argc, char *argv[]) {
+  // The actual test runs in the global constructor of `before_main`.
+  printf("PASS\n");
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_scheduling.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_scheduling.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_scheduling.h
new file mode 100644
index 0000000..e716f2b
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/low_level_scheduling.h
@@ -0,0 +1,104 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+
+// The following two declarations exist so SchedulingGuard may friend them with
+// the appropriate language linkage.  These callbacks allow libc internals, such
+// as function level statics, to schedule cooperatively when locking.
+extern "C" bool __google_disable_rescheduling(void);
+extern "C" void __google_enable_rescheduling(bool disable_result);
+
+namespace absl {
+namespace base_internal {
+
+class SchedulingHelper;  // To allow use of SchedulingGuard.
+class SpinLock;          // To allow use of SchedulingGuard.
+
+// SchedulingGuard
+// Provides guard semantics that may be used to disable cooperative rescheduling
+// of the calling thread within specific program blocks.  This is used to
+// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
+// scheduling depends on.
+//
+// Domain implementations capable of rescheduling in reaction to involuntary
+// kernel thread actions (e.g. blocking due to a page fault or syscall) must
+// guarantee that an annotated thread is not allowed to (cooperatively)
+// reschedule until the annotated region is complete.
+//
+// It is an error to attempt to use a cooperatively scheduled resource (e.g.
+// Mutex) within a rescheduling-disabled region.
+//
+// All methods are async-signal safe.
+class SchedulingGuard {
+ public:
+  // Returns true iff the calling thread may be cooperatively rescheduled.
+  static bool ReschedulingIsAllowed();
+
+ private:
+  // Disable cooperative rescheduling of the calling thread.  It may still
+  // initiate scheduling operations (e.g. wake-ups); however, it may not itself
+  // reschedule.  Nestable.  The returned result is opaque, clients should not
+  // attempt to interpret it.
+  // REQUIRES: Result must be passed to a pairing EnableScheduling().
+  static bool DisableRescheduling();
+
+  // Marks the end of a rescheduling disabled region, previously started by
+  // DisableRescheduling().
+  // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
+  static void EnableRescheduling(bool disable_result);
+
+  // A scoped helper for {Disable, Enable}Rescheduling().
+  // REQUIRES: destructor must run in same thread as constructor.
+  struct ScopedDisable {
+    ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
+    ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
+
+    bool disabled;
+  };
+
+  // Access to SchedulingGuard is explicitly white-listed.
+  friend class SchedulingHelper;
+  friend class SpinLock;
+
+  SchedulingGuard(const SchedulingGuard&) = delete;
+  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+//------------------------------------------------------------------------------
+inline bool SchedulingGuard::ReschedulingIsAllowed() {
+  return false;
+}
+
+inline bool SchedulingGuard::DisableRescheduling() {
+  return false;
+}
+
+inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
+  return;
+}
+
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
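
The guard is meant to be used through the RAII helper above.  A hedged sketch
of how one of the whitelisted friend classes (e.g. SpinLock) could wrap a
critical region; this is hypothetical, since ScopedDisable is private to
SchedulingGuard and not accessible to ordinary callers:

    // Sketch only: assumes the enclosing class has been granted friendship.
    void CriticalRegion() {
      absl::base_internal::SchedulingGuard::ScopedDisable disable;
      // ... touch the low-level resource while cooperative rescheduling is off ...
    }  // ~ScopedDisable() calls EnableRescheduling(disabled) here.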

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/per_thread_tls.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/per_thread_tls.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/per_thread_tls.h
new file mode 100644
index 0000000..2428bdc
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/per_thread_tls.h
@@ -0,0 +1,48 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+
+// This header defines two macros:
+// If the platform supports thread-local storage:
+//   ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
+//   thread-local variable, and ABSL_PER_THREAD_TLS is 1.
+//
+// Otherwise:
+//   ABSL_PER_THREAD_TLS_KEYWORD is empty
+//   ABSL_PER_THREAD_TLS is 0
+//
+// Microsoft C supports thread-local storage.
+// GCC supports it if the appropriate version of glibc is available,
+// which the programmer can indicate by defining ABSL_HAVE_TLS
+
+#include "absl/base/port.h"  // For ABSL_HAVE_TLS
+
+#if defined(ABSL_PER_THREAD_TLS)
+#error ABSL_PER_THREAD_TLS cannot be directly set
+#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
+#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
+#elif defined(ABSL_HAVE_TLS)
+#define ABSL_PER_THREAD_TLS_KEYWORD __thread
+#define ABSL_PER_THREAD_TLS 1
+#elif defined(_MSC_VER)
+#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
+#define ABSL_PER_THREAD_TLS 1
+#else
+#define ABSL_PER_THREAD_TLS_KEYWORD
+#define ABSL_PER_THREAD_TLS 0
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
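
A short illustration of how these macros are meant to be consumed (the counter
name and the fallback strategy are made up for the example):

    #include "absl/base/internal/per_thread_tls.h"

    #if ABSL_PER_THREAD_TLS
    // One instance per thread on platforms with native TLS support.
    static ABSL_PER_THREAD_TLS_KEYWORD int per_thread_counter = 0;
    #else
    // No TLS keyword available; real code would fall back to pthread keys or
    // a similar per-thread mechanism instead of a shared global.
    static int per_thread_counter = 0;
    #endif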

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/pretty_function.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/pretty_function.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/pretty_function.h
new file mode 100644
index 0000000..01b0547
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/pretty_function.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function.  That
+// is, "main", not "int main()".  Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets.  ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
+#if defined(_MSC_VER)
+#define ABSL_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#error "Unsupported compiler"
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
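
A trivial example of the macro in use (function and message invented for
illustration):

    #include <cstdio>
    #include "absl/base/internal/pretty_function.h"

    int Add(int a, int b) {
      // With GCC/Clang this prints something like "int Add(int, int)";
      // with MSVC it prints the __FUNCSIG__ form of the signature.
      std::printf("entering %s\n", ABSL_PRETTY_FUNCTION);
      return a + b;
    }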

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/raw_logging.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/raw_logging.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/raw_logging.cc
new file mode 100644
index 0000000..1ce1388
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/raw_logging.cc
@@ -0,0 +1,218 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/raw_logging.h"
+
+#include <stddef.h>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+
+// We know how to perform low-level writes to stderr in POSIX and Windows.  For
+// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
+// Much of raw_logging.cc becomes a no-op when we can't output messages,
+// although a FATAL ABSL_RAW_LOG message will still abort the process.
+
+// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
+// (as from unistd.h)
+//
+// This preprocessor token is also defined in raw_io.cc.  If you need to copy
+// this, consider moving both to config.h instead.
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+    defined(__Fuchsia__) || defined(__native_client__)
+#include <unistd.h>
+
+
+#define ABSL_HAVE_POSIX_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_POSIX_WRITE
+#endif
+
+// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
+//   syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
+// for low level operations that want to avoid libc.
+#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#include <sys/syscall.h>
+#define ABSL_HAVE_SYSCALL_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_SYSCALL_WRITE
+#endif
+
+#ifdef _WIN32
+#include <io.h>
+
+#define ABSL_HAVE_RAW_IO 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_RAW_IO
+#endif
+
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
+// whitelisted set of platforms for which we expect not to be able to raw log.
+
+ABSL_CONST_INIT static absl::base_internal::AtomicHook<
+    absl::raw_logging_internal::LogPrefixHook> log_prefix_hook;
+ABSL_CONST_INIT static absl::base_internal::AtomicHook<
+    absl::raw_logging_internal::AbortHook> abort_hook;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+static const char kTruncated[] = " ... (message truncated)\n";
+
+// sprintf the format to the buffer, adjusting *buf and *size to reflect the
+// consumed bytes, and return whether the message fit without truncation.  If
+// truncation occurred, if possible leave room in the buffer for the message
+// kTruncated[].
+inline static bool VADoRawLog(char** buf, int* size, const char* format,
+                              va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0);
+inline static bool VADoRawLog(char** buf, int* size,
+                              const char* format, va_list ap) {
+  int n = vsnprintf(*buf, *size, format, ap);
+  bool result = true;
+  if (n < 0 || n > *size) {
+    result = false;
+    if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
+      n = *size - sizeof(kTruncated);  // room for truncation message
+    } else {
+      n = 0;                           // no room for truncation message
+    }
+  }
+  *size -= n;
+  *buf += n;
+  return result;
+}
+#endif  // ABSL_LOW_LEVEL_WRITE_SUPPORTED
+
+static constexpr int kLogBufSize = 3000;
+
+namespace {
+
+// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
+// that invoke malloc() and getenv() that might acquire some locks.
+
+// Helper for RawLog below.
+// *DoRawLog writes to *buf of *size and moves them past the written portion.
+// It returns true iff there was no overflow or error.
+bool DoRawLog(char** buf, int* size, const char* format, ...)
+    ABSL_PRINTF_ATTRIBUTE(3, 4);
+bool DoRawLog(char** buf, int* size, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  int n = vsnprintf(*buf, *size, format, ap);
+  va_end(ap);
+  if (n < 0 || n > *size) return false;
+  *size -= n;
+  *buf += n;
+  return true;
+}
+
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) {
+  char buffer[kLogBufSize];
+  char* buf = buffer;
+  int size = sizeof(buffer);
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  bool enabled = true;
+#else
+  bool enabled = false;
+#endif
+
+#ifdef ABSL_MIN_LOG_LEVEL
+  if (static_cast<int>(severity) < ABSL_MIN_LOG_LEVEL &&
+      severity < absl::LogSeverity::kFatal) {
+    enabled = false;
+  }
+#endif
+
+  auto log_prefix_hook_ptr = log_prefix_hook.Load();
+  if (log_prefix_hook_ptr) {
+    enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
+  } else {
+    if (enabled) {
+      DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
+    }
+  }
+  const char* const prefix_end = buf;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  if (enabled) {
+    bool no_chop = VADoRawLog(&buf, &size, format, ap);
+    if (no_chop) {
+      DoRawLog(&buf, &size, "\n");
+    } else {
+      DoRawLog(&buf, &size, "%s", kTruncated);
+    }
+    absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer));
+  }
+#else
+  static_cast<void>(format);
+  static_cast<void>(ap);
+#endif
+
+  // Abort the process after logging a FATAL message, even if the output itself
+  // was suppressed.
+  if (severity == absl::LogSeverity::kFatal) {
+    abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
+    abort();
+  }
+}
+
+}  // namespace
+
+namespace absl {
+namespace raw_logging_internal {
+void SafeWriteToStderr(const char *s, size_t len) {
+#if defined(ABSL_HAVE_SYSCALL_WRITE)
+  syscall(SYS_write, STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_POSIX_WRITE)
+  write(STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_RAW_IO)
+  _write(/* stderr */ 2, s, len);
+#else
+  // stderr logging unsupported on this platform
+  (void) s;
+  (void) len;
+#endif
+}
+
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  RawLogVA(severity, file, line, format, ap);
+  va_end(ap);
+}
+
+bool RawLoggingFullySupported() {
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return true;
+#else  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return false;
+#endif  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+}
+
+}  // namespace raw_logging_internal
+}  // namespace absl

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/raw_logging.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/raw_logging.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/raw_logging.h
new file mode 100644
index 0000000..a2b7207
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/raw_logging.h
@@ -0,0 +1,137 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation, synchronization, and signal-handling code.
+
+#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+
+#include "absl/base/attributes.h"
+#include "absl/base/log_severity.h"
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+
+// This is similar to LOG(severity) << format..., but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+//   memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit printf-format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+//   ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// This will print an almost standard log line like this to stderr only:
+//   E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+#define ABSL_RAW_LOG(severity, ...)                                            \
+  do {                                                                         \
+    constexpr const char* absl_raw_logging_internal_basename =                 \
+        ::absl::raw_logging_internal::Basename(__FILE__,                       \
+                                               sizeof(__FILE__) - 1);          \
+    ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
+                                         absl_raw_logging_internal_basename,   \
+                                         __LINE__, __VA_ARGS__);               \
+  } while (0)
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// we use only ABSL_RAW_LOG that does not allocate memory.
+// We do not want to provide args list here to encourage this usage:
+//   if (!cond)  ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when not needed.
+#define ABSL_RAW_CHECK(condition, message)                             \
+  do {                                                                 \
+    if (ABSL_PREDICT_FALSE(!(condition))) {                            \
+      ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+    }                                                                  \
+  } while (0)
+
+#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo
+#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning
+#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError
+#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal
+#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
+  ::absl::NormalizeLogSeverity(severity)
+
+namespace absl {
+namespace raw_logging_internal {
+
+// Helper function to implement ABSL_RAW_LOG
+// Logs format... at "severity" level, reporting it
+// as called from file:line.
+// This does not allocate memory or acquire locks.
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+
+// Writes the provided buffer directly to stderr, in a safe, low-level manner.
+//
+// In POSIX this means calling write(), which is async-signal safe and does
+// not malloc.  If the platform supports the SYS_write syscall, we invoke that
+// directly to side-step any libc interception.
+void SafeWriteToStderr(const char *s, size_t len);
+
+// compile-time function to get the "base" filename, that is, the part of
+// a filename after the last "/" or "\" path separator.  The search starts at
+// the end of the string; the second parameter is the length of the string.
+constexpr const char* Basename(const char* fname, int offset) {
+  return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
+             ? fname + offset
+             : Basename(fname, offset - 1);
+}
+
+// For testing only.
+// Returns true if raw logging is fully supported. When it is not
+// fully supported, no messages will be emitted, but a log at FATAL
+// severity will cause an abort.
+//
+// TODO(gfalcon): Come up with a better name for this method.
+bool RawLoggingFullySupported();
+
+// Function type for a raw_logging customization hook for suppressing messages
+// by severity, and for writing custom prefixes on non-suppressed messages.
+//
+// The installed hook is called for every raw log invocation.  The message will
+// be logged to stderr only if the hook returns true.  FATAL errors will cause
+// the process to abort, even if writing to stderr is suppressed.  The hook is
+// also provided with an output buffer, where it can write a custom log message
+// prefix.
+//
+// The raw_logging system does not allocate memory or grab locks.  User-provided
+// hooks must avoid these operations, and must not throw exceptions.
+//
+// 'severity' is the severity level of the message being written.
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// 'buffer' and 'buf_size' are pointers to the buffer and buffer size.  If the
+// hook writes a prefix, it must increment *buffer and decrement *buf_size
+// accordingly.
+using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
+                               int line, char** buffer, int* buf_size);
+
+// Function type for a raw_logging customization hook called to abort a process
+// when a FATAL message is logged.  If the provided AbortHook() returns, the
+// logging system will call abort().
+//
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// The null-terminated logged message lives in the buffer between 'buf_start'
+// and 'buf_end'.  'prefix_end' points to the first non-prefix character of the
+// buffer (as written by the LogPrefixHook).
+using AbortHook = void (*)(const char* file, int line, const char* buf_start,
+                           const char* prefix_end, const char* buf_end);
+
+}  // namespace raw_logging_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
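
As a rough illustration of how the two macros above are meant to be used, here is a
minimal sketch (not part of the diff; it assumes the vendored headers are on the
include path, and note that raw_logging.h is an internal header, so ordinary code
should not include it directly):

    #include "absl/base/internal/raw_logging.h"

    int main() {
      int fd = -1;  // pretend a low-level open() just failed
      // Emits a line like "E0429 193614 example.cc:8] RAW: open failed: -1"
      // to stderr, without allocating memory or acquiring locks.
      ABSL_RAW_LOG(ERROR, "open failed: %d", fd);

      // Aborts with a FATAL raw log if the condition is false.  Note that the
      // second argument is a plain message string, not a printf format.
      ABSL_RAW_CHECK(fd == -1, "unexpected descriptor");
      return 0;
    }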

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/scheduling_mode.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/scheduling_mode.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/scheduling_mode.h
new file mode 100644
index 0000000..1b6497a
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/scheduling_mode.h
@@ -0,0 +1,54 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+namespace absl {
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled.  Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics); allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled.  This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type.  Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default.  SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+  SCHEDULE_KERNEL_ONLY = 0,         // Allow scheduling only by the host OS.
+  SCHEDULE_COOPERATIVE_AND_KERNEL,  // Also allow cooperative scheduling.
+};
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
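
For context, a hedged sketch of how a SchedulingMode is passed through to the SpinLock
constructors added later in this commit (SpinLock is likewise an internal type, so this
is illustrative only):

    #include "absl/base/internal/spinlock.h"

    // Default-constructed locks are cooperative: threads waiting on them may
    // yield to other cooperative threads.
    static absl::base_internal::SpinLock data_lock;

    // A lock that the scheduler implementation itself depends on must not
    // re-enter cooperative scheduling while waiting.
    static absl::base_internal::SpinLock scheduler_lock(
        absl::base_internal::SCHEDULE_KERNEL_ONLY);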

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock.cc
new file mode 100644
index 0000000..28a2059
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock.cc
@@ -0,0 +1,238 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/spinlock.h"
+
+#include <algorithm>
+#include <atomic>
+#include <limits>
+
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
+
+// Description of lock-word:
+//  31..00: [............................3][2][1][0]
+//
+//     [0]: kSpinLockHeld
+//     [1]: kSpinLockCooperative
+//     [2]: kSpinLockDisabledScheduling
+// [31..3]: ONLY kSpinLockSleeper OR
+//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
+//
+// Detailed descriptions:
+//
+// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
+//
+// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
+//          contended iff kSpinLockCooperative is set.
+//
+// Bit [2]: This bit is mutually exclusive with bit [1].  It is used only by a
+//          non-cooperative lock.  When set, indicates that scheduling was
+//          successfully disabled when the lock was acquired.  May be unset,
+//          even if non-cooperative, if a ThreadIdentity did not yet exist at
+//          time of acquisition.
+//
+// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
+//          acquired without contention; however, at least one waiter exists.
+//
+//          Otherwise, bits [31..3] represent the time spent by the current lock
+//          holder to acquire the lock.  There may be outstanding waiter(s).
+
+namespace absl {
+namespace base_internal {
+
+static int adaptive_spin_count = 0;
+
+namespace {
+struct SpinLock_InitHelper {
+  SpinLock_InitHelper() {
+    // On multi-cpu machines, spin for longer before yielding
+    // the processor or sleeping.  Reduces idle time significantly.
+    if (base_internal::NumCPUs() > 1) {
+      adaptive_spin_count = 1000;
+    }
+  }
+};
+
+// Hook into global constructor execution:
+// We do not do adaptive spinning before that,
+// but nothing lock-intensive should be going on at that time.
+static SpinLock_InitHelper init_helper;
+
+ABSL_CONST_INIT static base_internal::AtomicHook<void (*)(const void *lock,
+                                                          int64_t wait_cycles)>
+    submit_profile_data;
+
+}  // namespace
+
+void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
+                                         int64_t wait_cycles)) {
+  submit_profile_data.Store(fn);
+}
+
+// Uncommon constructors.
+SpinLock::SpinLock(base_internal::SchedulingMode mode)
+    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+}
+
+SpinLock::SpinLock(base_internal::LinkerInitialized,
+                   base_internal::SchedulingMode mode) {
+  ABSL_TSAN_MUTEX_CREATE(this, 0);
+  if (IsCooperative(mode)) {
+    InitLinkerInitializedAndCooperative();
+  }
+  // Otherwise, lockword_ is already initialized.
+}
+
+// Static (linker initialized) spinlocks always start life as functional
+// non-cooperative locks.  When their static constructor does run, it will call
+// this initializer to augment the lockword with the cooperative bit.  By
+// actually taking the lock when we do this we avoid the need for an atomic
+// operation in the regular unlock path.
+//
+// SlowLock() must be careful to re-test for this bit so that any outstanding
+// waiters may be upgraded to cooperative status.
+void SpinLock::InitLinkerInitializedAndCooperative() {
+  Lock();
+  lockword_.fetch_or(kSpinLockCooperative, std::memory_order_relaxed);
+  Unlock();
+}
+
+// Monitor the lock to see if its value changes within some time period
+// (adaptive_spin_count loop iterations).  A timestamp indicating
+// when the thread initially started waiting for the lock is passed in via
+// the initial_wait_timestamp value.  The total wait time in cycles for the
+// lock is returned in the wait_cycles parameter.  The last value read
+// from the lock is returned from the method.
+uint32_t SpinLock::SpinLoop(int64_t initial_wait_timestamp,
+                            uint32_t *wait_cycles) {
+  int c = adaptive_spin_count;
+  uint32_t lock_value;
+  do {
+    lock_value = lockword_.load(std::memory_order_relaxed);
+  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
+  uint32_t spin_loop_wait_cycles =
+      EncodeWaitCycles(initial_wait_timestamp, CycleClock::Now());
+  *wait_cycles = spin_loop_wait_cycles;
+
+  return TryLockInternal(lock_value, spin_loop_wait_cycles);
+}
+
+void SpinLock::SlowLock() {
+  // The lock was not obtained initially, so this thread needs to wait for
+  // it.  Record the current timestamp in the local variable wait_start_time
+  // so the total wait time can be stored in the lockword once this thread
+  // obtains the lock.
+  int64_t wait_start_time = CycleClock::Now();
+  uint32_t wait_cycles;
+  uint32_t lock_value = SpinLoop(wait_start_time, &wait_cycles);
+
+  int lock_wait_call_count = 0;
+  while ((lock_value & kSpinLockHeld) != 0) {
+    // If the lock is currently held, but not marked as having a sleeper, mark
+    // it as having a sleeper.
+    if ((lock_value & kWaitTimeMask) == 0) {
+      // Here, just "mark" that the thread is going to sleep.  Don't store the
+      // lock wait time in the lock as that will cause the current lock
+      // owner to think it experienced contention.
+      if (lockword_.compare_exchange_strong(
+              lock_value, lock_value | kSpinLockSleeper,
+              std::memory_order_acquire, std::memory_order_relaxed)) {
+        // Successfully transitioned to kSpinLockSleeper.  Pass
+        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
+        // the last lock_value observed.
+        lock_value |= kSpinLockSleeper;
+      } else if ((lock_value & kSpinLockHeld) == 0) {
+        // Lock is free again, so try and acquire it before sleeping.  The
+        // new lock state will be the number of cycles this thread waited if
+        // this thread obtains the lock.
+        lock_value = TryLockInternal(lock_value, wait_cycles);
+        continue;   // Skip the delay at the end of the loop.
+      }
+    }
+
+    base_internal::SchedulingMode scheduling_mode;
+    if ((lock_value & kSpinLockCooperative) != 0) {
+      scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+    } else {
+      scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+    }
+    // SpinLockDelay() calls into fiber scheduler, we need to see
+    // synchronization there to avoid false positives.
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    // Wait for an OS specific delay.
+    base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
+                                 scheduling_mode);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+    // Spin again after returning from the wait routine to give this thread
+    // some chance of obtaining the lock.
+    lock_value = SpinLoop(wait_start_time, &wait_cycles);
+  }
+}
+
+void SpinLock::SlowUnlock(uint32_t lock_value) {
+  base_internal::SpinLockWake(&lockword_,
+                              false);  // wake waiter if necessary
+
+  // If our acquisition was contended, collect contentionz profile info.  We
+  // reserve a unitary wait time to represent that a waiter exists without our
+  // own acquisition having been contended.
+  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
+    const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    submit_profile_data(this, wait_cycles);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+  }
+}
+
+// We use the upper 29 bits of the lock word to store the time spent waiting to
+// acquire this lock.  This is reported by contentionz profiling.  Since the
+// lower bits of the cycle counter wrap very quickly on high-frequency
+// processors we divide to reduce the granularity to 2^PROFILE_TIMESTAMP_SHIFT
+// sized units.  On a 4 GHz machine this will lose track of wait times greater
+// than (2^29 / 4 GHz) * 128 =~ 17.2 seconds.  Such waits should be extremely rare.
+enum { PROFILE_TIMESTAMP_SHIFT = 7 };
+enum { LOCKWORD_RESERVED_SHIFT = 3 };  // We currently reserve the lower 3 bits.
+
+uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
+                                    int64_t wait_end_time) {
+  static const int64_t kMaxWaitTime =
+      std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT;
+  int64_t scaled_wait_time =
+      (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT;
+
+  // Return a representation of the time spent waiting that can be stored in
+  // the lock word's upper bits.
+  const uint32_t clamped = static_cast<uint32_t>(
+      std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT);
+
+  // bump up value if necessary to avoid returning kSpinLockSleeper.
+  const uint32_t after_spinlock_sleeper =
+     kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT);
+  return clamped == kSpinLockSleeper ? after_spinlock_sleeper : clamped;
+}
+
+uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+  // Cast to uint32_t first to ensure bits [63:32] are cleared.
+  const uint64_t scaled_wait_time =
+      static_cast<uint32_t>(lock_value & kWaitTimeMask);
+  return scaled_wait_time
+      << (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT);
+}
+
+}  // namespace base_internal
+}  // namespace absl
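
To make the wait-cycle encoding concrete, here is a standalone sketch (independent of
the class, with the shift constants and kSpinLockSleeper inlined) of the arithmetic in
EncodeWaitCycles()/DecodeWaitCycles(); the round trip only loses the low 2^7 cycles of
granularity:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    constexpr int kProfileTimestampShift = 7;  // divide cycle counts by 128
    constexpr int kLockwordReservedShift = 3;  // lower 3 bits hold flag bits
    constexpr uint32_t kSpinLockSleeper = 8;

    uint32_t Encode(int64_t start, int64_t end) {
      const int64_t kMaxWaitTime =
          std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
      const int64_t scaled = (end - start) >> kProfileTimestampShift;
      const uint32_t clamped = static_cast<uint32_t>(
          std::min(scaled, kMaxWaitTime) << kLockwordReservedShift);
      // Bump the value so it never collides with the bare sleeper marker.
      return clamped == kSpinLockSleeper
                 ? kSpinLockSleeper + (1u << kLockwordReservedShift)
                 : clamped;
    }

    uint64_t Decode(uint32_t lock_value) {
      const uint64_t scaled = lock_value & 0xFFFFFFF8u;  // drop the 3 flag bits
      return scaled << (kProfileTimestampShift - kLockwordReservedShift);
    }

    int main() {
      const uint32_t encoded = Encode(0, 1000000);
      // Prints: encoded=0xf420 decoded=999936 cycles
      std::printf("encoded=0x%x decoded=%llu cycles\n",
                  static_cast<unsigned>(encoded),
                  static_cast<unsigned long long>(Decode(encoded)));
      return 0;
    }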

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock.h
new file mode 100644
index 0000000..212abc6
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock.h
@@ -0,0 +1,239 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+//  Most users requiring mutual exclusion should use Mutex.
+//  SpinLock is provided for use in three situations:
+//   - for use in code that Mutex itself depends on
+//   - to get a faster fast-path release under low contention (without an
+     atomic read-modify-write). In return, SpinLock has worse behaviour under
+//     contention, which is why Mutex is preferred in most situations.
+//   - for async signal safety (see below)
+
+// SpinLock is async signal safe.  If a spinlock is used within a signal
+// handler, all code that acquires the lock must ensure that the signal cannot
+// arrive while the lock is held.  Typically, this is done by blocking
+// the signal.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+namespace base_internal {
+
+class LOCKABLE SpinLock {
+ public:
+  SpinLock() : lockword_(kSpinLockCooperative) {
+    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+  }
+
+  // Special constructor for use with static SpinLock objects.  E.g.,
+  //
+  //    static SpinLock lock(base_internal::kLinkerInitialized);
+  //
+  // When initialized using this constructor, we depend on the fact
+  // that the linker has already initialized the memory appropriately.
+  // A SpinLock constructed like this can be freely used from global
+  // initializers without worrying about the order in which global
+  // initializers run.
+  explicit SpinLock(base_internal::LinkerInitialized) {
+    // Does nothing; lockword_ is already initialized
+    ABSL_TSAN_MUTEX_CREATE(this, 0);
+  }
+
+  // Constructors that allow non-cooperative spinlocks to be created for use
+  // inside thread schedulers.  Normal clients should not use these.
+  explicit SpinLock(base_internal::SchedulingMode mode);
+  SpinLock(base_internal::LinkerInitialized,
+           base_internal::SchedulingMode mode);
+
+  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+
+  // Acquire this SpinLock.
+  inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+    if (!TryLockImpl()) {
+      SlowLock();
+    }
+    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  }
+
+  // Try to acquire this SpinLock without blocking and return true if the
+  // acquisition was successful.  If the lock was not acquired, false is
+  // returned.  If this SpinLock is free at the time of the call, TryLock
+  // will return true with high probability.
+  inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+    bool res = TryLockImpl();
+    ABSL_TSAN_MUTEX_POST_LOCK(
+        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+        0);
+    return res;
+  }
+
+  // Release this SpinLock, which must be held by the calling thread.
+  inline void Unlock() UNLOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    lockword_.store(lock_value & kSpinLockCooperative,
+                    std::memory_order_release);
+
+    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+      base_internal::SchedulingGuard::EnableRescheduling(true);
+    }
+    if ((lock_value & kWaitTimeMask) != 0) {
+      // Collect contentionz profile info, and speed the wakeup of any waiter.
+      // The wait_cycles value indicates how long this thread spent waiting
+      // for the lock.
+      SlowUnlock(lock_value);
+    }
+    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+  }
+
+  // Determine if the lock is held.  When the lock is held by the invoking
+  // thread, true will always be returned. Intended to be used as
+  // CHECK(lock.IsHeld()).
+  inline bool IsHeld() const {
+    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+  }
+
+ protected:
+  // These should not be exported except for testing.
+
+  // Store number of cycles between wait_start_time and wait_end_time in a
+  // lock value.
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time);
+
+  // Extract number of wait cycles in a lock value.
+  static uint64_t DecodeWaitCycles(uint32_t lock_value);
+
+  // Provide access to protected method above.  Use for testing only.
+  friend struct SpinLockTest;
+
+ private:
+  // lockword_ is used to store the following:
+  //
+  // bit[0] encodes whether a lock is being held.
+  // bit[1] encodes whether a lock uses cooperative scheduling.
+  // bit[2] encodes whether a lock disables scheduling.
+  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+  enum { kSpinLockHeld = 1 };
+  enum { kSpinLockCooperative = 2 };
+  enum { kSpinLockDisabledScheduling = 4 };
+  enum { kSpinLockSleeper = 8 };
+  enum { kWaitTimeMask =                      // Includes kSpinLockSleeper.
+    ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling) };
+
+  // Returns true if the provided scheduling mode is cooperative.
+  static constexpr bool IsCooperative(
+      base_internal::SchedulingMode scheduling_mode) {
+    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  }
+
+  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+  void InitLinkerInitializedAndCooperative();
+  void SlowLock() ABSL_ATTRIBUTE_COLD;
+  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+  uint32_t SpinLoop(int64_t initial_wait_timestamp, uint32_t* wait_cycles);
+
+  inline bool TryLockImpl() {
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+  }
+
+  std::atomic<uint32_t> lockword_;
+
+  SpinLock(const SpinLock&) = delete;
+  SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+class SCOPED_LOCKABLE SpinLockHolder {
+ public:
+  inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
+      : lock_(l) {
+    l->Lock();
+  }
+  inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+  SpinLockHolder(const SpinLockHolder&) = delete;
+  SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+  SpinLock* lock_;
+};
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended.  The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles.  This is thread-safe, but only a single
+// profiler can be registered.  It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+                                         int64_t wait_cycles));
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns last observed value for lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+                                          uint32_t wait_cycles) {
+  if ((lock_value & kSpinLockHeld) != 0) {
+    return lock_value;
+  }
+
+  uint32_t sched_disabled_bit = 0;
+  if ((lock_value & kSpinLockCooperative) == 0) {
+    // For non-cooperative locks we must make sure we mark ourselves as
+    // non-reschedulable before we attempt to CompareAndSwap.
+    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+      sched_disabled_bit = kSpinLockDisabledScheduling;
+    }
+  }
+
+  if (lockword_.compare_exchange_strong(
+          lock_value,
+          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+          std::memory_order_acquire, std::memory_order_relaxed)) {
+  } else {
+    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+  }
+
+  return lock_value;
+}
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_
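
A short usage sketch for the locker object (again illustrative only; as the header
comment says, most users requiring mutual exclusion should use Mutex):

    #include "absl/base/internal/spinlock.h"

    // Guards a tiny critical section; the lock is released on every exit path
    // when the holder goes out of scope.
    int NextId() {
      static absl::base_internal::SpinLock lock;  // default: cooperative
      static int next_id = 0;
      absl::base_internal::SpinLockHolder holder(&lock);
      return ++next_id;
    }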

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_akaros.inc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_akaros.inc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_akaros.inc
new file mode 100644
index 0000000..051c8cf
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_akaros.inc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+    int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+  // In Akaros, one must take care not to call anything that could cause a
+  // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock. Our callers assume we will not call into libraries or other
+  // arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_posix.inc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_posix.inc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_posix.inc
new file mode 100644
index 0000000..0098c1c
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_posix.inc
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+#include <atomic>
+#include <ctime>
+#include <cerrno>
+
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/port.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  int save_errno = errno;
+  if (loop == 0) {
+  } else if (loop == 1) {
+    sched_yield();
+  } else {
+    struct timespec tm;
+    tm.tv_sec = 0;
+    tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+    nanosleep(&tm, nullptr);
+  }
+  errno = save_errno;
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_wait.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_wait.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_wait.cc
new file mode 100644
index 0000000..9f6e991
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/spinlock_wait.cc
@@ -0,0 +1,79 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The OS-specific header included below must provide two calls:
+// base::subtle::SpinLockDelay() and base::subtle::SpinLockWake().
+// See spinlock_wait.h for the specs.
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/spinlock_wait.h"
+
+#if defined(_WIN32)
+#include "absl/base/internal/spinlock_win32.inc"
+#elif defined(__akaros__)
+#include "absl/base/internal/spinlock_akaros.inc"
+#else
+#include "absl/base/internal/spinlock_posix.inc"
+#endif
+
+namespace absl {
+namespace base_internal {
+
+// See spinlock_wait.h for spec.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      base_internal::SchedulingMode scheduling_mode) {
+  for (int loop = 0; ; loop++) {
+    uint32_t v = w->load(std::memory_order_acquire);
+    int i;
+    for (i = 0; i != n && v != trans[i].from; i++) {
+    }
+    if (i == n) {
+      SpinLockDelay(w, v, loop, scheduling_mode);  // no matching transition
+    } else if (trans[i].to == v ||                 // null transition
+               w->compare_exchange_strong(v, trans[i].to,
+                                          std::memory_order_acquire,
+                                          std::memory_order_relaxed)) {
+      if (trans[i].done) return v;
+    }
+  }
+}
+
+static std::atomic<uint64_t> delay_rand;
+
+// Return a suggested delay in nanoseconds for iteration number "loop"
+int SpinLockSuggestedDelayNS(int loop) {
+  // Weak pseudo-random number generator to get some spread between threads
+  // when many are spinning.
+  uint64_t r = delay_rand.load(std::memory_order_relaxed);
+  r = 0x5deece66dLL * r + 0xb;   // numbers from nrand48()
+  delay_rand.store(r, std::memory_order_relaxed);
+
+  r <<= 16;   // 48-bit random number now in top 48-bits.
+  if (loop < 0 || loop > 32) {   // limit loop to 0..32
+    loop = 32;
+  }
+  // loop>>3 cannot exceed 4 because loop cannot exceed 32.
+  // Select top 20..24 bits of lower 48 bits,
+  // giving approximately 0ms to 16ms.
+  // Mean is exponential in loop for first 32 iterations, then 8ms.
+  // The futex path multiplies this by 16, since we expect explicit wakeups
+  // almost always on that path.
+  return static_cast<int>(r >> (44 - (loop >> 3)));
+}
+
+}  // namespace base_internal
+}  // namespace absl
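
Finally, a standalone sketch (not the library function itself) of the backoff
arithmetic in SpinLockSuggestedDelayNS(): a 48-bit linear-congruential step supplies
jitter, and the shift count grows with the loop number, so the mean delay roughly
doubles every 8 iterations and caps near 16ms from iteration 32 onwards:

    #include <cstdint>
    #include <cstdio>

    int SuggestedDelayNS(uint64_t* state, int loop) {
      uint64_t r = *state;
      r = 0x5deece66dULL * r + 0xb;  // same constants as nrand48()
      *state = r;
      r <<= 16;                      // random bits now occupy the top 48 bits
      if (loop < 0 || loop > 32) loop = 32;
      // loop >> 3 is at most 4, so this keeps the top 20..24 of those bits,
      // i.e. roughly 0ms..16ms.
      return static_cast<int>(r >> (44 - (loop >> 3)));
    }

    int main() {
      uint64_t state = 1;
      for (int loop = 2; loop <= 32; loop += 6) {
        std::printf("loop=%2d -> %d ns\n", loop, SuggestedDelayNS(&state, loop));
      }
      return 0;
    }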