Posted to commits@marmotta.apache.org by ss...@apache.org on 2018/04/29 19:36:15 UTC

[46/51] [partial] marmotta git commit: * Replace gtest with upstream version, including LICENSE header. * Include absl library for faster and safer string operations. * Update license headers where needed. * Removed custom code replaced by absl.

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/cycleclock.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/cycleclock.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/cycleclock.cc
new file mode 100644
index 0000000..a742df0
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/cycleclock.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of CycleClock::Frequency.
+//
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+//    http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b.  See also
+//    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+
+#include "absl/base/internal/cycleclock.h"
+
+#include <chrono>  // NOLINT(build/c++11)
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+namespace {
+
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+// Not debug mode and the UnscaledCycleClock frequency is the CPU
+// frequency.  Scale the CycleClock to prevent overflow if someone
+// tries to represent the time as cycles since the Unix epoch.
+static constexpr int32_t kShift = 1;
+#else
+// Not debug mode and the UnscaledCycleClock isn't operating at the
+// raw CPU frequency. There is no need to do any scaling, so don't
+// needlessly sacrifice precision.
+static constexpr int32_t kShift = 0;
+#endif
+#else
+// In debug mode use a different shift to discourage depending on a
+// particular shift value.
+static constexpr int32_t kShift = 2;
+#endif
+
+static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+
+}  // namespace
+
+int64_t CycleClock::Now() {
+  return base_internal::UnscaledCycleClock::Now() >> kShift;
+}
+
+double CycleClock::Frequency() {
+  return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+#else
+
+int64_t CycleClock::Now() {
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
+}
+
+double CycleClock::Frequency() {
+  return 1e9;
+}
+
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
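
Editorial note, not part of the commit: the kShift scaling above trades one bit of precision for overflow headroom. As a rough back-of-the-envelope illustration (the ~3 GHz counter rate and the age of the Unix epoch are assumptions, not values taken from this code), unscaled cycle counts since the epoch already sit within a factor of about two of INT64_MAX, so halving the reported rate in opt builds doubles the headroom:

    // Illustrative arithmetic only; the rate and epoch age are assumed.
    #include <cstdio>

    int main() {
      const double cycles_per_sec = 3.0e9;     // assumed raw TSC rate
      const double secs_since_epoch = 1.5e9;   // roughly 2017
      const double unscaled_cycles = cycles_per_sec * secs_since_epoch;  // ~4.5e18
      const double int64_max = 9.2e18;
      std::printf("headroom: unscaled %.1fx, with kShift=1 %.1fx\n",
                  int64_max / unscaled_cycles,
                  int64_max / (unscaled_cycles / 2));
      return 0;
    }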

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/cycleclock.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/cycleclock.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/cycleclock.h
new file mode 100644
index 0000000..60e9715
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/cycleclock.h
@@ -0,0 +1,77 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: cycleclock.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `CycleClock`, which yields the value and frequency
+// of a cycle counter that increments at a rate that is approximately constant.
+//
+// NOTE:
+//
+// The cycle counter frequency is not necessarily related to the core clock
+// frequency and should not be treated as such. That is, `CycleClock` cycles are
+// not necessarily "CPU cycles" and code should not rely on that behavior, even
+// if experimentally observed.
+//
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately.   If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+
+#include <cstdint>
+
+namespace absl {
+namespace base_internal {
+
+// -----------------------------------------------------------------------------
+// CycleClock
+// -----------------------------------------------------------------------------
+class CycleClock {
+ public:
+  // CycleClock::Now()
+  //
+  // Returns the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // CycleClock::Frequency()
+  //
+  // Returns the amount by which `CycleClock::Now()` increases per second. Note
+  // that this value may not necessarily match the core CPU clock frequency.
+  static double Frequency();
+
+ private:
+  CycleClock() = delete;  // no instances
+  CycleClock(const CycleClock&) = delete;
+  CycleClock& operator=(const CycleClock&) = delete;
+};
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
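
Editorial note, not part of the commit: the Now()/Frequency() pair is typically combined to convert a cycle delta into seconds. A minimal sketch, assuming only the CycleClock API declared above:

    // Sketch only; times a dummy loop using the header above.
    #include <cstdint>
    #include <iostream>

    #include "absl/base/internal/cycleclock.h"

    int main() {
      using absl::base_internal::CycleClock;
      const int64_t start = CycleClock::Now();
      volatile double sink = 0;
      for (int i = 0; i < 1000000; ++i) sink += i;   // work being timed
      const int64_t stop = CycleClock::Now();
      const double seconds = (stop - start) / CycleClock::Frequency();
      std::cout << "elapsed: " << seconds << " s\n";
      return 0;
    }

Because the kShift scaling in cycleclock.cc divides Now() and Frequency() by the same power of two, the computed elapsed time is independent of the scaling.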

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/direct_mmap.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/direct_mmap.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/direct_mmap.h
new file mode 100644
index 0000000..4bd273e
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/direct_mmap.h
@@ -0,0 +1,151 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for directly invoking mmap() via syscall, avoiding the case where
+// mmap() has been locally overridden.
+
+#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+
+#include "absl/base/config.h"
+
+#if ABSL_HAVE_MMAP
+
+#include <sys/mman.h>
+
+#ifdef __linux__
+
+#include <sys/types.h>
+#ifdef __BIONIC__
+#include <sys/syscall.h>
+#else
+#include <syscall.h>
+#endif
+
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <cerrno>
+#include <cstdarg>
+#include <cstdint>
+
+#ifdef __mips__
+// Include definitions of the ABI currently in use.
+#ifdef __BIONIC__
+// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
+// definitions we need.
+#include <asm/sgidefs.h>
+#else
+#include <sgidefs.h>
+#endif  // __BIONIC__
+#endif  // __mips__
+
+// SYS_mmap and SYS_munmap are not defined in Android.
+#ifdef __BIONIC__
+extern "C" void* __mmap2(void*, size_t, int, int, int, long);
+#if defined(__NR_mmap) && !defined(SYS_mmap)
+#define SYS_mmap __NR_mmap
+#endif
+#ifndef SYS_munmap
+#define SYS_munmap __NR_munmap
+#endif
+#endif  // __BIONIC__
+
+namespace absl {
+namespace base_internal {
+
+// Platform specific logic extracted from
+// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+                        off64_t offset) noexcept {
+#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
+    (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) ||                   \
+    (defined(__PPC__) && !defined(__PPC64__)) ||                             \
+    (defined(__s390__) && !defined(__s390x__))
+  // On these architectures, implement mmap with mmap2.
+  static int pagesize = 0;
+  if (pagesize == 0) {
+    pagesize = getpagesize();
+  }
+  if (offset < 0 || offset % pagesize != 0) {
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+#ifdef __BIONIC__
+  // SYS_mmap2 has problems on Android API level <= 16.
+  // Workaround by invoking __mmap2() instead.
+  return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+#else
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap2, start, length, prot, flags, fd,
+              static_cast<off_t>(offset / pagesize)));
+#endif
+#elif defined(__s390x__)
+  // On s390x, mmap() arguments are passed in memory.
+  uint32_t buf[6] = {
+      reinterpret_cast<uint32_t>(start), static_cast<uint32_t>(length),
+      static_cast<uint32_t>(prot),       static_cast<uint32_t>(flags),
+      static_cast<uint32_t>(fd),         static_cast<uint32_t>(offset)};
+  return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+// sign extension.  We can't cast pointers directly because those are
+// 32 bits, and gcc will dump ugly warnings about casting from a pointer
+// to an integer of a different size. We also need to make sure __off64_t
+// isn't truncated to 32-bits under x32.
+#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+              MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+              MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+#undef MMAP_SYSCALL_ARG
+#else  // Remaining 64-bit architectures.
+  static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+  return static_cast<int>(syscall(SYS_munmap, start, length));
+}
+
+}  // namespace base_internal
+}  // namespace absl
+
+#else  // !__linux__
+
+// For non-linux platforms where we have mmap, just dispatch directly to the
+// actual mmap()/munmap() methods.
+
+namespace absl {
+namespace base_internal {
+
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+                        off_t offset) {
+  return mmap(start, length, prot, flags, fd, offset);
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+  return munmap(start, length);
+}
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // __linux__
+
+#endif  // ABSL_HAVE_MMAP
+
+#endif  // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
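
Editorial note, not part of the commit: a hedged usage sketch of the raw-syscall wrappers above, mapping and unmapping one anonymous page while sidestepping any locally overridden mmap(). It assumes a Linux target where ABSL_HAVE_MMAP is set:

    // Sketch only; error handling kept minimal.
    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    #include "absl/base/internal/direct_mmap.h"

    int main() {
      const size_t kLength = 4096;
      void* p = absl::base_internal::DirectMmap(
          nullptr, kLength, PROT_READ | PROT_WRITE,
          MAP_PRIVATE | MAP_ANONYMOUS, /*fd=*/-1, /*offset=*/0);
      if (p == MAP_FAILED) {
        std::perror("DirectMmap");
        return 1;
      }
      static_cast<char*>(p)[0] = 42;                          // touch the page
      return absl::base_internal::DirectMunmap(p, kLength);   // 0 on success
    }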

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/endian.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/endian.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/endian.h
new file mode 100644
index 0000000..edc10f1
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/endian.h
@@ -0,0 +1,269 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+// The following guarantees declaration of the byte swap functions
+#ifdef _MSC_VER
+#include <stdlib.h>  // NOLINT(build/include)
+#elif defined(__APPLE__)
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#elif defined(__FreeBSD__)
+#include <sys/endian.h>
+#elif defined(__GLIBC__)
+#include <byteswap.h>  // IWYU pragma: export
+#endif
+
+#include <cstdint>
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/port.h"
+
+namespace absl {
+
+// Use compiler byte-swapping intrinsics if they are available.  32-bit
+// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
+// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
+// For simplicity, we enable them all only for GCC 4.8.0 or later.
+#if defined(__clang__) || \
+    (defined(__GNUC__) && \
+     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return __builtin_bswap64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return __builtin_bswap32(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return __builtin_bswap16(host_int);
+}
+
+#elif defined(_MSC_VER)
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return _byteswap_uint64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return _byteswap_ulong(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return _byteswap_ushort(host_int);
+}
+
+#elif defined(__APPLE__)
+inline uint64_t gbswap_64(uint64_t host_int) { return OSSwapInt64(host_int); }
+inline uint32_t gbswap_32(uint32_t host_int) { return OSSwapInt32(host_int); }
+inline uint16_t gbswap_16(uint16_t host_int) { return OSSwapInt16(host_int); }
+
+#else
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
+  // Adapted from /usr/include/byteswap.h.  Not available on Mac.
+  if (__builtin_constant_p(host_int)) {
+    return __bswap_constant_64(host_int);
+  } else {
+    register uint64_t result;
+    __asm__("bswap %0" : "=r"(result) : "0"(host_int));
+    return result;
+  }
+#elif defined(__GLIBC__)
+  return bswap_64(host_int);
+#else
+  return (((host_int & uint64_t{0xFF}) << 56) |
+          ((host_int & uint64_t{0xFF00}) << 40) |
+          ((host_int & uint64_t{0xFF0000}) << 24) |
+          ((host_int & uint64_t{0xFF000000}) << 8) |
+          ((host_int & uint64_t{0xFF00000000}) >> 8) |
+          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif  // bswap_64
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_32(host_int);
+#else
+  return (((host_int & 0xFF) << 24) | ((host_int & 0xFF00) << 8) |
+          ((host_int & 0xFF0000) >> 8) | ((host_int & 0xFF000000) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_16(host_int);
+#else
+  return static_cast<uint16_t>(((host_int & 0xFF) << 8) |
+                               ((host_int & 0xFF00) >> 8));
+#endif
+}
+
+#endif  // intrinsics available
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Definitions for ntohl etc. that don't require us to include
+// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
+// than just #defining them because in debug mode, gcc doesn't
+// correctly handle the (rather involved) definitions of bswap_32.
+// gcc guarantees that inline functions are as fast as macros, so
+// this isn't a performance hit.
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// These definitions are simpler on big-endian machines
+// These are functions instead of macros to avoid self-assignment warnings
+// on calls such as "i = ghtonl(i);".  This also provides type checking.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error \
+    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+       "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif  // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order
+//
+// Load/Store methods are alignment safe
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in little-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace little_endian
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order)
+//
+// Load/Store methods are alignment safe
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace big_endian
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ENDIAN_H_
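
Editorial note, not part of the commit: a small sketch of the little_endian helpers above. Storing and loading a 32-bit value at an unaligned offset yields the same byte layout regardless of the host's byte order:

    // Minimal sketch, assuming only the endian.h helpers shown above.
    #include <cstdint>
    #include <cstdio>

    #include "absl/base/internal/endian.h"

    int main() {
      unsigned char buf[8] = {0};
      absl::little_endian::Store32(buf + 1, 0x01234567);  // unaligned store is fine
      const uint32_t v = absl::little_endian::Load32(buf + 1);
      // buf[1] is 0x67 on both little- and big-endian hosts.
      std::printf("value=%08x first byte=%02x\n", v, buf[1]);
      return 0;
    }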

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/endian_test.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/endian_test.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/endian_test.cc
new file mode 100644
index 0000000..f3ff4b3
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/endian_test.cc
@@ -0,0 +1,279 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/endian.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <random>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+namespace {
+
+const uint64_t kInitialNumber{0x0123456789abcdef};
+const uint64_t k64Value{kInitialNumber};
+const uint32_t k32Value{0x01234567};
+const uint16_t k16Value{0x0123};
+const int kNumValuesToTest = 1000000;
+const int kRandomSeed = 12345;
+
+#ifdef ABSL_IS_BIG_ENDIAN
+const uint64_t kInitialInNetworkOrder{kInitialNumber};
+const uint64_t k64ValueLE{0xefcdab8967452301};
+const uint32_t k32ValueLE{0x67452301};
+const uint16_t k16ValueLE{0x2301};
+const uint8_t k8ValueLE{k8Value};
+const uint64_t k64IValueLE{0xefcdab89674523a1};
+const uint32_t k32IValueLE{0x67452391};
+const uint16_t k16IValueLE{0x85ff};
+const uint8_t k8IValueLE{0xff};
+const uint64_t kDoubleValueLE{0x6e861bf0f9210940};
+const uint32_t kFloatValueLE{0xd00f4940};
+const uint8_t kBoolValueLE{0x1};
+
+const uint64_t k64ValueBE{kInitialNumber};
+const uint32_t k32ValueBE{k32Value};
+const uint16_t k16ValueBE{k16Value};
+const uint8_t k8ValueBE{k8Value};
+const uint64_t k64IValueBE{0xa123456789abcdef};
+const uint32_t k32IValueBE{0x91234567};
+const uint16_t k16IValueBE{0xff85};
+const uint8_t k8IValueBE{0xff};
+const uint64_t kDoubleValueBE{0x400921f9f01b866e};
+const uint32_t kFloatValueBE{0x40490fd0};
+const uint8_t kBoolValueBE{0x1};
+#elif defined ABSL_IS_LITTLE_ENDIAN
+const uint64_t kInitialInNetworkOrder{0xefcdab8967452301};
+const uint64_t k64ValueLE{kInitialNumber};
+const uint32_t k32ValueLE{k32Value};
+const uint16_t k16ValueLE{k16Value};
+
+const uint64_t k64ValueBE{0xefcdab8967452301};
+const uint32_t k32ValueBE{0x67452301};
+const uint16_t k16ValueBE{0x2301};
+#endif
+
+template<typename T>
+std::vector<T> GenerateAllValuesForType() {
+  std::vector<T> result;
+  T next = std::numeric_limits<T>::min();
+  while (true) {
+    result.push_back(next);
+    if (next == std::numeric_limits<T>::max()) {
+      return result;
+    }
+    ++next;
+  }
+}
+
+template<typename T>
+std::vector<T> GenerateRandomIntegers(size_t numValuesToTest) {
+  std::vector<T> result;
+  std::mt19937_64 rng(kRandomSeed);
+  for (size_t i = 0; i < numValuesToTest; ++i) {
+    result.push_back(rng());
+  }
+  return result;
+}
+
+void ManualByteSwap(char* bytes, int length) {
+  if (length == 1)
+    return;
+
+  EXPECT_EQ(0, length % 2);
+  for (int i = 0; i < length / 2; ++i) {
+    int j = (length - 1) - i;
+    using std::swap;
+    swap(bytes[i], bytes[j]);
+  }
+}
+
+template<typename T>
+inline T UnalignedLoad(const char* p) {
+  static_assert(
+      sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
+      "Unexpected type size");
+
+  switch (sizeof(T)) {
+    case 1: return *reinterpret_cast<const T*>(p);
+    case 2:
+      return ABSL_INTERNAL_UNALIGNED_LOAD16(p);
+    case 4:
+      return ABSL_INTERNAL_UNALIGNED_LOAD32(p);
+    case 8:
+      return ABSL_INTERNAL_UNALIGNED_LOAD64(p);
+    default:
+      // Suppresses invalid "not all control paths return a value" on MSVC
+      return {};
+  }
+}
+
+template <typename T, typename ByteSwapper>
+static void GBSwapHelper(const std::vector<T>& host_values_to_test,
+                         const ByteSwapper& byte_swapper) {
+  // Test byte_swapper against a manual byte swap.
+  for (typename std::vector<T>::const_iterator it = host_values_to_test.begin();
+       it != host_values_to_test.end(); ++it) {
+    T host_value = *it;
+
+    char actual_value[sizeof(host_value)];
+    memcpy(actual_value, &host_value, sizeof(host_value));
+    byte_swapper(actual_value);
+
+    char expected_value[sizeof(host_value)];
+    memcpy(expected_value, &host_value, sizeof(host_value));
+    ManualByteSwap(expected_value, sizeof(host_value));
+
+    ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value)))
+        << "Swap output for 0x" << std::hex << host_value << " does not match. "
+        << "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; "
+        << "actual: 0x" <<  UnalignedLoad<T>(actual_value);
+  }
+}
+
+void Swap16(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(
+      bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes)));
+}
+
+void Swap32(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(
+      bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes)));
+}
+
+void Swap64(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(
+      bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes)));
+}
+
+TEST(EndianessTest, Uint16) {
+  GBSwapHelper(GenerateAllValuesForType<uint16_t>(), &Swap16);
+}
+
+TEST(EndianessTest, Uint32) {
+  GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32);
+}
+
+TEST(EndianessTest, Uint64) {
+  GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64);
+}
+
+TEST(EndianessTest, ghtonll_gntohll) {
+  // Test that absl::ghtonl compiles correctly
+  uint32_t test = 0x01234567;
+  EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test);
+
+  uint64_t comp = absl::ghtonll(kInitialNumber);
+  EXPECT_EQ(comp, kInitialInNetworkOrder);
+  comp = absl::gntohll(kInitialInNetworkOrder);
+  EXPECT_EQ(comp, kInitialNumber);
+
+  // Test that htonll and ntohll are each other's inverse functions on a
+  // somewhat assorted batch of numbers. 37 is chosen because it is not a
+  // particularly "nice" number in base 2.
+  uint64_t value = 1;
+  for (int i = 0; i < 100; ++i) {
+    comp = absl::ghtonll(absl::gntohll(value));
+    EXPECT_EQ(value, comp);
+    comp = absl::gntohll(absl::ghtonll(value));
+    EXPECT_EQ(value, comp);
+    value *= 37;
+  }
+}
+
+TEST(EndianessTest, little_endian) {
+  // Check little_endian uint16_t.
+  uint64_t comp = little_endian::FromHost16(k16Value);
+  EXPECT_EQ(comp, k16ValueLE);
+  comp = little_endian::ToHost16(k16ValueLE);
+  EXPECT_EQ(comp, k16Value);
+
+  // Check little_endian uint32_t.
+  comp = little_endian::FromHost32(k32Value);
+  EXPECT_EQ(comp, k32ValueLE);
+  comp = little_endian::ToHost32(k32ValueLE);
+  EXPECT_EQ(comp, k32Value);
+
+  // Check little_endian uint64_t.
+  comp = little_endian::FromHost64(k64Value);
+  EXPECT_EQ(comp, k64ValueLE);
+  comp = little_endian::ToHost64(k64ValueLE);
+  EXPECT_EQ(comp, k64Value);
+
+  // Check little-endian Load and store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  little_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueLE);
+  comp = little_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  little_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueLE);
+  comp = little_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  little_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueLE);
+  comp = little_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+}
+
+TEST(EndianessTest, big_endian) {
+  // Check big-endian Load and store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  unsigned char buffer[10];
+  big_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  uint64_t comp = big_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+
+  big_endian::Store16(buffer + 1, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  comp = big_endian::Load16(buffer + 1);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(buffer + 1, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(buffer + 1);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(buffer + 1, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(buffer + 1);
+  EXPECT_EQ(comp, k64Value);
+}
+
+}  // namespace
+}  // namespace absl

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_safety_testing.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_safety_testing.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_safety_testing.cc
new file mode 100644
index 0000000..c6f7c7c
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_safety_testing.cc
@@ -0,0 +1,41 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exception_safety_testing.h"
+
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+
+exceptions_internal::NoThrowTag no_throw_ctor;
+exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+namespace exceptions_internal {
+
+int countdown = -1;
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc) {
+  if (countdown-- == 0) {
+    if (throw_bad_alloc) throw TestBadAllocException(msg);
+    throw TestException(msg);
+  }
+}
+
+testing::AssertionResult FailureMessage(const TestException& e,
+                                        int countdown) noexcept {
+  return testing::AssertionFailure() << "Exception thrown from " << e.what();
+}
+}  // namespace exceptions_internal
+}  // namespace absl
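
Editorial note, not part of the commit: the countdown mechanism above lets the N-th instrumented call site throw. SetCountdown(n) arms it, each MaybeThrow() decrements, and the call that observes zero throws a TestException. A minimal sketch, assuming the SetCountdown()/UnsetCountdown() declarations from exception_safety_testing.h, which follows next in this commit:

    // Sketch only; links against the testing helpers added by this commit.
    #include <iostream>

    #include "absl/base/internal/exception_safety_testing.h"

    int main() {
      absl::exceptions_internal::SetCountdown(2);  // the third call throws
      try {
        for (int i = 0; i < 5; ++i) {
          absl::exceptions_internal::MaybeThrow("call site");
          std::cout << "call " << i << " did not throw\n";
        }
      } catch (const absl::exceptions_internal::TestException& e) {
        std::cout << "threw at: " << e.what() << "\n";
      }
      absl::exceptions_internal::UnsetCountdown();
      return 0;
    }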

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_safety_testing.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_safety_testing.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_safety_testing.h
new file mode 100644
index 0000000..c014fb3
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_safety_testing.h
@@ -0,0 +1,997 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Utilities for testing exception-safety
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <iosfwd>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/pretty_function.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/substitute.h"
+#include "absl/types/optional.h"
+
+namespace absl {
+
+// A configuration enum for Throwing*.  Operations whose flags are set will
+// throw; everything else won't.  This isn't meant to be exhaustive; more
+// flags can always be added in the future.
+enum class NoThrow : uint8_t {
+  kNone = 0,
+  kMoveCtor = 1,
+  kMoveAssign = 1 << 1,
+  kAllocation = 1 << 2,
+  kIntCtor = 1 << 3,
+  kNoThrow = static_cast<uint8_t>(-1)
+};
+
+constexpr NoThrow operator|(NoThrow a, NoThrow b) {
+  using T = absl::underlying_type_t<NoThrow>;
+  return static_cast<NoThrow>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr NoThrow operator&(NoThrow a, NoThrow b) {
+  using T = absl::underlying_type_t<NoThrow>;
+  return static_cast<NoThrow>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+namespace exceptions_internal {
+struct NoThrowTag {};
+struct StrongGuaranteeTagType {};
+
+constexpr bool ThrowingAllowed(NoThrow flags, NoThrow flag) {
+  return !static_cast<bool>(flags & flag);
+}
+
+// A simple exception class.  We throw this so that test code can catch
+// exceptions specifically thrown by ThrowingValue.
+class TestException {
+ public:
+  explicit TestException(absl::string_view msg) : msg_(msg) {}
+  virtual ~TestException() {}
+  virtual const char* what() const noexcept { return msg_.c_str(); }
+
+ private:
+  std::string msg_;
+};
+
+// TestBadAllocException exists because allocation functions must throw an
+// exception which can be caught by a handler of std::bad_alloc.  We use a child
+// class of std::bad_alloc so we can customise the error message, and also
+// derive from TestException so we don't accidentally end up catching an actual
+// bad_alloc exception in TestExceptionSafety.
+class TestBadAllocException : public std::bad_alloc, public TestException {
+ public:
+  explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {}
+  using TestException::what;
+};
+
+extern int countdown;
+
+// Allows the countdown variable to be set manually (defaulting to the initial
+// value of 0)
+inline void SetCountdown(int i = 0) { countdown = i; }
+// Sets the countdown to the terminal value -1
+inline void UnsetCountdown() { SetCountdown(-1); }
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false);
+
+testing::AssertionResult FailureMessage(const TestException& e,
+                                        int countdown) noexcept;
+
+class ConstructorTracker;
+
+class TrackedObject {
+ public:
+  TrackedObject(const TrackedObject&) = delete;
+  TrackedObject(TrackedObject&&) = delete;
+
+ protected:
+  explicit TrackedObject(const char* child_ctor) {
+    if (!GetInstanceMap().emplace(this, child_ctor).second) {
+      ADD_FAILURE() << "Object at address " << static_cast<void*>(this)
+                    << " re-constructed in ctor " << child_ctor;
+    }
+  }
+
+  ~TrackedObject() noexcept {
+    if (GetInstanceMap().erase(this) == 0) {
+      ADD_FAILURE() << "Object at address " << static_cast<void*>(this)
+                    << " destroyed improperly";
+    }
+  }
+
+ private:
+  using InstanceMap = std::unordered_map<TrackedObject*, absl::string_view>;
+  static InstanceMap& GetInstanceMap() {
+    static auto* instance_map = new InstanceMap();
+    return *instance_map;
+  }
+
+  friend class ConstructorTracker;
+};
+
+// Inspects the constructions and destructions of anything inheriting from
+// TrackedObject. This allows us to safely "leak" TrackedObjects, as
+// ConstructorTracker will destroy everything left over in its destructor.
+class ConstructorTracker {
+ public:
+  explicit ConstructorTracker(int c)
+      : init_count_(c), init_instances_(TrackedObject::GetInstanceMap()) {}
+  ~ConstructorTracker() {
+    auto& cur_instances = TrackedObject::GetInstanceMap();
+    for (auto it = cur_instances.begin(); it != cur_instances.end();) {
+      if (init_instances_.count(it->first) == 0) {
+        ADD_FAILURE() << "Object at address " << static_cast<void*>(it->first)
+                      << " constructed from " << it->second
+                      << " where the exception countdown was set to "
+                      << init_count_ << " was not destroyed";
+        // Erasing an item inside an unordered_map invalidates the existing
+        // iterator. A new one is returned for iteration to continue.
+        it = cur_instances.erase(it);
+      } else {
+        ++it;
+      }
+    }
+  }
+
+ private:
+  int init_count_;
+  TrackedObject::InstanceMap init_instances_;
+};
+
+template <typename Factory, typename Operation, typename Invariant>
+absl::optional<testing::AssertionResult> TestSingleInvariantAtCountdownImpl(
+    const Factory& factory, const Operation& operation, int count,
+    const Invariant& invariant) {
+  auto t_ptr = factory();
+  absl::optional<testing::AssertionResult> current_res;
+  SetCountdown(count);
+  try {
+    operation(t_ptr.get());
+  } catch (const exceptions_internal::TestException& e) {
+    current_res.emplace(invariant(t_ptr.get()));
+    if (!current_res.value()) {
+      *current_res << e.what() << " failed invariant check";
+    }
+  }
+  UnsetCountdown();
+  return current_res;
+}
+
+template <typename Factory, typename Operation>
+absl::optional<testing::AssertionResult> TestSingleInvariantAtCountdownImpl(
+    const Factory& factory, const Operation& operation, int count,
+    StrongGuaranteeTagType) {
+  using TPtr = typename decltype(factory())::pointer;
+  auto t_is_strong = [&](TPtr t) { return *t == *factory(); };
+  return TestSingleInvariantAtCountdownImpl(factory, operation, count,
+                                            t_is_strong);
+}
+
+template <typename Factory, typename Operation, typename Invariant>
+int TestSingleInvariantAtCountdown(
+    const Factory& factory, const Operation& operation, int count,
+    const Invariant& invariant,
+    absl::optional<testing::AssertionResult>* reduced_res) {
+  // If reduced_res is empty, it means the current call to
+  // TestSingleInvariantAtCountdown(...) is the first test being run so we do
+  // want to run it. Alternatively, if it's not empty (meaning a previous test
+  // has run) we want to check if it passed. If the previous test did pass, we
+  // want to continue running tests so we do want to run the current one. If it
+  // failed, we want to short circuit so as not to overwrite the AssertionResult
+  // output. If that's the case, we do not run the current test and instead we
+  // simply return.
+  if (!reduced_res->has_value() || reduced_res->value()) {
+    *reduced_res = TestSingleInvariantAtCountdownImpl(factory, operation, count,
+                                                      invariant);
+  }
+  return 0;
+}
+
+template <typename Factory, typename Operation, typename... Invariants>
+inline absl::optional<testing::AssertionResult> TestAllInvariantsAtCountdown(
+    const Factory& factory, const Operation& operation, int count,
+    const Invariants&... invariants) {
+  absl::optional<testing::AssertionResult> reduced_res;
+
+  // Run each checker, short circuiting after the first failure
+  int dummy[] = {
+      0, (TestSingleInvariantAtCountdown(factory, operation, count, invariants,
+                                         &reduced_res))...};
+  static_cast<void>(dummy);
+  return reduced_res;
+}
+
+}  // namespace exceptions_internal
+
+extern exceptions_internal::NoThrowTag no_throw_ctor;
+extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+// A test class which is convertible to bool.  The conversion can be
+// instrumented to throw at a controlled time.
+class ThrowingBool {
+ public:
+  ThrowingBool(bool b) noexcept : b_(b) {}  // NOLINT(runtime/explicit)
+  operator bool() const {                   // NOLINT
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return b_;
+  }
+
+ private:
+  bool b_;
+};
+
+// A testing class instrumented to throw an exception at a controlled time.
+//
+// ThrowingValue implements a slightly relaxed version of the Regular concept --
+// that is, it's a value type with the expected semantics.  It also implements
+// arithmetic operations.  It doesn't implement member and pointer operators
+// like operator-> or operator[].
+//
+// ThrowingValue can be instrumented to have certain operations be noexcept by
+// using compile-time bitfield flag template arguments.  That is, to make a
+// ThrowingValue which has a noexcept move constructor and noexcept move
+// assignment, use
+// ThrowingValue<absl::NoThrow::kMoveCtor | absl::NoThrow::kMoveAssign>.
+template <NoThrow Flags = NoThrow::kNone>
+class ThrowingValue : private exceptions_internal::TrackedObject {
+ public:
+  ThrowingValue() : TrackedObject(ABSL_PRETTY_FUNCTION) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = 0;
+  }
+
+  ThrowingValue(const ThrowingValue& other)
+      : TrackedObject(ABSL_PRETTY_FUNCTION) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = other.dummy_;
+  }
+
+  ThrowingValue(ThrowingValue&& other) noexcept(
+      !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kMoveCtor))
+      : TrackedObject(ABSL_PRETTY_FUNCTION) {
+    if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kMoveCtor)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+  }
+
+  explicit ThrowingValue(int i) noexcept(
+      !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kIntCtor))
+      : TrackedObject(ABSL_PRETTY_FUNCTION) {
+    if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kIntCtor)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = i;
+  }
+
+  ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept
+      : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(i) {}
+
+  // absl expects nothrow destructors
+  ~ThrowingValue() noexcept = default;
+
+  ThrowingValue& operator=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator=(ThrowingValue&& other) noexcept(
+      !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kMoveAssign)) {
+    if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kMoveAssign)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+    return *this;
+  }
+
+  // Arithmetic Operators
+  ThrowingValue operator+(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ + other.dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue operator+() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue operator-(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ - other.dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue operator-() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(-dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue& operator++() {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    ++dummy_;
+    return *this;
+  }
+
+  ThrowingValue operator++(int) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    auto out = ThrowingValue(dummy_, no_throw_ctor);
+    ++dummy_;
+    return out;
+  }
+
+  ThrowingValue& operator--() {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    --dummy_;
+    return *this;
+  }
+
+  ThrowingValue operator--(int) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    auto out = ThrowingValue(dummy_, no_throw_ctor);
+    --dummy_;
+    return out;
+  }
+
+  ThrowingValue operator*(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ * other.dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue operator/(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ / other.dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue operator%(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ % other.dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue operator<<(int shift) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ << shift, no_throw_ctor);
+  }
+
+  ThrowingValue operator>>(int shift) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ >> shift, no_throw_ctor);
+  }
+
+  // Comparison Operators
+  // NOTE: We use `ThrowingBool` instead of `bool` because most STL
+  // types/containers require T to be convertible to bool.
+  friend ThrowingBool operator==(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ == b.dummy_;
+  }
+  friend ThrowingBool operator!=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ != b.dummy_;
+  }
+  friend ThrowingBool operator<(const ThrowingValue& a,
+                                const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ < b.dummy_;
+  }
+  friend ThrowingBool operator<=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ <= b.dummy_;
+  }
+  friend ThrowingBool operator>(const ThrowingValue& a,
+                                const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ > b.dummy_;
+  }
+  friend ThrowingBool operator>=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ >= b.dummy_;
+  }
+
+  // Logical Operators
+  ThrowingBool operator!() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return !dummy_;
+  }
+
+  ThrowingBool operator&&(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return dummy_ && other.dummy_;
+  }
+
+  ThrowingBool operator||(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return dummy_ || other.dummy_;
+  }
+
+  // Bitwise Logical Operators
+  ThrowingValue operator~() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(~dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue operator&(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ & other.dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue operator|(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ | other.dummy_, no_throw_ctor);
+  }
+
+  ThrowingValue operator^(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ ^ other.dummy_, no_throw_ctor);
+  }
+
+  // Compound Assignment operators
+  ThrowingValue& operator+=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ += other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator-=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ -= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator*=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ *= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator/=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ /= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator%=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ %= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator&=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ &= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator|=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ |= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator^=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ ^= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator<<=(int shift) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ <<= shift;
+    return *this;
+  }
+
+  ThrowingValue& operator>>=(int shift) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ >>= shift;
+    return *this;
+  }
+
+  // Pointer operators
+  void operator&() const = delete;  // NOLINT(runtime/operator)
+
+  // Stream operators
+  friend std::ostream& operator<<(std::ostream& os, const ThrowingValue&) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return os;
+  }
+
+  friend std::istream& operator>>(std::istream& is, const ThrowingValue&) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return is;
+  }
+
+  // Memory management operators
+  // Args... allows us to overload regular and placement new in one shot
+  template <typename... Args>
+  static void* operator new(size_t s, Args&&... args) noexcept(
+      !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kAllocation)) {
+    if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kAllocation)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new(s, std::forward<Args>(args)...);
+  }
+
+  template <typename... Args>
+  static void* operator new[](size_t s, Args&&... args) noexcept(
+      !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kAllocation)) {
+    if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kAllocation)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new[](s, std::forward<Args>(args)...);
+  }
+
+  // Abseil doesn't support throwing overloaded operator delete.  These are
+  // provided so a throwing operator-new can clean up after itself.
+  //
+  // We provide both regular and templated operator delete because if only the
+  // templated version were provided (as we did with operator new), the compiler
+  // would have no way of knowing which overload of operator delete to call. See
+  // http://en.cppreference.com/w/cpp/memory/new/operator_delete and
+  // http://en.cppreference.com/w/cpp/language/delete for the gory details.
+  void operator delete(void* p) noexcept { ::operator delete(p); }
+
+  template <typename... Args>
+  void operator delete(void* p, Args&&... args) noexcept {
+    ::operator delete(p, std::forward<Args>(args)...);
+  }
+
+  void operator delete[](void* p) noexcept { return ::operator delete[](p); }
+
+  template <typename... Args>
+  void operator delete[](void* p, Args&&... args) noexcept {
+    return ::operator delete[](p, std::forward<Args>(args)...);
+  }
+
+  // Non-standard access to the actual contained value.  No need for this to
+  // throw.
+  int& Get() noexcept { return dummy_; }
+  const int& Get() const noexcept { return dummy_; }
+
+ private:
+  int dummy_;
+};
+// Although not exception-related, explicitly delete the comma operator to
+// make sure we don't use it on user-supplied types.
+template <NoThrow N, typename T>
+void operator,(const ThrowingValue<N>& ef, T&& t) = delete;
+template <NoThrow N, typename T>
+void operator,(T&& t, const ThrowingValue<N>& ef) = delete;
+
+// An allocator type which is instrumented to throw at a controlled time, or
+// not to throw, using NoThrow.  The supported settings are the default, in
+// which every function that a conforming allocator is allowed to let throw
+// may throw, and NoThrow::kNoThrow, in which nothing throws, in line with the
+// ABSL_ALLOCATOR_THROWS configuration macro.
+template <typename T, NoThrow Flags = NoThrow::kNone>
+class ThrowingAllocator : private exceptions_internal::TrackedObject {
+  static_assert(Flags == NoThrow::kNone || Flags == NoThrow::kNoThrow,
+                "Invalid flag");
+
+ public:
+  using pointer = T*;
+  using const_pointer = const T*;
+  using reference = T&;
+  using const_reference = const T&;
+  using void_pointer = void*;
+  using const_void_pointer = const void*;
+  using value_type = T;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+
+  using is_nothrow = std::integral_constant<bool, Flags == NoThrow::kNoThrow>;
+  using propagate_on_container_copy_assignment = std::true_type;
+  using propagate_on_container_move_assignment = std::true_type;
+  using propagate_on_container_swap = std::true_type;
+  using is_always_equal = std::false_type;
+
+  ThrowingAllocator() : TrackedObject(ABSL_PRETTY_FUNCTION) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = std::make_shared<const int>(next_id_++);
+  }
+
+  template <typename U>
+  ThrowingAllocator(  // NOLINT
+      const ThrowingAllocator<U, Flags>& other) noexcept
+      : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(other.State()) {}
+
+  // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of
+  // allocator shall not exit via an exception, thus they are marked noexcept.
+  ThrowingAllocator(const ThrowingAllocator& other) noexcept
+      : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(other.State()) {}
+
+  template <typename U>
+  ThrowingAllocator(  // NOLINT
+      ThrowingAllocator<U, Flags>&& other) noexcept
+      : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(std::move(other.State())) {}
+
+  ThrowingAllocator(ThrowingAllocator&& other) noexcept
+      : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(std::move(other.State())) {}
+
+  ~ThrowingAllocator() noexcept = default;
+
+  ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept {
+    dummy_ = other.State();
+    return *this;
+  }
+
+  template <typename U>
+  ThrowingAllocator& operator=(
+      const ThrowingAllocator<U, Flags>& other) noexcept {
+    dummy_ = other.State();
+    return *this;
+  }
+
+  template <typename U>
+  ThrowingAllocator& operator=(ThrowingAllocator<U, Flags>&& other) noexcept {
+    dummy_ = std::move(other.State());
+    return *this;
+  }
+
+  template <typename U>
+  struct rebind {
+    using other = ThrowingAllocator<U, Flags>;
+  };
+
+  pointer allocate(size_type n) noexcept(
+      !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+    ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+    return static_cast<pointer>(::operator new(n * sizeof(T)));
+  }
+  pointer allocate(size_type n, const_void_pointer) noexcept(
+      !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+    return allocate(n);
+  }
+
+  void deallocate(pointer ptr, size_type) noexcept {
+    ReadState();
+    ::operator delete(static_cast<void*>(ptr));
+  }
+
+  template <typename U, typename... Args>
+  void construct(U* ptr, Args&&... args) noexcept(
+      !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+    ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+    ::new (static_cast<void*>(ptr)) U(std::forward<Args>(args)...);
+  }
+
+  template <typename U>
+  void destroy(U* p) noexcept {
+    ReadState();
+    p->~U();
+  }
+
+  size_type max_size() const noexcept {
+    return std::numeric_limits<difference_type>::max() / sizeof(value_type);
+  }
+
+  ThrowingAllocator select_on_container_copy_construction() noexcept(
+      !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+    auto& out = *this;
+    ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+    return out;
+  }
+
+  template <typename U>
+  bool operator==(const ThrowingAllocator<U, Flags>& other) const noexcept {
+    return dummy_ == other.dummy_;
+  }
+
+  template <typename U>
+  bool operator!=(const ThrowingAllocator<U, Flags>& other) const noexcept {
+    return dummy_ != other.dummy_;
+  }
+
+  template <typename U, NoThrow B>
+  friend class ThrowingAllocator;
+
+ private:
+  const std::shared_ptr<const int>& State() const { return dummy_; }
+  std::shared_ptr<const int>& State() { return dummy_; }
+
+  void ReadState() {
+    // We know that this will never be true, but the compiler doesn't, so this
+    // should safely force a read of the value.
+    if (*dummy_ < 0) std::abort();
+  }
+
+  void ReadStateAndMaybeThrow(absl::string_view msg) const {
+    if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+      exceptions_internal::MaybeThrow(
+          absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg));
+    }
+  }
+
+  static int next_id_;
+  std::shared_ptr<const int> dummy_;
+};
+
+template <typename T, NoThrow Throws>
+int ThrowingAllocator<T, Throws>::next_id_ = 0;
+
+// Tests for resource leaks by attempting to construct a T from args
+// repeatedly, using the countdown method, until construction succeeds.  Side
+// effects of the failed attempts can then be checked for resource leaks.
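+//
+// For example (an illustrative sketch; MyBuffer is a hypothetical type whose
+// constructor allocates through a ThrowingAllocator):
+//
+//   TestThrowingCtor<MyBuffer>(/*size=*/16);
+//
+// Each failed construction attempt is followed by the framework's leak
+// tracking, so a constructor that loses resources when an allocation throws
+// part-way through is expected to be reported.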
+template <typename T, typename... Args>
+void TestThrowingCtor(Args&&... args) {
+  struct Cleanup {
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  for (int count = 0;; ++count) {
+    exceptions_internal::ConstructorTracker ct(count);
+    exceptions_internal::SetCountdown(count);
+    try {
+      T temp(std::forward<Args>(args)...);
+      static_cast<void>(temp);
+      break;
+    } catch (const exceptions_internal::TestException&) {
+    }
+  }
+}
+
+namespace exceptions_internal {
+
+// Dummy struct for ExceptionSafetyTester<> partial state.
+struct UninitializedT {};
+
+template <typename T>
+class DefaultFactory {
+ public:
+  explicit DefaultFactory(const T& t) : t_(t) {}
+  std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
+
+ private:
+  T t_;
+};
+
+template <size_t LazyInvariantsCount, typename LazyFactory,
+          typename LazyOperation>
+using EnableIfTestable = typename absl::enable_if_t<
+    LazyInvariantsCount != 0 &&
+    !std::is_same<LazyFactory, UninitializedT>::value &&
+    !std::is_same<LazyOperation, UninitializedT>::value>;
+
+template <typename Factory = UninitializedT,
+          typename Operation = UninitializedT, typename... Invariants>
+class ExceptionSafetyTester;
+
+}  // namespace exceptions_internal
+
+exceptions_internal::ExceptionSafetyTester<> MakeExceptionSafetyTester();
+
+namespace exceptions_internal {
+
+/*
+ * Builds a tester object that tests if performing an operation on a T follows
+ * exception safety guarantees. Verification is done via invariant assertion
+ * callbacks applied to T instances post-throw.
+ *
+ * Template parameters for ExceptionSafetyTester:
+ *
+ * - Factory: The factory object (passed in via tester.WithFactory(...) or
+ *   tester.WithInitialValue(...)) must be invocable with the signature
+ *   `std::unique_ptr<T> operator()() const` where T is the type being tested.
+ *   It is used for reliably creating identical T instances to test on.
+ *
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
+ *   or tester.Test(...)) must be invocable with the signature
+ *   `void operator()(T*) const` where T is the type being tested. It is used
+ *   for performing steps on a T instance that may throw and that need to be
+ *   checked for exception safety. Each call to the operation will receive a
+ *   fresh T instance so it's free to modify and destroy the T instances as it
+ *   pleases.
+ *
+ * - Invariants...: The invariant assertion callback objects (passed in via
+ *   tester.WithInvariants(...)) must be invocable with the signature
+ *   `testing::AssertionResult operator()(T*) const` where T is the type being
+ *   tested. Invariant assertion callbacks are provided T instances post-throw.
+ *   They must return testing::AssertionSuccess when the type invariants of the
+ *   provided T instance hold. If the type invariants of the T instance do not
+ *   hold, they must return testing::AssertionFailure. Execution order of
+ *   Invariants... is unspecified. They will each individually get a fresh T
+ *   instance so they are free to modify and destroy the T instances as they
+ *   please.
+ */
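+// As an illustrative sketch (MyContainer and its members are hypothetical), an
+// invariant assertion callback compatible with the signature described above
+// might look like:
+//
+//   const auto size_fits_capacity = [](MyContainer* c) {
+//     return c->size() <= c->capacity()
+//                ? testing::AssertionSuccess()
+//                : testing::AssertionFailure() << "size exceeds capacity";
+//   };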
+template <typename Factory, typename Operation, typename... Invariants>
+class ExceptionSafetyTester {
+ public:
+  /*
+   * Returns a new ExceptionSafetyTester with an included T factory based on the
+   * provided T instance. The existing factory will not be included in the newly
+   * created tester instance. The created factory returns a new T instance by
+   * copy-constructing the provided const T& t.
+   *
+   * Preconditions for tester.WithInitialValue(const T& t):
+   *
+   * - The const T& t object must be copy-constructible where T is the type
+   *   being tested. For non-copy-constructible objects, use the method
+   *   tester.WithFactory(...).
+   */
+  template <typename T>
+  ExceptionSafetyTester<DefaultFactory<T>, Operation, Invariants...>
+  WithInitialValue(const T& t) const {
+    return WithFactory(DefaultFactory<T>(t));
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTester with the provided T factory included.
+   * The existing factory will not be included in the newly-created tester
+   * instance. This method is intended for use with types lacking a copy
+   * constructor. Types that can be copy-constructed should instead use the
+   * method tester.WithInitialValue(...).
+   */
+  template <typename NewFactory>
+  ExceptionSafetyTester<absl::decay_t<NewFactory>, Operation, Invariants...>
+  WithFactory(const NewFactory& new_factory) const {
+    return {new_factory, operation_, invariants_};
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTester with the provided testable operation
+   * included. The existing operation will not be included in the newly created
+   * tester.
+   */
+  template <typename NewOperation>
+  ExceptionSafetyTester<Factory, absl::decay_t<NewOperation>, Invariants...>
+  WithOperation(const NewOperation& new_operation) const {
+    return {factory_, new_operation, invariants_};
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTester with the provided MoreInvariants...
+   * combined with the Invariants... that were already included in the instance
+   * on which the method was called. Invariants... cannot be removed or replaced
+   * once added to an ExceptionSafetyTester instance. A fresh object must be
+   * created in order to get an empty Invariants... list.
+   *
+   * In addition to passing in custom invariant assertion callbacks, this method
+   * accepts `absl::strong_guarantee` as an argument which checks T instances
+   * post-throw against freshly created T instances via operator== to verify
+   * that any state changes made during the execution of the operation were
+   * properly rolled back.
+   */
+  template <typename... MoreInvariants>
+  ExceptionSafetyTester<Factory, Operation, Invariants...,
+                        absl::decay_t<MoreInvariants>...>
+  WithInvariants(const MoreInvariants&... more_invariants) const {
+    return {factory_, operation_,
+            std::tuple_cat(invariants_,
+                           std::tuple<absl::decay_t<MoreInvariants>...>(
+                               more_invariants...))};
+  }
+
+  /*
+   * Returns a testing::AssertionResult that is the reduced result of the
+   * exception safety algorithm. The algorithm short circuits and returns
+   * AssertionFailure after the first invariant callback returns an
+   * AssertionFailure. Otherwise, if all invariant callbacks return an
+   * AssertionSuccess, the reduced result is AssertionSuccess.
+   *
+   * The passed-in testable operation will not be saved in a new tester instance
+   * nor will it modify/replace the existing tester instance. This is useful
+   * when each operation being tested is unique and does not need to be reused.
+   *
+   * Preconditions for tester.Test(const NewOperation& new_operation):
+   *
+   * - May only be called after at least one invariant assertion callback and a
+   *   factory or initial value have been provided.
+   */
+  template <
+      typename NewOperation,
+      typename = EnableIfTestable<sizeof...(Invariants), Factory, NewOperation>>
+  testing::AssertionResult Test(const NewOperation& new_operation) const {
+    return TestImpl(new_operation, absl::index_sequence_for<Invariants...>());
+  }
+
+  /*
+   * Returns a testing::AssertionResult that is the reduced result of the
+   * exception safety algorithm. The algorithm short circuits and returns
+   * AssertionFailure after the first invariant callback returns an
+   * AssertionFailure. Otherwise, if all invariant callbacks return an
+   * AssertionSuccess, the reduced result is AssertionSuccess.
+   *
+   * Preconditions for tester.Test():
+   *
+   * - May only be called after at least one invariant assertion callback, a
+   *   factory or initial value and a testable operation have been provided.
+   */
+  template <typename LazyOperation = Operation,
+            typename =
+                EnableIfTestable<sizeof...(Invariants), Factory, LazyOperation>>
+  testing::AssertionResult Test() const {
+    return TestImpl(operation_, absl::index_sequence_for<Invariants...>());
+  }
+
+ private:
+  template <typename, typename, typename...>
+  friend class ExceptionSafetyTester;
+
+  friend ExceptionSafetyTester<> absl::MakeExceptionSafetyTester();
+
+  ExceptionSafetyTester() {}
+
+  ExceptionSafetyTester(const Factory& f, const Operation& o,
+                        const std::tuple<Invariants...>& i)
+      : factory_(f), operation_(o), invariants_(i) {}
+
+  template <typename SelectedOperation, size_t... Indices>
+  testing::AssertionResult TestImpl(const SelectedOperation& selected_operation,
+                                    absl::index_sequence<Indices...>) const {
+    // Starting from 0 and counting upwards until one of the exit conditions is
+    // hit...
+    for (int count = 0;; ++count) {
+      exceptions_internal::ConstructorTracker ct(count);
+
+      // Run the full exception safety test algorithm for the current countdown
+      auto reduced_res =
+          TestAllInvariantsAtCountdown(factory_, selected_operation, count,
+                                       std::get<Indices>(invariants_)...);
+      // If there is no value in the optional, no invariants were run because no
+      // exception was thrown. This means that the test is complete and the loop
+      // can exit successfully.
+      if (!reduced_res.has_value()) {
+        return testing::AssertionSuccess();
+      }
+      // If the optional is not empty and the value is falsy, an invariant check
+      // failed, so the test must exit to propagate the failure.
+      if (!reduced_res.value()) {
+        return reduced_res.value();
+      }
+      // If the optional is not empty and the value is not falsy, it means
+      // exceptions were thrown but the invariants passed so the test must
+      // continue to run.
+    }
+  }
+
+  Factory factory_;
+  Operation operation_;
+  std::tuple<Invariants...> invariants_;
+};
+
+}  // namespace exceptions_internal
+
+/*
+ * Constructs an empty ExceptionSafetyTester. All ExceptionSafetyTester
+ * objects are immutable and all With[thing] mutation methods return new
+ * instances of ExceptionSafetyTester.
+ *
+ * In order to test a T for exception safety, a factory for that T, a testable
+ * operation, and at least one invariant callback returning an assertion
+ * result must be applied using the respective methods.
+ */
+inline exceptions_internal::ExceptionSafetyTester<>
+MakeExceptionSafetyTester() {
+  return {};
+}
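+
+// An end-to-end usage sketch (MyContainer is hypothetical and assumed to be
+// copyable and equality comparable; this is not upstream documentation),
+// composing the pieces defined above:
+//
+//   auto tester = absl::MakeExceptionSafetyTester()
+//                     .WithInitialValue(MyContainer())
+//                     .WithInvariants(absl::strong_guarantee);
+//   EXPECT_TRUE(tester.Test([](MyContainer* c) { c->PushBack(42); }));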
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_testing.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_testing.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_testing.h
new file mode 100644
index 0000000..07d7e8e
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/exception_testing.h
@@ -0,0 +1,38 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Testing utilities for ABSL types which throw exceptions.
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
+// if exceptions are enabled, or for death with a specified text in the error
+// message.
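+//
+// For example (illustrative; MyStack and its error text are hypothetical):
+//
+//   ABSL_BASE_INTERNAL_EXPECT_FAIL(MyStack().Pop(), std::logic_error,
+//                                  "empty stack");
+//
+// expands to EXPECT_THROW(...) when exceptions are enabled and to
+// EXPECT_DEATH(...) otherwise.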
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_THROW(expr, exception_t)
+
+#else
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_DEATH(expr, text)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/identity.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/identity.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/identity.h
new file mode 100644
index 0000000..a6734b4
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/identity.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_IDENTITY_H_
+
+namespace absl {
+namespace internal {
+
+template <typename T>
+struct identity {
+  typedef T type;
+};
+
+template <typename T>
+using identity_t = typename identity<T>::type;
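+
+// Illustrative note (not upstream documentation): because identity_t<T> names
+// T through a dependent type, it is a non-deduced context.  For example, in
+//
+//   template <typename T>
+//   void Assign(T& dest, absl::internal::identity_t<T> value) {
+//     dest = value;
+//   }
+//
+// T is deduced only from `dest`, so Assign(some_long_var, 42) converts the int
+// literal to long instead of producing a deduction conflict.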
+
+}  //  namespace internal
+}  //  namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_IDENTITY_H_

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/inline_variable.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/inline_variable.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/inline_variable.h
new file mode 100644
index 0000000..f7bb8c5
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/inline_variable.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+
+#include <type_traits>
+
+#include "absl/base/internal/identity.h"
+
+// File:
+//   This file defines a macro that allows the creation or emulation of C++17
+//   inline variables based on whether or not the feature is supported.
+
+////////////////////////////////////////////////////////////////////////////////
+// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
+//
+// Description:
+//   Expands to the equivalent of an inline constexpr instance of the specified
+//   `type` and `name`, initialized to the value `init`. If the compiler being
+//   used is detected as supporting actual inline variables as a language
+//   feature, then the macro expands to an actual inline variable definition.
+//
+// Requires:
+//   `type` is a type that is usable in an extern variable declaration.
+//
+// Requires: `name` is a valid identifier
+//
+// Requires:
+//   `init` is an expression that can be used in the following definition:
+//     constexpr type name = init;
+//
+// Usage:
+//
+//   // Equivalent to: `inline constexpr size_t variant_npos = -1;`
+//   ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
+//
+// Differences in implementation:
+//   For a direct, language-level inline variable, decltype(name) will be the
+//   type that was specified along with const qualification, whereas for
+//   emulated inline variables, decltype(name) may be different (in practice
+//   it will likely be a reference type).
+////////////////////////////////////////////////////////////////////////////////
+
+#ifdef __cpp_inline_variables
+
+// Clang's -Wmissing-variable-declarations option erroneously warned that
+// inline constexpr objects need to be pre-declared. This has now been fixed,
+// but we will need to support this workaround for people building with older
+// versions of clang.
+//
+// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
+//
+// Note:
+//   identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc.
+#if defined(__clang__)
+#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
+  extern const ::absl::internal::identity_t<type> name;
+#else  // Otherwise, just define the macro to do nothing.
+#define ABSL_INTERNAL_EXTERN_DECL(type, name)
+#endif  // defined(__clang__)
+
+// See above comment at top of file for details.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
+  ABSL_INTERNAL_EXTERN_DECL(type, name)                  \
+  inline constexpr ::absl::internal::identity_t<type> name = init
+
+#else
+
+// See above comment at top of file for details.
+//
+// Note:
+//   identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init)                  \
+  template <class /*AbslInternalDummy*/ = void>                               \
+  struct AbslInternalInlineVariableHolder##name {                             \
+    static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \
+  };                                                                          \
+                                                                              \
+  template <class AbslInternalDummy>                                          \
+  constexpr ::absl::internal::identity_t<var_type>                            \
+      AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance;   \
+                                                                              \
+  static constexpr const ::absl::internal::identity_t<var_type>&              \
+      name = /* NOLINT */                                                     \
+      AbslInternalInlineVariableHolder##name<>::kInstance;                    \
+  static_assert(sizeof(void (*)(decltype(name))) != 0,                        \
+                "Silence unused variable warnings.")
+
+#endif  // __cpp_inline_variables
+
+#endif  // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/inline_variable_testing.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/inline_variable_testing.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/inline_variable_testing.h
new file mode 100644
index 0000000..a0dd2bb
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/inline_variable_testing.h
@@ -0,0 +1,44 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+
+#include "absl/base/internal/inline_variable.h"
+
+namespace absl {
+namespace inline_variable_testing_internal {
+
+struct Foo {
+  int value = 5;
+};
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr);
+
+const Foo& get_foo_a();
+const Foo& get_foo_b();
+
+const int& get_int_a();
+const int& get_int_b();
+
+}  // namespace inline_variable_testing_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INLINE_VARIABLE_TESTING_H_

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/invoke.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/invoke.h b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/invoke.h
new file mode 100644
index 0000000..8c3f4f6
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/base/internal/invoke.h
@@ -0,0 +1,188 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// absl::base_internal::Invoke(f, args...) is an implementation of
+// INVOKE(f, args...) from section [func.require] of the C++ standard.
+//
+// [func.require]
+// Define INVOKE (f, t1, t2, ..., tN) as follows:
+// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+//    and t1 is an object of type T or a reference to an object of type T or a
+//    reference to an object of a type derived from T;
+// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+//    class T and t1 is not one of the types described in the previous item;
+// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+//    an object of type T or a reference to an object of type T or a reference
+//    to an object of a type derived from T;
+// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+//    is not one of the types described in the previous item;
+// 5. f(t1, t2, ..., tN) in all other cases.
+//
+// The implementation is SFINAE-friendly: substitution failure within Invoke()
+// isn't an error.
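+//
+// Usage sketch (illustrative; Counter is a hypothetical type):
+//
+//   struct Counter {
+//     int value = 0;
+//     int Add(int d) { return value += d; }
+//   };
+//
+//   Counter c;
+//   absl::base_internal::Invoke(&Counter::Add, c, 2);         // clause 1
+//   absl::base_internal::Invoke(&Counter::Add, &c, 3);        // clause 2
+//   absl::base_internal::Invoke(&Counter::value, c);          // clause 3
+//   absl::base_internal::Invoke([](int x) { return x; }, 7);  // clause 5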
+
+#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
+#define ABSL_BASE_INTERNAL_INVOKE_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+// The following code is an internal implementation detail.  See the comment
+// at the top of this file for the API documentation.
+
+namespace absl {
+namespace base_internal {
+
+// The five classes below each implement one of the clauses from the definition
+// of INVOKE. The inner class template Accept<F, Args...> checks whether the
+// clause is applicable; static function template Invoke(f, args...) does the
+// invocation.
+//
+// By separating the clause selection logic from invocation we make sure that
+// Invoke() does exactly what the standard says.
+
+template <typename Derived>
+struct StrippedAccept {
+  template <typename... Args>
+  struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
+                      typename std::remove_reference<Args>::type>::type...> {};
+};
+
+// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T.
+struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename... Params, typename Obj,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...), Obj, Args...>
+      : std::is_base_of<C, Obj> {};
+
+  template <typename R, typename C, typename... Params, typename Obj,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...) const, Obj, Args...>
+      : std::is_base_of<C, Obj> {};
+
+  template <typename MemFun, typename Obj, typename... Args>
+  static decltype((std::declval<Obj>().*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+    return (std::forward<Obj>(obj).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item.
+struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename... Params, typename Ptr,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...), Ptr, Args...>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {};
+
+  template <typename R, typename C, typename... Params, typename Ptr,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...) const, Ptr, Args...>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {};
+
+  template <typename MemFun, typename Ptr, typename... Args>
+  static decltype(((*std::declval<Ptr>()).*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
+    return ((*std::forward<Ptr>(ptr)).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T.
+struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Obj>
+  struct AcceptImpl<R C::*, Obj> : std::is_base_of<C, Obj> {};
+
+  template <typename DataMem, typename Ref>
+  static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ref&& ref) {
+    return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item.
+struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Ptr>
+  struct AcceptImpl<R C::*, Ptr>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {};
+
+  template <typename DataMem, typename Ptr>
+  static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ptr&& ptr) {
+    return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// f(t1, t2, ..., tN) in all other cases.
+struct Callable {
+  // Callable doesn't have Accept because it's the last clause that gets picked
+  // when none of the previous clauses are applicable.
+  template <typename F, typename... Args>
+  static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
+      F&& f, Args&&... args) {
+    return std::forward<F>(f)(std::forward<Args>(args)...);
+  }
+};
+
+// Resolves to the first matching clause.
+template <typename... Args>
+struct Invoker {
+  typedef typename std::conditional<
+      MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
+      typename std::conditional<
+          MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
+          typename std::conditional<
+              DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
+              typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
+                                        DataMemAndPtr, Callable>::type>::type>::
+          type>::type type;
+};
+
+// The result type of Invoke<F, Args...>.
+template <typename F, typename... Args>
+using InvokeT = decltype(Invoker<F, Args...>::type::Invoke(
+    std::declval<F>(), std::declval<Args>()...));
+
+// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
+// [func.require] of the C++ standard.
+template <typename F, typename... Args>
+InvokeT<F, Args...> Invoke(F&& f, Args&&... args) {
+  return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
+                                           std::forward<Args>(args)...);
+}
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_INVOKE_H_