Posted to commits@marmotta.apache.org by ss...@apache.org on 2018/04/29 19:35:49 UTC

[20/51] [partial] marmotta git commit: * Replace gtest with upstream version, including LICENSE header. * Include absl library for faster and safer string operations. * Update license headers where needed. * Removed custom code replaced by absl.

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock.cc
new file mode 100644
index 0000000..772f852
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock.cc
@@ -0,0 +1,563 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/time/clock.h"
+
+#include "absl/base/attributes.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstdint>
+#include <ctime>
+#include <limits>
+
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+Time Now() {
+  // TODO(bww): Get a timespec instead so we don't have to divide.
+  int64_t n = absl::GetCurrentTimeNanos();
+  if (n >= 0) {
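+    // MakeDuration() takes whole seconds plus a count of ticks, where one
+    // tick is a quarter of a nanosecond (see the notes in duration.cc);
+    // hence the nanosecond remainder is scaled by 4.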
+    return time_internal::FromUnixDuration(
+        time_internal::MakeDuration(n / 1000000000, n % 1000000000 * 4));
+  }
+  return time_internal::FromUnixDuration(absl::Nanoseconds(n));
+}
+}  // namespace absl
+
+// Decide whether GetCurrentTimeNanos() should use the fast algorithm below,
+// which is based on the cyclecounter, or instead get the time directly from
+// the OS on every call. This can be chosen at compile-time via
+// -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=[0|1]
+#ifndef ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 1
+#else
+#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
+#endif
+#endif
+
+#if defined(__APPLE__)
+#include "absl/time/internal/get_current_time_ios.inc"
+#elif defined(_WIN32)
+#include "absl/time/internal/get_current_time_windows.inc"
+#else
+#include "absl/time/internal/get_current_time_posix.inc"
+#endif
+
+// Allows override by test.
+#ifndef GET_CURRENT_TIME_NANOS_FROM_SYSTEM
+#define GET_CURRENT_TIME_NANOS_FROM_SYSTEM() \
+  ::absl::time_internal::GetCurrentTimeNanosFromSystem()
+#endif
+
+#if !ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
+namespace absl {
+int64_t GetCurrentTimeNanos() {
+  return GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
+}
+}  // namespace absl
+#else  // Use the cyclecounter-based implementation below.
+
+// Allows override by test.
+#ifndef GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW
+#define GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW() \
+  ::absl::time_internal::UnscaledCycleClockWrapperForGetCurrentTime::Now()
+#endif
+
+// The following counters are used only by the test code.
+static int64_t stats_initializations;
+static int64_t stats_reinitializations;
+static int64_t stats_calibrations;
+static int64_t stats_slow_paths;
+static int64_t stats_fast_slow_paths;
+
+namespace absl {
+namespace time_internal {
+// This is a friend wrapper around UnscaledCycleClock::Now()
+// (needed to access UnscaledCycleClock).
+class UnscaledCycleClockWrapperForGetCurrentTime {
+ public:
+  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+};
+}  // namespace time_internal
+
+// uint64_t is used in this module to provide an extra bit in multiplications
+
+// Return the time in ns as told by the kernel interface.  Place in *cycleclock
+// the value of the cycleclock at about the time of the syscall.
+// This call represents the time base that this module synchronizes to.
+// Ensures that *cycleclock does not step back by up to (1 << 16) from
+// last_cycleclock, to discard small backward counter steps.  (Larger steps are
+// assumed to be complete resyncs, which shouldn't happen.  If they do, a full
+// reinitialization of the outer algorithm should occur.)
+static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
+                                             uint64_t *cycleclock) {
+  // We try to read clock values at about the same time as the kernel clock.
+  // This value gets adjusted up or down as an estimate of how long that
+  // should take, so we can reject attempts that take unusually long.
+  static std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
+
+  uint64_t local_approx_syscall_time_in_cycles =  // local copy
+      approx_syscall_time_in_cycles.load(std::memory_order_relaxed);
+
+  int64_t current_time_nanos_from_system;
+  uint64_t before_cycles;
+  uint64_t after_cycles;
+  uint64_t elapsed_cycles;
+  int loops = 0;
+  do {
+    before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+    current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
+    after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+    // elapsed_cycles is unsigned, so is large on overflow
+    elapsed_cycles = after_cycles - before_cycles;
+    if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
+        ++loops == 20) {  // clock changed frequencies?  Back off.
+      loops = 0;
+      if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
+        local_approx_syscall_time_in_cycles =
+            (local_approx_syscall_time_in_cycles + 1) << 1;
+      }
+      approx_syscall_time_in_cycles.store(
+          local_approx_syscall_time_in_cycles,
+          std::memory_order_relaxed);
+    }
+  } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
+           last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));
+
+  // Number of times in a row we've seen a kernel time call take substantially
+  // less than approx_syscall_time_in_cycles.
+  static std::atomic<uint32_t> seen_smaller{ 0 };
+
+  // Adjust approx_syscall_time_in_cycles to be within a factor of 2
+  // of the typical time to execute one iteration of the loop above.
+  if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
+    // measured time is no smaller than half current approximation
+    seen_smaller.store(0, std::memory_order_relaxed);
+  } else if (seen_smaller.fetch_add(1, std::memory_order_relaxed) >= 3) {
+    // smaller delays several times in a row; reduce approximation by 12.5%
+    const uint64_t new_approximation =
+        local_approx_syscall_time_in_cycles -
+        (local_approx_syscall_time_in_cycles >> 3);
+    approx_syscall_time_in_cycles.store(new_approximation,
+                                        std::memory_order_relaxed);
+    seen_smaller.store(0, std::memory_order_relaxed);
+  }
+
+  *cycleclock = after_cycles;
+  return current_time_nanos_from_system;
+}
+
+
+// ---------------------------------------------------------------------
+// An implementation of reader-writer locks that use no atomic ops in the read
+// case.  This is a generalization of Lamport's method for reading a multiword
+// clock.  Increment a word on each write acquisition, using the low-order bit
+// as a spinlock; the word is the high word of the "clock".  Readers read the
+// high word, then all other data, then the high word again, and repeat the
+// read if the reads of the high words yield different answers, or an odd
+// value (either case suggests possible interference from a writer).
+// Here we use a spinlock to ensure only one writer at a time, rather than
+// spinning on the bottom bit of the word to benefit from SpinLock
+// spin-delay tuning.
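+//
+// A rough sketch of the reader side of the protocol described above (the
+// real reader is GetCurrentTimeNanos() below; names here are illustrative):
+//
+//   uint64_t s0, s1;
+//   do {
+//     s0 = seq.load(std::memory_order_acquire);
+//     // ... copy the protected fields into locals ...
+//     std::atomic_thread_fence(std::memory_order_acquire);
+//     s1 = seq.load(std::memory_order_relaxed);
+//   } while (s0 != s1 || (s0 & 1) != 0);  // writer interfered; retry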
+
+// Acquire seqlock (*seq) and return the value to be written to unlock.
+static inline uint64_t SeqAcquire(std::atomic<uint64_t> *seq) {
+  uint64_t x = seq->fetch_add(1, std::memory_order_relaxed);
+
+  // We put a release fence between update to *seq and writes to shared data.
+  // Thus all stores to shared data are effectively release operations and
+  // update to *seq above cannot be re-ordered past any of them.  Note that
+  // this barrier is not for the fetch_add above.  A release barrier for the
+  // fetch_add would be before it, not after.
+  std::atomic_thread_fence(std::memory_order_release);
+
+  return x + 2;   // original word plus 2
+}
+
+// Release seqlock (*seq) by writing x to it---a value previously returned by
+// SeqAcquire.
+static inline void SeqRelease(std::atomic<uint64_t> *seq, uint64_t x) {
+  // The unlock store to *seq must have release ordering so that all
+  // updates to shared data must finish before this store.
+  seq->store(x, std::memory_order_release);  // release lock for readers
+}
+
+// ---------------------------------------------------------------------
+
+// "nsscaled" is unit of time equal to a (2**kScale)th of a nanosecond.
+enum { kScale = 30 };
+
+// The minimum interval between samples of the time base.
+// We pick enough time to amortize the cost of the sample,
+// to get a reasonably accurate cycle counter rate reading,
+// and not so much that calculations will overflow 64-bits.
+static const uint64_t kMinNSBetweenSamples = 2000 << 20;
+
+// We require that kMinNSBetweenSamples shifted by kScale
+// have at least a bit left over for 64-bit calculations.
+static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) ==
+               kMinNSBetweenSamples,
+               "cannot represent kMinNSBetweenSamples when scaled");
+
+// A reader-writer lock protecting the static locations below.
+// See SeqAcquire() and SeqRelease() above.
+static absl::base_internal::SpinLock lock(
+    absl::base_internal::kLinkerInitialized);
+static std::atomic<uint64_t> seq(0);
+
+// data from a sample of the kernel's time value
+struct TimeSampleAtomic {
+  std::atomic<uint64_t> raw_ns;              // raw kernel time
+  std::atomic<uint64_t> base_ns;             // our estimate of time
+  std::atomic<uint64_t> base_cycles;         // cycle counter reading
+  std::atomic<uint64_t> nsscaled_per_cycle;  // cycle period
+  // cycles before we'll sample again (a scaled reciprocal of the period,
+  // to avoid a division on the fast path).
+  std::atomic<uint64_t> min_cycles_per_sample;
+};
+// Same again, but with non-atomic types
+struct TimeSample {
+  uint64_t raw_ns;                 // raw kernel time
+  uint64_t base_ns;                // our estimate of time
+  uint64_t base_cycles;            // cycle counter reading
+  uint64_t nsscaled_per_cycle;     // cycle period
+  uint64_t min_cycles_per_sample;  // approx cycles before next sample
+};
+
+static struct TimeSampleAtomic last_sample;   // the last sample; under seq
+
+static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD;
+
+// Read the contents of *atomic into *sample.
+// Each field is read atomically, but to maintain atomicity between fields,
+// the access must be done under a lock.
+static void ReadTimeSampleAtomic(const struct TimeSampleAtomic *atomic,
+                                 struct TimeSample *sample) {
+  sample->base_ns = atomic->base_ns.load(std::memory_order_relaxed);
+  sample->base_cycles = atomic->base_cycles.load(std::memory_order_relaxed);
+  sample->nsscaled_per_cycle =
+      atomic->nsscaled_per_cycle.load(std::memory_order_relaxed);
+  sample->min_cycles_per_sample =
+      atomic->min_cycles_per_sample.load(std::memory_order_relaxed);
+  sample->raw_ns = atomic->raw_ns.load(std::memory_order_relaxed);
+}
+
+// Public routine.
+// Algorithm:  We wish to compute real time from a cycle counter.  In normal
+// operation, we construct a piecewise linear approximation to the kernel time
+// source, using the cycle counter value.  The start of each line segment is at
+// the same point as the end of the last, but may have a different slope (that
+// is, a different idea of the cycle counter frequency).  Every couple of
+// seconds, the kernel time source is sampled and compared with the current
+// approximation.  A new slope is chosen that, if followed for another couple
+// of seconds, will correct the error at the current position.  The information
+// for a sample is in the "last_sample" struct.  The linear approximation is
+//   estimated_time = last_sample.base_ns +
+//     last_sample.ns_per_cycle * (counter_reading - last_sample.base_cycles)
+// (ns_per_cycle is actually stored in different units and scaled, to avoid
+// overflow).  The base_ns of the next linear approximation is the
+// estimated_time using the last approximation; the base_cycles is the cycle
+// counter value at that time; the ns_per_cycle is the number of ns per cycle
+// measured since the last sample, but adjusted so that most of the difference
+// between the estimated_time and the kernel time will be corrected by the
+// estimated time to the next sample.  In normal operation, this algorithm
+// relies on:
+// - the cycle counter and kernel time rates not changing a lot in a few
+//   seconds.
+// - the client calling into the code often compared to a couple of seconds, so
+//   the time to the next correction can be estimated.
+// Any time ns_per_cycle is not known, a major error is detected, or the
+// assumption about frequent calls is violated, the implementation returns the
+// kernel time.  It records sufficient data that a linear approximation can
+// resume a little later.
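+//
+// As a worked sketch of the interpolation (all numbers illustrative): with
+// base_ns = 1000, base_cycles = 0, and nsscaled_per_cycle encoding one third
+// of a nanosecond per cycle, a counter_reading of 300 cycles yields
+// estimated_time = 1000 + 300 * (1/3) = 1100 ns.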
+
+int64_t GetCurrentTimeNanos() {
+  // Read the data from the "last_sample" struct (we don't need raw_ns yet).
+  // The reads of "seq" and the test of those values emulate a reader lock.
+  uint64_t base_ns;
+  uint64_t base_cycles;
+  uint64_t nsscaled_per_cycle;
+  uint64_t min_cycles_per_sample;
+  uint64_t seq_read0;
+  uint64_t seq_read1;
+
+  // If we have enough information to interpolate, the value returned will be
+  // derived from this cycleclock-derived time estimate.  On some platforms
+  // (POWER) the function to retrieve this value has enough complexity to
+  // contribute to register pressure - reading it early before initializing
+  // the other pieces of the calculation minimizes spill/restore instructions,
+  // minimizing icache cost.
+  uint64_t now_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+
+  // Acquire pairs with the barrier in SeqRelease - if this load sees that
+  // store, the shared-data reads necessarily see that SeqRelease's updates
+  // to the same shared data.
+  seq_read0 = seq.load(std::memory_order_acquire);
+
+  base_ns = last_sample.base_ns.load(std::memory_order_relaxed);
+  base_cycles = last_sample.base_cycles.load(std::memory_order_relaxed);
+  nsscaled_per_cycle =
+      last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
+  min_cycles_per_sample =
+      last_sample.min_cycles_per_sample.load(std::memory_order_relaxed);
+
+  // This acquire fence pairs with the release fence in SeqAcquire.  Since it
+  // is sequenced between reads of shared data and seq_read1, the reads of
+  // shared data are effectively acquiring.
+  std::atomic_thread_fence(std::memory_order_acquire);
+
+  // The shared-data reads are effectively acquire ordered, and the
+  // shared-data writes are effectively release ordered. Therefore if our
+  // shared-data reads see any of a particular update's shared-data writes,
+  // seq_read1 is guaranteed to see that update's SeqAcquire.
+  seq_read1 = seq.load(std::memory_order_relaxed);
+
+  // Fast path.  Return if min_cycles_per_sample has not yet elapsed since the
+  // last sample, and we read a consistent sample.  The fast path activates
+  // only when min_cycles_per_sample is non-zero, which happens when we get an
+  // estimate for the cycle time.  The predicate will fail if now_cycles <
+  // base_cycles, or if some other thread is in the slow path.
+  //
+  // Since we now read now_cycles before base_ns, it is possible for now_cycles
+  // to be less than base_cycles (if we were interrupted between those loads and
+  // last_sample was updated). This is harmless, because delta_cycles will wrap
+  // and report a time much much bigger than min_cycles_per_sample. In that case
+  // we will take the slow path.
+  uint64_t delta_cycles = now_cycles - base_cycles;
+  if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
+      delta_cycles < min_cycles_per_sample) {
+    return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale);
+  }
+  return GetCurrentTimeNanosSlowPath();
+}
+
+// Return (a << kScale)/b.
+// Zero is returned if b==0.   Scaling is performed internally to
+// preserve precision without overflow.
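+// For example, with kScale == 30, SafeDivideAndScale(1, 1) returns 1 << 30,
+// and SafeDivideAndScale(1, 2) returns 1 << 29.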
+static uint64_t SafeDivideAndScale(uint64_t a, uint64_t b) {
+  // Find maximum safe_shift so that
+  //  0 <= safe_shift <= kScale  and  (a << safe_shift) does not overflow.
+  int safe_shift = kScale;
+  while (((a << safe_shift) >> safe_shift) != a) {
+    safe_shift--;
+  }
+  uint64_t scaled_b = b >> (kScale - safe_shift);
+  uint64_t quotient = 0;
+  if (scaled_b != 0) {
+    quotient = (a << safe_shift) / scaled_b;
+  }
+  return quotient;
+}
+
+static uint64_t UpdateLastSample(
+    uint64_t now_cycles, uint64_t now_ns, uint64_t delta_cycles,
+    const struct TimeSample *sample) ABSL_ATTRIBUTE_COLD;
+
+// The slow path of GetCurrentTimeNanos().  This is taken while gathering
+// initial samples, when enough time has elapsed since the last sample, and if
+// any other thread is writing to last_sample.
+//
+// Manually mark this 'noinline' to minimize stack frame size of the fast
+// path.  Without this, sometimes a compiler may inline this big block of code
+// into the fast path.  That causes lots of register spills and reloads that
+// are unnecessary unless the slow path is taken.
+//
+// TODO(absl-team): Remove this attribute when our compiler is smart enough
+// to do the right thing.
+ABSL_ATTRIBUTE_NOINLINE
+static int64_t GetCurrentTimeNanosSlowPath() LOCKS_EXCLUDED(lock) {
+  // Serialize access to the slow path.  Fast-path readers are not blocked
+  // yet, and code below must not modify last_sample until the seqlock is
+  // acquired.
+  lock.Lock();
+
+  // Sample the kernel time base.  This is the definition of
+  // "now" if we take the slow path.
+  static uint64_t last_now_cycles;  // protected by lock
+  uint64_t now_cycles;
+  uint64_t now_ns = GetCurrentTimeNanosFromKernel(last_now_cycles, &now_cycles);
+  last_now_cycles = now_cycles;
+
+  uint64_t estimated_base_ns;
+
+  // ----------
+  // Read the "last_sample" values again; this time holding the write lock.
+  struct TimeSample sample;
+  ReadTimeSampleAtomic(&last_sample, &sample);
+
+  // ----------
+  // Try running the fast path again; another thread may have updated the
+  // sample between our run of the fast path and the sample we just read.
+  uint64_t delta_cycles = now_cycles - sample.base_cycles;
+  if (delta_cycles < sample.min_cycles_per_sample) {
+    // Another thread updated the sample.  This path does not take the seqlock
+    // so that blocked readers can make progress without blocking new readers.
+    estimated_base_ns = sample.base_ns +
+        ((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
+    stats_fast_slow_paths++;
+  } else {
+    estimated_base_ns =
+        UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
+  }
+
+  lock.Unlock();
+
+  return estimated_base_ns;
+}
+
+// Main part of the algorithm.  Locks out readers, updates the approximation
+// using the new sample from the kernel, and stores the result in last_sample
+// for readers.  Returns the new estimated time.
+static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
+                                 uint64_t delta_cycles,
+                                 const struct TimeSample *sample)
+    EXCLUSIVE_LOCKS_REQUIRED(lock) {
+  uint64_t estimated_base_ns = now_ns;
+  uint64_t lock_value = SeqAcquire(&seq);  // acquire seqlock to block readers
+
+  // The 5s in the next if-statement limits the time for which we will trust
+  // the cycle counter and our last sample to give a reasonable result.
+  // Errors in the rate of the source clock can be multiplied by the ratio
+  // between this limit and kMinNSBetweenSamples.
+  if (sample->raw_ns == 0 ||  // no recent sample, or clock went backwards
+      sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns ||
+      now_ns < sample->raw_ns || now_cycles < sample->base_cycles) {
+    // record this sample, and forget any previously known slope.
+    last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
+    last_sample.base_ns.store(estimated_base_ns, std::memory_order_relaxed);
+    last_sample.base_cycles.store(now_cycles, std::memory_order_relaxed);
+    last_sample.nsscaled_per_cycle.store(0, std::memory_order_relaxed);
+    last_sample.min_cycles_per_sample.store(0, std::memory_order_relaxed);
+    stats_initializations++;
+  } else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns &&
+             sample->base_cycles + 100 < now_cycles) {
+    // Enough time has passed to compute the cycle time.
+    if (sample->nsscaled_per_cycle != 0) {  // Have a cycle time estimate.
+      // Compute the time from the counter reading, avoiding overflow;
+      // delta_cycles may be larger than on the fast path.
+      uint64_t estimated_scaled_ns;
+      int s = -1;
+      do {
+        s++;
+        estimated_scaled_ns = (delta_cycles >> s) * sample->nsscaled_per_cycle;
+      } while (estimated_scaled_ns / sample->nsscaled_per_cycle !=
+               (delta_cycles >> s));
+      estimated_base_ns = sample->base_ns +
+                          (estimated_scaled_ns >> (kScale - s));
+    }
+
+    // Compute the assumed cycle time kMinNSBetweenSamples ns into the future
+    // assuming the cycle counter rate stays the same as the last interval.
+    uint64_t ns = now_ns - sample->raw_ns;
+    uint64_t measured_nsscaled_per_cycle = SafeDivideAndScale(ns, delta_cycles);
+
+    uint64_t assumed_next_sample_delta_cycles =
+        SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);
+
+    int64_t diff_ns = now_ns - estimated_base_ns;  // estimate low by this much
+
+    // We want to set nsscaled_per_cycle so that our estimate of the ns time
+    // at the assumed cycle time is the assumed ns time.
+    // That is, we want to set nsscaled_per_cycle so:
+    //  kMinNSBetweenSamples + diff_ns  ==
+    //  (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
+    // But we wish to damp oscillations, so instead correct only most
+    // of our current error, by solving:
+    //  kMinNSBetweenSamples + diff_ns - (diff_ns / 16) ==
+    //  (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
+    ns = kMinNSBetweenSamples + diff_ns - (diff_ns / 16);
+    uint64_t new_nsscaled_per_cycle =
+        SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
+    if (new_nsscaled_per_cycle != 0 &&
+        diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) {
+      // record the cycle time measurement
+      last_sample.nsscaled_per_cycle.store(
+          new_nsscaled_per_cycle, std::memory_order_relaxed);
+      uint64_t new_min_cycles_per_sample =
+          SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);
+      last_sample.min_cycles_per_sample.store(
+          new_min_cycles_per_sample, std::memory_order_relaxed);
+      stats_calibrations++;
+    } else {  // something went wrong; forget the slope
+      last_sample.nsscaled_per_cycle.store(0, std::memory_order_relaxed);
+      last_sample.min_cycles_per_sample.store(0, std::memory_order_relaxed);
+      estimated_base_ns = now_ns;
+      stats_reinitializations++;
+    }
+    last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
+    last_sample.base_ns.store(estimated_base_ns, std::memory_order_relaxed);
+    last_sample.base_cycles.store(now_cycles, std::memory_order_relaxed);
+  } else {
+    // have a sample, but no slope; waiting for enough time for a calibration
+    stats_slow_paths++;
+  }
+
+  SeqRelease(&seq, lock_value);  // release the readers
+
+  return estimated_base_ns;
+}
+}  // namespace absl
+#endif  // ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
+
+namespace absl {
+namespace {
+
+// Returns the maximum duration that SleepOnce() can sleep for.
+constexpr absl::Duration MaxSleep() {
+#ifdef _WIN32
+  // Windows Sleep() takes an unsigned long argument in milliseconds.
+  return absl::Milliseconds(
+      std::numeric_limits<unsigned long>::max());  // NOLINT(runtime/int)
+#else
+  return absl::Seconds(std::numeric_limits<time_t>::max());
+#endif
+}
+
+// Sleeps for the given duration.
+// REQUIRES: to_sleep <= MaxSleep().
+void SleepOnce(absl::Duration to_sleep) {
+#ifdef _WIN32
+  Sleep(to_sleep / absl::Milliseconds(1));
+#else
+  struct timespec sleep_time = absl::ToTimespec(to_sleep);
+  while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
+    // Ignore signals and wait for the full interval to elapse.
+  }
+#endif
+}
+
+}  // namespace
+}  // namespace absl
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSleepFor(absl::Duration duration) {
+  while (duration > absl::ZeroDuration()) {
+    absl::Duration to_sleep = std::min(duration, absl::MaxSleep());
+    absl::SleepOnce(to_sleep);
+    duration -= to_sleep;
+  }
+}
+
+}  // extern "C"

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock.h b/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock.h
new file mode 100644
index 0000000..3753d4e
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock.h
@@ -0,0 +1,72 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: clock.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains utility functions for working with the system-wide
+// realtime clock. For descriptions of the main time abstractions used within
+// this header file, consult the time.h header file.
+#ifndef ABSL_TIME_CLOCK_H_
+#define ABSL_TIME_CLOCK_H_
+
+#include "absl/base/macros.h"
+#include "absl/time/time.h"
+
+namespace absl {
+
+// Now()
+//
+// Returns the current time, expressed as an `absl::Time` absolute time value.
+absl::Time Now();
+
+// GetCurrentTimeNanos()
+//
+// Returns the current time, expressed as a count of nanoseconds since the Unix
+// Epoch (https://en.wikipedia.org/wiki/Unix_time). Prefer `absl::Now()` instead
+// for all but the most performance-sensitive cases (i.e. when you are calling
+// this function hundreds of thousands of times per second).
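+//
+// For example, absl::FromUnixNanos(absl::GetCurrentTimeNanos()) produces a
+// value comparable with absl::Now().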
+int64_t GetCurrentTimeNanos();
+
+// SleepFor()
+//
+// Sleeps for the specified duration, expressed as an `absl::Duration`.
+//
+// Notes:
+// * Signal interruptions will not reduce the sleep duration.
+// * Returns immediately when passed a nonpositive duration.
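+//
+// Example:
+//
+//   absl::SleepFor(absl::Milliseconds(100));  // sleeps at least 100ms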
+void SleepFor(absl::Duration duration);
+
+}  // namespace absl
+
+// -----------------------------------------------------------------------------
+// Implementation Details
+// -----------------------------------------------------------------------------
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker.  This causes it to flag weak symbol overrides as ODR
+// violations.  Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void AbslInternalSleepFor(absl::Duration duration);
+}  // extern "C"
+
+inline void absl::SleepFor(absl::Duration duration) {
+  AbslInternalSleepFor(duration);
+}
+
+#endif  // ABSL_TIME_CLOCK_H_

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock_test.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock_test.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock_test.cc
new file mode 100644
index 0000000..f143c03
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/time/clock_test.cc
@@ -0,0 +1,70 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/time/clock.h"
+
+#include "absl/base/config.h"
+#if defined(ABSL_HAVE_ALARM)
+#include <signal.h>
+#include <unistd.h>
+#elif defined(__linux__) || defined(__APPLE__)
+#error all known Linux and Apple targets have alarm
+#endif
+
+#include "gtest/gtest.h"
+#include "absl/time/time.h"
+
+namespace {
+
+TEST(Time, Now) {
+  const absl::Time before = absl::FromUnixNanos(absl::GetCurrentTimeNanos());
+  const absl::Time now = absl::Now();
+  const absl::Time after = absl::FromUnixNanos(absl::GetCurrentTimeNanos());
+  EXPECT_GE(now, before);
+  EXPECT_GE(after, now);
+}
+
+TEST(SleepForTest, BasicSanity) {
+  absl::Duration sleep_time = absl::Milliseconds(2500);
+  absl::Time start = absl::Now();
+  absl::SleepFor(sleep_time);
+  absl::Time end = absl::Now();
+  EXPECT_LE(sleep_time - absl::Milliseconds(100), end - start);
+  EXPECT_GE(sleep_time + absl::Milliseconds(200), end - start);
+}
+
+#ifdef ABSL_HAVE_ALARM
+// Helper for the SleepFor tests.
+bool alarm_handler_invoked = false;
+void AlarmHandler(int signo) {
+  ASSERT_EQ(signo, SIGALRM);
+  alarm_handler_invoked = true;
+}
+
+TEST(SleepForTest, AlarmSupport) {
+  alarm_handler_invoked = false;
+  sig_t old_alarm = signal(SIGALRM, AlarmHandler);
+  alarm(2);
+  absl::Duration sleep_time = absl::Milliseconds(3500);
+  absl::Time start = absl::Now();
+  absl::SleepFor(sleep_time);
+  absl::Time end = absl::Now();
+  EXPECT_TRUE(alarm_handler_invoked);
+  EXPECT_LE(sleep_time - absl::Milliseconds(100), end - start);
+  EXPECT_GE(sleep_time + absl::Milliseconds(200), end - start);
+  signal(SIGALRM, old_alarm);
+}
+#endif  // ABSL_HAVE_ALARM
+
+}  // namespace

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0eb556da/libraries/ostrich/backend/3rdparty/abseil/absl/time/duration.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/3rdparty/abseil/absl/time/duration.cc b/libraries/ostrich/backend/3rdparty/abseil/absl/time/duration.cc
new file mode 100644
index 0000000..82b4d98
--- /dev/null
+++ b/libraries/ostrich/backend/3rdparty/abseil/absl/time/duration.cc
@@ -0,0 +1,908 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of the absl::Duration class, which is declared in
+// //absl/time.h.  This class behaves like a numeric type; it has no public
+// methods and is used only through the operators defined here.
+//
+// Implementation notes:
+//
+// An absl::Duration is represented as
+//
+//   rep_hi_ : (int64_t)  Whole seconds
+//   rep_lo_ : (uint32_t) Fractions of a second
+//
+// The seconds value (rep_hi_) may be positive or negative as appropriate.
+// The fractional seconds (rep_lo_) is always a positive offset from rep_hi_.
+// The API for Duration guarantees at least nanosecond resolution, which
+// means rep_lo_ could have a max value of 1B - 1 if it stored nanoseconds.
+// However, to utilize more of the available 32 bits of space in rep_lo_,
+// we instead store quarters of a nanosecond in rep_lo_ resulting in a max
+// value of 4B - 1.  This allows us to correctly handle calculations like
+// 0.5 nanos + 0.5 nanos = 1 nano.  The following example shows the actual
+// Duration rep using quarters of a nanosecond.
+//
+//    2.5 sec = {rep_hi_=2,  rep_lo_=2000000000}  // lo = 4 * 500000000
+//   -2.5 sec = {rep_hi_=-3, rep_lo_=2000000000}
+//
+// Infinite durations are represented as Durations with the rep_lo_ field set
+// to all 1s.
+//
+//   +InfiniteDuration:
+//     rep_hi_ : kint64max
+//     rep_lo_ : ~0U
+//
+//   -InfiniteDuration:
+//     rep_hi_ : kint64min
+//     rep_lo_ : ~0U
+//
+// Arithmetic overflows/underflows to +/- infinity and saturates.
+
+#include <algorithm>
+#include <cassert>
+#include <cctype>
+#include <cerrno>
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <functional>
+#include <limits>
+#include <string>
+
+#include "absl/base/casts.h"
+#include "absl/numeric/int128.h"
+#include "absl/time/time.h"
+
+namespace absl {
+
+namespace {
+
+using time_internal::kTicksPerNanosecond;
+using time_internal::kTicksPerSecond;
+
+constexpr int64_t kint64max = std::numeric_limits<int64_t>::max();
+constexpr int64_t kint64min = std::numeric_limits<int64_t>::min();
+
+// Can't use std::isinf() because it is not reliably available on Windows.
+inline bool IsFinite(double d) {
+  return d != std::numeric_limits<double>::infinity() &&
+         d != -std::numeric_limits<double>::infinity();
+}
+
+// Can't use std::round() because it is only available in C++11.
+// Note that we ignore the possibility of floating-point over/underflow.
+template <typename Double>
+inline double Round(Double d) {
+  return d < 0 ? std::ceil(d - 0.5) : std::floor(d + 0.5);
+}
+
+// *sec may be positive or negative.  *ticks must be in the range
+// -kTicksPerSecond < *ticks < kTicksPerSecond.  If *ticks is negative it
+// will be normalized to a positive value by adjusting *sec accordingly.
+inline void NormalizeTicks(int64_t* sec, int64_t* ticks) {
+  if (*ticks < 0) {
+    --*sec;
+    *ticks += kTicksPerSecond;
+  }
+}
+
+// Makes a uint128 from the absolute value of the given scalar.
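+// For example, MakeU128(-3) returns 3, and MakeU128(kint64min) returns
+// uint128(1) << 63 (a value with no int64_t counterpart).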
+inline uint128 MakeU128(int64_t a) {
+  uint128 u128 = 0;
+  if (a < 0) {
+    ++u128;
+    ++a;  // Makes it safe to negate 'a'
+    a = -a;
+  }
+  u128 += static_cast<uint64_t>(a);
+  return u128;
+}
+
+// Makes a uint128 count of ticks out of the absolute value of the Duration.
+inline uint128 MakeU128Ticks(Duration d) {
+  int64_t rep_hi = time_internal::GetRepHi(d);
+  uint32_t rep_lo = time_internal::GetRepLo(d);
+  if (rep_hi < 0) {
+    ++rep_hi;
+    rep_hi = -rep_hi;
+    rep_lo = kTicksPerSecond - rep_lo;
+  }
+  uint128 u128 = static_cast<uint64_t>(rep_hi);
+  u128 *= static_cast<uint64_t>(kTicksPerSecond);
+  u128 += rep_lo;
+  return u128;
+}
+
+// Breaks a uint128 of ticks into a Duration.
+inline Duration MakeDurationFromU128(uint128 u128, bool is_neg) {
+  int64_t rep_hi;
+  uint32_t rep_lo;
+  const uint64_t h64 = Uint128High64(u128);
+  const uint64_t l64 = Uint128Low64(u128);
+  if (h64 == 0) {  // fastpath
+    const uint64_t hi = l64 / kTicksPerSecond;
+    rep_hi = static_cast<int64_t>(hi);
+    rep_lo = static_cast<uint32_t>(l64 - hi * kTicksPerSecond);
+  } else {
+    // kMaxRepHi64 is the high 64 bits of (2^63 * kTicksPerSecond).
+    // Any positive tick count whose high 64 bits are >= kMaxRepHi64
+    // is not representable as a Duration.  A negative tick count can
+    // have its high 64 bits == kMaxRepHi64 but only when the low 64
+    // bits are all zero, otherwise it is not representable either.
+    const uint64_t kMaxRepHi64 = 0x77359400UL;
+    if (h64 >= kMaxRepHi64) {
+      if (is_neg && h64 == kMaxRepHi64 && l64 == 0) {
+        // Avoid trying to represent -kint64min below.
+        return time_internal::MakeDuration(kint64min);
+      }
+      return is_neg ? -InfiniteDuration() : InfiniteDuration();
+    }
+    const uint128 kTicksPerSecond128 = static_cast<uint64_t>(kTicksPerSecond);
+    const uint128 hi = u128 / kTicksPerSecond128;
+    rep_hi = static_cast<int64_t>(Uint128Low64(hi));
+    rep_lo =
+        static_cast<uint32_t>(Uint128Low64(u128 - hi * kTicksPerSecond128));
+  }
+  if (is_neg) {
+    rep_hi = -rep_hi;
+    if (rep_lo != 0) {
+      --rep_hi;
+      rep_lo = kTicksPerSecond - rep_lo;
+    }
+  }
+  return time_internal::MakeDuration(rep_hi, rep_lo);
+}
+
+// Convert between int64_t and uint64_t, preserving representation. This
+// allows us to do arithmetic in the unsigned domain, where overflow has
+// well-defined behavior. See operator+=() and operator-=().
+//
+// C99 7.20.1.1.1, as referenced by C++11 18.4.1.2, says, "The typedef
+// name intN_t designates a signed integer type with width N, no padding
+// bits, and a two's complement representation." So, we can convert to
+// and from the corresponding uint64_t value using a bit cast.
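+//
+// For example, EncodeTwosComp(int64_t{-1}) == ~uint64_t{0}, and
+// DecodeTwosComp(~uint64_t{0}) == -1.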
+inline uint64_t EncodeTwosComp(int64_t v) {
+  return absl::bit_cast<uint64_t>(v);
+}
+inline int64_t DecodeTwosComp(uint64_t v) { return absl::bit_cast<int64_t>(v); }
+
+// Note: The overflow detection in this function is done using greater/less *or
+// equal* because kint64max/min is too large to be represented exactly in a
+// double (which only has 53 bits of precision). In order to avoid assigning to
+// rep->hi a double value that is too large for an int64_t (and therefore is
+// undefined), we must consider computations that equal kint64max/min as a
+// double as overflow cases.
+inline bool SafeAddRepHi(double a_hi, double b_hi, Duration* d) {
+  double c = a_hi + b_hi;
+  if (c >= kint64max) {
+    *d = InfiniteDuration();
+    return false;
+  }
+  if (c <= kint64min) {
+    *d = -InfiniteDuration();
+    return false;
+  }
+  *d = time_internal::MakeDuration(c, time_internal::GetRepLo(*d));
+  return true;
+}
+
+// A functor that's similar to std::multiplies<T>, except this returns the max
+// T value instead of overflowing. This is only defined for uint128.
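+// For example, SafeMultiply<uint128>()(kuint128max, 2) returns kuint128max
+// rather than wrapping.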
+template <typename Ignored>
+struct SafeMultiply {
+  uint128 operator()(uint128 a, uint128 b) const {
+    // b hi is always zero because it originated as an int64_t.
+    assert(Uint128High64(b) == 0);
+    // Fastpath to avoid the expensive overflow check with division.
+    if (Uint128High64(a) == 0) {
+      return (((Uint128Low64(a) | Uint128Low64(b)) >> 32) == 0)
+                 ? static_cast<uint128>(Uint128Low64(a) * Uint128Low64(b))
+                 : a * b;
+    }
+    return b == 0 ? b : (a > kuint128max / b) ? kuint128max : a * b;
+  }
+};
+
+// Scales (i.e., multiplies or divides, depending on the Operation template)
+// the Duration d by the int64_t r.
+template <template <typename> class Operation>
+inline Duration ScaleFixed(Duration d, int64_t r) {
+  const uint128 a = MakeU128Ticks(d);
+  const uint128 b = MakeU128(r);
+  const uint128 q = Operation<uint128>()(a, b);
+  const bool is_neg = (time_internal::GetRepHi(d) < 0) != (r < 0);
+  return MakeDurationFromU128(q, is_neg);
+}
+
+// Scales (i.e., multiplies or divides, depending on the Operation template)
+// the Duration d by the double r.
+template <template <typename> class Operation>
+inline Duration ScaleDouble(Duration d, double r) {
+  Operation<double> op;
+  double hi_doub = op(time_internal::GetRepHi(d), r);
+  double lo_doub = op(time_internal::GetRepLo(d), r);
+
+  double hi_int = 0;
+  double hi_frac = std::modf(hi_doub, &hi_int);
+
+  // Moves hi's fractional bits to lo.
+  lo_doub /= kTicksPerSecond;
+  lo_doub += hi_frac;
+
+  double lo_int = 0;
+  double lo_frac = std::modf(lo_doub, &lo_int);
+
+  // Rolls lo into hi if necessary.
+  int64_t lo64 = Round(lo_frac * kTicksPerSecond);
+
+  Duration ans;
+  if (!SafeAddRepHi(hi_int, lo_int, &ans)) return ans;
+  int64_t hi64 = time_internal::GetRepHi(ans);
+  if (!SafeAddRepHi(hi64, lo64 / kTicksPerSecond, &ans)) return ans;
+  hi64 = time_internal::GetRepHi(ans);
+  lo64 %= kTicksPerSecond;
+  NormalizeTicks(&hi64, &lo64);
+  return time_internal::MakeDuration(hi64, lo64);
+}
+
+// Tries to divide num by den as fast as possible by looking for common, easy
+// cases. If the division was done, the quotient is in *q and the remainder is
+// in *rem and true will be returned.
+inline bool IDivFastPath(const Duration num, const Duration den, int64_t* q,
+                         Duration* rem) {
+  // Bail if num or den is an infinity.
+  if (time_internal::IsInfiniteDuration(num) ||
+      time_internal::IsInfiniteDuration(den))
+    return false;
+
+  int64_t num_hi = time_internal::GetRepHi(num);
+  uint32_t num_lo = time_internal::GetRepLo(num);
+  int64_t den_hi = time_internal::GetRepHi(den);
+  uint32_t den_lo = time_internal::GetRepLo(den);
+
+  if (den_hi == 0 && den_lo == kTicksPerNanosecond) {
+    // Dividing by 1ns
+    if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000000) {
+      *q = num_hi * 1000000000 + num_lo / kTicksPerNanosecond;
+      *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+      return true;
+    }
+  } else if (den_hi == 0 && den_lo == 100 * kTicksPerNanosecond) {
+    // Dividing by 100ns (common when converting to Universal time)
+    if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 10000000) {
+      *q = num_hi * 10000000 + num_lo / (100 * kTicksPerNanosecond);
+      *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+      return true;
+    }
+  } else if (den_hi == 0 && den_lo == 1000 * kTicksPerNanosecond) {
+    // Dividing by 1us
+    if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000) {
+      *q = num_hi * 1000000 + num_lo / (1000 * kTicksPerNanosecond);
+      *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+      return true;
+    }
+  } else if (den_hi == 0 && den_lo == 1000000 * kTicksPerNanosecond) {
+    // Dividing by 1ms
+    if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000) {
+      *q = num_hi * 1000 + num_lo / (1000000 * kTicksPerNanosecond);
+      *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+      return true;
+    }
+  } else if (den_hi > 0 && den_lo == 0) {
+    // Dividing by a positive multiple of 1s
+    if (num_hi >= 0) {
+      if (den_hi == 1) {
+        *q = num_hi;
+        *rem = time_internal::MakeDuration(0, num_lo);
+        return true;
+      }
+      *q = num_hi / den_hi;
+      *rem = time_internal::MakeDuration(num_hi % den_hi, num_lo);
+      return true;
+    }
+    if (num_lo != 0) {
+      num_hi += 1;
+    }
+    int64_t quotient = num_hi / den_hi;
+    int64_t rem_sec = num_hi % den_hi;
+    if (rem_sec > 0) {
+      rem_sec -= den_hi;
+      quotient += 1;
+    }
+    if (num_lo != 0) {
+      rem_sec -= 1;
+    }
+    *q = quotient;
+    *rem = time_internal::MakeDuration(rem_sec, num_lo);
+    return true;
+  }
+
+  return false;
+}
+
+}  // namespace
+
+namespace time_internal {
+
+// The 'satq' argument indicates whether the quotient should saturate at the
+// bounds of int64_t.  If it does saturate, the difference will spill over to
+// the remainder.  If it does not saturate, the remainder remains accurate,
+// but the returned quotient will over/underflow int64_t and should not be used.
+int64_t IDivDuration(bool satq, const Duration num, const Duration den,
+                   Duration* rem) {
+  int64_t q = 0;
+  if (IDivFastPath(num, den, &q, rem)) {
+    return q;
+  }
+
+  const bool num_neg = num < ZeroDuration();
+  const bool den_neg = den < ZeroDuration();
+  const bool quotient_neg = num_neg != den_neg;
+
+  if (time_internal::IsInfiniteDuration(num) || den == ZeroDuration()) {
+    *rem = num_neg ? -InfiniteDuration() : InfiniteDuration();
+    return quotient_neg ? kint64min : kint64max;
+  }
+  if (time_internal::IsInfiniteDuration(den)) {
+    *rem = num;
+    return 0;
+  }
+
+  const uint128 a = MakeU128Ticks(num);
+  const uint128 b = MakeU128Ticks(den);
+  uint128 quotient128 = a / b;
+
+  if (satq) {
+    // Limits the quotient to the range of int64_t.
+    if (quotient128 > uint128(static_cast<uint64_t>(kint64max))) {
+      quotient128 = quotient_neg ? uint128(static_cast<uint64_t>(kint64min))
+                                 : uint128(static_cast<uint64_t>(kint64max));
+    }
+  }
+
+  const uint128 remainder128 = a - quotient128 * b;
+  *rem = MakeDurationFromU128(remainder128, num_neg);
+
+  if (!quotient_neg || quotient128 == 0) {
+    return Uint128Low64(quotient128) & kint64max;
+  }
+  // The quotient needs to be negated, but we need to carefully handle
+  // quotient128s with the top bit on.
+  return -static_cast<int64_t>(Uint128Low64(quotient128 - 1) & kint64max) - 1;
+}
+
+}  // namespace time_internal
+
+//
+// Additive operators.
+//
+
+Duration& Duration::operator+=(Duration rhs) {
+  if (time_internal::IsInfiniteDuration(*this)) return *this;
+  if (time_internal::IsInfiniteDuration(rhs)) return *this = rhs;
+  const int64_t orig_rep_hi = rep_hi_;
+  rep_hi_ =
+      DecodeTwosComp(EncodeTwosComp(rep_hi_) + EncodeTwosComp(rhs.rep_hi_));
+  if (rep_lo_ >= kTicksPerSecond - rhs.rep_lo_) {
+    rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) + 1);
+    rep_lo_ -= kTicksPerSecond;
+  }
+  rep_lo_ += rhs.rep_lo_;
+  if (rhs.rep_hi_ < 0 ? rep_hi_ > orig_rep_hi : rep_hi_ < orig_rep_hi) {
+    return *this = rhs.rep_hi_ < 0 ? -InfiniteDuration() : InfiniteDuration();
+  }
+  return *this;
+}
+
+Duration& Duration::operator-=(Duration rhs) {
+  if (time_internal::IsInfiniteDuration(*this)) return *this;
+  if (time_internal::IsInfiniteDuration(rhs)) {
+    return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+  }
+  const int64_t orig_rep_hi = rep_hi_;
+  rep_hi_ =
+      DecodeTwosComp(EncodeTwosComp(rep_hi_) - EncodeTwosComp(rhs.rep_hi_));
+  if (rep_lo_ < rhs.rep_lo_) {
+    rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) - 1);
+    rep_lo_ += kTicksPerSecond;
+  }
+  rep_lo_ -= rhs.rep_lo_;
+  if (rhs.rep_hi_ < 0 ? rep_hi_ < orig_rep_hi : rep_hi_ > orig_rep_hi) {
+    return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+  }
+  return *this;
+}
+
+//
+// Multiplicative operators.
+//
+
+Duration& Duration::operator*=(int64_t r) {
+  if (time_internal::IsInfiniteDuration(*this)) {
+    const bool is_neg = (r < 0) != (rep_hi_ < 0);
+    return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
+  }
+  return *this = ScaleFixed<SafeMultiply>(*this, r);
+}
+
+Duration& Duration::operator*=(double r) {
+  if (time_internal::IsInfiniteDuration(*this) || !IsFinite(r)) {
+    const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+    return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
+  }
+  return *this = ScaleDouble<std::multiplies>(*this, r);
+}
+
+Duration& Duration::operator/=(int64_t r) {
+  if (time_internal::IsInfiniteDuration(*this) || r == 0) {
+    const bool is_neg = (r < 0) != (rep_hi_ < 0);
+    return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
+  }
+  return *this = ScaleFixed<std::divides>(*this, r);
+}
+
+Duration& Duration::operator/=(double r) {
+  if (time_internal::IsInfiniteDuration(*this) || r == 0.0) {
+    const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+    return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
+  }
+  return *this = ScaleDouble<std::divides>(*this, r);
+}
+
+Duration& Duration::operator%=(Duration rhs) {
+  time_internal::IDivDuration(false, *this, rhs, this);
+  return *this;
+}
+
+double FDivDuration(Duration num, Duration den) {
+  // Arithmetic with infinity is sticky.
+  if (time_internal::IsInfiniteDuration(num) || den == ZeroDuration()) {
+    return (num < ZeroDuration()) == (den < ZeroDuration())
+               ? std::numeric_limits<double>::infinity()
+               : -std::numeric_limits<double>::infinity();
+  }
+  if (time_internal::IsInfiniteDuration(den)) return 0.0;
+
+  double a =
+      static_cast<double>(time_internal::GetRepHi(num)) * kTicksPerSecond +
+      time_internal::GetRepLo(num);
+  double b =
+      static_cast<double>(time_internal::GetRepHi(den)) * kTicksPerSecond +
+      time_internal::GetRepLo(den);
+  return a / b;
+}
+
+//
+// Trunc/Floor/Ceil.
+//
+
+Duration Trunc(Duration d, Duration unit) {
+  return d - (d % unit);
+}
+
+Duration Floor(const Duration d, const Duration unit) {
+  const absl::Duration td = Trunc(d, unit);
+  return td <= d ? td : td - AbsDuration(unit);
+}
+
+Duration Ceil(const Duration d, const Duration unit) {
+  const absl::Duration td = Trunc(d, unit);
+  return td >= d ? td : td + AbsDuration(unit);
+}
+
+//
+// Factory functions.
+//
+
+Duration DurationFromTimespec(timespec ts) {
+  if (static_cast<uint64_t>(ts.tv_nsec) < 1000 * 1000 * 1000) {
+    int64_t ticks = ts.tv_nsec * kTicksPerNanosecond;
+    return time_internal::MakeDuration(ts.tv_sec, ticks);
+  }
+  return Seconds(ts.tv_sec) + Nanoseconds(ts.tv_nsec);
+}
+
+Duration DurationFromTimeval(timeval tv) {
+  if (static_cast<uint64_t>(tv.tv_usec) < 1000 * 1000) {
+    int64_t ticks = tv.tv_usec * 1000 * kTicksPerNanosecond;
+    return time_internal::MakeDuration(tv.tv_sec, ticks);
+  }
+  return Seconds(tv.tv_sec) + Microseconds(tv.tv_usec);
+}
+
+//
+// Conversion to other duration types.
+//
+
+int64_t ToInt64Nanoseconds(Duration d) {
+  if (time_internal::GetRepHi(d) >= 0 &&
+      time_internal::GetRepHi(d) >> 33 == 0) {
+    return (time_internal::GetRepHi(d) * 1000 * 1000 * 1000) +
+           (time_internal::GetRepLo(d) / kTicksPerNanosecond);
+  }
+  return d / Nanoseconds(1);
+}
+int64_t ToInt64Microseconds(Duration d) {
+  if (time_internal::GetRepHi(d) >= 0 &&
+      time_internal::GetRepHi(d) >> 43 == 0) {
+    return (time_internal::GetRepHi(d) * 1000 * 1000) +
+           (time_internal::GetRepLo(d) / (kTicksPerNanosecond * 1000));
+  }
+  return d / Microseconds(1);
+}
+int64_t ToInt64Milliseconds(Duration d) {
+  if (time_internal::GetRepHi(d) >= 0 &&
+      time_internal::GetRepHi(d) >> 53 == 0) {
+    return (time_internal::GetRepHi(d) * 1000) +
+           (time_internal::GetRepLo(d) / (kTicksPerNanosecond * 1000 * 1000));
+  }
+  return d / Milliseconds(1);
+}
+int64_t ToInt64Seconds(Duration d) {
+  int64_t hi = time_internal::GetRepHi(d);
+  if (time_internal::IsInfiniteDuration(d)) return hi;
+  if (hi < 0 && time_internal::GetRepLo(d) != 0) ++hi;
+  return hi;
+}
+int64_t ToInt64Minutes(Duration d) {
+  int64_t hi = time_internal::GetRepHi(d);
+  if (time_internal::IsInfiniteDuration(d)) return hi;
+  if (hi < 0 && time_internal::GetRepLo(d) != 0) ++hi;
+  return hi / 60;
+}
+int64_t ToInt64Hours(Duration d) {
+  int64_t hi = time_internal::GetRepHi(d);
+  if (time_internal::IsInfiniteDuration(d)) return hi;
+  if (hi < 0 && time_internal::GetRepLo(d) != 0) ++hi;
+  return hi / (60 * 60);
+}
+
+double ToDoubleNanoseconds(Duration d) {
+  return FDivDuration(d, Nanoseconds(1));
+}
+double ToDoubleMicroseconds(Duration d) {
+  return FDivDuration(d, Microseconds(1));
+}
+double ToDoubleMilliseconds(Duration d) {
+  return FDivDuration(d, Milliseconds(1));
+}
+double ToDoubleSeconds(Duration d) {
+  return FDivDuration(d, Seconds(1));
+}
+double ToDoubleMinutes(Duration d) {
+  return FDivDuration(d, Minutes(1));
+}
+double ToDoubleHours(Duration d) {
+  return FDivDuration(d, Hours(1));
+}
+
+timespec ToTimespec(Duration d) {
+  timespec ts;
+  if (!time_internal::IsInfiniteDuration(d)) {
+    int64_t rep_hi = time_internal::GetRepHi(d);
+    uint32_t rep_lo = time_internal::GetRepLo(d);
+    if (rep_hi < 0) {
+      // Tweak the fields so that unsigned division of rep_lo
+      // maps to truncation (towards zero) for the timespec.
+      rep_lo += kTicksPerNanosecond - 1;
+      if (rep_lo >= kTicksPerSecond) {
+        rep_hi += 1;
+        rep_lo -= kTicksPerSecond;
+      }
+    }
+    ts.tv_sec = rep_hi;
+    if (ts.tv_sec == rep_hi) {  // no time_t narrowing
+      ts.tv_nsec = rep_lo / kTicksPerNanosecond;
+      return ts;
+    }
+  }
+  if (d >= ZeroDuration()) {
+    ts.tv_sec = std::numeric_limits<time_t>::max();
+    ts.tv_nsec = 1000 * 1000 * 1000 - 1;
+  } else {
+    ts.tv_sec = std::numeric_limits<time_t>::min();
+    ts.tv_nsec = 0;
+  }
+  return ts;
+}
+
+timeval ToTimeval(Duration d) {
+  timeval tv;
+  timespec ts = ToTimespec(d);
+  if (ts.tv_sec < 0) {
+    // Tweak the fields so that positive division of tv_nsec
+    // maps to truncation (towards zero) for the timeval.
+    ts.tv_nsec += 1000 - 1;
+    if (ts.tv_nsec >= 1000 * 1000 * 1000) {
+      ts.tv_sec += 1;
+      ts.tv_nsec -= 1000 * 1000 * 1000;
+    }
+  }
+  tv.tv_sec = ts.tv_sec;
+  if (tv.tv_sec != ts.tv_sec) {  // narrowing
+    if (ts.tv_sec < 0) {
+      tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::min();
+      tv.tv_usec = 0;
+    } else {
+      tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::max();
+      tv.tv_usec = 1000 * 1000 - 1;
+    }
+    return tv;
+  }
+  tv.tv_usec = static_cast<int>(ts.tv_nsec / 1000);  // suseconds_t
+  return tv;
+}
+
+std::chrono::nanoseconds ToChronoNanoseconds(Duration d) {
+  return time_internal::ToChronoDuration<std::chrono::nanoseconds>(d);
+}
+std::chrono::microseconds ToChronoMicroseconds(Duration d) {
+  return time_internal::ToChronoDuration<std::chrono::microseconds>(d);
+}
+std::chrono::milliseconds ToChronoMilliseconds(Duration d) {
+  return time_internal::ToChronoDuration<std::chrono::milliseconds>(d);
+}
+std::chrono::seconds ToChronoSeconds(Duration d) {
+  return time_internal::ToChronoDuration<std::chrono::seconds>(d);
+}
+std::chrono::minutes ToChronoMinutes(Duration d) {
+  return time_internal::ToChronoDuration<std::chrono::minutes>(d);
+}
+std::chrono::hours ToChronoHours(Duration d) {
+  return time_internal::ToChronoDuration<std::chrono::hours>(d);
+}
+
+//
+// To/From std::string formatting.
+//
+
+namespace {
+
+// Formats a positive 64-bit integer in the given field width.  Note that
+// it is up to the caller of Format64() to ensure that there is sufficient
+// space before ep to hold the conversion.
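+// For example, Format64(ep, 3, 7) writes "007" immediately before ep and
+// returns a pointer to the leading '0'.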
+char* Format64(char* ep, int width, int64_t v) {
+  do {
+    --width;
+    *--ep = '0' + (v % 10);  // contiguous digits
+  } while (v /= 10);
+  while (--width >= 0) *--ep = '0';  // zero pad
+  return ep;
+}
+
+// Helpers for FormatDuration() that format 'n' and append it to 'out'
+// followed by the given 'unit'.  If 'n' formats to "0", nothing is
+// appended (not even the unit).
+
+// A type that encapsulates how to display a value of a particular unit. For
+// values that are displayed with fractional parts, the precision indicates
+// where to round the value. The precision varies with the display unit because
+// a Duration can hold only quarters of a nanosecond, so displaying information
+// beyond that is just noise.
+//
+// For example, a microsecond value of 42.00025xxxxx should not display beyond 5
+// fractional digits, because it is in the noise of what a Duration can
+// represent.
+struct DisplayUnit {
+  const char* abbr;
+  int prec;
+  double pow10;
+};
+const DisplayUnit kDisplayNano = {"ns", 2, 1e2};
+const DisplayUnit kDisplayMicro = {"us", 5, 1e5};
+const DisplayUnit kDisplayMilli = {"ms", 8, 1e8};
+const DisplayUnit kDisplaySec = {"s", 11, 1e11};
+const DisplayUnit kDisplayMin = {"m", -1, 0.0};   // prec ignored
+const DisplayUnit kDisplayHour = {"h", -1, 0.0};  // prec ignored
+
+void AppendNumberUnit(std::string* out, int64_t n, DisplayUnit unit) {
+  char buf[sizeof("2562047788015216")];  // hours in max duration
+  char* const ep = buf + sizeof(buf);
+  char* bp = Format64(ep, 0, n);
+  if (*bp != '0' || bp + 1 != ep) {
+    out->append(bp, ep - bp);
+    out->append(unit.abbr);
+  }
+}
+
+// Note: unit.prec is limited to double's digits10 value (typically 15) so it
+// always fits in buf[].
+void AppendNumberUnit(std::string* out, double n, DisplayUnit unit) {
+  const int buf_size = std::numeric_limits<double>::digits10;
+  const int prec = std::min(buf_size, unit.prec);
+  char buf[buf_size];  // also large enough to hold integer part
+  char* ep = buf + sizeof(buf);
+  double d = 0;
+  int64_t frac_part = Round(std::modf(n, &d) * unit.pow10);
+  int64_t int_part = d;
+  if (int_part != 0 || frac_part != 0) {
+    char* bp = Format64(ep, 0, int_part);  // always < 1000
+    out->append(bp, ep - bp);
+    if (frac_part != 0) {
+      out->push_back('.');
+      bp = Format64(ep, prec, frac_part);
+      while (ep[-1] == '0') --ep;
+      out->append(bp, ep - bp);
+    }
+    out->append(unit.abbr);
+  }
+}
+
+}  // namespace
+
+// From Go's doc at http://golang.org/pkg/time/#Duration.String
+//   [FormatDuration] returns a std::string representing the duration in the
+//   form "72h3m0.5s".  Leading zero units are omitted.  As a special
+//   case, durations less than one second format use a smaller unit
+//   (milli-, micro-, or nanoseconds) to ensure that the leading digit
+//   is non-zero.  The zero duration formats as 0, with no unit.
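+//
+// For example, FormatDuration(absl::Milliseconds(1500)) returns "1.5s",
+// FormatDuration(-absl::Nanoseconds(42)) returns "-42ns", and
+// FormatDuration(absl::ZeroDuration()) returns "0".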
+std::string FormatDuration(Duration d) {
+  const Duration min_duration = Seconds(kint64min);
+  if (d == min_duration) {
+    // Avoid needing to negate kint64min by directly returning what the
+    // following code should produce in that case.
+    return "-2562047788015215h30m8s";
+  }
+  std::string s;
+  if (d < ZeroDuration()) {
+    s.append("-");
+    d = -d;
+  }
+  if (d == InfiniteDuration()) {
+    s.append("inf");
+  } else if (d < Seconds(1)) {
+    // Special case for durations with a magnitude < 1 second.  The duration
+    // is printed as a fraction of a single unit, e.g., "1.2ms".
+    if (d < Microseconds(1)) {
+      AppendNumberUnit(&s, FDivDuration(d, Nanoseconds(1)), kDisplayNano);
+    } else if (d < Milliseconds(1)) {
+      AppendNumberUnit(&s, FDivDuration(d, Microseconds(1)), kDisplayMicro);
+    } else {
+      AppendNumberUnit(&s, FDivDuration(d, Milliseconds(1)), kDisplayMilli);
+    }
+  } else {
+    AppendNumberUnit(&s, IDivDuration(d, Hours(1), &d), kDisplayHour);
+    AppendNumberUnit(&s, IDivDuration(d, Minutes(1), &d), kDisplayMin);
+    AppendNumberUnit(&s, FDivDuration(d, Seconds(1)), kDisplaySec);
+  }
+  if (s.empty() || s == "-") {
+    s = "0";
+  }
+  return s;
+}
+
+namespace {
+
+// A helper for ParseDuration() that parses a leading number from the given
+// std::string and stores the result in *int_part/*frac_part/*frac_scale.  The
+// given std::string pointer is modified to point to the first unconsumed char.
+bool ConsumeDurationNumber(const char** dpp, int64_t* int_part,
+                           int64_t* frac_part, int64_t* frac_scale) {
+  *int_part = 0;
+  *frac_part = 0;
+  *frac_scale = 1;  // invariant: *frac_part < *frac_scale
+  const char* start = *dpp;
+  for (; std::isdigit(**dpp); *dpp += 1) {
+    const int d = **dpp - '0';  // contiguous digits
+    if (*int_part > kint64max / 10) return false;
+    *int_part *= 10;
+    if (*int_part > kint64max - d) return false;
+    *int_part += d;
+  }
+  const bool int_part_empty = (*dpp == start);
+  if (**dpp != '.') return !int_part_empty;
+  for (*dpp += 1; std::isdigit(**dpp); *dpp += 1) {
+    const int d = **dpp - '0';  // contiguous digits
+    if (*frac_scale <= kint64max / 10) {
+      *frac_part *= 10;
+      *frac_part += d;
+      *frac_scale *= 10;
+    }
+  }
+  return !int_part_empty || *frac_scale != 1;
+}
+
+// A helper for ParseDuration() that parses a leading unit designator (e.g.,
+// ns, us, ms, s, m, h) from the given std::string and stores the resulting unit
+// in "*unit".  The given std::string pointer is modified to point to the first
+// unconsumed char.
+bool ConsumeDurationUnit(const char** start, Duration* unit) {
+  const char *s = *start;
+  bool ok = true;
+  if (strncmp(s, "ns", 2) == 0) {
+    s += 2;
+    *unit = Nanoseconds(1);
+  } else if (strncmp(s, "us", 2) == 0) {
+    s += 2;
+    *unit = Microseconds(1);
+  } else if (strncmp(s, "ms", 2) == 0) {
+    s += 2;
+    *unit = Milliseconds(1);
+  } else if (strncmp(s, "s", 1) == 0) {
+    s += 1;
+    *unit = Seconds(1);
+  } else if (strncmp(s, "m", 1) == 0) {
+    s += 1;
+    *unit = Minutes(1);
+  } else if (strncmp(s, "h", 1) == 0) {
+    s += 1;
+    *unit = Hours(1);
+  } else {
+    ok = false;
+  }
+  *start = s;
+  return ok;
+}
+
+}  // namespace
+
+// From Go's doc at http://golang.org/pkg/time/#ParseDuration
+//   [ParseDuration] parses a duration std::string.  A duration std::string is
+//   a possibly signed sequence of decimal numbers, each with optional
+//   fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m".
+//   Valid time units are "ns", "us", "ms", "s", "m", "h".
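+//
+// For example, ParseDuration("2h45m", &d) sets d to
+// absl::Hours(2) + absl::Minutes(45), and ParseDuration("1.5ms", &d) sets d
+// to absl::Microseconds(1500); both return true.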
+bool ParseDuration(const std::string& dur_string, Duration* d) {
+  const char* start = dur_string.c_str();
+  int sign = 1;
+
+  if (*start == '-' || *start == '+') {
+    sign = *start == '-' ? -1 : 1;
+    ++start;
+  }
+
+  // Can't parse a duration from an empty std::string.
+  if (*start == '\0') {
+    return false;
+  }
+
+  // Special case for a std::string of "0".
+  if (*start == '0' && *(start + 1) == '\0') {
+    *d = ZeroDuration();
+    return true;
+  }
+
+  if (strcmp(start, "inf") == 0) {
+    *d = sign * InfiniteDuration();
+    return true;
+  }
+
+  Duration dur;
+  while (*start != '\0') {
+    int64_t int_part;
+    int64_t frac_part;
+    int64_t frac_scale;
+    Duration unit;
+    if (!ConsumeDurationNumber(&start, &int_part, &frac_part, &frac_scale) ||
+        !ConsumeDurationUnit(&start, &unit)) {
+      return false;
+    }
+    if (int_part != 0) dur += sign * int_part * unit;
+    if (frac_part != 0) dur += sign * frac_part * unit / frac_scale;
+  }
+  *d = dur;
+  return true;
+}
+
+// TODO(absl-team): Remove once dependencies are removed.
+bool ParseFlag(const std::string& text, Duration* dst, std::string* /* err */) {
+  return ParseDuration(text, dst);
+}
+
+std::string UnparseFlag(Duration d) {
+  return FormatDuration(d);
+}
+
+}  // namespace absl